/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   spdk_blob_op_complete cb_fn, void *cb_arg);

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = 0;

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* Extent page shall never occupy md_page so start the search from 1 */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob %" PRIu64 "\n", *cluster, blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}
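
/*
 * Note: cluster allocation and md page claiming above operate on shared
 * blobstore bitmaps (bs->used_clusters, bs->used_md_pages). Callers of
 * bs_allocate_cluster() are expected to serialize against other allocators;
 * the resize path in this file, for example, takes bs->used_clusters_mutex
 * around its allocation loop.
 */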

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}
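
/*
 * The FIELD_OK()/SET_FIELD() pattern above keeps the *_opts_init() functions
 * safe across ABI versions: a caller compiled against an older, smaller
 * options struct passes its sizeof() as opts_size, and only fields that fit
 * inside that size are touched. New options must therefore only ever be
 * appended to the end of these structures.
 */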

static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set	*set;
	struct spdk_bs_user_op_args	*args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}
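
/*
 * Freeze/unfreeze is reference counted: only the first freeze iterates all
 * I/O channels (blob_io_sync) so in-flight I/O can drain, and only the last
 * unfreeze replays the per-channel queued_io list (blob_execute_queued_io).
 * Intermediate calls just adjust frozen_refcnt and complete immediately.
 */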

static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}

static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
			unsigned int					i, j;
			unsigned int					cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* Extent Table is already present in the md;
				 * both descriptors should never be present at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD;
				 * both should never be present at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match number
				 * from previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
			unsigned int					i;
			unsigned int					cluster_count = 0;
			size_t						cluster_idx_length;

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD;
				 * both should never be present at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should match
			 * the current size of the blob.
			 * If changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}
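
/*
 * A blob's cluster map is stored on disk in exactly one of two forms:
 * a chain of EXTENT_RLE descriptors inline in the metadata pages, or an
 * EXTENT_TABLE descriptor pointing at separate EXTENT_PAGE md pages.
 * blob_parse_page() above rejects metadata that mixes the two.
 */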

static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%" PRIu64 ") doesn't match what's in metadata (%" PRIu64 ")\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}
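
/*
 * On disk, an xattr descriptor is laid out as:
 *   type | length | name_length | value_length | name bytes | value bytes
 * with the name stored unterminated; blob_deserialize_xattr() above adds
 * the NUL terminator when loading it back into memory.
 */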

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}

static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs with num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}
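
/*
 * Extent RLE serialization below collapses runs of contiguous clusters.
 * For example, a cluster LBA array of { A, A+c, A+2c, 0, 0, B } (where c is
 * the number of LBAs per cluster) becomes three extents:
 * { cluster_idx = A/c, length = 3 }, { 0, 2 } and { B/c, 1 }.
 */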

static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr	*xattr;
	int	rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}
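
/*
 * blob_serialize() below emits descriptors in a fixed order: flags first,
 * then user xattrs, then internal xattrs, and finally the cluster map as
 * either an extent table or extent RLE descriptors, growing the page chain
 * on demand via blob_serialize_add_page().
 */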

static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page	*cur_page;
	int				rc;
	uint8_t				*buf;
	size_t				remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot open failed\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

static void
blob_load_backing_dev(void *cb_arg)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	const void			*value;
	size_t				len;
	int				rc;

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}
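
/*
 * Each metadata page protects itself with a crc32c stored in its last four
 * bytes (see blob_md_page_calc_crc(), which covers all but those four bytes);
 * extent pages read below are validated the same way before being parsed.
 */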

static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	uint64_t			i;
	uint32_t			crc;
	uint64_t			lba;
	void				*tmp;
	uint64_t			sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(ctx);
}
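
/*
 * blob_load_cpl() below walks the metadata page chain: each completed read
 * either triggers a read of page->next or, once the chain ends, parses all
 * pages and continues with extent pages or the backing dev setup.
 */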

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	int				rc;
	uint32_t			crc;
	uint32_t			current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid %" PRIu64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid %" PRIu64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support
		 * for the extent table. No extent_* descriptors means that the blob
		 * has a length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_bs_super_block	*super;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};
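
/*
 * Persist pipeline overview: the new metadata pages are written first
 * (blob_persist_write_page_chain, then the root page), after which md pages,
 * clusters and extent pages that belonged only to the old, larger state are
 * zeroed/cleared and released, and finally every queued persist is completed
 * (blob_persist_complete).
 */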

static void
bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (ctx->blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

static void blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx	*next_persist, *tmp;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob_persist_check_dirty(next_persist);
}

static void
blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* Release all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to release if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			bs_release_md_page(bs, blob->active.extent_pages[i]);
		}
	}

	if (blob->active.num_extent_pages == 0) {
		free(blob->active.extent_pages);
		blob->active.extent_pages = NULL;
		blob->active.extent_pages_array_size = 0;
	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
	}

	blob_persist_complete(seq, ctx, bserrno);
}

static void
blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;
	uint64_t			lba;
	uint64_t			lba_count;
	spdk_bs_batch_t			*batch;

	batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx);
	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* Clear all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to clear if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]);
			bs_batch_write_zeroes_dev(batch, lba, lba_count);
		}
	}

	bs_batch_close(batch);
}

static void
blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	pthread_mutex_lock(&bs->used_clusters_mutex);
	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			bs_release_cluster(bs, cluster_num);
		}
	}
	pthread_mutex_unlock(&bs->used_clusters_mutex);

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;
#endif
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	/* Move on to clearing extent pages */
	blob_persist_clear_extents(seq, ctx);
}

static void
blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	spdk_bs_batch_t			*batch;
	size_t				i;
	uint64_t			lba;
	uint64_t			lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx);

	/* Clear all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint64_t next_lba_count = bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		} else if (next_lba == 0) {
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, clear them now */
		if (lba_count > 0) {
			bs_batch_clear_dev(ctx, batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, clear them now */
	if (lba_count > 0) {
		bs_batch_clear_dev(ctx, batch, lba, lba_count);
	}

	bs_batch_close(batch);
}

static void
blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		bs_release_md_page(bs, blob->clean.pages[i]);
	}

	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		page_num = bs_blobid_to_page(blob->id);
		bs_release_md_page(bs, page_num);
	}

	/* Move on to clearing clusters */
	blob_persist_clear_clusters(seq, ctx);
}
*/
1912 if (blob->active.num_pages == 0) {
1913 uint32_t page_num;
1914
1915 /* The first page in the metadata goes where the blobid indicates */
1916 page_num = bs_blobid_to_page(blob->id);
1917 lba = bs_md_page_to_lba(bs, page_num);
1918
1919 bs_batch_write_zeroes_dev(batch, lba, lba_count);
1920 }
1921
1922 bs_batch_close(batch);
1923 }
1924
1925 static void
1926 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1927 {
1928 struct spdk_blob_persist_ctx *ctx = cb_arg;
1929 struct spdk_blob *blob = ctx->blob;
1930 struct spdk_blob_store *bs = blob->bs;
1931 uint64_t lba;
1932 uint32_t lba_count;
1933 struct spdk_blob_md_page *page;
1934
1935 if (bserrno != 0) {
1936 blob_persist_complete(seq, ctx, bserrno);
1937 return;
1938 }
1939
1940 if (blob->active.num_pages == 0) {
1941 /* Move on to the next step */
1942 blob_persist_zero_pages(seq, ctx, 0);
1943 return;
1944 }
1945
1946 lba_count = bs_byte_to_lba(bs, sizeof(*page));
1947
1948 page = &ctx->pages[0];
1949 /* The first page in the metadata goes where the blobid indicates */
1950 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id));
1951
1952 bs_sequence_write_dev(seq, page, lba, lba_count,
1953 blob_persist_zero_pages, ctx);
1954 }
1955
1956 static void
1957 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
1958 {
1959 struct spdk_blob *blob = ctx->blob;
1960 struct spdk_blob_store *bs = blob->bs;
1961 uint64_t lba;
1962 uint32_t lba_count;
1963 struct spdk_blob_md_page *page;
1964 spdk_bs_batch_t *batch;
1965 size_t i;
1966
1967 /* Clusters don't move around in blobs. The list shrinks or grows
1968 * at the end, but no changes ever occur in the middle of the list.
1969 */
1970
1971 lba_count = bs_byte_to_lba(bs, sizeof(*page));
1972
1973 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx);
1974
1975 /* This starts at 1. The root page is not written until
1976 * all of the others are finished
1977 */
1978 for (i = 1; i < blob->active.num_pages; i++) {
1979 page = &ctx->pages[i];
1980 assert(page->sequence_num == i);
1981
1982 lba = bs_md_page_to_lba(bs, blob->active.pages[i]);
1983
1984 bs_batch_write_dev(batch, page, lba, lba_count);
1985 }
1986
1987 bs_batch_close(batch);
1988 }
1989
1990 static int
1991 blob_resize(struct spdk_blob *blob, uint64_t sz)
1992 {
1993 uint64_t i;
1994 uint64_t *tmp;
1995 uint64_t cluster;
1996 uint32_t lfmd; /* lowest free md page */
1997 uint64_t num_clusters;
1998 uint32_t *ep_tmp;
1999 uint64_t new_num_ep = 0, current_num_ep = 0;
2000 struct spdk_blob_store *bs;
2001
2002 bs = blob->bs;
2003
2004 blob_verify_md_op(blob);
2005
2006 if (blob->active.num_clusters == sz) {
2007 return 0;
2008 }
2009
2010 if (blob->active.num_clusters < blob->active.cluster_array_size) {
2011 /* If this blob was resized to be larger, then smaller, then
2012 * larger without syncing, then the cluster array already
2013 * contains spare assigned clusters we can use.
2014 */
2015 num_clusters = spdk_min(blob->active.cluster_array_size,
2016 sz);
2017 } else {
2018 num_clusters = blob->active.num_clusters;
2019 }
2020
2021 if (blob->use_extent_table) {
2022 /* Round up, since every cluster beyond the current Extent Table size
2023 * requires a new extent page. */
2024 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
2025 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
2026 }
2027
2028 /* Check first that we have enough clusters and md pages before we start claiming them.
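*
* Note: this dry-run is deliberately separate from the claiming loops
* further down, so that -ENOSPC cannot leave a half-resized blob:
* nothing is claimed until both the cluster count and the extent-page
* count are known to fit. A worked example of the extent-page
* arithmetic (made-up numbers; suppose SPDK_EXTENTS_PER_EP were 512):
*
*     sz           = 1000 clusters -> new_num_ep     = ceil(1000/512) = 2
*     num_clusters =  400 clusters -> current_num_ep = ceil(400/512)  = 1
*
* so this resize needs exactly one additional extent page, and
* therefore one free md page to hold it.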
*/
2029 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) {
2030 if ((sz - num_clusters) > bs->num_free_clusters) {
2031 return -ENOSPC;
2032 }
2033 lfmd = 0;
2034 for (i = current_num_ep; i < new_num_ep; i++) {
2035 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
2036 if (lfmd == UINT32_MAX) {
2037 /* No more free md pages. Cannot satisfy the request */
2038 return -ENOSPC;
2039 }
2040 }
2041 }
2042
2043 if (sz > num_clusters) {
2044 /* Expand the cluster array if necessary.
2045 * We only shrink the array when persisting.
2046 */
2047 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz);
2048 if (sz > 0 && tmp == NULL) {
2049 return -ENOMEM;
2050 }
2051 memset(tmp + blob->active.cluster_array_size, 0,
2052 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size));
2053 blob->active.clusters = tmp;
2054 blob->active.cluster_array_size = sz;
2055
2056 /* Expand the extent table only if enough clusters were added */
2057 if (new_num_ep > current_num_ep && blob->use_extent_table) {
2058 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep);
2059 if (new_num_ep > 0 && ep_tmp == NULL) {
2060 return -ENOMEM;
2061 }
2062 memset(ep_tmp + blob->active.extent_pages_array_size, 0,
2063 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size));
2064 blob->active.extent_pages = ep_tmp;
2065 blob->active.extent_pages_array_size = new_num_ep;
2066 }
2067 }
2068
2069 blob->state = SPDK_BLOB_STATE_DIRTY;
2070
2071 if (spdk_blob_is_thin_provisioned(blob) == false) {
2072 cluster = 0;
2073 lfmd = 0;
2074 pthread_mutex_lock(&blob->bs->used_clusters_mutex);
2075 for (i = num_clusters; i < sz; i++) {
2076 bs_allocate_cluster(blob, i, &cluster, &lfmd, true);
2077 lfmd++;
2078 }
2079 pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
2080 }
2081
2082 blob->active.num_clusters = sz;
2083 blob->active.num_extent_pages = new_num_ep;
2084
2085 return 0;
2086 }
2087
2088 static void
2089 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx)
2090 {
2091 spdk_bs_sequence_t *seq = ctx->seq;
2092 struct spdk_blob *blob = ctx->blob;
2093 struct spdk_blob_store *bs = blob->bs;
2094 uint64_t i;
2095 uint32_t page_num;
2096 void *tmp;
2097 int rc;
2098
2099 /* Generate the new metadata */
2100 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
2101 if (rc < 0) {
2102 blob_persist_complete(seq, ctx, rc);
2103 return;
2104 }
2105
2106 assert(blob->active.num_pages >= 1);
2107
2108 /* Resize the cache of page indices */
2109 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
2110 if (!tmp) {
2111 blob_persist_complete(seq, ctx, -ENOMEM);
2112 return;
2113 }
2114 blob->active.pages = tmp;
2115
2116 /* Assign this metadata to pages. This requires two passes -
2117 * one to verify that there are enough pages and a second
2118 * to actually claim them. */
2119 page_num = 0;
2120 /* Note that this loop starts at one. The first page location is fixed by the blobid.
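*
* The assignment is done in two passes on purpose. The pass just below
* only walks the free bits to confirm that enough md pages exist; the
* second pass claims them and links the chain. Both passes run
* back-to-back on the md thread, so pages seen free by the first pass
* cannot be taken before the second pass claims them. The resulting
* on-disk chain looks like (made-up page numbers):
*
*     pages[0] at bs_blobid_to_page(id)     root, location fixed by the id
*     pages[0].next = 17 -> pages[17].next = 42 -> ...
*
* and each page's crc is computed only once its next field is final.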
*/ 2121 for (i = 1; i < blob->active.num_pages; i++) { 2122 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2123 if (page_num == UINT32_MAX) { 2124 blob_persist_complete(seq, ctx, -ENOMEM); 2125 return; 2126 } 2127 page_num++; 2128 } 2129 2130 page_num = 0; 2131 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2132 for (i = 1; i < blob->active.num_pages; i++) { 2133 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2134 ctx->pages[i - 1].next = page_num; 2135 /* Now that previous metadata page is complete, calculate the crc for it. */ 2136 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2137 blob->active.pages[i] = page_num; 2138 bs_claim_md_page(bs, page_num); 2139 SPDK_DEBUGLOG(blob, "Claiming page %u for blob %" PRIu64 "\n", page_num, blob->id); 2140 page_num++; 2141 } 2142 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2143 /* Start writing the metadata from last page to first */ 2144 blob->state = SPDK_BLOB_STATE_CLEAN; 2145 blob_persist_write_page_chain(seq, ctx); 2146 } 2147 2148 static void 2149 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2150 { 2151 struct spdk_blob_persist_ctx *ctx = cb_arg; 2152 struct spdk_blob *blob = ctx->blob; 2153 size_t i; 2154 uint32_t extent_page_id; 2155 uint32_t page_count = 0; 2156 int rc; 2157 2158 if (ctx->extent_page != NULL) { 2159 spdk_free(ctx->extent_page); 2160 ctx->extent_page = NULL; 2161 } 2162 2163 if (bserrno != 0) { 2164 blob_persist_complete(seq, ctx, bserrno); 2165 return; 2166 } 2167 2168 /* Only write out Extent Pages when blob was resized. */ 2169 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2170 extent_page_id = blob->active.extent_pages[i]; 2171 if (extent_page_id == 0) { 2172 /* No Extent Page to persist */ 2173 assert(spdk_blob_is_thin_provisioned(blob)); 2174 continue; 2175 } 2176 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2177 ctx->next_extent_page = i + 1; 2178 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2179 if (rc < 0) { 2180 blob_persist_complete(seq, ctx, rc); 2181 return; 2182 } 2183 2184 blob->state = SPDK_BLOB_STATE_DIRTY; 2185 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2186 2187 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2188 2189 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2190 bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2191 blob_persist_write_extent_pages, ctx); 2192 return; 2193 } 2194 2195 blob_persist_generate_new_md(ctx); 2196 } 2197 2198 static void 2199 blob_persist_start(struct spdk_blob_persist_ctx *ctx) 2200 { 2201 spdk_bs_sequence_t *seq = ctx->seq; 2202 struct spdk_blob *blob = ctx->blob; 2203 2204 if (blob->active.num_pages == 0) { 2205 /* This is the signal that the blob should be deleted. 2206 * Immediately jump to the clean up routine. 
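*
* For orientation: the full persist pipeline that begins in this
* function runs in this order, each step being the completion callback
* of the one before it:
*
*     blob_persist_start()
*       -> blob_persist_write_extent_pages()  only when the blob was resized
*       -> blob_persist_generate_new_md()     serialize md, claim new pages
*       -> blob_persist_write_page_chain()    pages [1..N-1], root excluded
*       -> blob_persist_write_page_root()     page 0, written last of all
*       -> blob_persist_zero_pages()          zero the stale md pages
*       -> blob_persist_clear_clusters()      clear truncated clusters
*       -> blob_persist_clear_extents()       zero truncated extent pages
*       -> blob_persist_complete()
*
* Writing the root page last makes it act as the commit point; the
* delete path below skips directly to blob_persist_zero_pages().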
*/ 2207 assert(blob->clean.num_pages > 0); 2208 blob->state = SPDK_BLOB_STATE_CLEAN; 2209 blob_persist_zero_pages(seq, ctx, 0); 2210 return; 2211 2212 } 2213 2214 if (blob->clean.num_clusters < blob->active.num_clusters) { 2215 /* Blob was resized up */ 2216 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2217 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2218 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2219 /* Blob was resized down */ 2220 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2221 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2222 } else { 2223 /* No change in size occurred */ 2224 blob_persist_generate_new_md(ctx); 2225 return; 2226 } 2227 2228 blob_persist_write_extent_pages(seq, ctx, 0); 2229 } 2230 2231 static void 2232 blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2233 { 2234 struct spdk_blob_persist_ctx *ctx = cb_arg; 2235 2236 spdk_free(ctx->super); 2237 2238 if (bserrno != 0) { 2239 blob_persist_complete(seq, ctx, bserrno); 2240 return; 2241 } 2242 2243 ctx->blob->bs->clean = 0; 2244 2245 blob_persist_start(ctx); 2246 } 2247 2248 static void 2249 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2250 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2251 2252 2253 static void 2254 blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2255 { 2256 struct spdk_blob_persist_ctx *ctx = cb_arg; 2257 2258 if (bserrno != 0) { 2259 spdk_free(ctx->super); 2260 blob_persist_complete(seq, ctx, bserrno); 2261 return; 2262 } 2263 2264 ctx->super->clean = 0; 2265 if (ctx->super->size == 0) { 2266 ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen; 2267 } 2268 2269 bs_write_super(seq, ctx->blob->bs, ctx->super, blob_persist_dirty_cpl, ctx); 2270 } 2271 2272 static void 2273 blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx) 2274 { 2275 if (ctx->blob->bs->clean) { 2276 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2277 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2278 if (!ctx->super) { 2279 blob_persist_complete(ctx->seq, ctx, -ENOMEM); 2280 return; 2281 } 2282 2283 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->blob->bs, 0), 2284 bs_byte_to_lba(ctx->blob->bs, sizeof(*ctx->super)), 2285 blob_persist_dirty, ctx); 2286 } else { 2287 blob_persist_start(ctx); 2288 } 2289 } 2290 2291 /* Write a blob to disk */ 2292 static void 2293 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2294 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2295 { 2296 struct spdk_blob_persist_ctx *ctx; 2297 2298 blob_verify_md_op(blob); 2299 2300 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2301 cb_fn(seq, cb_arg, 0); 2302 return; 2303 } 2304 2305 ctx = calloc(1, sizeof(*ctx)); 2306 if (!ctx) { 2307 cb_fn(seq, cb_arg, -ENOMEM); 2308 return; 2309 } 2310 ctx->blob = blob; 2311 ctx->seq = seq; 2312 ctx->cb_fn = cb_fn; 2313 ctx->cb_arg = cb_arg; 2314 2315 /* Multiple blob persists can affect one another, via blob->state or 2316 * blob mutable data changes. To prevent it, queue up the persists. 
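*
* persists_to_complete holds the persist that is currently in flight,
* while pending_persists collects requests arriving while it runs. A
* new request therefore either starts immediately (the in-flight queue
* is empty) or parks itself and relies on the completion path
* (blob_persist_complete(), defined elsewhere in this file) to promote
* the pending queue once the current persist finishes:
*
*     persist A arrives     -> persists_to_complete = { A }, A starts
*     persists B, C arrive  -> pending_persists = { B, C }
*     A completes           -> pending queue is promoted and started next
*
* so at most one persist sequence mutates this blob's metadata state at
* any time on the md thread.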
*/ 2317 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2318 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2319 return; 2320 } 2321 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2322 2323 blob_persist_check_dirty(ctx); 2324 } 2325 2326 struct spdk_blob_copy_cluster_ctx { 2327 struct spdk_blob *blob; 2328 uint8_t *buf; 2329 uint64_t page; 2330 uint64_t new_cluster; 2331 uint32_t new_extent_page; 2332 spdk_bs_sequence_t *seq; 2333 }; 2334 2335 static void 2336 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2337 { 2338 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2339 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2340 TAILQ_HEAD(, spdk_bs_request_set) requests; 2341 spdk_bs_user_op_t *op; 2342 2343 TAILQ_INIT(&requests); 2344 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2345 2346 while (!TAILQ_EMPTY(&requests)) { 2347 op = TAILQ_FIRST(&requests); 2348 TAILQ_REMOVE(&requests, op, link); 2349 if (bserrno == 0) { 2350 bs_user_op_execute(op); 2351 } else { 2352 bs_user_op_abort(op, bserrno); 2353 } 2354 } 2355 2356 spdk_free(ctx->buf); 2357 free(ctx); 2358 } 2359 2360 static void 2361 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2362 { 2363 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2364 2365 if (bserrno) { 2366 if (bserrno == -EEXIST) { 2367 /* The metadata insert failed because another thread 2368 * allocated the cluster first. Free our cluster 2369 * but continue without error. */ 2370 bserrno = 0; 2371 } 2372 pthread_mutex_lock(&ctx->blob->bs->used_clusters_mutex); 2373 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2374 pthread_mutex_unlock(&ctx->blob->bs->used_clusters_mutex); 2375 if (ctx->new_extent_page != 0) { 2376 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2377 } 2378 } 2379 2380 bs_sequence_finish(ctx->seq, bserrno); 2381 } 2382 2383 static void 2384 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2385 { 2386 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2387 uint32_t cluster_number; 2388 2389 if (bserrno) { 2390 /* The write failed, so jump to the final completion handler */ 2391 bs_sequence_finish(seq, bserrno); 2392 return; 2393 } 2394 2395 cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page); 2396 2397 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2398 ctx->new_extent_page, blob_insert_cluster_cpl, ctx); 2399 } 2400 2401 static void 2402 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2403 { 2404 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2405 2406 if (bserrno != 0) { 2407 /* The read failed, so jump to the final completion handler */ 2408 bs_sequence_finish(seq, bserrno); 2409 return; 2410 } 2411 2412 /* Write whole cluster */ 2413 bs_sequence_write_dev(seq, ctx->buf, 2414 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2415 bs_cluster_to_lba(ctx->blob->bs, 1), 2416 blob_write_copy_cpl, ctx); 2417 } 2418 2419 static void 2420 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2421 struct spdk_io_channel *_ch, 2422 uint64_t io_unit, spdk_bs_user_op_t *op) 2423 { 2424 struct spdk_bs_cpl cpl; 2425 struct spdk_bs_channel *ch; 2426 struct spdk_blob_copy_cluster_ctx *ctx; 2427 uint32_t cluster_start_page; 2428 uint32_t cluster_number; 2429 int rc; 2430 2431 ch = spdk_io_channel_get_ctx(_ch); 2432 2433 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2434 /* There are already operations pending. 
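*
* (For reference, the copy-on-write flow that such queued ops wait
* for is
*
*     bs_allocate_and_copy_cluster()            claim a free cluster
*       -> bs_sequence_read_bs_dev()            read the old data from the
*                                               backing dev - only when the
*                                               blob has a parent
*       -> blob_write_copy()                    write the whole cluster to
*                                               its new location
*       -> blob_write_copy_cpl()                then publish the cluster:
*       -> blob_insert_cluster_on_md_thread()   update the md on the md
*                                               thread
*       -> blob_allocate_and_copy_cluster_cpl() re-execute, or abort on
*                                               error, everything queued
*                                               on need_cluster_alloc.)
*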
Queue this user op 2435 * and return because it will be re-executed when the outstanding 2436 * cluster allocation completes. */ 2437 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2438 return; 2439 } 2440 2441 /* Round the io_unit offset down to the first page in the cluster */ 2442 cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit); 2443 2444 /* Calculate which index in the metadata cluster array the corresponding 2445 * cluster is supposed to be at. */ 2446 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2447 2448 ctx = calloc(1, sizeof(*ctx)); 2449 if (!ctx) { 2450 bs_user_op_abort(op, -ENOMEM); 2451 return; 2452 } 2453 2454 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2455 2456 ctx->blob = blob; 2457 ctx->page = cluster_start_page; 2458 2459 if (blob->parent_id != SPDK_BLOBID_INVALID) { 2460 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2461 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2462 if (!ctx->buf) { 2463 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2464 blob->bs->cluster_sz); 2465 free(ctx); 2466 bs_user_op_abort(op, -ENOMEM); 2467 return; 2468 } 2469 } 2470 2471 pthread_mutex_lock(&blob->bs->used_clusters_mutex); 2472 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2473 false); 2474 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 2475 if (rc != 0) { 2476 spdk_free(ctx->buf); 2477 free(ctx); 2478 bs_user_op_abort(op, rc); 2479 return; 2480 } 2481 2482 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2483 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2484 cpl.u.blob_basic.cb_arg = ctx; 2485 2486 ctx->seq = bs_sequence_start(_ch, &cpl); 2487 if (!ctx->seq) { 2488 pthread_mutex_lock(&blob->bs->used_clusters_mutex); 2489 bs_release_cluster(blob->bs, ctx->new_cluster); 2490 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 2491 spdk_free(ctx->buf); 2492 free(ctx); 2493 bs_user_op_abort(op, -ENOMEM); 2494 return; 2495 } 2496 2497 /* Queue the user op to block other incoming operations */ 2498 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2499 2500 if (blob->parent_id != SPDK_BLOBID_INVALID) { 2501 /* Read cluster from backing device */ 2502 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2503 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2504 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2505 blob_write_copy, ctx); 2506 } else { 2507 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2508 ctx->new_extent_page, blob_insert_cluster_cpl, ctx); 2509 } 2510 } 2511 2512 static inline bool 2513 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2514 uint64_t *lba, uint64_t *lba_count) 2515 { 2516 *lba_count = length; 2517 2518 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2519 assert(blob->back_bs_dev != NULL); 2520 *lba = bs_io_unit_to_back_dev_lba(blob, io_unit); 2521 *lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count); 2522 return false; 2523 } else { 2524 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2525 return true; 2526 } 2527 } 2528 2529 struct op_split_ctx { 2530 struct spdk_blob *blob; 2531 struct spdk_io_channel *channel; 2532 uint64_t io_unit_offset; 2533 uint64_t io_units_remaining; 2534 void *curr_payload; 2535 enum spdk_blob_op_type op_type; 2536 spdk_bs_sequence_t *seq; 2537 bool in_submit_ctx; 2538 bool completed_in_submit_ctx; 2539 bool done; 2540 }; 2541 2542 static void 2543 
blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2544 {
2545 struct op_split_ctx *ctx = cb_arg;
2546 struct spdk_blob *blob = ctx->blob;
2547 struct spdk_io_channel *ch = ctx->channel;
2548 enum spdk_blob_op_type op_type = ctx->op_type;
2549 uint8_t *buf;
2550 uint64_t offset;
2551 uint64_t length;
2552 uint64_t op_length;
2553
2554 if (bserrno != 0 || ctx->io_units_remaining == 0) {
2555 bs_sequence_finish(ctx->seq, bserrno);
2556 if (ctx->in_submit_ctx) {
2557 /* Defer freeing of the ctx object, since it will be
2558 * accessed when this unwinds back to the submission
2559 * context.
2560 */
2561 ctx->done = true;
2562 } else {
2563 free(ctx);
2564 }
2565 return;
2566 }
2567
2568 if (ctx->in_submit_ctx) {
2569 /* If this split operation completed in the context
2570 * of its submission, mark the flag and return immediately
2571 * to avoid recursion.
2572 */
2573 ctx->completed_in_submit_ctx = true;
2574 return;
2575 }
2576
2577 while (true) {
2578 ctx->completed_in_submit_ctx = false;
2579
2580 offset = ctx->io_unit_offset;
2581 length = ctx->io_units_remaining;
2582 buf = ctx->curr_payload;
2583 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob,
2584 offset));
2585
2586 /* Update length and payload for next operation */
2587 ctx->io_units_remaining -= op_length;
2588 ctx->io_unit_offset += op_length;
2589 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2590 ctx->curr_payload += op_length * blob->bs->io_unit_size;
2591 }
2592
2593 assert(!ctx->in_submit_ctx);
2594 ctx->in_submit_ctx = true;
2595
2596 switch (op_type) {
2597 case SPDK_BLOB_READ:
2598 spdk_blob_io_read(blob, ch, buf, offset, op_length,
2599 blob_request_submit_op_split_next, ctx);
2600 break;
2601 case SPDK_BLOB_WRITE:
2602 spdk_blob_io_write(blob, ch, buf, offset, op_length,
2603 blob_request_submit_op_split_next, ctx);
2604 break;
2605 case SPDK_BLOB_UNMAP:
2606 spdk_blob_io_unmap(blob, ch, offset, op_length,
2607 blob_request_submit_op_split_next, ctx);
2608 break;
2609 case SPDK_BLOB_WRITE_ZEROES:
2610 spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2611 blob_request_submit_op_split_next, ctx);
2612 break;
2613 case SPDK_BLOB_READV:
2614 case SPDK_BLOB_WRITEV:
2615 SPDK_ERRLOG("readv/writev not valid\n");
2616 bs_sequence_finish(ctx->seq, -EINVAL);
2617 free(ctx);
2618 return;
2619 }
2620
2621 #ifndef __clang_analyzer__
2622 /* scan-build reports a false positive around accessing the ctx here. It
2623 * forms a path that recursively calls this function, but then says
2624 * "assuming ctx->in_submit_ctx is false", when that isn't possible.
2625 * This path does free(ctx), returns to here, and reports a use-after-free
2626 * bug. Wrapping this bit of code so that scan-build doesn't see it
2627 * works around the scan-build bug.
2628 */
2629 assert(ctx->in_submit_ctx);
2630 ctx->in_submit_ctx = false;
2631
2632 /* If the operation completed immediately, loop back and submit the
2633 * next operation. Otherwise we can return and the next split
2634 * operation will get submitted when this current operation is
2635 * later completed asynchronously.
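*
* Together, in_submit_ctx and completed_in_submit_ctx form a small
* trampoline so that a long chain of synchronously-completing
* sub-operations cannot consume one stack frame per sub-operation:
*
*     callback fires while in_submit_ctx is still set (inline completion)
*         -> it only marks completed_in_submit_ctx and unwinds; the
*            while-loop above sees the flag and submits the next piece
*     callback fires later (genuinely asynchronous completion)
*         -> in_submit_ctx is false, so the callback itself drives the
*            next iteration of the split
*
* Either way the stack depth stays bounded no matter how many
* cluster-sized pieces the request was split into.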
2636 */ 2637 if (ctx->completed_in_submit_ctx) { 2638 continue; 2639 } else if (ctx->done) { 2640 free(ctx); 2641 } 2642 #endif 2643 break; 2644 } 2645 } 2646 2647 static void 2648 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 2649 void *payload, uint64_t offset, uint64_t length, 2650 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2651 { 2652 struct op_split_ctx *ctx; 2653 spdk_bs_sequence_t *seq; 2654 struct spdk_bs_cpl cpl; 2655 2656 assert(blob != NULL); 2657 2658 ctx = calloc(1, sizeof(struct op_split_ctx)); 2659 if (ctx == NULL) { 2660 cb_fn(cb_arg, -ENOMEM); 2661 return; 2662 } 2663 2664 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2665 cpl.u.blob_basic.cb_fn = cb_fn; 2666 cpl.u.blob_basic.cb_arg = cb_arg; 2667 2668 seq = bs_sequence_start(ch, &cpl); 2669 if (!seq) { 2670 free(ctx); 2671 cb_fn(cb_arg, -ENOMEM); 2672 return; 2673 } 2674 2675 ctx->blob = blob; 2676 ctx->channel = ch; 2677 ctx->curr_payload = payload; 2678 ctx->io_unit_offset = offset; 2679 ctx->io_units_remaining = length; 2680 ctx->op_type = op_type; 2681 ctx->seq = seq; 2682 2683 blob_request_submit_op_split_next(ctx, 0); 2684 } 2685 2686 static void 2687 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 2688 void *payload, uint64_t offset, uint64_t length, 2689 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2690 { 2691 struct spdk_bs_cpl cpl; 2692 uint64_t lba; 2693 uint64_t lba_count; 2694 bool is_allocated; 2695 2696 assert(blob != NULL); 2697 2698 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2699 cpl.u.blob_basic.cb_fn = cb_fn; 2700 cpl.u.blob_basic.cb_arg = cb_arg; 2701 2702 if (blob->frozen_refcnt) { 2703 /* This blob I/O is frozen */ 2704 spdk_bs_user_op_t *op; 2705 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 2706 2707 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2708 if (!op) { 2709 cb_fn(cb_arg, -ENOMEM); 2710 return; 2711 } 2712 2713 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2714 2715 return; 2716 } 2717 2718 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2719 2720 switch (op_type) { 2721 case SPDK_BLOB_READ: { 2722 spdk_bs_batch_t *batch; 2723 2724 batch = bs_batch_open(_ch, &cpl); 2725 if (!batch) { 2726 cb_fn(cb_arg, -ENOMEM); 2727 return; 2728 } 2729 2730 if (is_allocated) { 2731 /* Read from the blob */ 2732 bs_batch_read_dev(batch, payload, lba, lba_count); 2733 } else { 2734 /* Read from the backing block device */ 2735 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 2736 } 2737 2738 bs_batch_close(batch); 2739 break; 2740 } 2741 case SPDK_BLOB_WRITE: 2742 case SPDK_BLOB_WRITE_ZEROES: { 2743 if (is_allocated) { 2744 /* Write to the blob */ 2745 spdk_bs_batch_t *batch; 2746 2747 if (lba_count == 0) { 2748 cb_fn(cb_arg, 0); 2749 return; 2750 } 2751 2752 batch = bs_batch_open(_ch, &cpl); 2753 if (!batch) { 2754 cb_fn(cb_arg, -ENOMEM); 2755 return; 2756 } 2757 2758 if (op_type == SPDK_BLOB_WRITE) { 2759 bs_batch_write_dev(batch, payload, lba, lba_count); 2760 } else { 2761 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2762 } 2763 2764 bs_batch_close(batch); 2765 } else { 2766 /* Queue this operation and allocate the cluster */ 2767 spdk_bs_user_op_t *op; 2768 2769 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2770 if (!op) { 2771 cb_fn(cb_arg, -ENOMEM); 2772 return; 2773 } 2774 2775 bs_allocate_and_copy_cluster(blob, _ch, 
offset, op);
2776 }
2777 break;
2778 }
2779 case SPDK_BLOB_UNMAP: {
2780 spdk_bs_batch_t *batch;
2781
2782 batch = bs_batch_open(_ch, &cpl);
2783 if (!batch) {
2784 cb_fn(cb_arg, -ENOMEM);
2785 return;
2786 }
2787
2788 if (is_allocated) {
2789 bs_batch_unmap_dev(batch, lba, lba_count);
2790 }
2791
2792 bs_batch_close(batch);
2793 break;
2794 }
2795 case SPDK_BLOB_READV:
2796 case SPDK_BLOB_WRITEV:
2797 SPDK_ERRLOG("readv/writev not valid\n");
2798 cb_fn(cb_arg, -EINVAL);
2799 break;
2800 }
2801 }
2802
2803 static void
2804 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
2805 void *payload, uint64_t offset, uint64_t length,
2806 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2807 {
2808 assert(blob != NULL);
2809
2810 if (blob->data_ro && op_type != SPDK_BLOB_READ) {
2811 cb_fn(cb_arg, -EPERM);
2812 return;
2813 }
2814
2815 if (length == 0) {
2816 cb_fn(cb_arg, 0);
2817 return;
2818 }
2819
2820 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
2821 cb_fn(cb_arg, -EINVAL);
2822 return;
2823 }
2824 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) {
2825 blob_request_submit_op_single(_channel, blob, payload, offset, length,
2826 cb_fn, cb_arg, op_type);
2827 } else {
2828 blob_request_submit_op_split(_channel, blob, payload, offset, length,
2829 cb_fn, cb_arg, op_type);
2830 }
2831 }
2832
2833 struct rw_iov_ctx {
2834 struct spdk_blob *blob;
2835 struct spdk_io_channel *channel;
2836 spdk_blob_op_complete cb_fn;
2837 void *cb_arg;
2838 bool read;
2839 int iovcnt;
2840 struct iovec *orig_iov;
2841 uint64_t io_unit_offset;
2842 uint64_t io_units_remaining;
2843 uint64_t io_units_done;
2844 struct spdk_blob_ext_io_opts *ext_io_opts;
2845 struct iovec iov[0];
2846 };
2847
2848 static void
2849 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2850 {
2851 assert(cb_arg == NULL);
2852 bs_sequence_finish(seq, bserrno);
2853 }
2854
2855 static void
2856 rw_iov_split_next(void *cb_arg, int bserrno)
2857 {
2858 struct rw_iov_ctx *ctx = cb_arg;
2859 struct spdk_blob *blob = ctx->blob;
2860 struct iovec *iov, *orig_iov;
2861 int iovcnt;
2862 size_t orig_iovoff;
2863 uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
2864 uint64_t byte_count;
2865
2866 if (bserrno != 0 || ctx->io_units_remaining == 0) {
2867 ctx->cb_fn(ctx->cb_arg, bserrno);
2868 free(ctx);
2869 return;
2870 }
2871
2872 io_unit_offset = ctx->io_unit_offset;
2873 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
2874 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
2875 /*
2876 * Get the index and offset into the original iov array for our current position in the I/O
2877 * sequence. byte_count tracks how many bytes remain until orig_iov and orig_iovoff point to
2878 * the current position in the I/O sequence.
2879 */
2880 byte_count = ctx->io_units_done * blob->bs->io_unit_size;
2881 orig_iov = &ctx->orig_iov[0];
2882 orig_iovoff = 0;
2883 while (byte_count > 0) {
2884 if (byte_count >= orig_iov->iov_len) {
2885 byte_count -= orig_iov->iov_len;
2886 orig_iov++;
2887 } else {
2888 orig_iovoff = byte_count;
2889 byte_count = 0;
2890 }
2891 }
2892
2893 /*
2894 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many
2895 * bytes of this next I/O remain to be accounted for in the new iov array.
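*
* A worked illustration with made-up numbers (512-byte io units; two
* original iovs of 4096 bytes each; 8 io units already done):
*
*     io_units_done = 8 -> byte_count = 4096, which consumes exactly
*     orig_iov[0], so orig_iov now points at the second element and
*     orig_iovoff = 0.
*
* If the next sub-I/O is 4 io units (2048 bytes), the loop below builds
*
*     iov[0] = { .iov_base = orig_iov[1].iov_base, .iov_len = 2048 }
*
* i.e. the split never copies payload; it only re-slices the caller's
* iov array around cluster boundaries.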
2896 */ 2897 byte_count = io_units_count * blob->bs->io_unit_size; 2898 iov = &ctx->iov[0]; 2899 iovcnt = 0; 2900 while (byte_count > 0) { 2901 assert(iovcnt < ctx->iovcnt); 2902 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 2903 iov->iov_base = orig_iov->iov_base + orig_iovoff; 2904 byte_count -= iov->iov_len; 2905 orig_iovoff = 0; 2906 orig_iov++; 2907 iov++; 2908 iovcnt++; 2909 } 2910 2911 ctx->io_unit_offset += io_units_count; 2912 ctx->io_units_remaining -= io_units_count; 2913 ctx->io_units_done += io_units_count; 2914 iov = &ctx->iov[0]; 2915 2916 if (ctx->read) { 2917 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2918 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 2919 } else { 2920 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2921 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 2922 } 2923 } 2924 2925 static void 2926 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2927 struct iovec *iov, int iovcnt, 2928 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 2929 struct spdk_blob_ext_io_opts *ext_io_opts) 2930 { 2931 struct spdk_bs_cpl cpl; 2932 2933 assert(blob != NULL); 2934 2935 if (!read && blob->data_ro) { 2936 cb_fn(cb_arg, -EPERM); 2937 return; 2938 } 2939 2940 if (length == 0) { 2941 cb_fn(cb_arg, 0); 2942 return; 2943 } 2944 2945 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2946 cb_fn(cb_arg, -EINVAL); 2947 return; 2948 } 2949 2950 /* 2951 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 2952 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 2953 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 2954 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 2955 * to allocate a separate iov array and split the I/O such that none of the resulting 2956 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 2957 * but since this case happens very infrequently, any performance impact will be negligible. 2958 * 2959 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 2960 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 2961 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 2962 * when the batch was completed, to allow for freeing the memory for the iov arrays. 2963 */ 2964 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 2965 uint64_t lba_count; 2966 uint64_t lba; 2967 bool is_allocated; 2968 2969 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2970 cpl.u.blob_basic.cb_fn = cb_fn; 2971 cpl.u.blob_basic.cb_arg = cb_arg; 2972 2973 if (blob->frozen_refcnt) { 2974 /* This blob I/O is frozen */ 2975 enum spdk_blob_op_type op_type; 2976 spdk_bs_user_op_t *op; 2977 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 2978 2979 op_type = read ? 
SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 2980 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 2981 if (!op) { 2982 cb_fn(cb_arg, -ENOMEM); 2983 return; 2984 } 2985 2986 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2987 2988 return; 2989 } 2990 2991 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2992 2993 if (read) { 2994 spdk_bs_sequence_t *seq; 2995 2996 seq = bs_sequence_start(_channel, &cpl); 2997 if (!seq) { 2998 cb_fn(cb_arg, -ENOMEM); 2999 return; 3000 } 3001 3002 seq->ext_io_opts = ext_io_opts; 3003 3004 if (is_allocated) { 3005 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3006 } else { 3007 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3008 rw_iov_done, NULL); 3009 } 3010 } else { 3011 if (is_allocated) { 3012 spdk_bs_sequence_t *seq; 3013 3014 seq = bs_sequence_start(_channel, &cpl); 3015 if (!seq) { 3016 cb_fn(cb_arg, -ENOMEM); 3017 return; 3018 } 3019 3020 seq->ext_io_opts = ext_io_opts; 3021 3022 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3023 } else { 3024 /* Queue this operation and allocate the cluster */ 3025 spdk_bs_user_op_t *op; 3026 3027 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3028 length); 3029 if (!op) { 3030 cb_fn(cb_arg, -ENOMEM); 3031 return; 3032 } 3033 3034 op->ext_io_opts = ext_io_opts; 3035 3036 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3037 } 3038 } 3039 } else { 3040 struct rw_iov_ctx *ctx; 3041 3042 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3043 if (ctx == NULL) { 3044 cb_fn(cb_arg, -ENOMEM); 3045 return; 3046 } 3047 3048 ctx->blob = blob; 3049 ctx->channel = _channel; 3050 ctx->cb_fn = cb_fn; 3051 ctx->cb_arg = cb_arg; 3052 ctx->read = read; 3053 ctx->orig_iov = iov; 3054 ctx->iovcnt = iovcnt; 3055 ctx->io_unit_offset = offset; 3056 ctx->io_units_remaining = length; 3057 ctx->io_units_done = 0; 3058 ctx->ext_io_opts = ext_io_opts; 3059 3060 rw_iov_split_next(ctx, 0); 3061 } 3062 } 3063 3064 static struct spdk_blob * 3065 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3066 { 3067 struct spdk_blob find; 3068 3069 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3070 return NULL; 3071 } 3072 3073 find.id = blobid; 3074 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3075 } 3076 3077 static void 3078 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3079 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3080 { 3081 assert(blob != NULL); 3082 *snapshot_entry = NULL; 3083 *clone_entry = NULL; 3084 3085 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3086 return; 3087 } 3088 3089 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3090 if ((*snapshot_entry)->id == blob->parent_id) { 3091 break; 3092 } 3093 } 3094 3095 if (*snapshot_entry != NULL) { 3096 TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) { 3097 if ((*clone_entry)->id == blob->id) { 3098 break; 3099 } 3100 } 3101 3102 assert(*clone_entry != NULL); 3103 } 3104 } 3105 3106 static int 3107 bs_channel_create(void *io_device, void *ctx_buf) 3108 { 3109 struct spdk_blob_store *bs = io_device; 3110 struct spdk_bs_channel *channel = ctx_buf; 3111 struct spdk_bs_dev *dev; 3112 uint32_t max_ops = bs->max_channel_ops; 3113 uint32_t i; 3114 3115 dev = bs->dev; 3116 3117 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3118 if 
(!channel->req_mem) { 3119 return -1; 3120 } 3121 3122 TAILQ_INIT(&channel->reqs); 3123 3124 for (i = 0; i < max_ops; i++) { 3125 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3126 } 3127 3128 channel->bs = bs; 3129 channel->dev = dev; 3130 channel->dev_channel = dev->create_channel(dev); 3131 3132 if (!channel->dev_channel) { 3133 SPDK_ERRLOG("Failed to create device channel.\n"); 3134 free(channel->req_mem); 3135 return -1; 3136 } 3137 3138 TAILQ_INIT(&channel->need_cluster_alloc); 3139 TAILQ_INIT(&channel->queued_io); 3140 3141 return 0; 3142 } 3143 3144 static void 3145 bs_channel_destroy(void *io_device, void *ctx_buf) 3146 { 3147 struct spdk_bs_channel *channel = ctx_buf; 3148 spdk_bs_user_op_t *op; 3149 3150 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3151 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3152 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3153 bs_user_op_abort(op, -EIO); 3154 } 3155 3156 while (!TAILQ_EMPTY(&channel->queued_io)) { 3157 op = TAILQ_FIRST(&channel->queued_io); 3158 TAILQ_REMOVE(&channel->queued_io, op, link); 3159 bs_user_op_abort(op, -EIO); 3160 } 3161 3162 free(channel->req_mem); 3163 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3164 } 3165 3166 static void 3167 bs_dev_destroy(void *io_device) 3168 { 3169 struct spdk_blob_store *bs = io_device; 3170 struct spdk_blob *blob, *blob_tmp; 3171 3172 bs->dev->destroy(bs->dev); 3173 3174 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3175 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3176 spdk_bit_array_clear(bs->open_blobids, blob->id); 3177 blob_free(blob); 3178 } 3179 3180 pthread_mutex_destroy(&bs->used_clusters_mutex); 3181 3182 spdk_bit_array_free(&bs->open_blobids); 3183 spdk_bit_array_free(&bs->used_blobids); 3184 spdk_bit_array_free(&bs->used_md_pages); 3185 spdk_bit_pool_free(&bs->used_clusters); 3186 /* 3187 * If this function is called for any reason except a successful unload, 3188 * the unload_cpl type will be NONE and this will be a nop. 
3189 */
3190 bs_call_cpl(&bs->unload_cpl, bs->unload_err);
3191
3192 free(bs);
3193 }
3194
3195 static int
3196 bs_blob_list_add(struct spdk_blob *blob)
3197 {
3198 spdk_blob_id snapshot_id;
3199 struct spdk_blob_list *snapshot_entry = NULL;
3200 struct spdk_blob_list *clone_entry = NULL;
3201
3202 assert(blob != NULL);
3203
3204 snapshot_id = blob->parent_id;
3205 if (snapshot_id == SPDK_BLOBID_INVALID) {
3206 return 0;
3207 }
3208
3209 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id);
3210 if (snapshot_entry == NULL) {
3211 /* Snapshot not found */
3212 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
3213 if (snapshot_entry == NULL) {
3214 return -ENOMEM;
3215 }
3216 snapshot_entry->id = snapshot_id;
3217 TAILQ_INIT(&snapshot_entry->clones);
3218 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
3219 } else {
3220 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
3221 if (clone_entry->id == blob->id) {
3222 break;
3223 }
3224 }
3225 }
3226
3227 if (clone_entry == NULL) {
3228 /* Clone not found */
3229 clone_entry = calloc(1, sizeof(struct spdk_blob_list));
3230 if (clone_entry == NULL) {
3231 return -ENOMEM;
3232 }
3233 clone_entry->id = blob->id;
3234 TAILQ_INIT(&clone_entry->clones);
3235 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
3236 snapshot_entry->clone_count++;
3237 }
3238
3239 return 0;
3240 }
3241
3242 static void
3243 bs_blob_list_remove(struct spdk_blob *blob)
3244 {
3245 struct spdk_blob_list *snapshot_entry = NULL;
3246 struct spdk_blob_list *clone_entry = NULL;
3247
3248 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry);
3249
3250 if (snapshot_entry == NULL) {
3251 return;
3252 }
3253
3254 blob->parent_id = SPDK_BLOBID_INVALID;
3255 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3256 free(clone_entry);
3257
3258 snapshot_entry->clone_count--;
3259 }
3260
3261 static int
3262 bs_blob_list_free(struct spdk_blob_store *bs)
3263 {
3264 struct spdk_blob_list *snapshot_entry;
3265 struct spdk_blob_list *snapshot_entry_tmp;
3266 struct spdk_blob_list *clone_entry;
3267 struct spdk_blob_list *clone_entry_tmp;
3268
3269 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
3270 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
3271 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3272 free(clone_entry);
3273 }
3274 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
3275 free(snapshot_entry);
3276 }
3277
3278 return 0;
3279 }
3280
3281 static void
3282 bs_free(struct spdk_blob_store *bs)
3283 {
3284 bs_blob_list_free(bs);
3285
3286 bs_unregister_md_thread(bs);
3287 spdk_io_device_unregister(bs, bs_dev_destroy);
3288 }
3289
3290 void
3291 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size)
3292 {
3293
3294 if (!opts) {
3295 SPDK_ERRLOG("opts should not be NULL\n");
3296 return;
3297 }
3298
3299 if (!opts_size) {
3300 SPDK_ERRLOG("opts_size should not be zero\n");
3301 return;
3302 }
3303
3304 memset(opts, 0, opts_size);
3305 opts->opts_size = opts_size;
3306
3307 #define FIELD_OK(field) \
3308 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size
3309
3310 #define SET_FIELD(field, value) \
3311 if (FIELD_OK(field)) { \
3312 opts->field = value; \
3313 } \
3314
3315 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ);
3316 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES);
3317 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES);
3318 SET_FIELD(max_channel_ops,
SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3319 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3320 3321 if (FIELD_OK(bstype)) { 3322 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3323 } 3324 3325 SET_FIELD(iter_cb_fn, NULL); 3326 SET_FIELD(iter_cb_arg, NULL); 3327 SET_FIELD(force_recover, false); 3328 3329 #undef FIELD_OK 3330 #undef SET_FIELD 3331 } 3332 3333 static int 3334 bs_opts_verify(struct spdk_bs_opts *opts) 3335 { 3336 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3337 opts->max_channel_ops == 0) { 3338 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3339 return -1; 3340 } 3341 3342 return 0; 3343 } 3344 3345 /* START spdk_bs_load */ 3346 3347 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3348 3349 struct spdk_bs_load_ctx { 3350 struct spdk_blob_store *bs; 3351 struct spdk_bs_super_block *super; 3352 3353 struct spdk_bs_md_mask *mask; 3354 bool in_page_chain; 3355 uint32_t page_index; 3356 uint32_t cur_page; 3357 struct spdk_blob_md_page *page; 3358 3359 uint64_t num_extent_pages; 3360 uint32_t *extent_page_num; 3361 struct spdk_blob_md_page *extent_pages; 3362 struct spdk_bit_array *used_clusters; 3363 3364 spdk_bs_sequence_t *seq; 3365 spdk_blob_op_with_handle_complete iter_cb_fn; 3366 void *iter_cb_arg; 3367 struct spdk_blob *blob; 3368 spdk_blob_id blobid; 3369 3370 bool force_recover; 3371 3372 /* These fields are used in the spdk_bs_dump path. */ 3373 bool dumping; 3374 FILE *fp; 3375 spdk_bs_dump_print_xattr print_xattr_fn; 3376 char xattr_name[4096]; 3377 }; 3378 3379 static int 3380 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3381 struct spdk_bs_load_ctx **_ctx) 3382 { 3383 struct spdk_blob_store *bs; 3384 struct spdk_bs_load_ctx *ctx; 3385 uint64_t dev_size; 3386 int rc; 3387 3388 dev_size = dev->blocklen * dev->blockcnt; 3389 if (dev_size < opts->cluster_sz) { 3390 /* Device size cannot be smaller than cluster size of blobstore */ 3391 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3392 dev_size, opts->cluster_sz); 3393 return -ENOSPC; 3394 } 3395 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3396 /* Cluster size cannot be smaller than page size */ 3397 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3398 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3399 return -EINVAL; 3400 } 3401 bs = calloc(1, sizeof(struct spdk_blob_store)); 3402 if (!bs) { 3403 return -ENOMEM; 3404 } 3405 3406 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3407 if (!ctx) { 3408 free(bs); 3409 return -ENOMEM; 3410 } 3411 3412 ctx->bs = bs; 3413 ctx->iter_cb_fn = opts->iter_cb_fn; 3414 ctx->iter_cb_arg = opts->iter_cb_arg; 3415 ctx->force_recover = opts->force_recover; 3416 3417 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3418 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3419 if (!ctx->super) { 3420 free(ctx); 3421 free(bs); 3422 return -ENOMEM; 3423 } 3424 3425 RB_INIT(&bs->open_blobs); 3426 TAILQ_INIT(&bs->snapshots); 3427 bs->dev = dev; 3428 bs->md_thread = spdk_get_thread(); 3429 assert(bs->md_thread != NULL); 3430 3431 /* 3432 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3433 * even multiple of the cluster size. 
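*
* A worked illustration with made-up numbers: a device of 1,000,000
* blocks of 512 bytes, with a 1 MiB cluster, gives
*
*     blocks per cluster = 1,048,576 / 512  = 2048
*     total_clusters     = 1,000,000 / 2048 = 488   (integer division)
*
* so the trailing 576 blocks (1,000,000 - 488 * 2048) never back a
* cluster; dividing directly, as below, keeps them out of the count.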
3434 */ 3435 bs->cluster_sz = opts->cluster_sz; 3436 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3437 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3438 if (!ctx->used_clusters) { 3439 spdk_free(ctx->super); 3440 free(ctx); 3441 free(bs); 3442 return -ENOMEM; 3443 } 3444 3445 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3446 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3447 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3448 } 3449 bs->num_free_clusters = bs->total_clusters; 3450 bs->io_unit_size = dev->blocklen; 3451 3452 bs->max_channel_ops = opts->max_channel_ops; 3453 bs->super_blob = SPDK_BLOBID_INVALID; 3454 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3455 3456 /* The metadata is assumed to be at least 1 page */ 3457 bs->used_md_pages = spdk_bit_array_create(1); 3458 bs->used_blobids = spdk_bit_array_create(0); 3459 bs->open_blobids = spdk_bit_array_create(0); 3460 3461 pthread_mutex_init(&bs->used_clusters_mutex, NULL); 3462 3463 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3464 sizeof(struct spdk_bs_channel), "blobstore"); 3465 rc = bs_register_md_thread(bs); 3466 if (rc == -1) { 3467 spdk_io_device_unregister(bs, NULL); 3468 pthread_mutex_destroy(&bs->used_clusters_mutex); 3469 spdk_bit_array_free(&bs->open_blobids); 3470 spdk_bit_array_free(&bs->used_blobids); 3471 spdk_bit_array_free(&bs->used_md_pages); 3472 spdk_bit_array_free(&ctx->used_clusters); 3473 spdk_free(ctx->super); 3474 free(ctx); 3475 free(bs); 3476 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3477 return -ENOMEM; 3478 } 3479 3480 *_ctx = ctx; 3481 *_bs = bs; 3482 return 0; 3483 } 3484 3485 static void 3486 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3487 { 3488 assert(bserrno != 0); 3489 3490 spdk_free(ctx->super); 3491 bs_sequence_finish(ctx->seq, bserrno); 3492 bs_free(ctx->bs); 3493 spdk_bit_array_free(&ctx->used_clusters); 3494 free(ctx); 3495 } 3496 3497 static void 3498 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3499 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3500 { 3501 /* Update the values in the super block */ 3502 super->super_blob = bs->super_blob; 3503 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3504 super->crc = blob_md_page_calc_crc(super); 3505 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 3506 bs_byte_to_lba(bs, sizeof(*super)), 3507 cb_fn, cb_arg); 3508 } 3509 3510 static void 3511 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3512 { 3513 struct spdk_bs_load_ctx *ctx = arg; 3514 uint64_t mask_size, lba, lba_count; 3515 3516 /* Write out the used clusters mask */ 3517 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3518 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3519 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3520 if (!ctx->mask) { 3521 bs_load_ctx_fail(ctx, -ENOMEM); 3522 return; 3523 } 3524 3525 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 3526 ctx->mask->length = ctx->bs->total_clusters; 3527 /* We could get here through the normal unload path, or through dirty 3528 * shutdown recovery. For the normal unload path, we use the mask from 3529 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 3530 * only the bit array from the load ctx. 
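*
* Either way the same on-disk representation is produced;
* spdk_bit_pool_store_mask() and spdk_bit_array_store_mask() both dump
* the raw bitmap into ctx->mask->mask. The mask written here is,
* roughly (the exact declaration lives in blobstore.h):
*
*     mask->type    an SPDK_MD_MASK_TYPE_USED_* constant
*     mask->length  number of valid bits (here: total_clusters)
*     mask->mask[]  the bitmap itself, zero-padded to whole pages
*
* The used-md-pages and used-blobids masks written by the neighbouring
* functions share this layout, with md_len bits instead.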
3531 */ 3532 if (ctx->bs->used_clusters) { 3533 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 3534 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 3535 } else { 3536 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 3537 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 3538 } 3539 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3540 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3541 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3542 } 3543 3544 static void 3545 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3546 { 3547 struct spdk_bs_load_ctx *ctx = arg; 3548 uint64_t mask_size, lba, lba_count; 3549 3550 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3551 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3552 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3553 if (!ctx->mask) { 3554 bs_load_ctx_fail(ctx, -ENOMEM); 3555 return; 3556 } 3557 3558 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 3559 ctx->mask->length = ctx->super->md_len; 3560 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 3561 3562 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 3563 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3564 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3565 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3566 } 3567 3568 static void 3569 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3570 { 3571 struct spdk_bs_load_ctx *ctx = arg; 3572 uint64_t mask_size, lba, lba_count; 3573 3574 if (ctx->super->used_blobid_mask_len == 0) { 3575 /* 3576 * This is a pre-v3 on-disk format where the blobid mask does not get 3577 * written to disk. 
3578 */ 3579 cb_fn(seq, arg, 0); 3580 return; 3581 } 3582 3583 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3584 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3585 SPDK_MALLOC_DMA); 3586 if (!ctx->mask) { 3587 bs_load_ctx_fail(ctx, -ENOMEM); 3588 return; 3589 } 3590 3591 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 3592 ctx->mask->length = ctx->super->md_len; 3593 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 3594 3595 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 3596 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3597 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3598 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3599 } 3600 3601 static void 3602 blob_set_thin_provision(struct spdk_blob *blob) 3603 { 3604 blob_verify_md_op(blob); 3605 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 3606 blob->state = SPDK_BLOB_STATE_DIRTY; 3607 } 3608 3609 static void 3610 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 3611 { 3612 blob_verify_md_op(blob); 3613 blob->clear_method = clear_method; 3614 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 3615 blob->state = SPDK_BLOB_STATE_DIRTY; 3616 } 3617 3618 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 3619 3620 static void 3621 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 3622 { 3623 struct spdk_bs_load_ctx *ctx = cb_arg; 3624 spdk_blob_id id; 3625 int64_t page_num; 3626 3627 /* Iterate to next blob (we can't use spdk_bs_iter_next function as our 3628 * last blob has been removed */ 3629 page_num = bs_blobid_to_page(ctx->blobid); 3630 page_num++; 3631 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 3632 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 3633 bs_load_iter(ctx, NULL, -ENOENT); 3634 return; 3635 } 3636 3637 id = bs_page_to_blobid(page_num); 3638 3639 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 3640 } 3641 3642 static void 3643 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 3644 { 3645 struct spdk_bs_load_ctx *ctx = cb_arg; 3646 3647 if (bserrno != 0) { 3648 SPDK_ERRLOG("Failed to close corrupted blob\n"); 3649 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3650 return; 3651 } 3652 3653 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 3654 } 3655 3656 static void 3657 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 3658 { 3659 struct spdk_bs_load_ctx *ctx = cb_arg; 3660 uint64_t i; 3661 3662 if (bserrno != 0) { 3663 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 3664 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3665 return; 3666 } 3667 3668 /* Snapshot and clone have the same copy of cluster map and extent pages 3669 * at this point. Let's clear both for snapshot now, 3670 * so that it won't be cleared for clone later when we remove snapshot. 
3671 * Also set thin provision to pass data corruption check */ 3672 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 3673 ctx->blob->active.clusters[i] = 0; 3674 } 3675 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 3676 ctx->blob->active.extent_pages[i] = 0; 3677 } 3678 3679 ctx->blob->md_ro = false; 3680 3681 blob_set_thin_provision(ctx->blob); 3682 3683 ctx->blobid = ctx->blob->id; 3684 3685 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 3686 } 3687 3688 static void 3689 bs_update_corrupted_blob(void *cb_arg, int bserrno) 3690 { 3691 struct spdk_bs_load_ctx *ctx = cb_arg; 3692 3693 if (bserrno != 0) { 3694 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 3695 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3696 return; 3697 } 3698 3699 ctx->blob->md_ro = false; 3700 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 3701 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 3702 spdk_blob_set_read_only(ctx->blob); 3703 3704 if (ctx->iter_cb_fn) { 3705 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 3706 } 3707 bs_blob_list_add(ctx->blob); 3708 3709 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3710 } 3711 3712 static void 3713 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 3714 { 3715 struct spdk_bs_load_ctx *ctx = cb_arg; 3716 3717 if (bserrno != 0) { 3718 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 3719 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3720 return; 3721 } 3722 3723 if (blob->parent_id == ctx->blob->id) { 3724 /* Power failure occurred before updating clone (snapshot delete case) 3725 * or after updating clone (creating snapshot case) - keep snapshot */ 3726 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 3727 } else { 3728 /* Power failure occurred after updating clone (snapshot delete case) 3729 * or before updating clone (creating snapshot case) - remove snapshot */ 3730 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 3731 } 3732 } 3733 3734 static void 3735 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 3736 { 3737 struct spdk_bs_load_ctx *ctx = arg; 3738 const void *value; 3739 size_t len; 3740 int rc = 0; 3741 3742 if (bserrno == 0) { 3743 /* Examine blob if it is corrupted after power failure. Fix 3744 * the ones that can be fixed and remove any other corrupted 3745 * ones. If it is not corrupted just process it */ 3746 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 3747 if (rc != 0) { 3748 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 3749 if (rc != 0) { 3750 /* Not corrupted - process it and continue with iterating through blobs */ 3751 if (ctx->iter_cb_fn) { 3752 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 3753 } 3754 bs_blob_list_add(blob); 3755 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 3756 return; 3757 } 3758 3759 } 3760 3761 assert(len == sizeof(spdk_blob_id)); 3762 3763 ctx->blob = blob; 3764 3765 /* Open clone to check if we are able to fix this blob or should we remove it */ 3766 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 3767 return; 3768 } else if (bserrno == -ENOENT) { 3769 bserrno = 0; 3770 } else { 3771 /* 3772 * This case needs to be looked at further. Same problem 3773 * exists with applications that rely on explicit blob 3774 * iteration. We should just skip the blob that failed 3775 * to load and continue on to the next one. 
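*
* (For reference, recapping the recovery logic above: a blob carrying
* the SNAPSHOT_PENDING_REMOVAL or SNAPSHOT_IN_PROGRESS xattr is a
* snapshot whose creation or deletion was interrupted by power failure.
* The xattr value names the clone, and the clone's parent pointer is
* the tie-breaker used by bs_examine_clone():
*
*     clone->parent_id == snapshot id -> the clone still depends on this
*                                        snapshot: keep it and strip the
*                                        in-progress xattrs
*                                        (bs_update_corrupted_blob)
*     otherwise                       -> the clone no longer points here:
*                                        delete the half-removed snapshot
*                                        (bs_delete_corrupted_blob).)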
static void
bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = arg;
	const void *value;
	size_t len;
	int rc = 0;

	if (bserrno == 0) {
		/* Examine the blob for corruption left behind by a power failure.
		 * Fix the ones that can be fixed and remove any other corrupted
		 * ones. If the blob is not corrupted, just process it. */
		rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true);
		if (rc != 0) {
			rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true);
			if (rc != 0) {
				/* Not corrupted - process it and continue iterating through the blobs */
				if (ctx->iter_cb_fn) {
					ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
				}
				bs_blob_list_add(blob);
				spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx);
				return;
			}
		}

		assert(len == sizeof(spdk_blob_id));

		ctx->blob = blob;

		/* Open the clone to check whether this blob can be fixed or should be removed */
		spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx);
		return;
	} else if (bserrno == -ENOENT) {
		bserrno = 0;
	} else {
		/*
		 * This case needs to be looked at further. The same problem exists
		 * for applications that rely on explicit blob iteration. We should
		 * just skip the blob that failed to load and continue on to the
		 * next one.
		 */
		SPDK_ERRLOG("Error in iterating blobs\n");
	}

	ctx->iter_cb_fn = NULL;

	spdk_free(ctx->super);
	spdk_free(ctx->mask);
	bs_sequence_finish(ctx->seq, bserrno);
	free(ctx);
}

static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);

static void
bs_load_complete(struct spdk_bs_load_ctx *ctx)
{
	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
	if (ctx->dumping) {
		bs_dump_read_md_page(ctx->seq, ctx);
		return;
	}
	spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx);
}

static void
bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	int rc;

	/* The type must be correct */
	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);

	/* The length of the mask (in bits) must not be greater than
	 * the length of the buffer (converted to bits) */
	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));

	/* The length of the mask must be exactly equal to the size
	 * (in pages) of the metadata region */
	assert(ctx->mask->length == ctx->super->md_len);

	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
	if (rc < 0) {
		spdk_free(ctx->mask);
		bs_load_ctx_fail(ctx, rc);
		return;
	}

	spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask);
	bs_load_complete(ctx);
}
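/*
 * On-disk layout of a mask region, as read into ctx->mask above (a sketch
 * based on struct spdk_bs_md_mask and on how its fields are used here; the
 * authoritative definition lives in blobstore.h):
 *
 *	struct spdk_bs_md_mask {
 *		uint8_t  type;     // SPDK_MD_MASK_TYPE_USED_{PAGES,CLUSTERS,BLOBIDS}
 *		uint32_t length;   // length of the bitmap, in bits
 *		uint8_t  mask[0];  // the bitmap itself
 *	};
 *
 * Each mask region is sized in whole metadata pages:
 *	mask_len_pages = divide_round_up(sizeof(header) + divide_round_up(bits, 8),
 *					 SPDK_BS_PAGE_SIZE)
 * which matches the used_*_mask_len math used when the super block is
 * written in spdk_bs_init() further below.
 */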
static void
bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint64_t lba, lba_count, mask_size;
	int rc;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	/* The type must be correct */
	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
					     struct spdk_blob_md_page) * 8));
	/* The length of the mask must be exactly equal to the total number of clusters */
	assert(ctx->mask->length == ctx->bs->total_clusters);

	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
	if (rc < 0) {
		spdk_free(ctx->mask);
		bs_load_ctx_fail(ctx, rc);
		return;
	}

	spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask);
	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters);
	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);

	spdk_free(ctx->mask);

	/* Read the used blobids mask */
	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}
	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
			     bs_load_used_blobids_cpl, ctx);
}

static void
bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint64_t lba, lba_count, mask_size;
	int rc;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	/* The type must be correct */
	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 8));
	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
	if (ctx->mask->length != ctx->super->md_len) {
		SPDK_ERRLOG("mismatched md_len in used_pages mask: "
			    "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n",
			    ctx->mask->length, ctx->super->md_len);
		assert(false);
		/* The assert compiles out in release builds, so fail the load
		 * explicitly rather than continuing with inconsistent metadata. */
		spdk_free(ctx->mask);
		bs_load_ctx_fail(ctx, -EILSEQ);
		return;
	}

	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
	if (rc < 0) {
		spdk_free(ctx->mask);
		bs_load_ctx_fail(ctx, rc);
		return;
	}

	spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask);
	spdk_free(ctx->mask);

	/* Read the used clusters mask */
	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
				 SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}
	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
			     bs_load_used_clusters_cpl, ctx);
}

static void
bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
{
	uint64_t lba, lba_count, mask_size;

	/* Read the used pages mask */
	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->mask) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
			     bs_load_used_pages_cpl, ctx);
}
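/*
 * Illustrative arithmetic for the mask reads above (numbers are an example
 * only, not a fixed layout): with 4 KiB metadata pages (SPDK_BS_PAGE_SIZE)
 * on a 512-byte block device, bs_page_to_lba(bs, n) == n * (4096 / 512)
 * == n * 8. So a mask region with used_page_mask_start = 1 and
 * used_page_mask_len = 2 is read as lba = 8, lba_count = 16.
 */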
static int
bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page)
{
	struct spdk_blob_store *bs = ctx->bs;
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i, j;
			unsigned int cluster_count = 0;
			uint32_t cluster_idx;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					cluster_idx = desc_extent_rle->extents[i].cluster_idx;
					/*
					 * cluster_idx = 0 means an unallocated cluster - don't mark that
					 * in the used cluster map.
					 */
					if (cluster_idx != 0) {
						SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j);
						spdk_bit_array_set(ctx->used_clusters, cluster_idx + j);
						if (bs->num_free_clusters == 0) {
							return -ENOSPC;
						}
						bs->num_free_clusters--;
					}
					cluster_count++;
				}
			}
			if (cluster_count == 0) {
				return -EINVAL;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			uint32_t i;
			uint32_t cluster_count = 0;
			uint32_t cluster_idx;
			size_t cluster_idx_length;

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				cluster_idx = desc_extent->cluster_idx[i];
				/*
				 * cluster_idx = 0 means an unallocated cluster - don't mark that
				 * in the used cluster map.
				 */
				if (cluster_idx != 0) {
					if (cluster_idx < desc_extent->start_cluster_idx &&
					    cluster_idx >= desc_extent->start_cluster_idx + cluster_count) {
						return -EINVAL;
					}
					spdk_bit_array_set(ctx->used_clusters, cluster_idx);
					if (bs->num_free_clusters == 0) {
						return -ENOSPC;
					}
					bs->num_free_clusters--;
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			/* Skip this item */
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			/* Skip this item */
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			/* Skip this item */
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = ctx->num_extent_pages;
			uint32_t i;
			size_t extent_pages_length;
			void *tmp;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					if (desc_extent_table->extent_page[i].num_pages != 1) {
						return -EINVAL;
					}
					num_extent_pages += 1;
				}
			}

			if (num_extent_pages > 0) {
				tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				ctx->extent_page_num = tmp;

				/* Extent table entries contain md page numbers for extent pages.
				 * Zeroes represent unallocated extent pages, those are run-length-encoded.
				 */
				for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
					if (desc_extent_table->extent_page[i].page_idx != 0) {
						ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx;
						ctx->num_extent_pages += 1;
					}
				}
			}
		} else {
			/* Error */
			return -EINVAL;
		}
		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}
	return 0;
}
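/*
 * Shape of the metadata page walked by the parser above (an approximate
 * sketch; the authoritative field layout is in blobstore.h):
 *
 *	+-- struct spdk_blob_md_page ---------------------------------+
 *	| id | sequence_num | ... | descriptors[] ... | next | crc    |
 *	+---------------------------------------------------------------+
 *
 * descriptors[] is a byte stream of TLV-style entries:
 *
 *	struct spdk_blob_md_descriptor {
 *		uint8_t  type;    // PADDING, EXTENT_RLE, EXTENT_PAGE, XATTR, ...
 *		uint32_t length;  // payload bytes following this header
 *	};
 *
 * The loop advances with cur_desc += sizeof(*desc) + desc->length and stops
 * at a zero-length PADDING descriptor or at the end of the array.
 */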
static bool
bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page)
{
	uint32_t crc;
	struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	size_t desc_len;

	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		return false;
	}

	/* Extent page should always be of sequence num 0. */
	if (page->sequence_num != 0) {
		return false;
	}

	/* Descriptor type must be EXTENT_PAGE. */
	if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
		return false;
	}

	/* Descriptor length cannot exceed the page. */
	desc_len = sizeof(*desc) + desc->length;
	if (desc_len > sizeof(page->descriptors)) {
		return false;
	}

	/* It has to be the only descriptor in the page. */
	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
		if (desc->length != 0) {
			return false;
		}
	}

	return true;
}

static bool
bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
{
	uint32_t crc;
	struct spdk_blob_md_page *page = ctx->page;

	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		return false;
	}

	/* The first page of a sequence should match the blobid. */
	if (page->sequence_num == 0 &&
	    bs_page_to_blobid(ctx->cur_page) != page->id) {
		return false;
	}
	assert(bs_load_cur_extent_page_valid(page) == false);

	return true;
}
static void
bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);

static void
bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	bs_load_complete(ctx);
}

static void
bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
}

static void
bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
}

static void
bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
{
	bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
}

static void
bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
{
	uint64_t num_md_clusters;
	uint64_t i;

	ctx->in_page_chain = false;

	do {
		ctx->page_index++;
	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);

	if (ctx->page_index < ctx->super->md_len) {
		ctx->cur_page = ctx->page_index;
		bs_load_replay_cur_md_page(ctx);
	} else {
		/* Claim all of the clusters used by the metadata */
		num_md_clusters = spdk_divide_round_up(
					  ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
		for (i = 0; i < num_md_clusters; i++) {
			spdk_bit_array_set(ctx->used_clusters, i);
		}
		ctx->bs->num_free_clusters -= num_md_clusters;
		spdk_free(ctx->page);
		bs_load_write_used_md(ctx);
	}
}
static void
bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint32_t page_num;
	uint64_t i;

	if (bserrno != 0) {
		spdk_free(ctx->extent_pages);
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	for (i = 0; i < ctx->num_extent_pages; i++) {
		/* Extent pages are only read when they are referenced within the
		 * chain of md pages. If such a page turns out not to be a valid
		 * extent page, the integrity of the metadata is broken. */
		if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) {
			spdk_free(ctx->extent_pages);
			bs_load_ctx_fail(ctx, -EILSEQ);
			return;
		}

		page_num = ctx->extent_page_num[i];
		spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
		if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) {
			spdk_free(ctx->extent_pages);
			bs_load_ctx_fail(ctx, -EILSEQ);
			return;
		}
	}

	spdk_free(ctx->extent_pages);
	free(ctx->extent_page_num);
	ctx->extent_page_num = NULL;
	ctx->num_extent_pages = 0;

	bs_load_replay_md_chain_cpl(ctx);
}

static void
bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx)
{
	spdk_bs_batch_t *batch;
	uint32_t page;
	uint64_t lba;
	uint64_t i;

	ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0,
					 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->extent_pages) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx);

	for (i = 0; i < ctx->num_extent_pages; i++) {
		page = ctx->extent_page_num[i];
		assert(page < ctx->super->md_len);
		lba = bs_md_page_to_lba(ctx->bs, page);
		bs_batch_read_dev(batch, &ctx->extent_pages[i], lba,
				  bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE));
	}

	bs_batch_close(batch);
}

static void
bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint32_t page_num;
	struct spdk_blob_md_page *page;

	if (bserrno != 0) {
		bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	page_num = ctx->cur_page;
	page = ctx->page;
	if (bs_load_cur_md_page_valid(ctx) == true) {
		if (page->sequence_num == 0 || ctx->in_page_chain == true) {
			bs_claim_md_page(ctx->bs, page_num);
			if (page->sequence_num == 0) {
				SPDK_NOTICELOG("Recover: blob %" PRIu32 "\n", page_num);
				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
			}
			if (bs_load_replay_md_parse_page(ctx, page)) {
				bs_load_ctx_fail(ctx, -EILSEQ);
				return;
			}
			if (page->next != SPDK_INVALID_MD_PAGE) {
				ctx->in_page_chain = true;
				ctx->cur_page = page->next;
				bs_load_replay_cur_md_page(ctx);
				return;
			}
			if (ctx->num_extent_pages != 0) {
				bs_load_replay_extent_pages(ctx);
				return;
			}
		}
	}
	bs_load_replay_md_chain_cpl(ctx);
}

static void
bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
{
	uint64_t lba;

	assert(ctx->cur_page < ctx->super->md_len);
	lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page);
	bs_sequence_read_dev(ctx->seq, ctx->page, lba,
			     bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
			     bs_load_replay_md_cpl, ctx);
}

static void
bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
{
	ctx->page_index = 0;
	ctx->cur_page = 0;
	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->page) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}
	bs_load_replay_cur_md_page(ctx);
}
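/*
 * Replay control flow at a glance (a summary of the functions above, not
 * additional logic):
 *
 *	bs_load_replay_md
 *	  -> bs_load_replay_cur_md_page        read one md page
 *	       -> bs_load_replay_md_cpl        validate, claim, parse
 *	            -> follow page->next              (chained pages of one blob)
 *	            -> bs_load_replay_extent_pages    (referenced extent pages, batched)
 *	            -> bs_load_replay_md_chain_cpl    advance to the next unclaimed page
 *	  ... until page_index reaches super->md_len, after which the md clusters
 *	  are claimed and the rebuilt masks are persisted via bs_load_write_used_md.
 */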
static void
bs_recover(struct spdk_bs_load_ctx *ctx)
{
	int rc;

	SPDK_NOTICELOG("Performing recovery on blobstore\n");
	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
	if (rc < 0) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
	if (rc < 0) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
	if (rc < 0) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len);
	if (rc < 0) {
		bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
	bs_load_replay_md(ctx);
}

static int
bs_parse_super(struct spdk_bs_load_ctx *ctx)
{
	int rc;

	if (ctx->super->size == 0) {
		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	}

	if (ctx->super->io_unit_size == 0) {
		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
	}

	ctx->bs->clean = 1;
	ctx->bs->cluster_sz = ctx->super->cluster_size;
	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
	}
	ctx->bs->io_unit_size = ctx->super->io_unit_size;
	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
	if (rc < 0) {
		return -ENOMEM;
	}
	ctx->bs->md_start = ctx->super->md_start;
	ctx->bs->md_len = ctx->super->md_len;
	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
	if (rc < 0) {
		return -ENOMEM;
	}

	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
	ctx->bs->super_blob = ctx->super->super_blob;
	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));

	return 0;
}
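/*
 * Worked example for the math in bs_parse_super() (illustrative numbers
 * only): a 1 GiB device with 1 MiB clusters and 4 KiB metadata pages gives
 *	total_clusters    = 1 GiB / 1 MiB = 1024
 *	pages_per_cluster = 1 MiB / 4 KiB = 256
 * and with md_start = 3, md_len = 1024 pages:
 *	total_data_clusters = 1024 - ceil((3 + 1024) / 256) = 1024 - 5 = 1019
 */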
"\n", 4474 ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size); 4475 bs_load_ctx_fail(ctx, -EILSEQ); 4476 return; 4477 } 4478 4479 rc = bs_parse_super(ctx); 4480 if (rc < 0) { 4481 bs_load_ctx_fail(ctx, rc); 4482 return; 4483 } 4484 4485 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4486 bs_recover(ctx); 4487 } else { 4488 bs_load_read_used_pages(ctx); 4489 } 4490 } 4491 4492 static inline int 4493 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4494 { 4495 4496 if (!src->opts_size) { 4497 SPDK_ERRLOG("opts_size should not be zero value\n"); 4498 return -1; 4499 } 4500 4501 #define FIELD_OK(field) \ 4502 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4503 4504 #define SET_FIELD(field) \ 4505 if (FIELD_OK(field)) { \ 4506 dst->field = src->field; \ 4507 } \ 4508 4509 SET_FIELD(cluster_sz); 4510 SET_FIELD(num_md_pages); 4511 SET_FIELD(max_md_ops); 4512 SET_FIELD(max_channel_ops); 4513 SET_FIELD(clear_method); 4514 4515 if (FIELD_OK(bstype)) { 4516 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4517 } 4518 SET_FIELD(iter_cb_fn); 4519 SET_FIELD(iter_cb_arg); 4520 SET_FIELD(force_recover); 4521 4522 dst->opts_size = src->opts_size; 4523 4524 /* You should not remove this statement, but need to update the assert statement 4525 * if you add a new field, and also add a corresponding SET_FIELD statement */ 4526 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 72, "Incorrect size"); 4527 4528 #undef FIELD_OK 4529 #undef SET_FIELD 4530 4531 return 0; 4532 } 4533 4534 void 4535 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4536 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4537 { 4538 struct spdk_blob_store *bs; 4539 struct spdk_bs_cpl cpl; 4540 struct spdk_bs_load_ctx *ctx; 4541 struct spdk_bs_opts opts = {}; 4542 int err; 4543 4544 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 4545 4546 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4547 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 4548 dev->destroy(dev); 4549 cb_fn(cb_arg, NULL, -EINVAL); 4550 return; 4551 } 4552 4553 spdk_bs_opts_init(&opts, sizeof(opts)); 4554 if (o) { 4555 if (bs_opts_copy(o, &opts)) { 4556 return; 4557 } 4558 } 4559 4560 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 4561 dev->destroy(dev); 4562 cb_fn(cb_arg, NULL, -EINVAL); 4563 return; 4564 } 4565 4566 err = bs_alloc(dev, &opts, &bs, &ctx); 4567 if (err) { 4568 dev->destroy(dev); 4569 cb_fn(cb_arg, NULL, err); 4570 return; 4571 } 4572 4573 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4574 cpl.u.bs_handle.cb_fn = cb_fn; 4575 cpl.u.bs_handle.cb_arg = cb_arg; 4576 cpl.u.bs_handle.bs = bs; 4577 4578 ctx->seq = bs_sequence_start(bs->md_channel, &cpl); 4579 if (!ctx->seq) { 4580 spdk_free(ctx->super); 4581 free(ctx); 4582 bs_free(bs); 4583 cb_fn(cb_arg, NULL, -ENOMEM); 4584 return; 4585 } 4586 4587 /* Read the super block */ 4588 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 4589 bs_byte_to_lba(bs, sizeof(*ctx->super)), 4590 bs_load_super_cpl, ctx); 4591 } 4592 4593 /* END spdk_bs_load */ 4594 4595 /* START spdk_bs_dump */ 4596 4597 static void 4598 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 4599 { 4600 spdk_free(ctx->super); 4601 4602 /* 4603 * We need to defer calling bs_call_cpl() until after 4604 * dev destruction, so tuck these away for later use. 
/* START spdk_bs_dump */

static void
bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
{
	spdk_free(ctx->super);

	/*
	 * We need to defer calling bs_call_cpl() until after
	 * dev destruction, so tuck these away for later use.
	 */
	ctx->bs->unload_err = bserrno;
	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;

	bs_sequence_finish(seq, 0);
	bs_free(ctx->bs);
	free(ctx);
}

static void
bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
{
	struct spdk_blob_md_descriptor_xattr *desc_xattr;
	uint32_t i;
	const char *type;

	desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;

	if (desc_xattr->length !=
	    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		fprintf(ctx->fp, "Warning: xattr descriptor length mismatch\n");
	}

	memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
	ctx->xattr_name[desc_xattr->name_length] = '\0';
	if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
		type = "XATTR";
	} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
		type = "XATTR_INTERNAL";
	} else {
		assert(false);
		type = "XATTR_?";
	}
	fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name);
	fprintf(ctx->fp, " value = \"");
	ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
			    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
			    desc_xattr->value_length);
	fprintf(ctx->fp, "\"\n");
	for (i = 0; i < desc_xattr->value_length; i++) {
		if (i % 16 == 0) {
			fprintf(ctx->fp, " ");
		}
		fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
		if ((i + 1) % 16 == 0) {
			fprintf(ctx->fp, "\n");
		}
	}
	if (i % 16 != 0) {
		fprintf(ctx->fp, "\n");
	}
}

struct type_flag_desc {
	uint64_t mask;
	uint64_t val;
	const char *name;
};

static void
bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags,
			struct type_flag_desc *desc, size_t numflags)
{
	uint64_t covered = 0;
	size_t i;

	for (i = 0; i < numflags; i++) {
		if ((desc[i].mask & flags) != desc[i].val) {
			continue;
		}
		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name);
		if (desc[i].mask != desc[i].val) {
			fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")",
				desc[i].mask, desc[i].val);
		}
		fprintf(ctx->fp, "\n");
		covered |= desc[i].mask;
	}
	if ((flags & ~covered) != 0) {
		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered);
	}
}

static void
bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
{
	struct spdk_blob_md_descriptor_flags *type_desc;
#define ADD_FLAG(f) { f, f, #f }
#define ADD_MASK_VAL(m, v) { m, v, #v }
	static struct type_flag_desc invalid[] = {
		ADD_FLAG(SPDK_BLOB_THIN_PROV),
		ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR),
		ADD_FLAG(SPDK_BLOB_EXTENT_TABLE),
	};
	static struct type_flag_desc data_ro[] = {
		ADD_FLAG(SPDK_BLOB_READ_ONLY),
	};
	static struct type_flag_desc md_ro[] = {
		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT),
		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE),
		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP),
		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES),
	};
#undef ADD_FLAG
#undef ADD_MASK_VAL

	type_desc = (struct spdk_blob_md_descriptor_flags *)desc;
	fprintf(ctx->fp, "Flags:\n");
	fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags);
	bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid,
				SPDK_COUNTOF(invalid));
	fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags);
	bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro,
				SPDK_COUNTOF(data_ro));
	fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags);
	bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro,
				SPDK_COUNTOF(md_ro));
}
static void
bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
{
	struct spdk_blob_md_descriptor_extent_table *et_desc;
	uint64_t num_extent_pages;
	uint32_t et_idx;

	et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc;
	num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) /
			   sizeof(et_desc->extent_page[0]);

	fprintf(ctx->fp, "Extent table:\n");
	for (et_idx = 0; et_idx < num_extent_pages; et_idx++) {
		if (et_desc->extent_page[et_idx].page_idx == 0) {
			/* Zeroes represent unallocated extent pages. */
			continue;
		}
		fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32
			" at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx,
			et_desc->extent_page[et_idx].num_pages,
			bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx));
	}
}

static void
bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx)
{
	uint32_t page_idx = ctx->cur_page;
	struct spdk_blob_md_page *page = ctx->page;
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	uint32_t crc;

	fprintf(ctx->fp, "=========\n");
	fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx);
	fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx));
	fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id);
	fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num);
	if (page->next == SPDK_INVALID_MD_PAGE) {
		fprintf(ctx->fp, "Next: None\n");
	} else {
		fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next);
	}
	fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)");
	if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) {
		fprintf(ctx->fp, " md");
	}
	if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) {
		fprintf(ctx->fp, " blob");
	}
	fprintf(ctx->fp, "\n");

	crc = blob_md_page_calc_crc(page);
	fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch");

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				if (desc_extent_rle->extents[i].cluster_idx != 0) {
					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
						desc_extent_rle->extents[i].cluster_idx);
				} else {
					fprintf(ctx->fp, "Unallocated Extent - ");
				}
				fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length);
				fprintf(ctx->fp, "\n");
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;

			/* The payload begins with start_cluster_idx; only the bytes
			 * after it are cluster_idx entries. */
			for (i = 0; i < (desc_extent->length - sizeof(desc_extent->start_cluster_idx)) /
			     sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
						desc_extent->cluster_idx[i]);
				} else {
					fprintf(ctx->fp, "Unallocated Extent");
				}
				fprintf(ctx->fp, "\n");
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			bs_dump_print_xattr(ctx, desc);
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			bs_dump_print_xattr(ctx, desc);
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			bs_dump_print_type_flags(ctx, desc);
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			bs_dump_print_extent_table(ctx, desc);
		} else {
			/* Error */
			fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type);
		}
		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}
}
"OK" : "Mismatch"); 4779 4780 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4781 while (cur_desc < sizeof(page->descriptors)) { 4782 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4783 if (desc->length == 0) { 4784 /* If padding and length are 0, this terminates the page */ 4785 break; 4786 } 4787 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4788 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4789 unsigned int i; 4790 4791 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4792 4793 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4794 if (desc_extent_rle->extents[i].cluster_idx != 0) { 4795 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 4796 desc_extent_rle->extents[i].cluster_idx); 4797 } else { 4798 fprintf(ctx->fp, "Unallocated Extent - "); 4799 } 4800 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 4801 fprintf(ctx->fp, "\n"); 4802 } 4803 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4804 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4805 unsigned int i; 4806 4807 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4808 4809 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 4810 if (desc_extent->cluster_idx[i] != 0) { 4811 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 4812 desc_extent->cluster_idx[i]); 4813 } else { 4814 fprintf(ctx->fp, "Unallocated Extent"); 4815 } 4816 fprintf(ctx->fp, "\n"); 4817 } 4818 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4819 bs_dump_print_xattr(ctx, desc); 4820 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4821 bs_dump_print_xattr(ctx, desc); 4822 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4823 bs_dump_print_type_flags(ctx, desc); 4824 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4825 bs_dump_print_extent_table(ctx, desc); 4826 } else { 4827 /* Error */ 4828 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 4829 } 4830 /* Advance to the next descriptor */ 4831 cur_desc += sizeof(*desc) + desc->length; 4832 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4833 break; 4834 } 4835 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4836 } 4837 } 4838 4839 static void 4840 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4841 { 4842 struct spdk_bs_load_ctx *ctx = cb_arg; 4843 4844 if (bserrno != 0) { 4845 bs_dump_finish(seq, ctx, bserrno); 4846 return; 4847 } 4848 4849 if (ctx->page->id != 0) { 4850 bs_dump_print_md_page(ctx); 4851 } 4852 4853 ctx->cur_page++; 4854 4855 if (ctx->cur_page < ctx->super->md_len) { 4856 bs_dump_read_md_page(seq, ctx); 4857 } else { 4858 spdk_free(ctx->page); 4859 bs_dump_finish(seq, ctx, 0); 4860 } 4861 } 4862 4863 static void 4864 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 4865 { 4866 struct spdk_bs_load_ctx *ctx = cb_arg; 4867 uint64_t lba; 4868 4869 assert(ctx->cur_page < ctx->super->md_len); 4870 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 4871 bs_sequence_read_dev(seq, ctx->page, lba, 4872 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4873 bs_dump_read_md_page_cpl, ctx); 4874 } 4875 4876 static void 4877 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4878 { 4879 struct spdk_bs_load_ctx *ctx = cb_arg; 4880 int rc; 4881 4882 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
static void
bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	int rc;

	fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature);
	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(ctx->super->signature)) != 0) {
		fprintf(ctx->fp, "(Mismatch)\n");
		bs_dump_finish(seq, ctx, bserrno);
		return;
	} else {
		fprintf(ctx->fp, "(OK)\n");
	}
	fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version);
	fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc,
		(ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch");
	fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype);
	fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size);
	fprintf(ctx->fp, "Super Blob ID: ");
	if (ctx->super->super_blob == SPDK_BLOBID_INVALID) {
		fprintf(ctx->fp, "(None)\n");
	} else {
		fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob);
	}
	fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean);
	fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start);
	fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len);
	fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start);
	fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len);
	fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start);
	fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len);
	fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start);
	fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);

	ctx->cur_page = 0;
	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->page) {
		bs_dump_finish(seq, ctx, -ENOMEM);
		return;
	}

	rc = bs_parse_super(ctx);
	if (rc < 0) {
		bs_load_ctx_fail(ctx, rc);
		return;
	}

	bs_load_read_used_pages(ctx);
}

void
spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn,
	     spdk_bs_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_cpl cpl;
	struct spdk_bs_load_ctx *ctx;
	struct spdk_bs_opts opts = {};
	int err;

	SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev);

	spdk_bs_opts_init(&opts, sizeof(opts));

	err = bs_alloc(dev, &opts, &bs, &ctx);
	if (err) {
		dev->destroy(dev);
		cb_fn(cb_arg, err);
		return;
	}

	ctx->dumping = true;
	ctx->fp = fp;
	ctx->print_xattr_fn = print_xattr_fn;

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	ctx->seq = bs_sequence_start(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		bs_free(bs);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Read the super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_dump_super_cpl, ctx);
}

/* END spdk_bs_dump */
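/*
 * Illustrative caller of spdk_bs_dump() (kept in a comment, not compiled;
 * "my_bs_dev", print_hex_xattr, and dump_done are hypothetical):
 *
 *	static void
 *	print_hex_xattr(FILE *fp, const char *bstype, const char *name,
 *			const void *value, size_t value_len)
 *	{
 *		// Application-specific pretty-printing of an xattr value.
 *	}
 *
 *	static void
 *	dump_done(void *cb_arg, int bserrno)
 *	{
 *		// The dev has already been destroyed by the dump path.
 *	}
 *
 *	spdk_bs_dump(my_bs_dev, stdout, print_hex_xattr, dump_done, NULL);
 */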
/* START spdk_bs_init */

static void
bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
	spdk_free(ctx->super);
	free(ctx);

	bs_sequence_finish(seq, bserrno);
}

static void
bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	/* Write super block */
	bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
			      bs_init_persist_super_cpl, ctx);
}
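/*
 * Illustrative caller of spdk_bs_init() below (a comment-only sketch, not
 * compiled; "my_bs_dev" and init_done are hypothetical):
 *
 *	static void
 *	init_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		// On success the blobstore is formatted and ready for
 *		// spdk_bs_create_blob() calls on the md thread.
 *	}
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts, sizeof(opts));
 *	opts.cluster_sz = 1024 * 1024;              // 1 MiB clusters
 *	opts.clear_method = BS_CLEAR_WITH_UNMAP;    // trim the data clusters
 *	spdk_bs_init(my_bs_dev, &opts, init_done, NULL);
 */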
void
spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_load_ctx *ctx;
	struct spdk_blob_store *bs;
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;
	spdk_bs_batch_t *batch;
	uint64_t num_md_lba;
	uint64_t num_md_pages;
	uint64_t num_md_clusters;
	uint32_t i;
	struct spdk_bs_opts opts = {};
	int rc;
	uint64_t lba, lba_count;

	SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev);

	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
		SPDK_ERRLOG("unsupported dev block length of %d\n",
			    dev->blocklen);
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, -EINVAL);
		return;
	}

	spdk_bs_opts_init(&opts, sizeof(opts));
	if (o) {
		if (bs_opts_copy(o, &opts)) {
			/* Don't leak the dev or leave the caller waiting on a
			 * completion that would never arrive. */
			dev->destroy(dev);
			cb_fn(cb_arg, NULL, -EINVAL);
			return;
		}
	}

	if (bs_opts_verify(&opts) != 0) {
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, -EINVAL);
		return;
	}

	rc = bs_alloc(dev, &opts, &bs, &ctx);
	if (rc) {
		dev->destroy(dev);
		cb_fn(cb_arg, NULL, rc);
		return;
	}

	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
		/* By default, allocate 1 page per cluster.
		 * Technically, this over-allocates metadata
		 * because more metadata will reduce the number
		 * of usable clusters. This can be addressed with
		 * more complex math in the future.
		 */
		bs->md_len = bs->total_clusters;
	} else {
		bs->md_len = opts.num_md_pages;
	}
	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
	if (rc < 0) {
		spdk_free(ctx->super);
		free(ctx);
		bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
	if (rc < 0) {
		spdk_free(ctx->super);
		free(ctx);
		bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len);
	if (rc < 0) {
		spdk_free(ctx->super);
		free(ctx);
		bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
	       sizeof(ctx->super->signature));
	ctx->super->version = SPDK_BS_VERSION;
	ctx->super->length = sizeof(*ctx->super);
	ctx->super->super_blob = bs->super_blob;
	ctx->super->clean = 0;
	ctx->super->cluster_size = bs->cluster_sz;
	ctx->super->io_unit_size = bs->io_unit_size;
	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));

	/* Calculate how many pages the metadata consumes at the front
	 * of the disk.
	 */

	/* The super block uses 1 page */
	num_md_pages = 1;

	/* The used_md_pages mask requires 1 bit per metadata page, rounded
	 * up to the nearest page, plus a header.
	 */
	ctx->super->used_page_mask_start = num_md_pages;
	ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
					 spdk_divide_round_up(bs->md_len, 8),
					 SPDK_BS_PAGE_SIZE);
	num_md_pages += ctx->super->used_page_mask_len;

	/* The used_clusters mask requires 1 bit per cluster, rounded
	 * up to the nearest page, plus a header.
	 */
	ctx->super->used_cluster_mask_start = num_md_pages;
	ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
					    spdk_divide_round_up(bs->total_clusters, 8),
					    SPDK_BS_PAGE_SIZE);
	num_md_pages += ctx->super->used_cluster_mask_len;

	/* The used_blobids mask requires 1 bit per metadata page, rounded
	 * up to the nearest page, plus a header.
	 */
	ctx->super->used_blobid_mask_start = num_md_pages;
	ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
					   spdk_divide_round_up(bs->md_len, 8),
					   SPDK_BS_PAGE_SIZE);
	num_md_pages += ctx->super->used_blobid_mask_len;

	/* The metadata region size was chosen above */
	ctx->super->md_start = bs->md_start = num_md_pages;
	ctx->super->md_len = bs->md_len;
	num_md_pages += bs->md_len;

	num_md_lba = bs_page_to_lba(bs, num_md_pages);

	ctx->super->size = dev->blockcnt * dev->blocklen;

	ctx->super->crc = blob_md_page_calc_crc(ctx->super);

	num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
	if (num_md_clusters > bs->total_clusters) {
		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, "
			    "please decrease number of pages reserved for metadata "
			    "or increase cluster size.\n");
		spdk_free(ctx->super);
		spdk_bit_array_free(&ctx->used_clusters);
		free(ctx);
		bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}
	/* Claim all of the clusters used by the metadata */
	for (i = 0; i < num_md_clusters; i++) {
		spdk_bit_array_set(ctx->used_clusters, i);
	}

	bs->num_free_clusters -= num_md_clusters;
	bs->total_data_clusters = bs->num_free_clusters;

	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
	cpl.u.bs_handle.cb_fn = cb_fn;
	cpl.u.bs_handle.cb_arg = cb_arg;
	cpl.u.bs_handle.bs = bs;

	seq = bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		spdk_free(ctx->super);
		free(ctx);
		bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx);

	/* Clear metadata space */
	bs_batch_write_zeroes_dev(batch, 0, num_md_lba);

	lba = num_md_lba;
	lba_count = ctx->bs->dev->blockcnt - lba;
	switch (opts.clear_method) {
	case BS_CLEAR_WITH_UNMAP:
		/* Trim data clusters */
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BS_CLEAR_WITH_WRITE_ZEROES:
		/* Write_zeroes to data clusters */
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BS_CLEAR_WITH_NONE:
	default:
		break;
	}

	bs_batch_close(batch);
}

/* END spdk_bs_init */
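/*
 * Resulting on-disk layout after spdk_bs_init(), in metadata pages (a
 * summary of the code above; the numbers are a worked example with
 * md_len = 1024 pages and total_clusters = 1024, not a fixed layout):
 *
 *	page 0              super block                        (1 page)
 *	page 1              used_md_pages mask                 (1 page)
 *	page 2              used_clusters mask                 (1 page)
 *	page 3              used_blobids mask                  (1 page)
 *	pages 4..1027       metadata region (md_start, md_len)
 *
 * e.g. used_page_mask_len = ceil((sizeof(struct spdk_bs_md_mask) +
 * ceil(1024 / 8)) / 4096) = 1 page, and likewise for the other two masks.
 */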
/* START spdk_bs_destroy */

static void
bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;

	/*
	 * We need to defer calling bs_call_cpl() until after
	 * dev destruction, so tuck these away for later use.
	 */
	bs->unload_err = bserrno;
	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;

	bs_sequence_finish(seq, bserrno);

	bs_free(bs);
	free(ctx);
}

void
spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
		void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;
	struct spdk_bs_load_ctx *ctx;

	SPDK_DEBUGLOG(blob, "Destroying blobstore\n");

	if (!RB_EMPTY(&bs->open_blobs)) {
		SPDK_ERRLOG("Blobstore still has open blobs\n");
		cb_fn(cb_arg, -EBUSY);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->bs = bs;

	seq = bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Write zeroes to the super block */
	bs_sequence_write_zeroes_dev(seq,
				     bs_page_to_lba(bs, 0),
				     bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
				     bs_destroy_trim_cpl, ctx);
}

/* END spdk_bs_destroy */

/* START spdk_bs_unload */

static void
bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
{
	spdk_bs_sequence_t *seq = ctx->seq;

	spdk_free(ctx->super);

	/*
	 * We need to defer calling bs_call_cpl() until after
	 * dev destruction, so tuck these away for later use.
	 */
	ctx->bs->unload_err = bserrno;
	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;

	bs_sequence_finish(seq, bserrno);

	bs_free(ctx->bs);
	free(ctx);
}

static void
bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	bs_unload_finish(ctx, bserrno);
}

static void
bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);

	if (bserrno != 0) {
		bs_unload_finish(ctx, bserrno);
		return;
	}

	ctx->super->clean = 1;

	bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx);
}

static void
bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		bs_unload_finish(ctx, bserrno);
		return;
	}

	bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl);
}

static void
bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		bs_unload_finish(ctx, bserrno);
		return;
	}

	bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl);
}
static void
bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		bs_unload_finish(ctx, bserrno);
		return;
	}

	bs_write_used_md(seq, ctx, bs_unload_write_used_pages_cpl);
}

void
spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	struct spdk_bs_load_ctx *ctx;

	SPDK_DEBUGLOG(blob, "Syncing blobstore\n");

	if (!RB_EMPTY(&bs->open_blobs)) {
		SPDK_ERRLOG("Blobstore still has open blobs\n");
		cb_fn(cb_arg, -EBUSY);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->bs = bs;

	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->super) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	ctx->seq = bs_sequence_start(bs->md_channel, &cpl);
	if (!ctx->seq) {
		spdk_free(ctx->super);
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Read super block */
	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_unload_read_super_cpl, ctx);
}

/* END spdk_bs_unload */

/* START spdk_bs_set_super */

struct spdk_bs_set_super_ctx {
	struct spdk_blob_store *bs;
	struct spdk_bs_super_block *super;
};

static void
bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_set_super_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
	}

	spdk_free(ctx->super);

	bs_sequence_finish(seq, bserrno);

	free(ctx);
}

static void
bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_set_super_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Unable to read super block of blobstore\n");
		spdk_free(ctx->super);
		bs_sequence_finish(seq, bserrno);
		free(ctx);
		return;
	}

	bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx);
}

void
spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
		  spdk_bs_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;
	struct spdk_bs_set_super_ctx *ctx;

	SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n");

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->bs = bs;

	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->super) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	cpl.u.bs_basic.cb_fn = cb_fn;
	cpl.u.bs_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		spdk_free(ctx->super);
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	bs->super_blob = blobid;

	/* Read super block */
	bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0),
			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
			     bs_set_super_read_cpl, ctx);
}

/* END spdk_bs_set_super */
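/*
 * Persistence order on unload, for reference (a summary of the callback
 * chain above, not additional behavior):
 *
 *	read super block
 *	  -> write used_md_pages mask     (bs_write_used_md)
 *	  -> write used_blobids mask      (bs_write_used_blobids)
 *	  -> write used_clusters mask     (bs_write_used_clusters)
 *	  -> set super->clean = 1 and write the super block back
 *
 * The clean flag is written last, so a crash anywhere in this chain leaves
 * clean == 0 and forces bs_recover() on the next load.
 */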
void
spdk_bs_get_super(struct spdk_blob_store *bs,
		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	if (bs->super_blob == SPDK_BLOBID_INVALID) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
	} else {
		cb_fn(cb_arg, bs->super_blob, 0);
	}
}

uint64_t
spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
{
	return bs->cluster_sz;
}

uint64_t
spdk_bs_get_page_size(struct spdk_blob_store *bs)
{
	return SPDK_BS_PAGE_SIZE;
}

uint64_t
spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
{
	return bs->io_unit_size;
}

uint64_t
spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
{
	return bs->num_free_clusters;
}

uint64_t
spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
{
	return bs->total_data_clusters;
}

static int
bs_register_md_thread(struct spdk_blob_store *bs)
{
	bs->md_channel = spdk_get_io_channel(bs);
	if (!bs->md_channel) {
		SPDK_ERRLOG("Failed to get IO channel.\n");
		return -1;
	}

	return 0;
}

static int
bs_unregister_md_thread(struct spdk_blob_store *bs)
{
	spdk_put_io_channel(bs->md_channel);

	return 0;
}

spdk_blob_id
spdk_blob_get_id(struct spdk_blob *blob)
{
	assert(blob != NULL);

	return blob->id;
}

uint64_t
spdk_blob_get_num_pages(struct spdk_blob *blob)
{
	assert(blob != NULL);

	return bs_cluster_to_page(blob->bs, blob->active.num_clusters);
}

uint64_t
spdk_blob_get_num_io_units(struct spdk_blob *blob)
{
	assert(blob != NULL);

	return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs);
}

uint64_t
spdk_blob_get_num_clusters(struct spdk_blob *blob)
{
	assert(blob != NULL);

	return blob->active.num_clusters;
}

/* START spdk_bs_create_blob */

static void
bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	uint32_t page_idx = bs_blobid_to_page(blob->id);

	if (bserrno != 0) {
		spdk_bit_array_clear(blob->bs->used_blobids, page_idx);
		bs_release_md_page(blob->bs, page_idx);
	}

	blob_free(blob);

	bs_sequence_finish(seq, bserrno);
}

static int
blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
		bool internal)
{
	uint64_t i;
	size_t value_len = 0;
	int rc;
	const void *value = NULL;

	if (xattrs->count > 0 && xattrs->get_value == NULL) {
		return -EINVAL;
	}
	for (i = 0; i < xattrs->count; i++) {
		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
		if (value == NULL || value_len == 0) {
			return -EINVAL;
		}
		rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
		if (rc < 0) {
			return rc;
		}
	}
	return 0;
}
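/*
 * Shape of the get_value callback consumed by blob_set_xattrs() above (an
 * illustrative comment-only sketch; names are hypothetical):
 *
 *	static void
 *	my_get_xattr_value(void *xattr_ctx, const char *name,
 *			   const void **value, size_t *value_len)
 *	{
 *		// Must set *value and *value_len for every name passed in
 *		// xattrs->names; a NULL value or length 0 makes
 *		// blob_set_xattrs() fail with -EINVAL.
 *		*value = "some value";
 *		*value_len = strlen("some value") + 1;
 *	}
 *
 *	struct spdk_blob_xattr_opts xattrs = {
 *		.count = 1,
 *		.names = (char *[]){ "name" },
 *		.ctx = NULL,
 *		.get_value = my_get_xattr_value,
 *	};
 */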
static void
blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst)
{
#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		dst->field = src->field; \
	} \

	SET_FIELD(num_clusters);
	SET_FIELD(thin_provision);
	SET_FIELD(clear_method);

	if (FIELD_OK(xattrs)) {
		memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs));
	}

	SET_FIELD(use_extent_table);

	dst->opts_size = src->opts_size;

	/* You should not remove this statement, but need to update the assert statement
	 * if you add a new field, and also add a corresponding SET_FIELD statement */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 64, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

static void
bs_create_blob(struct spdk_blob_store *bs,
	       const struct spdk_blob_opts *opts,
	       const struct spdk_blob_xattr_opts *internal_xattrs,
	       spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_blob *blob;
	uint32_t page_idx;
	struct spdk_bs_cpl cpl;
	struct spdk_blob_opts opts_local;
	struct spdk_blob_xattr_opts internal_xattrs_default;
	spdk_bs_sequence_t *seq;
	spdk_blob_id id;
	int rc;

	assert(spdk_get_thread() == bs->md_thread);

	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	if (page_idx == UINT32_MAX) {
		cb_fn(cb_arg, 0, -ENOMEM);
		return;
	}
	spdk_bit_array_set(bs->used_blobids, page_idx);
	bs_claim_md_page(bs, page_idx);

	id = bs_page_to_blobid(page_idx);

	SPDK_DEBUGLOG(blob, "Creating blob with id %" PRIu64 " at page %u\n", id, page_idx);

	blob = blob_alloc(bs, id);
	if (!blob) {
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		bs_release_md_page(bs, page_idx);
		cb_fn(cb_arg, 0, -ENOMEM);
		return;
	}

	spdk_blob_opts_init(&opts_local, sizeof(opts_local));
	if (opts) {
		blob_opts_copy(opts, &opts_local);
	}

	blob->use_extent_table = opts_local.use_extent_table;
	if (blob->use_extent_table) {
		blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE;
	}

	if (!internal_xattrs) {
		blob_xattrs_init(&internal_xattrs_default);
		internal_xattrs = &internal_xattrs_default;
	}

	rc = blob_set_xattrs(blob, &opts_local.xattrs, false);
	if (rc < 0) {
		blob_free(blob);
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		bs_release_md_page(bs, page_idx);
		cb_fn(cb_arg, 0, rc);
		return;
	}

	rc = blob_set_xattrs(blob, internal_xattrs, true);
	if (rc < 0) {
		blob_free(blob);
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		bs_release_md_page(bs, page_idx);
		cb_fn(cb_arg, 0, rc);
		return;
	}

	if (opts_local.thin_provision) {
		blob_set_thin_provision(blob);
	}

	blob_set_clear_method(blob, opts_local.clear_method);

	rc = blob_resize(blob, opts_local.num_clusters);
	if (rc < 0) {
		blob_free(blob);
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		bs_release_md_page(bs, page_idx);
		cb_fn(cb_arg, 0, rc);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	cpl.u.blobid.cb_fn = cb_fn;
	cpl.u.blobid.cb_arg = cb_arg;
	cpl.u.blobid.blobid = blob->id;

	seq = bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		blob_free(blob);
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		bs_release_md_page(bs, page_idx);
		cb_fn(cb_arg, 0, -ENOMEM);
		return;
	}

	blob_persist(seq, blob, bs_create_blob_cpl, blob);
}

void
spdk_bs_create_blob(struct spdk_blob_store *bs,
		    spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
}

void
spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
}

/* END spdk_bs_create_blob */
spdk_blob_opts *opts, 5766 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5767 { 5768 bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 5769 } 5770 5771 /* END spdk_bs_create_blob */ 5772 5773 /* START blob_cleanup */ 5774 5775 struct spdk_clone_snapshot_ctx { 5776 struct spdk_bs_cpl cpl; 5777 int bserrno; 5778 bool frozen; 5779 5780 struct spdk_io_channel *channel; 5781 5782 /* Current cluster for inflate operation */ 5783 uint64_t cluster; 5784 5785 /* For inflation force allocation of all unallocated clusters and remove 5786 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */ 5787 bool allocate_all; 5788 5789 struct { 5790 spdk_blob_id id; 5791 struct spdk_blob *blob; 5792 bool md_ro; 5793 } original; 5794 struct { 5795 spdk_blob_id id; 5796 struct spdk_blob *blob; 5797 } new; 5798 5799 /* xattrs specified for snapshot/clones only. They have no impact on 5800 * the original blobs xattrs. */ 5801 const struct spdk_blob_xattr_opts *xattrs; 5802 }; 5803 5804 static void 5805 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 5806 { 5807 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 5808 struct spdk_bs_cpl *cpl = &ctx->cpl; 5809 5810 if (bserrno != 0) { 5811 if (ctx->bserrno != 0) { 5812 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 5813 } else { 5814 ctx->bserrno = bserrno; 5815 } 5816 } 5817 5818 switch (cpl->type) { 5819 case SPDK_BS_CPL_TYPE_BLOBID: 5820 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 5821 break; 5822 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 5823 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 5824 break; 5825 default: 5826 SPDK_UNREACHABLE(); 5827 break; 5828 } 5829 5830 free(ctx); 5831 } 5832 5833 static void 5834 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 5835 { 5836 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5837 struct spdk_blob *origblob = ctx->original.blob; 5838 5839 if (bserrno != 0) { 5840 if (ctx->bserrno != 0) { 5841 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 5842 } else { 5843 ctx->bserrno = bserrno; 5844 } 5845 } 5846 5847 ctx->original.id = origblob->id; 5848 origblob->locked_operation_in_progress = false; 5849 5850 /* Revert md_ro to original state */ 5851 origblob->md_ro = ctx->original.md_ro; 5852 5853 spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx); 5854 } 5855 5856 static void 5857 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 5858 { 5859 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5860 struct spdk_blob *origblob = ctx->original.blob; 5861 5862 if (bserrno != 0) { 5863 if (ctx->bserrno != 0) { 5864 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 5865 } else { 5866 ctx->bserrno = bserrno; 5867 } 5868 } 5869 5870 if (ctx->frozen) { 5871 /* Unfreeze any outstanding I/O */ 5872 blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx); 5873 } else { 5874 bs_snapshot_unfreeze_cpl(ctx, 0); 5875 } 5876 5877 } 5878 5879 static void 5880 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno) 5881 { 5882 struct spdk_blob *newblob = ctx->new.blob; 5883 5884 if (bserrno != 0) { 5885 if (ctx->bserrno != 0) { 5886 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 5887 } else { 5888 ctx->bserrno = bserrno; 5889 } 5890 } 5891 5892 ctx->new.id = newblob->id; 5893 spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx); 5894 } 5895 5896 /* END blob_cleanup */ 5897 5898 /* START spdk_bs_create_snapshot */ 5899 5900 static void 5901 
bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	uint64_t *cluster_temp;
	uint32_t *extent_page_temp;

	cluster_temp = blob1->active.clusters;
	blob1->active.clusters = blob2->active.clusters;
	blob2->active.clusters = cluster_temp;

	extent_page_temp = blob1->active.extent_pages;
	blob1->active.extent_pages = blob2->active.extent_pages;
	blob2->active.extent_pages = extent_page_temp;
}

static void
bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
	bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	bs_blob_list_add(ctx->original.blob);

	spdk_blob_set_read_only(newblob);

	/* sync snapshot metadata */
	spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
}
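/*
 * The error paths here rely on bs_snapshot_swap_cluster_maps() being its own
 * inverse: the maps are handed to the snapshot before the md sync, and a
 * second swap undoes that hand-off. A minimal sketch of the idea (illustrative
 * only, not additional API; md_sync_failed is a hypothetical condition):
 *
 *	bs_snapshot_swap_cluster_maps(newblob, origblob);	// give clusters to snapshot
 *	if (md_sync_failed) {
 *		bs_snapshot_swap_cluster_maps(newblob, origblob);	// undo on failure
 *	}
 */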
static void
bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		/* return cluster map back to original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);

		/* Newblob md sync failed. Valid clusters are only present in origblob.
		 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred.
		 * Newblob needs to be reverted to the thin_provisioned state it had at creation to properly close. */
		blob_set_thin_provision(newblob);
		assert(spdk_mem_all_zero(newblob->active.clusters,
					 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
		assert(spdk_mem_all_zero(newblob->active.extent_pages,
					 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));

		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
		return;
	}

	/* Set internal xattr for snapshot id */
	bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
	if (bserrno != 0) {
		/* return cluster map back to original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		blob_set_thin_provision(newblob);
		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
		return;
	}

	/* Create new back_bs_dev for snapshot */
	origblob->back_bs_dev = bs_create_blob_bs_dev(newblob);
	if (origblob->back_bs_dev == NULL) {
		/* return cluster map back to original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		blob_set_thin_provision(newblob);
		bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
		return;
	}

	bs_blob_list_remove(origblob);
	origblob->parent_id = newblob->id;
	/* set clone blob as thin provisioned */
	blob_set_thin_provision(origblob);

	bs_blob_list_add(newblob);

	/* sync clone metadata */
	spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx);
}

static void
bs_snapshot_freeze_cpl(void *cb_arg, int rc)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;
	int bserrno;

	if (rc != 0) {
		bs_clone_snapshot_newblob_cleanup(ctx, rc);
		return;
	}

	ctx->frozen = true;

	if (newblob->back_bs_dev) {
		newblob->back_bs_dev->destroy(newblob->back_bs_dev);
	}
	/* set new back_bs_dev for snapshot */
	newblob->back_bs_dev = origblob->back_bs_dev;
	/* Set invalid flags from origblob */
	newblob->invalid_flags = origblob->invalid_flags;

	/* inherit parent from original blob if set */
	newblob->parent_id = origblob->parent_id;
	if (origblob->parent_id != SPDK_BLOBID_INVALID) {
		/* Set internal xattr for snapshot id */
		bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT,
					 &origblob->parent_id, sizeof(spdk_blob_id), true);
		if (bserrno != 0) {
			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
			return;
		}
	}

	/* swap cluster maps */
	bs_snapshot_swap_cluster_maps(newblob, origblob);

	/* Set the clear method on the new blob to match the original.
*/ 6037 blob_set_clear_method(newblob, origblob->clear_method); 6038 6039 /* sync snapshot metadata */ 6040 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6041 } 6042 6043 static void 6044 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6045 { 6046 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6047 struct spdk_blob *origblob = ctx->original.blob; 6048 struct spdk_blob *newblob = _blob; 6049 6050 if (bserrno != 0) { 6051 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6052 return; 6053 } 6054 6055 ctx->new.blob = newblob; 6056 assert(spdk_blob_is_thin_provisioned(newblob)); 6057 assert(spdk_mem_all_zero(newblob->active.clusters, 6058 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6059 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6060 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6061 6062 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6063 } 6064 6065 static void 6066 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6067 { 6068 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6069 struct spdk_blob *origblob = ctx->original.blob; 6070 6071 if (bserrno != 0) { 6072 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6073 return; 6074 } 6075 6076 ctx->new.id = blobid; 6077 ctx->cpl.u.blobid.blobid = blobid; 6078 6079 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6080 } 6081 6082 6083 static void 6084 bs_xattr_snapshot(void *arg, const char *name, 6085 const void **value, size_t *value_len) 6086 { 6087 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6088 6089 struct spdk_blob *blob = (struct spdk_blob *)arg; 6090 *value = &blob->id; 6091 *value_len = sizeof(blob->id); 6092 } 6093 6094 static void 6095 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6096 { 6097 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6098 struct spdk_blob_opts opts; 6099 struct spdk_blob_xattr_opts internal_xattrs; 6100 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6101 6102 if (bserrno != 0) { 6103 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6104 return; 6105 } 6106 6107 ctx->original.blob = _blob; 6108 6109 if (_blob->data_ro || _blob->md_ro) { 6110 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id %" PRIu64 "\n", 6111 _blob->id); 6112 ctx->bserrno = -EINVAL; 6113 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6114 return; 6115 } 6116 6117 if (_blob->locked_operation_in_progress) { 6118 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6119 ctx->bserrno = -EBUSY; 6120 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6121 return; 6122 } 6123 6124 _blob->locked_operation_in_progress = true; 6125 6126 spdk_blob_opts_init(&opts, sizeof(opts)); 6127 blob_xattrs_init(&internal_xattrs); 6128 6129 /* Change the size of new blob to the same as in original blob, 6130 * but do not allocate clusters */ 6131 opts.thin_provision = true; 6132 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6133 opts.use_extent_table = _blob->use_extent_table; 6134 6135 /* If there are any xattrs specified for snapshot, set them now */ 6136 if (ctx->xattrs) { 6137 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6138 } 6139 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6140 internal_xattrs.count = 1; 6141 
internal_xattrs.ctx = _blob;
	internal_xattrs.names = xattrs_names;
	internal_xattrs.get_value = bs_xattr_snapshot;

	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
		       bs_snapshot_newblob_create_cpl, ctx);
}

void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
			     const struct spdk_blob_xattr_opts *snapshot_xattrs,
			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
		return;
	}
	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	ctx->cpl.u.blobid.cb_fn = cb_fn;
	ctx->cpl.u.blobid.cb_arg = cb_arg;
	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
	ctx->bserrno = 0;
	ctx->frozen = false;
	ctx->original.id = blobid;
	ctx->xattrs = snapshot_xattrs;

	spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx);
}
/* END spdk_bs_create_snapshot */
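/*
 * Typical usage of spdk_bs_create_snapshot() (illustrative sketch; snapshot_done
 * and its cb_arg are hypothetical application code, not part of this file):
 *
 *	static void
 *	snapshot_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
 *	{
 *		// snapshot_id is only valid when bserrno == 0
 *	}
 *
 *	spdk_bs_create_snapshot(bs, blobid, NULL, snapshot_done, NULL);
 *
 * The call must be made from the blobstore's metadata thread. While it runs,
 * I/O to the original blob is frozen and its cluster map is handed over to the
 * new read-only snapshot; the original blob becomes a thin-provisioned clone
 * of that snapshot.
 */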
/* START spdk_bs_create_clone */

static void
bs_xattr_clone(void *arg, const char *name,
	       const void **value, size_t *value_len)
{
	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);

	struct spdk_blob *blob = (struct spdk_blob *)arg;
	*value = &blob->id;
	*value_len = sizeof(blob->id);
}

static void
bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *clone = _blob;

	if (bserrno != 0) {
		/* The freshly created clone could not be opened; release the original blob. */
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.blob = clone;
	bs_blob_list_add(clone);

	spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;

	if (bserrno != 0) {
		/* Clone creation failed; release the original blob and report the error. */
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->cpl.u.blobid.blobid = blobid;
	spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx);
}

static void
bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	char *xattr_names[] = { BLOB_SNAPSHOT };

	if (bserrno != 0) {
		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;
	ctx->original.md_ro = _blob->md_ro;

	if (!_blob->data_ro || !_blob->md_ro) {
		SPDK_DEBUGLOG(blob, "Cannot create clone from a blob that is not read-only\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	spdk_blob_opts_init(&opts, sizeof(opts));
	blob_xattrs_init(&internal_xattrs);

	opts.thin_provision = true;
	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
	opts.use_extent_table = _blob->use_extent_table;
	if (ctx->xattrs) {
		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
	}

	/* Set internal xattr BLOB_SNAPSHOT */
	internal_xattrs.count = 1;
	internal_xattrs.ctx = _blob;
	internal_xattrs.names = xattr_names;
	internal_xattrs.get_value = bs_xattr_clone;

	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
		       bs_clone_newblob_create_cpl, ctx);
}

void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
			  const struct spdk_blob_xattr_opts *clone_xattrs,
			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	ctx->cpl.u.blobid.cb_fn = cb_fn;
	ctx->cpl.u.blobid.cb_arg = cb_arg;
	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
	ctx->bserrno = 0;
	ctx->xattrs = clone_xattrs;
	ctx->original.id = blobid;

	spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx);
}

/* END spdk_bs_create_clone */
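/*
 * Two related operations follow; both materialize clusters in a thin-provisioned
 * blob by copying them in, but they differ in scope (summary of the code below):
 *
 *	spdk_bs_inflate_blob()         - allocate all unallocated clusters, then
 *	                                 drop thin provisioning and detach from
 *	                                 any parent
 *	spdk_bs_blob_decouple_parent() - copy only the clusters backed by the
 *	                                 direct parent, then re-parent the blob
 *	                                 to the grandparent (or a zeroes device);
 *	                                 the blob stays thin-provisioned
 */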
/* START spdk_bs_inflate_blob */

static void
bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	/* Temporarily override md_ro flag for MD modification */
	_blob->md_ro = false;

	bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true);
	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	assert(_parent != NULL);

	bs_blob_list_remove(_blob);
	_blob->parent_id = _parent->id;

	_blob->back_bs_dev->destroy(_blob->back_bs_dev);
	_blob->back_bs_dev = bs_create_blob_bs_dev(_parent);
	bs_blob_list_add(_blob);

	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx)
{
	struct spdk_blob *_blob = ctx->original.blob;
	struct spdk_blob *_parent;

	if (ctx->allocate_all) {
		/* remove thin provisioning */
		bs_blob_list_remove(_blob);
		blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
		_blob->back_bs_dev = NULL;
		_blob->parent_id = SPDK_BLOBID_INVALID;
	} else {
		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
			/* We must change the parent of the inflated blob */
			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
					  bs_inflate_blob_set_parent_cpl, ctx);
			return;
		}

		bs_blob_list_remove(_blob);
		blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
		_blob->parent_id = SPDK_BLOBID_INVALID;
		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
		_blob->back_bs_dev = bs_create_zeroes_dev();
	}

	/* Temporarily override md_ro flag for MD modification */
	_blob->md_ro = false;
	_blob->state = SPDK_BLOB_STATE_DIRTY;

	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
}

/* Check if cluster needs allocation */
static inline bool
bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
{
	struct spdk_blob_bs_dev *b;

	assert(blob != NULL);

	if (blob->active.clusters[cluster] != 0) {
		/* Cluster is already allocated */
		return false;
	}

	if (blob->parent_id == SPDK_BLOBID_INVALID) {
		/* Blob has no parent blob */
		return allocate_all;
	}

	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	return (allocate_all || b->blob->active.clusters[cluster] != 0);
}

static void
bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;
	struct spdk_bs_cpl cpl;
	spdk_bs_user_op_t *op;
	uint64_t offset;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
		if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
			break;
		}
	}

	if (ctx->cluster < _blob->active.num_clusters) {
		offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);

		/* We may safely increment a cluster before copying */
		ctx->cluster++;

		/* Use a dummy 0B read as a context for cluster copy */
		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
		cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
		cpl.u.blob_basic.cb_arg = ctx;

		op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
				      NULL, 0, offset, 0);
		if (!op) {
			bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
			return;
		}

		bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
	} else {
		bs_inflate_blob_done(ctx);
	}
}

static void
bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	uint64_t clusters_needed;
	uint64_t i;

	if (bserrno != 0) {
		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;
	ctx->original.md_ro = _blob->md_ro;

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) {
		/* This blob has no parent, so we cannot decouple it. */
		SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
		bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
		return;
	}

	if (spdk_blob_is_thin_provisioned(_blob) == false) {
		/* This is not a thin-provisioned blob. No need to inflate. */
		bs_clone_snapshot_origblob_cleanup(ctx, 0);
		return;
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and another to actually claim them.
	 */
	clusters_needed = 0;
	for (i = 0; i < _blob->active.num_clusters; i++) {
		if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
			clusters_needed++;
		}
	}

	if (clusters_needed > _blob->bs->num_free_clusters) {
		/* Not enough free clusters. Cannot satisfy the request.
*/ 6468 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 6469 return; 6470 } 6471 6472 ctx->cluster = 0; 6473 bs_inflate_blob_touch_next(ctx, 0); 6474 } 6475 6476 static void 6477 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6478 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 6479 { 6480 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6481 6482 if (!ctx) { 6483 cb_fn(cb_arg, -ENOMEM); 6484 return; 6485 } 6486 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 6487 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 6488 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 6489 ctx->bserrno = 0; 6490 ctx->original.id = blobid; 6491 ctx->channel = channel; 6492 ctx->allocate_all = allocate_all; 6493 6494 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 6495 } 6496 6497 void 6498 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6499 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 6500 { 6501 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 6502 } 6503 6504 void 6505 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6506 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 6507 { 6508 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 6509 } 6510 /* END spdk_bs_inflate_blob */ 6511 6512 /* START spdk_blob_resize */ 6513 struct spdk_bs_resize_ctx { 6514 spdk_blob_op_complete cb_fn; 6515 void *cb_arg; 6516 struct spdk_blob *blob; 6517 uint64_t sz; 6518 int rc; 6519 }; 6520 6521 static void 6522 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 6523 { 6524 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 6525 6526 if (rc != 0) { 6527 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 6528 } 6529 6530 if (ctx->rc != 0) { 6531 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 6532 rc = ctx->rc; 6533 } 6534 6535 ctx->blob->locked_operation_in_progress = false; 6536 6537 ctx->cb_fn(ctx->cb_arg, rc); 6538 free(ctx); 6539 } 6540 6541 static void 6542 bs_resize_freeze_cpl(void *cb_arg, int rc) 6543 { 6544 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 6545 6546 if (rc != 0) { 6547 ctx->blob->locked_operation_in_progress = false; 6548 ctx->cb_fn(ctx->cb_arg, rc); 6549 free(ctx); 6550 return; 6551 } 6552 6553 ctx->rc = blob_resize(ctx->blob, ctx->sz); 6554 6555 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 6556 } 6557 6558 void 6559 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 6560 { 6561 struct spdk_bs_resize_ctx *ctx; 6562 6563 blob_verify_md_op(blob); 6564 6565 SPDK_DEBUGLOG(blob, "Resizing blob %" PRIu64 " to %" PRIu64 " clusters\n", blob->id, sz); 6566 6567 if (blob->md_ro) { 6568 cb_fn(cb_arg, -EPERM); 6569 return; 6570 } 6571 6572 if (sz == blob->active.num_clusters) { 6573 cb_fn(cb_arg, 0); 6574 return; 6575 } 6576 6577 if (blob->locked_operation_in_progress) { 6578 cb_fn(cb_arg, -EBUSY); 6579 return; 6580 } 6581 6582 ctx = calloc(1, sizeof(*ctx)); 6583 if (!ctx) { 6584 cb_fn(cb_arg, -ENOMEM); 6585 return; 6586 } 6587 6588 blob->locked_operation_in_progress = true; 6589 ctx->cb_fn = cb_fn; 6590 ctx->cb_arg = cb_arg; 6591 ctx->blob = blob; 6592 ctx->sz = sz; 6593 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 6594 } 6595 6596 /* END spdk_blob_resize */ 6597 6598 6599 /* START spdk_bs_delete_blob */ 6600 6601 static void 6602 bs_delete_close_cpl(void *cb_arg, int bserrno) 6603 { 6604 spdk_bs_sequence_t *seq = 
cb_arg; 6605 6606 bs_sequence_finish(seq, bserrno); 6607 } 6608 6609 static void 6610 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6611 { 6612 struct spdk_blob *blob = cb_arg; 6613 6614 if (bserrno != 0) { 6615 /* 6616 * We already removed this blob from the blobstore tailq, so 6617 * we need to free it here since this is the last reference 6618 * to it. 6619 */ 6620 blob_free(blob); 6621 bs_delete_close_cpl(seq, bserrno); 6622 return; 6623 } 6624 6625 /* 6626 * This will immediately decrement the ref_count and call 6627 * the completion routine since the metadata state is clean. 6628 * By calling spdk_blob_close, we reduce the number of call 6629 * points into code that touches the blob->open_ref count 6630 * and the blobstore's blob list. 6631 */ 6632 spdk_blob_close(blob, bs_delete_close_cpl, seq); 6633 } 6634 6635 struct delete_snapshot_ctx { 6636 struct spdk_blob_list *parent_snapshot_entry; 6637 struct spdk_blob *snapshot; 6638 bool snapshot_md_ro; 6639 struct spdk_blob *clone; 6640 bool clone_md_ro; 6641 spdk_blob_op_with_handle_complete cb_fn; 6642 void *cb_arg; 6643 int bserrno; 6644 uint32_t next_extent_page; 6645 }; 6646 6647 static void 6648 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 6649 { 6650 struct delete_snapshot_ctx *ctx = cb_arg; 6651 6652 if (bserrno != 0) { 6653 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 6654 } 6655 6656 assert(ctx != NULL); 6657 6658 if (bserrno != 0 && ctx->bserrno == 0) { 6659 ctx->bserrno = bserrno; 6660 } 6661 6662 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 6663 free(ctx); 6664 } 6665 6666 static void 6667 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 6668 { 6669 struct delete_snapshot_ctx *ctx = cb_arg; 6670 6671 if (bserrno != 0) { 6672 ctx->bserrno = bserrno; 6673 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 6674 } 6675 6676 if (ctx->bserrno != 0) { 6677 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 6678 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 6679 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 6680 } 6681 6682 ctx->snapshot->locked_operation_in_progress = false; 6683 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 6684 6685 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 6686 } 6687 6688 static void 6689 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 6690 { 6691 struct delete_snapshot_ctx *ctx = cb_arg; 6692 6693 ctx->clone->locked_operation_in_progress = false; 6694 ctx->clone->md_ro = ctx->clone_md_ro; 6695 6696 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 6697 } 6698 6699 static void 6700 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6701 { 6702 struct delete_snapshot_ctx *ctx = cb_arg; 6703 6704 if (bserrno) { 6705 ctx->bserrno = bserrno; 6706 delete_snapshot_cleanup_clone(ctx, 0); 6707 return; 6708 } 6709 6710 ctx->clone->locked_operation_in_progress = false; 6711 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 6712 } 6713 6714 static void 6715 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 6716 { 6717 struct delete_snapshot_ctx *ctx = cb_arg; 6718 struct spdk_blob_list *parent_snapshot_entry = NULL; 6719 struct spdk_blob_list *snapshot_entry = NULL; 6720 struct spdk_blob_list *clone_entry = NULL; 6721 struct spdk_blob_list *snapshot_clone_entry = NULL; 6722 6723 if (bserrno) { 6724 SPDK_ERRLOG("Failed to sync MD on blob\n"); 6725 ctx->bserrno = bserrno; 6726 delete_snapshot_cleanup_clone(ctx, 0); 
6727 return; 6728 } 6729 6730 /* Get snapshot entry for the snapshot we want to remove */ 6731 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 6732 6733 assert(snapshot_entry != NULL); 6734 6735 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 6736 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 6737 assert(clone_entry != NULL); 6738 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 6739 snapshot_entry->clone_count--; 6740 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 6741 6742 if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) { 6743 /* This snapshot is at the same time a clone of another snapshot - we need to 6744 * update parent snapshot (remove current clone, add new one inherited from 6745 * the snapshot that is being removed) */ 6746 6747 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 6748 * snapshot that we are removing */ 6749 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 6750 &snapshot_clone_entry); 6751 6752 /* Switch clone entry in parent snapshot */ 6753 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 6754 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 6755 free(snapshot_clone_entry); 6756 } else { 6757 /* No parent snapshot - just remove clone entry */ 6758 free(clone_entry); 6759 } 6760 6761 /* Restore md_ro flags */ 6762 ctx->clone->md_ro = ctx->clone_md_ro; 6763 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 6764 6765 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 6766 } 6767 6768 static void 6769 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 6770 { 6771 struct delete_snapshot_ctx *ctx = cb_arg; 6772 uint64_t i; 6773 6774 ctx->snapshot->md_ro = false; 6775 6776 if (bserrno) { 6777 SPDK_ERRLOG("Failed to sync MD on clone\n"); 6778 ctx->bserrno = bserrno; 6779 6780 /* Restore snapshot to previous state */ 6781 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 6782 if (bserrno != 0) { 6783 delete_snapshot_cleanup_clone(ctx, bserrno); 6784 return; 6785 } 6786 6787 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 6788 return; 6789 } 6790 6791 /* Clear cluster map entries for snapshot */ 6792 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 6793 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 6794 ctx->snapshot->active.clusters[i] = 0; 6795 } 6796 } 6797 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 6798 i < ctx->clone->active.num_extent_pages; i++) { 6799 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 6800 ctx->snapshot->active.extent_pages[i] = 0; 6801 } 6802 } 6803 6804 blob_set_thin_provision(ctx->snapshot); 6805 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 6806 6807 if (ctx->parent_snapshot_entry != NULL) { 6808 ctx->snapshot->back_bs_dev = NULL; 6809 } 6810 6811 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 6812 } 6813 6814 static void 6815 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 6816 { 6817 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 6818 ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev); 6819 6820 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... 
*/ 6821 if (ctx->parent_snapshot_entry != NULL) { 6822 /* ...to parent snapshot */ 6823 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 6824 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 6825 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 6826 sizeof(spdk_blob_id), 6827 true); 6828 } else { 6829 /* ...to blobid invalid and zeroes dev */ 6830 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 6831 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 6832 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 6833 } 6834 6835 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 6836 } 6837 6838 static void 6839 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 6840 { 6841 struct delete_snapshot_ctx *ctx = cb_arg; 6842 uint32_t *extent_page; 6843 uint64_t i; 6844 6845 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 6846 i < ctx->clone->active.num_extent_pages; i++) { 6847 if (ctx->snapshot->active.extent_pages[i] == 0) { 6848 /* No extent page to use from snapshot */ 6849 continue; 6850 } 6851 6852 extent_page = &ctx->clone->active.extent_pages[i]; 6853 if (*extent_page == 0) { 6854 /* Copy extent page from snapshot when clone did not have a matching one */ 6855 *extent_page = ctx->snapshot->active.extent_pages[i]; 6856 continue; 6857 } 6858 6859 /* Clone and snapshot both contain partially filled matching extent pages. 6860 * Update the clone extent page in place with cluster map containing the mix of both. */ 6861 ctx->next_extent_page = i + 1; 6862 6863 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, 6864 delete_snapshot_update_extent_pages, ctx); 6865 return; 6866 } 6867 delete_snapshot_update_extent_pages_cpl(ctx); 6868 } 6869 6870 static void 6871 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 6872 { 6873 struct delete_snapshot_ctx *ctx = cb_arg; 6874 uint64_t i; 6875 6876 /* Temporarily override md_ro flag for clone for MD modification */ 6877 ctx->clone_md_ro = ctx->clone->md_ro; 6878 ctx->clone->md_ro = false; 6879 6880 if (bserrno) { 6881 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 6882 ctx->bserrno = bserrno; 6883 delete_snapshot_cleanup_clone(ctx, 0); 6884 return; 6885 } 6886 6887 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 6888 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 6889 if (ctx->clone->active.clusters[i] == 0) { 6890 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 6891 } 6892 } 6893 ctx->next_extent_page = 0; 6894 delete_snapshot_update_extent_pages(ctx, 0); 6895 } 6896 6897 static void 6898 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 6899 { 6900 struct delete_snapshot_ctx *ctx = cb_arg; 6901 6902 if (bserrno) { 6903 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 6904 ctx->bserrno = bserrno; 6905 delete_snapshot_cleanup_clone(ctx, 0); 6906 return; 6907 } 6908 6909 /* Temporarily override md_ro flag for snapshot for MD modification */ 6910 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 6911 ctx->snapshot->md_ro = false; 6912 6913 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 6914 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 6915 sizeof(spdk_blob_id), true); 6916 if (ctx->bserrno != 0) { 6917 delete_snapshot_cleanup_clone(ctx, 0); 6918 return; 6919 } 6920 6921 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, 
ctx); 6922 } 6923 6924 static void 6925 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 6926 { 6927 struct delete_snapshot_ctx *ctx = cb_arg; 6928 6929 if (bserrno) { 6930 SPDK_ERRLOG("Failed to open clone\n"); 6931 ctx->bserrno = bserrno; 6932 delete_snapshot_cleanup_snapshot(ctx, 0); 6933 return; 6934 } 6935 6936 ctx->clone = clone; 6937 6938 if (clone->locked_operation_in_progress) { 6939 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 6940 ctx->bserrno = -EBUSY; 6941 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 6942 return; 6943 } 6944 6945 clone->locked_operation_in_progress = true; 6946 6947 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 6948 } 6949 6950 static void 6951 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 6952 { 6953 struct spdk_blob_list *snapshot_entry = NULL; 6954 struct spdk_blob_list *clone_entry = NULL; 6955 struct spdk_blob_list *snapshot_clone_entry = NULL; 6956 6957 /* Get snapshot entry for the snapshot we want to remove */ 6958 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 6959 6960 assert(snapshot_entry != NULL); 6961 6962 /* Get clone of the snapshot (at this point there can be only one clone) */ 6963 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 6964 assert(snapshot_entry->clone_count == 1); 6965 assert(clone_entry != NULL); 6966 6967 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 6968 * snapshot that we are removing */ 6969 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 6970 &snapshot_clone_entry); 6971 6972 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 6973 } 6974 6975 static void 6976 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 6977 { 6978 spdk_bs_sequence_t *seq = cb_arg; 6979 struct spdk_blob_list *snapshot_entry = NULL; 6980 uint32_t page_num; 6981 6982 if (bserrno) { 6983 SPDK_ERRLOG("Failed to remove blob\n"); 6984 bs_sequence_finish(seq, bserrno); 6985 return; 6986 } 6987 6988 /* Remove snapshot from the list */ 6989 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 6990 if (snapshot_entry != NULL) { 6991 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 6992 free(snapshot_entry); 6993 } 6994 6995 page_num = bs_blobid_to_page(blob->id); 6996 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 6997 blob->state = SPDK_BLOB_STATE_DIRTY; 6998 blob->active.num_pages = 0; 6999 blob_resize(blob, 0); 7000 7001 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 7002 } 7003 7004 static int 7005 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 7006 { 7007 struct spdk_blob_list *snapshot_entry = NULL; 7008 struct spdk_blob_list *clone_entry = NULL; 7009 struct spdk_blob *clone = NULL; 7010 bool has_one_clone = false; 7011 7012 /* Check if this is a snapshot with clones */ 7013 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 7014 if (snapshot_entry != NULL) { 7015 if (snapshot_entry->clone_count > 1) { 7016 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 7017 return -EBUSY; 7018 } else if (snapshot_entry->clone_count == 1) { 7019 has_one_clone = true; 7020 } 7021 } 7022 7023 /* Check if someone has this blob open (besides this delete context): 7024 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 7025 * - open_ref <= 2 && has_one_clone = true - clone is 
holding snapshot 7026 * and that is ok, because we will update it accordingly */ 7027 if (blob->open_ref <= 2 && has_one_clone) { 7028 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7029 assert(clone_entry != NULL); 7030 clone = blob_lookup(blob->bs, clone_entry->id); 7031 7032 if (blob->open_ref == 2 && clone == NULL) { 7033 /* Clone is closed and someone else opened this blob */ 7034 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7035 return -EBUSY; 7036 } 7037 7038 *update_clone = true; 7039 return 0; 7040 } 7041 7042 if (blob->open_ref > 1) { 7043 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7044 return -EBUSY; 7045 } 7046 7047 assert(has_one_clone == false); 7048 *update_clone = false; 7049 return 0; 7050 } 7051 7052 static void 7053 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 7054 { 7055 spdk_bs_sequence_t *seq = cb_arg; 7056 7057 bs_sequence_finish(seq, -ENOMEM); 7058 } 7059 7060 static void 7061 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7062 { 7063 spdk_bs_sequence_t *seq = cb_arg; 7064 struct delete_snapshot_ctx *ctx; 7065 bool update_clone = false; 7066 7067 if (bserrno != 0) { 7068 bs_sequence_finish(seq, bserrno); 7069 return; 7070 } 7071 7072 blob_verify_md_op(blob); 7073 7074 ctx = calloc(1, sizeof(*ctx)); 7075 if (ctx == NULL) { 7076 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 7077 return; 7078 } 7079 7080 ctx->snapshot = blob; 7081 ctx->cb_fn = bs_delete_blob_finish; 7082 ctx->cb_arg = seq; 7083 7084 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 7085 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 7086 if (ctx->bserrno) { 7087 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7088 return; 7089 } 7090 7091 if (blob->locked_operation_in_progress) { 7092 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 7093 ctx->bserrno = -EBUSY; 7094 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7095 return; 7096 } 7097 7098 blob->locked_operation_in_progress = true; 7099 7100 /* 7101 * Remove the blob from the blob_store list now, to ensure it does not 7102 * get returned after this point by blob_lookup(). 
 */
	spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
	RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);

	if (update_clone) {
		/* This blob is a snapshot with active clone - update clone first */
		update_clone_on_snapshot_deletion(blob, ctx);
	} else {
		/* This blob does not have any clones - just remove it */
		bs_blob_list_remove(blob);
		bs_delete_blob_finish(seq, blob, 0);
		free(ctx);
	}
}

void
spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
		    spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	SPDK_DEBUGLOG(blob, "Deleting blob %" PRIu64 "\n", blobid);

	assert(spdk_get_thread() == bs->md_thread);

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq);
}

/* END spdk_bs_delete_blob */

/* START spdk_bs_open_blob */

static void
bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	struct spdk_blob *existing;

	if (bserrno != 0) {
		blob_free(blob);
		seq->cpl.u.blob_handle.blob = NULL;
		bs_sequence_finish(seq, bserrno);
		return;
	}

	existing = blob_lookup(blob->bs, blob->id);
	if (existing) {
		blob_free(blob);
		existing->open_ref++;
		seq->cpl.u.blob_handle.blob = existing;
		bs_sequence_finish(seq, 0);
		return;
	}

	blob->open_ref++;

	spdk_bit_array_set(blob->bs->open_blobids, blob->id);
	RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob);

	bs_sequence_finish(seq, bserrno);
}

static inline void
blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst)
{
#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		dst->field = src->field; \
	} \

	SET_FIELD(clear_method);

	dst->opts_size = src->opts_size;

	/* You should not remove this statement, but need to update the assert statement
	 * if you add a new field, and also add a corresponding SET_FIELD statement */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 16, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

static void
bs_open_blob(struct spdk_blob_store *bs,
	     spdk_blob_id blobid,
	     struct spdk_blob_open_opts *opts,
	     spdk_blob_op_with_handle_complete cb_fn,
	     void *cb_arg)
{
	struct spdk_blob *blob;
	struct spdk_bs_cpl cpl;
	struct spdk_blob_open_opts opts_local;
	spdk_bs_sequence_t *seq;
	uint32_t page_num;

	SPDK_DEBUGLOG(blob, "Opening blob %" PRIu64 "\n", blobid);
	assert(spdk_get_thread() == bs->md_thread);

	page_num = bs_blobid_to_page(blobid);
	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
		/* Invalid blobid */
		cb_fn(cb_arg, NULL, -ENOENT);
		return;
	}

	blob = blob_lookup(bs, blobid);
	if (blob) {
		blob->open_ref++;
		cb_fn(cb_arg, blob, 0);
		return;
	}

	blob = blob_alloc(bs, blobid);
	if
(!blob) { 7231 cb_fn(cb_arg, NULL, -ENOMEM); 7232 return; 7233 } 7234 7235 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 7236 if (opts) { 7237 blob_open_opts_copy(opts, &opts_local); 7238 } 7239 7240 blob->clear_method = opts_local.clear_method; 7241 7242 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 7243 cpl.u.blob_handle.cb_fn = cb_fn; 7244 cpl.u.blob_handle.cb_arg = cb_arg; 7245 cpl.u.blob_handle.blob = blob; 7246 7247 seq = bs_sequence_start(bs->md_channel, &cpl); 7248 if (!seq) { 7249 blob_free(blob); 7250 cb_fn(cb_arg, NULL, -ENOMEM); 7251 return; 7252 } 7253 7254 blob_load(seq, blob, bs_open_blob_cpl, blob); 7255 } 7256 7257 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 7258 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7259 { 7260 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 7261 } 7262 7263 void spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 7264 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7265 { 7266 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 7267 } 7268 7269 /* END spdk_bs_open_blob */ 7270 7271 /* START spdk_blob_set_read_only */ 7272 int spdk_blob_set_read_only(struct spdk_blob *blob) 7273 { 7274 blob_verify_md_op(blob); 7275 7276 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 7277 7278 blob->state = SPDK_BLOB_STATE_DIRTY; 7279 return 0; 7280 } 7281 /* END spdk_blob_set_read_only */ 7282 7283 /* START spdk_blob_sync_md */ 7284 7285 static void 7286 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7287 { 7288 struct spdk_blob *blob = cb_arg; 7289 7290 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 7291 blob->data_ro = true; 7292 blob->md_ro = true; 7293 } 7294 7295 bs_sequence_finish(seq, bserrno); 7296 } 7297 7298 static void 7299 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7300 { 7301 struct spdk_bs_cpl cpl; 7302 spdk_bs_sequence_t *seq; 7303 7304 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7305 cpl.u.blob_basic.cb_fn = cb_fn; 7306 cpl.u.blob_basic.cb_arg = cb_arg; 7307 7308 seq = bs_sequence_start(blob->bs->md_channel, &cpl); 7309 if (!seq) { 7310 cb_fn(cb_arg, -ENOMEM); 7311 return; 7312 } 7313 7314 blob_persist(seq, blob, blob_sync_md_cpl, blob); 7315 } 7316 7317 void 7318 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7319 { 7320 blob_verify_md_op(blob); 7321 7322 SPDK_DEBUGLOG(blob, "Syncing blob %" PRIu64 "\n", blob->id); 7323 7324 if (blob->md_ro) { 7325 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 7326 cb_fn(cb_arg, 0); 7327 return; 7328 } 7329 7330 blob_sync_md(blob, cb_fn, cb_arg); 7331 } 7332 7333 /* END spdk_blob_sync_md */ 7334 7335 struct spdk_blob_insert_cluster_ctx { 7336 struct spdk_thread *thread; 7337 struct spdk_blob *blob; 7338 uint32_t cluster_num; /* cluster index in blob */ 7339 uint32_t cluster; /* cluster on disk */ 7340 uint32_t extent_page; /* extent page on disk */ 7341 int rc; 7342 spdk_blob_op_complete cb_fn; 7343 void *cb_arg; 7344 }; 7345 7346 static void 7347 blob_insert_cluster_msg_cpl(void *arg) 7348 { 7349 struct spdk_blob_insert_cluster_ctx *ctx = arg; 7350 7351 ctx->cb_fn(ctx->cb_arg, ctx->rc); 7352 free(ctx); 7353 } 7354 7355 static void 7356 blob_insert_cluster_msg_cb(void *arg, int bserrno) 7357 { 7358 struct spdk_blob_insert_cluster_ctx *ctx = arg; 7359 7360 ctx->rc = bserrno; 7361 spdk_thread_send_msg(ctx->thread, blob_insert_cluster_msg_cpl, ctx); 7362 } 7363 7364 static void 7365 blob_insert_new_ep_cb(void 
*arg, int bserrno)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;
	uint32_t *extent_page;

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
	*extent_page = ctx->extent_page;
	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
	blob_sync_md(ctx->blob, blob_insert_cluster_msg_cb, ctx);
}

static void
blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_md_page *page = cb_arg;

	bs_sequence_finish(seq, bserrno);
	spdk_free(page);
}

static void
blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	spdk_bs_sequence_t *seq;
	struct spdk_bs_cpl cpl;
	struct spdk_blob_md_page *page = NULL;
	uint32_t page_count = 0;
	int rc;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	rc = blob_serialize_add_page(blob, &page, &page_count, &page);
	if (rc < 0) {
		bs_sequence_finish(seq, rc);
		return;
	}

	blob_serialize_extent_page(blob, cluster_num, page);

	page->crc = blob_md_page_calc_crc(page);

	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);

	bs_sequence_write_dev(seq, page, bs_md_page_to_lba(blob->bs, extent),
			      bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
			      blob_persist_extent_page_cpl, page);
}

static void
blob_insert_cluster_msg(void *arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;
	uint32_t *extent_page;

	ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
	if (ctx->rc != 0) {
		spdk_thread_send_msg(ctx->thread, blob_insert_cluster_msg_cpl, ctx);
		return;
	}

	if (ctx->blob->use_extent_table == false) {
		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
		blob_sync_md(ctx->blob, blob_insert_cluster_msg_cb, ctx);
		return;
	}

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
	if (*extent_page == 0) {
		/* Extent page requires allocation.
		 * It was already claimed in the used_md_pages map and placed in ctx. */
		assert(ctx->extent_page != 0);
		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num,
				       blob_insert_new_ep_cb, ctx);
	} else {
		/* It is possible for the original thread to have allocated an extent page for
		 * a different cluster in the same extent page. In such a case, proceed with
		 * updating the existing extent page, but release the additional one. */
		if (ctx->extent_page != 0) {
			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
			bs_release_md_page(ctx->blob->bs, ctx->extent_page);
			ctx->extent_page = 0;
		}
		/* Extent page already allocated.
		 * Every cluster allocation requires just an update of a single extent page. */
		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num,
				       blob_insert_cluster_msg_cb, ctx);
	}
}
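/*
 * Cluster insertion is funneled to the metadata thread by the helper below;
 * the thread that allocated the cluster sends a message and gets the result
 * back on its own thread. Rough shape of the round trip (illustrative only):
 *
 *	I/O thread:  blob_insert_cluster_on_md_thread(blob, num, cluster, ep, cb, arg)
 *	md thread:   blob_insert_cluster_msg() - update the cluster map, then
 *	             persist the extent page (or do a full md sync when the
 *	             extent table is not used)
 *	I/O thread:  cb(arg, rc) delivered via blob_insert_cluster_msg_cpl()
 */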
static void
blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
				 uint64_t cluster, uint32_t extent_page, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->thread = spdk_get_thread();
	ctx->blob = blob;
	ctx->cluster_num = cluster_num;
	ctx->cluster = cluster;
	ctx->extent_page = extent_page;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
}

/* START spdk_blob_close */

static void
blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno == 0) {
		blob->open_ref--;
		if (blob->open_ref == 0) {
			/*
			 * Blobs with active.num_pages == 0 are deleted blobs.
			 * These blobs are removed from the blob_store list
			 * when the deletion process starts - so don't try to
			 * remove them again.
			 */
			if (blob->active.num_pages > 0) {
				spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
				RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
			}
			blob_free(blob);
		}
	}

	bs_sequence_finish(seq, bserrno);
}

void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	blob_verify_md_op(blob);

	SPDK_DEBUGLOG(blob, "Closing blob %" PRIu64 "\n", blob->id);

	if (blob->open_ref == 0) {
		cb_fn(cb_arg, -EBADF);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Sync metadata */
	blob_persist(seq, blob, blob_close_cpl, blob);
}

/* END spdk_blob_close */

struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
{
	return spdk_get_io_channel(bs);
}

void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
{
	spdk_put_io_channel(channel);
}

void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_UNMAP);
}

void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
			       uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_WRITE_ZEROES);
}

void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
			void *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
			       SPDK_BLOB_WRITE);
}

void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       void *payload, uint64_t
offset, uint64_t length, 7578 spdk_blob_op_complete cb_fn, void *cb_arg) 7579 { 7580 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 7581 SPDK_BLOB_READ); 7582 } 7583 7584 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 7585 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 7586 spdk_blob_op_complete cb_fn, void *cb_arg) 7587 { 7588 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 7589 } 7590 7591 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 7592 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 7593 spdk_blob_op_complete cb_fn, void *cb_arg) 7594 { 7595 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 7596 } 7597 7598 void 7599 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 7600 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 7601 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 7602 { 7603 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 7604 io_opts); 7605 } 7606 7607 void 7608 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 7609 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 7610 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 7611 { 7612 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 7613 io_opts); 7614 } 7615 7616 struct spdk_bs_iter_ctx { 7617 int64_t page_num; 7618 struct spdk_blob_store *bs; 7619 7620 spdk_blob_op_with_handle_complete cb_fn; 7621 void *cb_arg; 7622 }; 7623 7624 static void 7625 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 7626 { 7627 struct spdk_bs_iter_ctx *ctx = cb_arg; 7628 struct spdk_blob_store *bs = ctx->bs; 7629 spdk_blob_id id; 7630 7631 if (bserrno == 0) { 7632 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 7633 free(ctx); 7634 return; 7635 } 7636 7637 ctx->page_num++; 7638 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 7639 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 7640 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 7641 free(ctx); 7642 return; 7643 } 7644 7645 id = bs_page_to_blobid(ctx->page_num); 7646 7647 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 7648 } 7649 7650 void 7651 spdk_bs_iter_first(struct spdk_blob_store *bs, 7652 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7653 { 7654 struct spdk_bs_iter_ctx *ctx; 7655 7656 ctx = calloc(1, sizeof(*ctx)); 7657 if (!ctx) { 7658 cb_fn(cb_arg, NULL, -ENOMEM); 7659 return; 7660 } 7661 7662 ctx->page_num = -1; 7663 ctx->bs = bs; 7664 ctx->cb_fn = cb_fn; 7665 ctx->cb_arg = cb_arg; 7666 7667 bs_iter_cpl(ctx, NULL, -1); 7668 } 7669 7670 static void 7671 bs_iter_close_cpl(void *cb_arg, int bserrno) 7672 { 7673 struct spdk_bs_iter_ctx *ctx = cb_arg; 7674 7675 bs_iter_cpl(ctx, NULL, -1); 7676 } 7677 7678 void 7679 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 7680 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7681 { 7682 struct spdk_bs_iter_ctx *ctx; 7683 7684 assert(blob != NULL); 7685 7686 ctx = calloc(1, sizeof(*ctx)); 7687 if (!ctx) { 7688 cb_fn(cb_arg, NULL, -ENOMEM); 7689 return; 7690 } 7691 7692 ctx->page_num = bs_blobid_to_page(blob->id); 7693 ctx->bs = bs; 7694 ctx->cb_fn = cb_fn; 7695 ctx->cb_arg = cb_arg; 7696 7697 /* 

struct spdk_bs_iter_ctx {
	int64_t page_num;
	struct spdk_blob_store *bs;

	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
};

static void
bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	spdk_blob_id id;

	if (bserrno == 0) {
		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
		free(ctx);
		return;
	}

	ctx->page_num++;
	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
		free(ctx);
		return;
	}

	id = bs_page_to_blobid(ctx->page_num);

	spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx);
}

void
spdk_bs_iter_first(struct spdk_blob_store *bs,
		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = -1;
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	bs_iter_cpl(ctx, NULL, -1);
}

static void
bs_iter_close_cpl(void *cb_arg, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;

	bs_iter_cpl(ctx, NULL, -1);
}

void
spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = bs_blobid_to_page(blob->id);
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Close the existing blob */
	spdk_blob_close(blob, bs_iter_close_cpl, ctx);
}

static int
blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
	       uint16_t value_len, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;
	size_t desc_size;
	void *tmp;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}

	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page %zu\n", name,
			      desc_size, SPDK_BS_MAX_DESC_SIZE);
		return -ENOMEM;
	}

	if (internal) {
		xattrs = &blob->xattrs_internal;
		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
	} else {
		xattrs = &blob->xattrs;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}

			free(xattr->value);
			xattr->value_len = value_len;
			xattr->value = tmp;
			memcpy(xattr->value, value, value_len);

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}

	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(xattrs, xattr, link);

	blob->state = SPDK_BLOB_STATE_DIRTY;

	return 0;
}

int
spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		    uint16_t value_len)
{
	return blob_set_xattr(blob, name, value, value_len, false);
}

static int
blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}
	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			TAILQ_REMOVE(xattrs, xattr, link);
			free(xattr->value);
			free(xattr->name);
			free(xattr);

			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
			}
			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	return -ENOENT;
}

int
spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
{
	return blob_remove_xattr(blob, name, false);
}
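
/*
 * Usage sketch (illustrative only; example_* names are hypothetical): walking
 * every blob in a blobstore. spdk_bs_iter_next() closes the blob it is handed
 * before opening the following one, so the callback must not touch that blob
 * after calling it. Iteration ends with -ENOENT.
 */
static void
example_iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	struct spdk_blob_store *bs = cb_arg;

	if (bserrno == -ENOENT) {
		/* Reached the end of the blobstore. */
		return;
	} else if (bserrno != 0) {
		SPDK_ERRLOG("blob iteration failed: %d\n", bserrno);
		return;
	}

	SPDK_NOTICELOG("visiting blob %" PRIu64 "\n", spdk_blob_get_id(blob));

	spdk_bs_iter_next(bs, blob, example_iter_cb, bs);
}

static void
example_iterate_blobs(struct spdk_blob_store *bs)
{
	spdk_bs_iter_first(bs, example_iter_cb, bs);
}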

static int
blob_get_xattr_value(struct spdk_blob *blob, const char *name,
		     const void **value, size_t *value_len, bool internal)
{
	struct spdk_xattr *xattr;
	struct spdk_xattr_tailq *xattrs;

	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			*value = xattr->value;
			*value_len = xattr->value_len;
			return 0;
		}
	}
	return -ENOENT;
}

int
spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
			  const void **value, size_t *value_len)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_value(blob, name, value, value_len, false);
}

struct spdk_xattr_names {
	uint32_t count;
	const char *names[0];
};

static int
blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
{
	struct spdk_xattr *xattr;
	int count = 0;

	TAILQ_FOREACH(xattr, xattrs, link) {
		count++;
	}

	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
	if (*names == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		(*names)->names[(*names)->count++] = xattr->name;
	}

	return 0;
}

int
spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_names(&blob->xattrs, names);
}

uint32_t
spdk_xattr_names_get_count(struct spdk_xattr_names *names)
{
	assert(names != NULL);

	return names->count;
}

const char *
spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
{
	if (index >= names->count) {
		return NULL;
	}

	return names->names[index];
}

void
spdk_xattr_names_free(struct spdk_xattr_names *names)
{
	free(names);
}
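
/*
 * Usage sketch (illustrative only; example_* names and the "name" key are
 * hypothetical): setting, reading back, and enumerating external xattrs.
 * These calls are synchronous but must run on the blobstore metadata thread
 * (blob_verify_md_op() asserts this), and the pointer returned by
 * spdk_blob_get_xattr_value() refers to blob-owned memory that stays valid
 * only until that xattr is modified or removed.
 */
static void
example_blob_xattrs(struct spdk_blob *blob)
{
	const char *val = "example-value";
	const void *value;
	size_t value_len;
	struct spdk_xattr_names *names;
	uint32_t i;

	if (spdk_blob_set_xattr(blob, "name", val, strlen(val) + 1) != 0) {
		return;
	}

	if (spdk_blob_get_xattr_value(blob, "name", &value, &value_len) == 0) {
		SPDK_NOTICELOG("xattr 'name' = '%s' (%zu bytes)\n",
			       (const char *)value, value_len);
	}

	if (spdk_blob_get_xattr_names(blob, &names) == 0) {
		for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
			SPDK_NOTICELOG("xattr[%u]: %s\n", i, spdk_xattr_names_get_name(names, i));
		}
		spdk_xattr_names_free(names);
	}
}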

struct spdk_bs_type
spdk_bs_get_bstype(struct spdk_blob_store *bs)
{
	return bs->bstype;
}

void
spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
{
	memcpy(&bs->bstype, &bstype, sizeof(bstype));
}

bool
spdk_blob_is_read_only(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return (blob->data_ro || blob->md_ro);
}

bool
spdk_blob_is_snapshot(struct spdk_blob *blob)
{
	struct spdk_blob_list *snapshot_entry;

	assert(blob != NULL);

	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry == NULL) {
		return false;
	}

	return true;
}

bool
spdk_blob_is_clone(struct spdk_blob *blob)
{
	assert(blob != NULL);

	if (blob->parent_id != SPDK_BLOBID_INVALID) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return true;
	}

	return false;
}

bool
spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
}

static void
blob_update_clear_method(struct spdk_blob *blob)
{
	enum blob_clear_method stored_cm;

	assert(blob != NULL);

	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
	 * in metadata previously. If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
	 */
	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);

	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
		blob->clear_method = stored_cm;
	} else if (blob->clear_method != stored_cm) {
		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
			     blob->clear_method, stored_cm);
	}
}

spdk_blob_id
spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob_id) {
				return snapshot_entry->id;
			}
		}
	}

	return SPDK_BLOBID_INVALID;
}

int
spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
		     size_t *count)
{
	struct spdk_blob_list *snapshot_entry, *clone_entry;
	size_t n;

	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
	if (snapshot_entry == NULL) {
		*count = 0;
		return 0;
	}

	if (ids == NULL || *count < snapshot_entry->clone_count) {
		*count = snapshot_entry->clone_count;
		return -ENOMEM;
	}
	*count = snapshot_entry->clone_count;

	n = 0;
	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
		ids[n++] = clone_entry->id;
	}

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(blob)
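
/*
 * Usage sketch (illustrative only; example_* names are hypothetical): the
 * two-call pattern for spdk_blob_get_clones(). The first call with ids == NULL
 * fails with -ENOMEM but reports the required element count, which sizes the
 * array for the second call.
 */
static void
example_list_clones(struct spdk_blob_store *bs, spdk_blob_id snapshot_id)
{
	spdk_blob_id *ids;
	size_t count = 0;
	size_t i;

	if (spdk_blob_get_clones(bs, snapshot_id, NULL, &count) != -ENOMEM || count == 0) {
		/* Not a snapshot, or a snapshot with no clones. */
		return;
	}

	ids = calloc(count, sizeof(*ids));
	if (ids == NULL) {
		return;
	}

	if (spdk_blob_get_clones(bs, snapshot_id, ids, &count) == 0) {
		for (i = 0; i < count; i++) {
			SPDK_NOTICELOG("clone %" PRIu64 " of snapshot %" PRIu64 "\n",
				       ids[i], snapshot_id);
		}
	}

	free(ids);
}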