/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

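/* Open blobs are tracked per blobstore in a red-black tree keyed by blob ID;
 * blob_id_cmp() above supplies the ordering (-1, 0 or 1) for the tree
 * functions generated below. */
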
RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = NULL;

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request. */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* An extent page shall never occupy md_page 0, so start the search from 1. */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster yet. */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request. */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob %" PRIu64 "\n", *cluster, blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		opts->field = value; \
	} \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

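/*
 * Illustrative usage (not part of this file): callers initialize the options
 * with their compiled-in sizeof() so the opts_size-gated FIELD_OK/SET_FIELD
 * pattern above keeps old and new binaries ABI-compatible. Assuming an
 * already created blobstore `bs` and a caller-defined completion callback
 * `create_cb`:
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 16;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_cb, cb_arg);
 */
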
static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

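/* Freezing is reference counted: only the first freeze needs to walk every
 * I/O channel, and that walk acts as a barrier - once it completes, every
 * thread has observed frozen_refcnt and will queue further I/O to this blob
 * rather than submit it. Nested freezes complete immediately; the matching
 * final unfreeze replays the queued I/O on each channel. */
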
static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}

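/* Apply every descriptor found in one metadata page to the in-memory blob.
 * Unrecognized descriptor types are deliberately skipped rather than treated
 * as errors; compatibility is enforced separately through the flags
 * descriptor's invalid_flags mask. */
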
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An Extent Table is already present in the md;
				 * the two descriptor types must never appear at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* Extent RLE is already present in the md;
				 * the two descriptor types must never appear at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* The number of clusters in this ET does not match the number
				 * from the previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages; those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;
			unsigned int cluster_count = 0;
			size_t cluster_idx_length;

			if (blob->extent_rle_found) {
				/* Extent RLE is already present in the md;
				 * the two descriptor types must never appear at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx
			 * should match the current size of the blob.
			 * If this is changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

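/* Parse a complete chain of metadata pages into the blob. The chain must
 * begin with sequence number 0 (the page addressed by the blob ID) and the
 * pages must be supplied in order. */
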
static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%" PRIu64 ") doesn't match what's in metadata (%" PRIu64 ")\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr
 * descriptor. Update required_sz on both success and failure. */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that the next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}

static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least a single extent table entry always has to be persisted.
	 * Such a case occurs when num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

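/*
 * Worked example (illustrative): if lba_per_cluster is c and the active
 * cluster LBAs are { A, A+c, A+2c, 0, 0, B }, blob_serialize_extent_rle()
 * below emits three extents:
 *
 *	{ cluster_idx = A/c, length = 3 }	contiguous allocated run
 *	{ cluster_idx = 0,   length = 2 }	unallocated run
 *	{ cluster_idx = B/c, length = 1 }
 */
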
static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t num_pages;
	uint32_t next_extent_page;
	spdk_bs_sequence_t *seq;

	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
};

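/* The metadata page CRC covers the entire page except its own trailing
 * 4-byte crc field; seeding with all ones and XOR-ing the seed back out is
 * the usual CRC-32C pre-/post-conditioning. */
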
static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot open failed\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

static void
blob_load_backing_dev(void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	const void *value;
	size_t len;
	int rc;

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}

static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	uint64_t i;
	uint32_t crc;
	uint64_t lba;
	void *tmp;
	uint64_t sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for a single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size should be increased by up to the
			 * amount left in remaining_clusters_in_et. */
			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(ctx);
}

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	int rc;
	uint32_t crc;
	uint32_t current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid %" PRIu64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid %" PRIu64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support
		 * for the extent table. No extent_* descriptors means that the blob
		 * has a length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob *blob;

	struct spdk_bs_super_block *super;

	struct spdk_blob_md_page *pages;
	uint32_t next_extent_page;
	struct spdk_blob_md_page *extent_page;

	spdk_bs_sequence_t *seq;
	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

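/* Map the blob's clear_method onto a device operation: the default and UNMAP
 * trim the range, WRITE_ZEROES explicitly zeroes it, and NONE intentionally
 * leaves the old data in place. */
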
static void
bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (ctx->blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

static void blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
{
	struct spdk_blob_persist_ctx *next_persist, *tmp;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));

	/* Complete all persists that were pending when the current persist started */
	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
	}

	if (TAILQ_EMPTY(&blob->pending_persists)) {
		return;
	}

	/* Queue up all pending persists for completion and start blob persist with first one */
	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
	next_persist = TAILQ_FIRST(&blob->persists_to_complete);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob_persist_check_dirty(next_persist);
}

static void
blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* Release all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to release if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			bs_release_md_page(bs, blob->active.extent_pages[i]);
		}
	}

	if (blob->active.num_extent_pages == 0) {
		free(blob->active.extent_pages);
		blob->active.extent_pages = NULL;
		blob->active.extent_pages_array_size = 0;
	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
	}

	blob_persist_complete(seq, ctx, bserrno);
}

static void
blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;
	uint64_t lba;
	uint64_t lba_count;
	spdk_bs_batch_t *batch;

	batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx);
	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* Clear all extent_pages that were truncated */
	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
		/* Nothing to clear if it was not allocated */
		if (blob->active.extent_pages[i] != 0) {
			lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]);
			bs_batch_write_zeroes_dev(batch, lba, lba_count);
		}
	}

	bs_batch_close(batch);
}

static void
blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	pthread_mutex_lock(&bs->used_clusters_mutex);
	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			bs_release_cluster(bs, cluster_num);
		}
	}
	pthread_mutex_unlock(&bs->used_clusters_mutex);

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;
#endif
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	/* Move on to clearing extent pages */
	blob_persist_clear_extents(seq, ctx);
}

static void
blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	spdk_bs_batch_t *batch;
	size_t i;
	uint64_t lba;
	uint64_t lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx);

	/* Clear all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint64_t next_lba_count = bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		} else if (next_lba == 0) {
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, clear them now */
		if (lba_count > 0) {
			bs_batch_clear_dev(ctx, batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, clear them now */
	if (lba_count > 0) {
		bs_batch_clear_dev(ctx, batch, lba, lba_count);
	}

	bs_batch_close(batch);
}

static void
blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		bs_release_md_page(bs, blob->clean.pages[i]);
	}

	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		page_num = bs_blobid_to_page(blob->id);
		bs_release_md_page(bs, page_num);
	}

	/* Move on to clearing clusters */
	blob_persist_clear_clusters(seq, ctx);
}

static void
blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint64_t lba_count;
	spdk_bs_batch_t *batch;
	size_t i;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx);

	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		lba = bs_md_page_to_lba(bs, blob->clean.pages[i]);

		bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	/* The first page will only be zeroed if this is a delete. */
	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		/* The first page in the metadata goes where the blobid indicates */
		page_num = bs_blobid_to_page(blob->id);
		lba = bs_md_page_to_lba(bs, page_num);

		bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	bs_batch_close(batch);
}

static void
blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_blob_md_page *page;

	if (bserrno != 0) {
		blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	if (blob->active.num_pages == 0) {
		/* Move on to the next step */
		blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	lba_count = bs_byte_to_lba(bs, sizeof(*page));

	page = &ctx->pages[0];
	/* The first page in the metadata goes where the blobid indicates */
	lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id));

	bs_sequence_write_dev(seq, page, lba, lba_count,
			      blob_persist_zero_pages, ctx);
}

static void
blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
{
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_blob_md_page *page;
	spdk_bs_batch_t *batch;
	size_t i;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	lba_count = bs_byte_to_lba(bs, sizeof(*page));

	batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx);

	/* This starts at 1. The root page is not written until
	 * all of the others are finished
	 */
	for (i = 1; i < blob->active.num_pages; i++) {
		page = &ctx->pages[i];
		assert(page->sequence_num == i);

		lba = bs_md_page_to_lba(bs, blob->active.pages[i]);

		bs_batch_write_dev(batch, page, lba, lba_count);
	}

	bs_batch_close(batch);
}

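/* Resize the blob to sz clusters. The cluster array only ever grows here
 * (shrinking is deferred to persist), and free clusters plus metadata pages
 * are counted up front so a failed resize never claims partial resources. */
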
*/ 1913 if (blob->active.num_pages == 0) { 1914 uint32_t page_num; 1915 1916 /* The first page in the metadata goes where the blobid indicates */ 1917 page_num = bs_blobid_to_page(blob->id); 1918 lba = bs_md_page_to_lba(bs, page_num); 1919 1920 bs_batch_write_zeroes_dev(batch, lba, lba_count); 1921 } 1922 1923 bs_batch_close(batch); 1924 } 1925 1926 static void 1927 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1928 { 1929 struct spdk_blob_persist_ctx *ctx = cb_arg; 1930 struct spdk_blob *blob = ctx->blob; 1931 struct spdk_blob_store *bs = blob->bs; 1932 uint64_t lba; 1933 uint32_t lba_count; 1934 struct spdk_blob_md_page *page; 1935 1936 if (bserrno != 0) { 1937 blob_persist_complete(seq, ctx, bserrno); 1938 return; 1939 } 1940 1941 if (blob->active.num_pages == 0) { 1942 /* Move on to the next step */ 1943 blob_persist_zero_pages(seq, ctx, 0); 1944 return; 1945 } 1946 1947 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 1948 1949 page = &ctx->pages[0]; 1950 /* The first page in the metadata goes where the blobid indicates */ 1951 lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id)); 1952 1953 bs_sequence_write_dev(seq, page, lba, lba_count, 1954 blob_persist_zero_pages, ctx); 1955 } 1956 1957 static void 1958 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx) 1959 { 1960 struct spdk_blob *blob = ctx->blob; 1961 struct spdk_blob_store *bs = blob->bs; 1962 uint64_t lba; 1963 uint32_t lba_count; 1964 struct spdk_blob_md_page *page; 1965 spdk_bs_batch_t *batch; 1966 size_t i; 1967 1968 /* Clusters don't move around in blobs. The list shrinks or grows 1969 * at the end, but no changes ever occur in the middle of the list. 1970 */ 1971 1972 lba_count = bs_byte_to_lba(bs, sizeof(*page)); 1973 1974 batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx); 1975 1976 /* This starts at 1. The root page is not written until 1977 * all of the others are finished 1978 */ 1979 for (i = 1; i < blob->active.num_pages; i++) { 1980 page = &ctx->pages[i]; 1981 assert(page->sequence_num == i); 1982 1983 lba = bs_md_page_to_lba(bs, blob->active.pages[i]); 1984 1985 bs_batch_write_dev(batch, page, lba, lba_count); 1986 } 1987 1988 bs_batch_close(batch); 1989 } 1990 1991 static int 1992 blob_resize(struct spdk_blob *blob, uint64_t sz) 1993 { 1994 uint64_t i; 1995 uint64_t *tmp; 1996 uint64_t cluster; 1997 uint32_t lfmd; /* lowest free md page */ 1998 uint64_t num_clusters; 1999 uint32_t *ep_tmp; 2000 uint64_t new_num_ep = 0, current_num_ep = 0; 2001 struct spdk_blob_store *bs; 2002 2003 bs = blob->bs; 2004 2005 blob_verify_md_op(blob); 2006 2007 if (blob->active.num_clusters == sz) { 2008 return 0; 2009 } 2010 2011 if (blob->active.num_clusters < blob->active.cluster_array_size) { 2012 /* If this blob was resized to be larger, then smaller, then 2013 * larger without syncing, then the cluster array already 2014 * contains spare assigned clusters we can use. 2015 */ 2016 num_clusters = spdk_min(blob->active.cluster_array_size, 2017 sz); 2018 } else { 2019 num_clusters = blob->active.num_clusters; 2020 } 2021 2022 if (blob->use_extent_table) { 2023 /* Round up since every cluster beyond current Extent Table size, 2024 * requires new extent page. */ 2025 new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP); 2026 current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP); 2027 } 2028 2029 /* Check first that we have enough clusters and md pages before we start claiming them. 
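* A worked example with invented numbers: if SPDK_EXTENTS_PER_EP were 4,
* growing a thick-provisioned blob from 0 to 10 clusters would need 10
* free clusters plus spdk_divide_round_up(10, 4) = 3 free md pages to
* hold the new extent pages; thin-provisioned blobs skip this check.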
*/ 2030 if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) { 2031 if ((sz - num_clusters) > bs->num_free_clusters) { 2032 return -ENOSPC; 2033 } 2034 lfmd = 0; 2035 for (i = current_num_ep; i < new_num_ep ; i++) { 2036 lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd); 2037 if (lfmd == UINT32_MAX) { 2038 /* No more free md pages. Cannot satisfy the request */ 2039 return -ENOSPC; 2040 } 2041 } 2042 } 2043 2044 if (sz > num_clusters) { 2045 /* Expand the cluster array if necessary. 2046 * We only shrink the array when persisting. 2047 */ 2048 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 2049 if (sz > 0 && tmp == NULL) { 2050 return -ENOMEM; 2051 } 2052 memset(tmp + blob->active.cluster_array_size, 0, 2053 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 2054 blob->active.clusters = tmp; 2055 blob->active.cluster_array_size = sz; 2056 2057 /* Expand the extents table, only if enough clusters were added */ 2058 if (new_num_ep > current_num_ep && blob->use_extent_table) { 2059 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 2060 if (new_num_ep > 0 && ep_tmp == NULL) { 2061 return -ENOMEM; 2062 } 2063 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 2064 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 2065 blob->active.extent_pages = ep_tmp; 2066 blob->active.extent_pages_array_size = new_num_ep; 2067 } 2068 } 2069 2070 blob->state = SPDK_BLOB_STATE_DIRTY; 2071 2072 if (spdk_blob_is_thin_provisioned(blob) == false) { 2073 cluster = 0; 2074 lfmd = 0; 2075 pthread_mutex_lock(&blob->bs->used_clusters_mutex); 2076 for (i = num_clusters; i < sz; i++) { 2077 bs_allocate_cluster(blob, i, &cluster, &lfmd, true); 2078 lfmd++; 2079 } 2080 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 2081 } 2082 2083 blob->active.num_clusters = sz; 2084 blob->active.num_extent_pages = new_num_ep; 2085 2086 return 0; 2087 } 2088 2089 static void 2090 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 2091 { 2092 spdk_bs_sequence_t *seq = ctx->seq; 2093 struct spdk_blob *blob = ctx->blob; 2094 struct spdk_blob_store *bs = blob->bs; 2095 uint64_t i; 2096 uint32_t page_num; 2097 void *tmp; 2098 int rc; 2099 2100 /* Generate the new metadata */ 2101 rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 2102 if (rc < 0) { 2103 blob_persist_complete(seq, ctx, rc); 2104 return; 2105 } 2106 2107 assert(blob->active.num_pages >= 1); 2108 2109 /* Resize the cache of page indices */ 2110 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 2111 if (!tmp) { 2112 blob_persist_complete(seq, ctx, -ENOMEM); 2113 return; 2114 } 2115 blob->active.pages = tmp; 2116 2117 /* Assign this metadata to pages. This requires two passes - 2118 * one to verify that there are enough pages and a second 2119 * to actually claim them. */ 2120 page_num = 0; 2121 /* Note that this loop starts at one. The first page location is fixed by the blobid. 
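* This first pass only verifies that enough free md pages exist; nothing
* is claimed yet, so running out here can fail with -ENOMEM without any
* rollback. The second pass below repeats the search and actually claims
* the pages with bs_claim_md_page().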
*/ 2122 for (i = 1; i < blob->active.num_pages; i++) { 2123 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2124 if (page_num == UINT32_MAX) { 2125 blob_persist_complete(seq, ctx, -ENOMEM); 2126 return; 2127 } 2128 page_num++; 2129 } 2130 2131 page_num = 0; 2132 blob->active.pages[0] = bs_blobid_to_page(blob->id); 2133 for (i = 1; i < blob->active.num_pages; i++) { 2134 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 2135 ctx->pages[i - 1].next = page_num; 2136 /* Now that previous metadata page is complete, calculate the crc for it. */ 2137 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2138 blob->active.pages[i] = page_num; 2139 bs_claim_md_page(bs, page_num); 2140 SPDK_DEBUGLOG(blob, "Claiming page %u for blob %" PRIu64 "\n", page_num, blob->id); 2141 page_num++; 2142 } 2143 ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]); 2144 /* Start writing the metadata from last page to first */ 2145 blob->state = SPDK_BLOB_STATE_CLEAN; 2146 blob_persist_write_page_chain(seq, ctx); 2147 } 2148 2149 static void 2150 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2151 { 2152 struct spdk_blob_persist_ctx *ctx = cb_arg; 2153 struct spdk_blob *blob = ctx->blob; 2154 size_t i; 2155 uint32_t extent_page_id; 2156 uint32_t page_count = 0; 2157 int rc; 2158 2159 if (ctx->extent_page != NULL) { 2160 spdk_free(ctx->extent_page); 2161 ctx->extent_page = NULL; 2162 } 2163 2164 if (bserrno != 0) { 2165 blob_persist_complete(seq, ctx, bserrno); 2166 return; 2167 } 2168 2169 /* Only write out Extent Pages when blob was resized. */ 2170 for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) { 2171 extent_page_id = blob->active.extent_pages[i]; 2172 if (extent_page_id == 0) { 2173 /* No Extent Page to persist */ 2174 assert(spdk_blob_is_thin_provisioned(blob)); 2175 continue; 2176 } 2177 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2178 ctx->next_extent_page = i + 1; 2179 rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2180 if (rc < 0) { 2181 blob_persist_complete(seq, ctx, rc); 2182 return; 2183 } 2184 2185 blob->state = SPDK_BLOB_STATE_DIRTY; 2186 blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2187 2188 ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page); 2189 2190 bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id), 2191 bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2192 blob_persist_write_extent_pages, ctx); 2193 return; 2194 } 2195 2196 blob_persist_generate_new_md(ctx); 2197 } 2198 2199 static void 2200 blob_persist_start(struct spdk_blob_persist_ctx *ctx) 2201 { 2202 spdk_bs_sequence_t *seq = ctx->seq; 2203 struct spdk_blob *blob = ctx->blob; 2204 2205 if (blob->active.num_pages == 0) { 2206 /* This is the signal that the blob should be deleted. 2207 * Immediately jump to the clean up routine. 
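* (Deletion is thus modeled as persisting zero metadata pages: the
* zero/clear chain that follows releases the md pages, clusters and
* extent pages the blob owned.)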
*/ 2208 assert(blob->clean.num_pages > 0); 2209 blob->state = SPDK_BLOB_STATE_CLEAN; 2210 blob_persist_zero_pages(seq, ctx, 0); 2211 return; 2212 2213 } 2214 2215 if (blob->clean.num_clusters < blob->active.num_clusters) { 2216 /* Blob was resized up */ 2217 assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages); 2218 ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1; 2219 } else if (blob->active.num_clusters < blob->active.cluster_array_size) { 2220 /* Blob was resized down */ 2221 assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages); 2222 ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1; 2223 } else { 2224 /* No change in size occurred */ 2225 blob_persist_generate_new_md(ctx); 2226 return; 2227 } 2228 2229 blob_persist_write_extent_pages(seq, ctx, 0); 2230 } 2231 2232 static void 2233 blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2234 { 2235 struct spdk_blob_persist_ctx *ctx = cb_arg; 2236 2237 spdk_free(ctx->super); 2238 2239 if (bserrno != 0) { 2240 blob_persist_complete(seq, ctx, bserrno); 2241 return; 2242 } 2243 2244 ctx->blob->bs->clean = 0; 2245 2246 blob_persist_start(ctx); 2247 } 2248 2249 static void 2250 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2251 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2252 2253 2254 static void 2255 blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2256 { 2257 struct spdk_blob_persist_ctx *ctx = cb_arg; 2258 2259 if (bserrno != 0) { 2260 spdk_free(ctx->super); 2261 blob_persist_complete(seq, ctx, bserrno); 2262 return; 2263 } 2264 2265 ctx->super->clean = 0; 2266 if (ctx->super->size == 0) { 2267 ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen; 2268 } 2269 2270 bs_write_super(seq, ctx->blob->bs, ctx->super, blob_persist_dirty_cpl, ctx); 2271 } 2272 2273 static void 2274 blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx) 2275 { 2276 if (ctx->blob->bs->clean) { 2277 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2278 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2279 if (!ctx->super) { 2280 blob_persist_complete(ctx->seq, ctx, -ENOMEM); 2281 return; 2282 } 2283 2284 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->blob->bs, 0), 2285 bs_byte_to_lba(ctx->blob->bs, sizeof(*ctx->super)), 2286 blob_persist_dirty, ctx); 2287 } else { 2288 blob_persist_start(ctx); 2289 } 2290 } 2291 2292 /* Write a blob to disk */ 2293 static void 2294 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2295 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2296 { 2297 struct spdk_blob_persist_ctx *ctx; 2298 2299 blob_verify_md_op(blob); 2300 2301 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) { 2302 cb_fn(seq, cb_arg, 0); 2303 return; 2304 } 2305 2306 ctx = calloc(1, sizeof(*ctx)); 2307 if (!ctx) { 2308 cb_fn(seq, cb_arg, -ENOMEM); 2309 return; 2310 } 2311 ctx->blob = blob; 2312 ctx->seq = seq; 2313 ctx->cb_fn = cb_fn; 2314 ctx->cb_arg = cb_arg; 2315 2316 /* Multiple blob persists can affect one another, via blob->state or 2317 * blob mutable data changes. To prevent it, queue up the persists. 
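*
* An illustrative sketch (hypothetical callback names, error handling
* omitted): two metadata syncs issued back to back on the md thread are
* safe, because the second persist ctx is parked on pending_persists
* until the first one completes:
*
*   spdk_blob_sync_md(blob, first_sync_done, NULL);   <- starts persist #1
*   spdk_blob_sync_md(blob, second_sync_done, NULL);  <- parked behind #1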
*/ 2318 if (!TAILQ_EMPTY(&blob->persists_to_complete)) { 2319 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2320 return; 2321 } 2322 TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link); 2323 2324 blob_persist_check_dirty(ctx); 2325 } 2326 2327 struct spdk_blob_copy_cluster_ctx { 2328 struct spdk_blob *blob; 2329 uint8_t *buf; 2330 uint64_t page; 2331 uint64_t new_cluster; 2332 uint32_t new_extent_page; 2333 spdk_bs_sequence_t *seq; 2334 struct spdk_blob_md_page *new_cluster_page; 2335 }; 2336 2337 static void 2338 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2339 { 2340 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2341 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2342 TAILQ_HEAD(, spdk_bs_request_set) requests; 2343 spdk_bs_user_op_t *op; 2344 2345 TAILQ_INIT(&requests); 2346 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2347 2348 while (!TAILQ_EMPTY(&requests)) { 2349 op = TAILQ_FIRST(&requests); 2350 TAILQ_REMOVE(&requests, op, link); 2351 if (bserrno == 0) { 2352 bs_user_op_execute(op); 2353 } else { 2354 bs_user_op_abort(op, bserrno); 2355 } 2356 } 2357 2358 spdk_free(ctx->buf); 2359 free(ctx); 2360 } 2361 2362 static void 2363 blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2364 { 2365 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2366 2367 if (bserrno) { 2368 if (bserrno == -EEXIST) { 2369 /* The metadata insert failed because another thread 2370 * allocated the cluster first. Free our cluster 2371 * but continue without error. */ 2372 bserrno = 0; 2373 } 2374 pthread_mutex_lock(&ctx->blob->bs->used_clusters_mutex); 2375 bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2376 pthread_mutex_unlock(&ctx->blob->bs->used_clusters_mutex); 2377 if (ctx->new_extent_page != 0) { 2378 bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2379 } 2380 } 2381 2382 bs_sequence_finish(ctx->seq, bserrno); 2383 } 2384 2385 static void 2386 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2387 { 2388 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2389 uint32_t cluster_number; 2390 2391 if (bserrno) { 2392 /* The write failed, so jump to the final completion handler */ 2393 bs_sequence_finish(seq, bserrno); 2394 return; 2395 } 2396 2397 cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page); 2398 2399 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2400 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2401 } 2402 2403 static void 2404 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2405 { 2406 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2407 2408 if (bserrno != 0) { 2409 /* The read failed, so jump to the final completion handler */ 2410 bs_sequence_finish(seq, bserrno); 2411 return; 2412 } 2413 2414 /* Write whole cluster */ 2415 bs_sequence_write_dev(seq, ctx->buf, 2416 bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2417 bs_cluster_to_lba(ctx->blob->bs, 1), 2418 blob_write_copy_cpl, ctx); 2419 } 2420 2421 static void 2422 bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2423 struct spdk_io_channel *_ch, 2424 uint64_t io_unit, spdk_bs_user_op_t *op) 2425 { 2426 struct spdk_bs_cpl cpl; 2427 struct spdk_bs_channel *ch; 2428 struct spdk_blob_copy_cluster_ctx *ctx; 2429 uint32_t cluster_start_page; 2430 uint32_t cluster_number; 2431 int rc; 2432 2433 ch = spdk_io_channel_get_ctx(_ch); 2434 2435 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2436 /* There are already 
operations pending. Queue this user op 2437 * and return because it will be re-executed when the outstanding 2438 * cluster allocation completes. */ 2439 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2440 return; 2441 } 2442 2443 /* Round the io_unit offset down to the first page in the cluster */ 2444 cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit); 2445 2446 /* Calculate which index in the metadata cluster array the corresponding 2447 * cluster is supposed to be at. */ 2448 cluster_number = bs_io_unit_to_cluster_number(blob, io_unit); 2449 2450 ctx = calloc(1, sizeof(*ctx)); 2451 if (!ctx) { 2452 bs_user_op_abort(op, -ENOMEM); 2453 return; 2454 } 2455 2456 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2457 2458 ctx->blob = blob; 2459 ctx->page = cluster_start_page; 2460 ctx->new_cluster_page = ch->new_cluster_page; 2461 memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE); 2462 2463 if (blob->parent_id != SPDK_BLOBID_INVALID) { 2464 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2465 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2466 if (!ctx->buf) { 2467 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2468 blob->bs->cluster_sz); 2469 free(ctx); 2470 bs_user_op_abort(op, -ENOMEM); 2471 return; 2472 } 2473 } 2474 2475 pthread_mutex_lock(&blob->bs->used_clusters_mutex); 2476 rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2477 false); 2478 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 2479 if (rc != 0) { 2480 spdk_free(ctx->buf); 2481 free(ctx); 2482 bs_user_op_abort(op, rc); 2483 return; 2484 } 2485 2486 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2487 cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl; 2488 cpl.u.blob_basic.cb_arg = ctx; 2489 2490 ctx->seq = bs_sequence_start(_ch, &cpl); 2491 if (!ctx->seq) { 2492 pthread_mutex_lock(&blob->bs->used_clusters_mutex); 2493 bs_release_cluster(blob->bs, ctx->new_cluster); 2494 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 2495 spdk_free(ctx->buf); 2496 free(ctx); 2497 bs_user_op_abort(op, -ENOMEM); 2498 return; 2499 } 2500 2501 /* Queue the user op to block other incoming operations */ 2502 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2503 2504 if (blob->parent_id != SPDK_BLOBID_INVALID) { 2505 /* Read cluster from backing device */ 2506 bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2507 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2508 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2509 blob_write_copy, ctx); 2510 } else { 2511 blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2512 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx); 2513 } 2514 } 2515 2516 static inline bool 2517 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2518 uint64_t *lba, uint64_t *lba_count) 2519 { 2520 *lba_count = length; 2521 2522 if (!bs_io_unit_is_allocated(blob, io_unit)) { 2523 assert(blob->back_bs_dev != NULL); 2524 *lba = bs_io_unit_to_back_dev_lba(blob, io_unit); 2525 *lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count); 2526 return false; 2527 } else { 2528 *lba = bs_blob_io_unit_to_lba(blob, io_unit); 2529 return true; 2530 } 2531 } 2532 2533 struct op_split_ctx { 2534 struct spdk_blob *blob; 2535 struct spdk_io_channel *channel; 2536 uint64_t io_unit_offset; 2537 uint64_t io_units_remaining; 2538 void *curr_payload; 2539 enum spdk_blob_op_type 
op_type; 2540 spdk_bs_sequence_t *seq; 2541 bool in_submit_ctx; 2542 bool completed_in_submit_ctx; 2543 bool done; 2544 }; 2545 2546 static void 2547 blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2548 { 2549 struct op_split_ctx *ctx = cb_arg; 2550 struct spdk_blob *blob = ctx->blob; 2551 struct spdk_io_channel *ch = ctx->channel; 2552 enum spdk_blob_op_type op_type = ctx->op_type; 2553 uint8_t *buf; 2554 uint64_t offset; 2555 uint64_t length; 2556 uint64_t op_length; 2557 2558 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2559 bs_sequence_finish(ctx->seq, bserrno); 2560 if (ctx->in_submit_ctx) { 2561 /* Defer freeing of the ctx object, since it will be 2562 * accessed when this unwinds back to the submission 2563 * context. 2564 */ 2565 ctx->done = true; 2566 } else { 2567 free(ctx); 2568 } 2569 return; 2570 } 2571 2572 if (ctx->in_submit_ctx) { 2573 /* If this split operation completed in the context 2574 * of its submission, mark the flag and return immediately 2575 * to avoid recursion. 2576 */ 2577 ctx->completed_in_submit_ctx = true; 2578 return; 2579 } 2580 2581 while (true) { 2582 ctx->completed_in_submit_ctx = false; 2583 2584 offset = ctx->io_unit_offset; 2585 length = ctx->io_units_remaining; 2586 buf = ctx->curr_payload; 2587 op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob, 2588 offset)); 2589 2590 /* Update length and payload for next operation */ 2591 ctx->io_units_remaining -= op_length; 2592 ctx->io_unit_offset += op_length; 2593 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 2594 ctx->curr_payload += op_length * blob->bs->io_unit_size; 2595 } 2596 2597 assert(!ctx->in_submit_ctx); 2598 ctx->in_submit_ctx = true; 2599 2600 switch (op_type) { 2601 case SPDK_BLOB_READ: 2602 spdk_blob_io_read(blob, ch, buf, offset, op_length, 2603 blob_request_submit_op_split_next, ctx); 2604 break; 2605 case SPDK_BLOB_WRITE: 2606 spdk_blob_io_write(blob, ch, buf, offset, op_length, 2607 blob_request_submit_op_split_next, ctx); 2608 break; 2609 case SPDK_BLOB_UNMAP: 2610 spdk_blob_io_unmap(blob, ch, offset, op_length, 2611 blob_request_submit_op_split_next, ctx); 2612 break; 2613 case SPDK_BLOB_WRITE_ZEROES: 2614 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 2615 blob_request_submit_op_split_next, ctx); 2616 break; 2617 case SPDK_BLOB_READV: 2618 case SPDK_BLOB_WRITEV: 2619 SPDK_ERRLOG("readv/write not valid\n"); 2620 bs_sequence_finish(ctx->seq, -EINVAL); 2621 free(ctx); 2622 return; 2623 } 2624 2625 #ifndef __clang_analyzer__ 2626 /* scan-build reports a false positive around accessing the ctx here. It 2627 * forms a path that recursively calls this function, but then says 2628 * "assuming ctx->in_submit_ctx is false", when that isn't possible. 2629 * This path does free(ctx), returns to here, and reports a use-after-free 2630 * bug. Wrapping this bit of code so that scan-build doesn't see it 2631 * works around the scan-build bug. 2632 */ 2633 assert(ctx->in_submit_ctx); 2634 ctx->in_submit_ctx = false; 2635 2636 /* If the operation completed immediately, loop back and submit the 2637 * next operation. Otherwise we can return and the next split 2638 * operation will get submitted when this current operation is 2639 * later completed asynchronously.
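* (The completed_in_submit_ctx flag plus this while loop form a
* trampoline: a synchronous completion becomes another loop iteration
* instead of another stack frame, so a long chain of immediately
* completing sub-operations cannot overflow the stack.)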
2640 */ 2641 if (ctx->completed_in_submit_ctx) { 2642 continue; 2643 } else if (ctx->done) { 2644 free(ctx); 2645 } 2646 #endif 2647 break; 2648 } 2649 } 2650 2651 static void 2652 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 2653 void *payload, uint64_t offset, uint64_t length, 2654 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2655 { 2656 struct op_split_ctx *ctx; 2657 spdk_bs_sequence_t *seq; 2658 struct spdk_bs_cpl cpl; 2659 2660 assert(blob != NULL); 2661 2662 ctx = calloc(1, sizeof(struct op_split_ctx)); 2663 if (ctx == NULL) { 2664 cb_fn(cb_arg, -ENOMEM); 2665 return; 2666 } 2667 2668 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2669 cpl.u.blob_basic.cb_fn = cb_fn; 2670 cpl.u.blob_basic.cb_arg = cb_arg; 2671 2672 seq = bs_sequence_start(ch, &cpl); 2673 if (!seq) { 2674 free(ctx); 2675 cb_fn(cb_arg, -ENOMEM); 2676 return; 2677 } 2678 2679 ctx->blob = blob; 2680 ctx->channel = ch; 2681 ctx->curr_payload = payload; 2682 ctx->io_unit_offset = offset; 2683 ctx->io_units_remaining = length; 2684 ctx->op_type = op_type; 2685 ctx->seq = seq; 2686 2687 blob_request_submit_op_split_next(ctx, 0); 2688 } 2689 2690 static void 2691 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 2692 void *payload, uint64_t offset, uint64_t length, 2693 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2694 { 2695 struct spdk_bs_cpl cpl; 2696 uint64_t lba; 2697 uint64_t lba_count; 2698 bool is_allocated; 2699 2700 assert(blob != NULL); 2701 2702 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2703 cpl.u.blob_basic.cb_fn = cb_fn; 2704 cpl.u.blob_basic.cb_arg = cb_arg; 2705 2706 if (blob->frozen_refcnt) { 2707 /* This blob I/O is frozen */ 2708 spdk_bs_user_op_t *op; 2709 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 2710 2711 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2712 if (!op) { 2713 cb_fn(cb_arg, -ENOMEM); 2714 return; 2715 } 2716 2717 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2718 2719 return; 2720 } 2721 2722 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2723 2724 switch (op_type) { 2725 case SPDK_BLOB_READ: { 2726 spdk_bs_batch_t *batch; 2727 2728 batch = bs_batch_open(_ch, &cpl); 2729 if (!batch) { 2730 cb_fn(cb_arg, -ENOMEM); 2731 return; 2732 } 2733 2734 if (is_allocated) { 2735 /* Read from the blob */ 2736 bs_batch_read_dev(batch, payload, lba, lba_count); 2737 } else { 2738 /* Read from the backing block device */ 2739 bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 2740 } 2741 2742 bs_batch_close(batch); 2743 break; 2744 } 2745 case SPDK_BLOB_WRITE: 2746 case SPDK_BLOB_WRITE_ZEROES: { 2747 if (is_allocated) { 2748 /* Write to the blob */ 2749 spdk_bs_batch_t *batch; 2750 2751 if (lba_count == 0) { 2752 cb_fn(cb_arg, 0); 2753 return; 2754 } 2755 2756 batch = bs_batch_open(_ch, &cpl); 2757 if (!batch) { 2758 cb_fn(cb_arg, -ENOMEM); 2759 return; 2760 } 2761 2762 if (op_type == SPDK_BLOB_WRITE) { 2763 bs_batch_write_dev(batch, payload, lba, lba_count); 2764 } else { 2765 bs_batch_write_zeroes_dev(batch, lba, lba_count); 2766 } 2767 2768 bs_batch_close(batch); 2769 } else { 2770 /* Queue this operation and allocate the cluster */ 2771 spdk_bs_user_op_t *op; 2772 2773 op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2774 if (!op) { 2775 cb_fn(cb_arg, -ENOMEM); 2776 return; 2777 } 2778 2779 bs_allocate_and_copy_cluster(blob, _ch, 
offset, op); 2780 } 2781 break; 2782 } 2783 case SPDK_BLOB_UNMAP: { 2784 spdk_bs_batch_t *batch; 2785 2786 batch = bs_batch_open(_ch, &cpl); 2787 if (!batch) { 2788 cb_fn(cb_arg, -ENOMEM); 2789 return; 2790 } 2791 2792 if (is_allocated) { 2793 bs_batch_unmap_dev(batch, lba, lba_count); 2794 } 2795 2796 bs_batch_close(batch); 2797 break; 2798 } 2799 case SPDK_BLOB_READV: 2800 case SPDK_BLOB_WRITEV: 2801 SPDK_ERRLOG("readv/write not valid\n"); 2802 cb_fn(cb_arg, -EINVAL); 2803 break; 2804 } 2805 } 2806 2807 static void 2808 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2809 void *payload, uint64_t offset, uint64_t length, 2810 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2811 { 2812 assert(blob != NULL); 2813 2814 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 2815 cb_fn(cb_arg, -EPERM); 2816 return; 2817 } 2818 2819 if (length == 0) { 2820 cb_fn(cb_arg, 0); 2821 return; 2822 } 2823 2824 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2825 cb_fn(cb_arg, -EINVAL); 2826 return; 2827 } 2828 if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) { 2829 blob_request_submit_op_single(_channel, blob, payload, offset, length, 2830 cb_fn, cb_arg, op_type); 2831 } else { 2832 blob_request_submit_op_split(_channel, blob, payload, offset, length, 2833 cb_fn, cb_arg, op_type); 2834 } 2835 } 2836 2837 struct rw_iov_ctx { 2838 struct spdk_blob *blob; 2839 struct spdk_io_channel *channel; 2840 spdk_blob_op_complete cb_fn; 2841 void *cb_arg; 2842 bool read; 2843 int iovcnt; 2844 struct iovec *orig_iov; 2845 uint64_t io_unit_offset; 2846 uint64_t io_units_remaining; 2847 uint64_t io_units_done; 2848 struct spdk_blob_ext_io_opts *ext_io_opts; 2849 struct iovec iov[0]; 2850 }; 2851 2852 static void 2853 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2854 { 2855 assert(cb_arg == NULL); 2856 bs_sequence_finish(seq, bserrno); 2857 } 2858 2859 static void 2860 rw_iov_split_next(void *cb_arg, int bserrno) 2861 { 2862 struct rw_iov_ctx *ctx = cb_arg; 2863 struct spdk_blob *blob = ctx->blob; 2864 struct iovec *iov, *orig_iov; 2865 int iovcnt; 2866 size_t orig_iovoff; 2867 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 2868 uint64_t byte_count; 2869 2870 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2871 ctx->cb_fn(ctx->cb_arg, bserrno); 2872 free(ctx); 2873 return; 2874 } 2875 2876 io_unit_offset = ctx->io_unit_offset; 2877 io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 2878 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 2879 /* 2880 * Get index and offset into the original iov array for our current position in the I/O sequence. 2881 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 2882 * point to the current position in the I/O sequence. 2883 */ 2884 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 2885 orig_iov = &ctx->orig_iov[0]; 2886 orig_iovoff = 0; 2887 while (byte_count > 0) { 2888 if (byte_count >= orig_iov->iov_len) { 2889 byte_count -= orig_iov->iov_len; 2890 orig_iov++; 2891 } else { 2892 orig_iovoff = byte_count; 2893 byte_count = 0; 2894 } 2895 } 2896 2897 /* 2898 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 2899 * bytes of this next I/O remain to be accounted for in the new iov array. 
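*
* A hypothetical example (illustrative values): with 512-byte io units,
* io_units_count = 3, and two 1024-byte buffers in orig_iov with the
* scan above leaving orig_iovoff = 512, the loop below produces
* iov[0] = the last 512 bytes of the first buffer and iov[1] = all
* 1024 bytes of the second buffer.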
2900 */ 2901 byte_count = io_units_count * blob->bs->io_unit_size; 2902 iov = &ctx->iov[0]; 2903 iovcnt = 0; 2904 while (byte_count > 0) { 2905 assert(iovcnt < ctx->iovcnt); 2906 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 2907 iov->iov_base = orig_iov->iov_base + orig_iovoff; 2908 byte_count -= iov->iov_len; 2909 orig_iovoff = 0; 2910 orig_iov++; 2911 iov++; 2912 iovcnt++; 2913 } 2914 2915 ctx->io_unit_offset += io_units_count; 2916 ctx->io_units_remaining -= io_units_count; 2917 ctx->io_units_done += io_units_count; 2918 iov = &ctx->iov[0]; 2919 2920 if (ctx->read) { 2921 spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2922 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 2923 } else { 2924 spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2925 io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts); 2926 } 2927 } 2928 2929 static void 2930 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2931 struct iovec *iov, int iovcnt, 2932 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read, 2933 struct spdk_blob_ext_io_opts *ext_io_opts) 2934 { 2935 struct spdk_bs_cpl cpl; 2936 2937 assert(blob != NULL); 2938 2939 if (!read && blob->data_ro) { 2940 cb_fn(cb_arg, -EPERM); 2941 return; 2942 } 2943 2944 if (length == 0) { 2945 cb_fn(cb_arg, 0); 2946 return; 2947 } 2948 2949 if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2950 cb_fn(cb_arg, -EINVAL); 2951 return; 2952 } 2953 2954 /* 2955 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 2956 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 2957 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 2958 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 2959 * to allocate a separate iov array and split the I/O such that none of the resulting 2960 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 2961 * but since this case happens very infrequently, any performance impact will be negligible. 2962 * 2963 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 2964 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 2965 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 2966 * when the batch was completed, to allow for freeing the memory for the iov arrays. 2967 */ 2968 if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) { 2969 uint64_t lba_count; 2970 uint64_t lba; 2971 bool is_allocated; 2972 2973 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2974 cpl.u.blob_basic.cb_fn = cb_fn; 2975 cpl.u.blob_basic.cb_arg = cb_arg; 2976 2977 if (blob->frozen_refcnt) { 2978 /* This blob I/O is frozen */ 2979 enum spdk_blob_op_type op_type; 2980 spdk_bs_user_op_t *op; 2981 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 2982 2983 op_type = read ? 
SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 2984 op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 2985 if (!op) { 2986 cb_fn(cb_arg, -ENOMEM); 2987 return; 2988 } 2989 2990 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2991 2992 return; 2993 } 2994 2995 is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2996 2997 if (read) { 2998 spdk_bs_sequence_t *seq; 2999 3000 seq = bs_sequence_start(_channel, &cpl); 3001 if (!seq) { 3002 cb_fn(cb_arg, -ENOMEM); 3003 return; 3004 } 3005 3006 seq->ext_io_opts = ext_io_opts; 3007 3008 if (is_allocated) { 3009 bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3010 } else { 3011 bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 3012 rw_iov_done, NULL); 3013 } 3014 } else { 3015 if (is_allocated) { 3016 spdk_bs_sequence_t *seq; 3017 3018 seq = bs_sequence_start(_channel, &cpl); 3019 if (!seq) { 3020 cb_fn(cb_arg, -ENOMEM); 3021 return; 3022 } 3023 3024 seq->ext_io_opts = ext_io_opts; 3025 3026 bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL); 3027 } else { 3028 /* Queue this operation and allocate the cluster */ 3029 spdk_bs_user_op_t *op; 3030 3031 op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 3032 length); 3033 if (!op) { 3034 cb_fn(cb_arg, -ENOMEM); 3035 return; 3036 } 3037 3038 op->ext_io_opts = ext_io_opts; 3039 3040 bs_allocate_and_copy_cluster(blob, _channel, offset, op); 3041 } 3042 } 3043 } else { 3044 struct rw_iov_ctx *ctx; 3045 3046 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 3047 if (ctx == NULL) { 3048 cb_fn(cb_arg, -ENOMEM); 3049 return; 3050 } 3051 3052 ctx->blob = blob; 3053 ctx->channel = _channel; 3054 ctx->cb_fn = cb_fn; 3055 ctx->cb_arg = cb_arg; 3056 ctx->read = read; 3057 ctx->orig_iov = iov; 3058 ctx->iovcnt = iovcnt; 3059 ctx->io_unit_offset = offset; 3060 ctx->io_units_remaining = length; 3061 ctx->io_units_done = 0; 3062 ctx->ext_io_opts = ext_io_opts; 3063 3064 rw_iov_split_next(ctx, 0); 3065 } 3066 } 3067 3068 static struct spdk_blob * 3069 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 3070 { 3071 struct spdk_blob find; 3072 3073 if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) { 3074 return NULL; 3075 } 3076 3077 find.id = blobid; 3078 return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find); 3079 } 3080 3081 static void 3082 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 3083 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 3084 { 3085 assert(blob != NULL); 3086 *snapshot_entry = NULL; 3087 *clone_entry = NULL; 3088 3089 if (blob->parent_id == SPDK_BLOBID_INVALID) { 3090 return; 3091 } 3092 3093 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 3094 if ((*snapshot_entry)->id == blob->parent_id) { 3095 break; 3096 } 3097 } 3098 3099 if (*snapshot_entry != NULL) { 3100 TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) { 3101 if ((*clone_entry)->id == blob->id) { 3102 break; 3103 } 3104 } 3105 3106 assert(*clone_entry != NULL); 3107 } 3108 } 3109 3110 static int 3111 bs_channel_create(void *io_device, void *ctx_buf) 3112 { 3113 struct spdk_blob_store *bs = io_device; 3114 struct spdk_bs_channel *channel = ctx_buf; 3115 struct spdk_bs_dev *dev; 3116 uint32_t max_ops = bs->max_channel_ops; 3117 uint32_t i; 3118 3119 dev = bs->dev; 3120 3121 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 3122 if 
(!channel->req_mem) { 3123 return -1; 3124 } 3125 3126 TAILQ_INIT(&channel->reqs); 3127 3128 for (i = 0; i < max_ops; i++) { 3129 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 3130 } 3131 3132 channel->bs = bs; 3133 channel->dev = dev; 3134 channel->dev_channel = dev->create_channel(dev); 3135 3136 if (!channel->dev_channel) { 3137 SPDK_ERRLOG("Failed to create device channel.\n"); 3138 free(channel->req_mem); 3139 return -1; 3140 } 3141 3142 channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, 3143 SPDK_MALLOC_DMA); 3144 if (!channel->new_cluster_page) { 3145 SPDK_ERRLOG("Failed to allocate new cluster page\n"); 3146 free(channel->req_mem); 3147 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3148 return -1; 3149 } 3150 3151 TAILQ_INIT(&channel->need_cluster_alloc); 3152 TAILQ_INIT(&channel->queued_io); 3153 3154 return 0; 3155 } 3156 3157 static void 3158 bs_channel_destroy(void *io_device, void *ctx_buf) 3159 { 3160 struct spdk_bs_channel *channel = ctx_buf; 3161 spdk_bs_user_op_t *op; 3162 3163 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 3164 op = TAILQ_FIRST(&channel->need_cluster_alloc); 3165 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 3166 bs_user_op_abort(op, -EIO); 3167 } 3168 3169 while (!TAILQ_EMPTY(&channel->queued_io)) { 3170 op = TAILQ_FIRST(&channel->queued_io); 3171 TAILQ_REMOVE(&channel->queued_io, op, link); 3172 bs_user_op_abort(op, -EIO); 3173 } 3174 3175 free(channel->req_mem); 3176 spdk_free(channel->new_cluster_page); 3177 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 3178 } 3179 3180 static void 3181 bs_dev_destroy(void *io_device) 3182 { 3183 struct spdk_blob_store *bs = io_device; 3184 struct spdk_blob *blob, *blob_tmp; 3185 3186 bs->dev->destroy(bs->dev); 3187 3188 RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) { 3189 RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob); 3190 spdk_bit_array_clear(bs->open_blobids, blob->id); 3191 blob_free(blob); 3192 } 3193 3194 pthread_mutex_destroy(&bs->used_clusters_mutex); 3195 3196 spdk_bit_array_free(&bs->open_blobids); 3197 spdk_bit_array_free(&bs->used_blobids); 3198 spdk_bit_array_free(&bs->used_md_pages); 3199 spdk_bit_pool_free(&bs->used_clusters); 3200 /* 3201 * If this function is called for any reason except a successful unload, 3202 * the unload_cpl type will be NONE and this will be a nop. 
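* bs_dev_destroy runs as the spdk_io_device_unregister() callback, i.e.
* only after every channel has been destroyed, so freeing the remaining
* open blobs and the blobstore itself is safe at this point.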
3203 */ 3204 bs_call_cpl(&bs->unload_cpl, bs->unload_err); 3205 3206 free(bs); 3207 } 3208 3209 static int 3210 bs_blob_list_add(struct spdk_blob *blob) 3211 { 3212 spdk_blob_id snapshot_id; 3213 struct spdk_blob_list *snapshot_entry = NULL; 3214 struct spdk_blob_list *clone_entry = NULL; 3215 3216 assert(blob != NULL); 3217 3218 snapshot_id = blob->parent_id; 3219 if (snapshot_id == SPDK_BLOBID_INVALID) { 3220 return 0; 3221 } 3222 3223 snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id); 3224 if (snapshot_entry == NULL) { 3225 /* Snapshot not found */ 3226 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 3227 if (snapshot_entry == NULL) { 3228 return -ENOMEM; 3229 } 3230 snapshot_entry->id = snapshot_id; 3231 TAILQ_INIT(&snapshot_entry->clones); 3232 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 3233 } else { 3234 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 3235 if (clone_entry->id == blob->id) { 3236 break; 3237 } 3238 } 3239 } 3240 3241 if (clone_entry == NULL) { 3242 /* Clone not found */ 3243 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 3244 if (clone_entry == NULL) { 3245 return -ENOMEM; 3246 } 3247 clone_entry->id = blob->id; 3248 TAILQ_INIT(&clone_entry->clones); 3249 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 3250 snapshot_entry->clone_count++; 3251 } 3252 3253 return 0; 3254 } 3255 3256 static void 3257 bs_blob_list_remove(struct spdk_blob *blob) 3258 { 3259 struct spdk_blob_list *snapshot_entry = NULL; 3260 struct spdk_blob_list *clone_entry = NULL; 3261 3262 blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3263 3264 if (snapshot_entry == NULL) { 3265 return; 3266 } 3267 3268 blob->parent_id = SPDK_BLOBID_INVALID; 3269 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3270 free(clone_entry); 3271 3272 snapshot_entry->clone_count--; 3273 } 3274 3275 static int 3276 bs_blob_list_free(struct spdk_blob_store *bs) 3277 { 3278 struct spdk_blob_list *snapshot_entry; 3279 struct spdk_blob_list *snapshot_entry_tmp; 3280 struct spdk_blob_list *clone_entry; 3281 struct spdk_blob_list *clone_entry_tmp; 3282 3283 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3284 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3285 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3286 free(clone_entry); 3287 } 3288 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3289 free(snapshot_entry); 3290 } 3291 3292 return 0; 3293 } 3294 3295 static void 3296 bs_free(struct spdk_blob_store *bs) 3297 { 3298 bs_blob_list_free(bs); 3299 3300 bs_unregister_md_thread(bs); 3301 spdk_io_device_unregister(bs, bs_dev_destroy); 3302 } 3303 3304 void 3305 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size) 3306 { 3307 3308 if (!opts) { 3309 SPDK_ERRLOG("opts should not be NULL\n"); 3310 return; 3311 } 3312 3313 if (!opts_size) { 3314 SPDK_ERRLOG("opts_size should not be zero value\n"); 3315 return; 3316 } 3317 3318 memset(opts, 0, opts_size); 3319 opts->opts_size = opts_size; 3320 3321 #define FIELD_OK(field) \ 3322 offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size 3323 3324 #define SET_FIELD(field, value) \ 3325 if (FIELD_OK(field)) { \ 3326 opts->field = value; \ 3327 } \ 3328 3329 SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ); 3330 SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3331 SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES); 3332 SET_FIELD(max_channel_ops, 
SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS); 3333 SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP); 3334 3335 if (FIELD_OK(bstype)) { 3336 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3337 } 3338 3339 SET_FIELD(iter_cb_fn, NULL); 3340 SET_FIELD(iter_cb_arg, NULL); 3341 SET_FIELD(force_recover, false); 3342 3343 #undef FIELD_OK 3344 #undef SET_FIELD 3345 } 3346 3347 static int 3348 bs_opts_verify(struct spdk_bs_opts *opts) 3349 { 3350 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3351 opts->max_channel_ops == 0) { 3352 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3353 return -1; 3354 } 3355 3356 return 0; 3357 } 3358 3359 /* START spdk_bs_load */ 3360 3361 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */ 3362 3363 struct spdk_bs_load_ctx { 3364 struct spdk_blob_store *bs; 3365 struct spdk_bs_super_block *super; 3366 3367 struct spdk_bs_md_mask *mask; 3368 bool in_page_chain; 3369 uint32_t page_index; 3370 uint32_t cur_page; 3371 struct spdk_blob_md_page *page; 3372 3373 uint64_t num_extent_pages; 3374 uint32_t *extent_page_num; 3375 struct spdk_blob_md_page *extent_pages; 3376 struct spdk_bit_array *used_clusters; 3377 3378 spdk_bs_sequence_t *seq; 3379 spdk_blob_op_with_handle_complete iter_cb_fn; 3380 void *iter_cb_arg; 3381 struct spdk_blob *blob; 3382 spdk_blob_id blobid; 3383 3384 bool force_recover; 3385 3386 /* These fields are used in the spdk_bs_dump path. */ 3387 bool dumping; 3388 FILE *fp; 3389 spdk_bs_dump_print_xattr print_xattr_fn; 3390 char xattr_name[4096]; 3391 }; 3392 3393 static int 3394 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs, 3395 struct spdk_bs_load_ctx **_ctx) 3396 { 3397 struct spdk_blob_store *bs; 3398 struct spdk_bs_load_ctx *ctx; 3399 uint64_t dev_size; 3400 int rc; 3401 3402 dev_size = dev->blocklen * dev->blockcnt; 3403 if (dev_size < opts->cluster_sz) { 3404 /* Device size cannot be smaller than cluster size of blobstore */ 3405 SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3406 dev_size, opts->cluster_sz); 3407 return -ENOSPC; 3408 } 3409 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3410 /* Cluster size cannot be smaller than page size */ 3411 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3412 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3413 return -EINVAL; 3414 } 3415 bs = calloc(1, sizeof(struct spdk_blob_store)); 3416 if (!bs) { 3417 return -ENOMEM; 3418 } 3419 3420 ctx = calloc(1, sizeof(struct spdk_bs_load_ctx)); 3421 if (!ctx) { 3422 free(bs); 3423 return -ENOMEM; 3424 } 3425 3426 ctx->bs = bs; 3427 ctx->iter_cb_fn = opts->iter_cb_fn; 3428 ctx->iter_cb_arg = opts->iter_cb_arg; 3429 ctx->force_recover = opts->force_recover; 3430 3431 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 3432 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3433 if (!ctx->super) { 3434 free(ctx); 3435 free(bs); 3436 return -ENOMEM; 3437 } 3438 3439 RB_INIT(&bs->open_blobs); 3440 TAILQ_INIT(&bs->snapshots); 3441 bs->dev = dev; 3442 bs->md_thread = spdk_get_thread(); 3443 assert(bs->md_thread != NULL); 3444 3445 /* 3446 * Do not use bs_lba_to_cluster() here since blockcnt may not be an 3447 * even multiple of the cluster size. 
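* Hypothetical example (illustrative numbers): blocklen = 512 and
* cluster_sz = 1 MiB give 2048 blocks per cluster, so a device with
* blockcnt = 10,000,000 yields total_clusters = 10,000,000 / 2048 =
* 4882 and the trailing 1,664 blocks are simply never used.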
3448 */ 3449 bs->cluster_sz = opts->cluster_sz; 3450 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3451 ctx->used_clusters = spdk_bit_array_create(bs->total_clusters); 3452 if (!ctx->used_clusters) { 3453 spdk_free(ctx->super); 3454 free(ctx); 3455 free(bs); 3456 return -ENOMEM; 3457 } 3458 3459 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3460 if (spdk_u32_is_pow2(bs->pages_per_cluster)) { 3461 bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster); 3462 } 3463 bs->num_free_clusters = bs->total_clusters; 3464 bs->io_unit_size = dev->blocklen; 3465 3466 bs->max_channel_ops = opts->max_channel_ops; 3467 bs->super_blob = SPDK_BLOBID_INVALID; 3468 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3469 3470 /* The metadata is assumed to be at least 1 page */ 3471 bs->used_md_pages = spdk_bit_array_create(1); 3472 bs->used_blobids = spdk_bit_array_create(0); 3473 bs->open_blobids = spdk_bit_array_create(0); 3474 3475 pthread_mutex_init(&bs->used_clusters_mutex, NULL); 3476 3477 spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy, 3478 sizeof(struct spdk_bs_channel), "blobstore"); 3479 rc = bs_register_md_thread(bs); 3480 if (rc == -1) { 3481 spdk_io_device_unregister(bs, NULL); 3482 pthread_mutex_destroy(&bs->used_clusters_mutex); 3483 spdk_bit_array_free(&bs->open_blobids); 3484 spdk_bit_array_free(&bs->used_blobids); 3485 spdk_bit_array_free(&bs->used_md_pages); 3486 spdk_bit_array_free(&ctx->used_clusters); 3487 spdk_free(ctx->super); 3488 free(ctx); 3489 free(bs); 3490 /* FIXME: this is a lie but don't know how to get a proper error code here */ 3491 return -ENOMEM; 3492 } 3493 3494 *_ctx = ctx; 3495 *_bs = bs; 3496 return 0; 3497 } 3498 3499 static void 3500 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3501 { 3502 assert(bserrno != 0); 3503 3504 spdk_free(ctx->super); 3505 bs_sequence_finish(ctx->seq, bserrno); 3506 bs_free(ctx->bs); 3507 spdk_bit_array_free(&ctx->used_clusters); 3508 free(ctx); 3509 } 3510 3511 static void 3512 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3513 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3514 { 3515 /* Update the values in the super block */ 3516 super->super_blob = bs->super_blob; 3517 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3518 super->crc = blob_md_page_calc_crc(super); 3519 bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0), 3520 bs_byte_to_lba(bs, sizeof(*super)), 3521 cb_fn, cb_arg); 3522 } 3523 3524 static void 3525 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3526 { 3527 struct spdk_bs_load_ctx *ctx = arg; 3528 uint64_t mask_size, lba, lba_count; 3529 3530 /* Write out the used clusters mask */ 3531 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3532 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3533 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3534 if (!ctx->mask) { 3535 bs_load_ctx_fail(ctx, -ENOMEM); 3536 return; 3537 } 3538 3539 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 3540 ctx->mask->length = ctx->bs->total_clusters; 3541 /* We could get here through the normal unload path, or through dirty 3542 * shutdown recovery. For the normal unload path, we use the mask from 3543 * the bit pool. For dirty shutdown recovery, we don't have a bit pool yet - 3544 * only the bit array from the load ctx. 
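* Either way the serialized layout is the same: an spdk_bs_md_mask
* header (type plus length in bits) followed by the raw bitmap, written
* into used_cluster_mask_len pages starting at used_cluster_mask_start.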
3545 */ 3546 if (ctx->bs->used_clusters) { 3547 assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters)); 3548 spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask); 3549 } else { 3550 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters)); 3551 spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask); 3552 } 3553 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3554 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3555 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3556 } 3557 3558 static void 3559 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3560 { 3561 struct spdk_bs_load_ctx *ctx = arg; 3562 uint64_t mask_size, lba, lba_count; 3563 3564 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3565 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3566 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3567 if (!ctx->mask) { 3568 bs_load_ctx_fail(ctx, -ENOMEM); 3569 return; 3570 } 3571 3572 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 3573 ctx->mask->length = ctx->super->md_len; 3574 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 3575 3576 spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask); 3577 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3578 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3579 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3580 } 3581 3582 static void 3583 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3584 { 3585 struct spdk_bs_load_ctx *ctx = arg; 3586 uint64_t mask_size, lba, lba_count; 3587 3588 if (ctx->super->used_blobid_mask_len == 0) { 3589 /* 3590 * This is a pre-v3 on-disk format where the blobid mask does not get 3591 * written to disk. 
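* Skipping the write keeps the older layout intact; such a blobstore
* must instead recover the used blobid set by scanning metadata pages
* the next time it is loaded.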
3592 */ 3593 cb_fn(seq, arg, 0); 3594 return; 3595 } 3596 3597 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3598 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3599 SPDK_MALLOC_DMA); 3600 if (!ctx->mask) { 3601 bs_load_ctx_fail(ctx, -ENOMEM); 3602 return; 3603 } 3604 3605 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 3606 ctx->mask->length = ctx->super->md_len; 3607 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 3608 3609 spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask); 3610 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3611 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3612 bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3613 } 3614 3615 static void 3616 blob_set_thin_provision(struct spdk_blob *blob) 3617 { 3618 blob_verify_md_op(blob); 3619 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 3620 blob->state = SPDK_BLOB_STATE_DIRTY; 3621 } 3622 3623 static void 3624 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 3625 { 3626 blob_verify_md_op(blob); 3627 blob->clear_method = clear_method; 3628 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 3629 blob->state = SPDK_BLOB_STATE_DIRTY; 3630 } 3631 3632 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 3633 3634 static void 3635 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 3636 { 3637 struct spdk_bs_load_ctx *ctx = cb_arg; 3638 spdk_blob_id id; 3639 int64_t page_num; 3640 3641 /* Iterate to next blob (we can't use spdk_bs_iter_next function as our 3642 * last blob has been removed */ 3643 page_num = bs_blobid_to_page(ctx->blobid); 3644 page_num++; 3645 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 3646 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 3647 bs_load_iter(ctx, NULL, -ENOENT); 3648 return; 3649 } 3650 3651 id = bs_page_to_blobid(page_num); 3652 3653 spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx); 3654 } 3655 3656 static void 3657 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 3658 { 3659 struct spdk_bs_load_ctx *ctx = cb_arg; 3660 3661 if (bserrno != 0) { 3662 SPDK_ERRLOG("Failed to close corrupted blob\n"); 3663 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3664 return; 3665 } 3666 3667 spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx); 3668 } 3669 3670 static void 3671 bs_delete_corrupted_blob(void *cb_arg, int bserrno) 3672 { 3673 struct spdk_bs_load_ctx *ctx = cb_arg; 3674 uint64_t i; 3675 3676 if (bserrno != 0) { 3677 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 3678 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3679 return; 3680 } 3681 3682 /* Snapshot and clone have the same copy of cluster map and extent pages 3683 * at this point. Let's clear both for snapshot now, 3684 * so that it won't be cleared for clone later when we remove snapshot. 
3685 * Also set thin provision to pass data corruption check */ 3686 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 3687 ctx->blob->active.clusters[i] = 0; 3688 } 3689 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 3690 ctx->blob->active.extent_pages[i] = 0; 3691 } 3692 3693 ctx->blob->md_ro = false; 3694 3695 blob_set_thin_provision(ctx->blob); 3696 3697 ctx->blobid = ctx->blob->id; 3698 3699 spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx); 3700 } 3701 3702 static void 3703 bs_update_corrupted_blob(void *cb_arg, int bserrno) 3704 { 3705 struct spdk_bs_load_ctx *ctx = cb_arg; 3706 3707 if (bserrno != 0) { 3708 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 3709 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3710 return; 3711 } 3712 3713 ctx->blob->md_ro = false; 3714 blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 3715 blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 3716 spdk_blob_set_read_only(ctx->blob); 3717 3718 if (ctx->iter_cb_fn) { 3719 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 3720 } 3721 bs_blob_list_add(ctx->blob); 3722 3723 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3724 } 3725 3726 static void 3727 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 3728 { 3729 struct spdk_bs_load_ctx *ctx = cb_arg; 3730 3731 if (bserrno != 0) { 3732 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 3733 spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx); 3734 return; 3735 } 3736 3737 if (blob->parent_id == ctx->blob->id) { 3738 /* Power failure occurred before updating clone (snapshot delete case) 3739 * or after updating clone (creating snapshot case) - keep snapshot */ 3740 spdk_blob_close(blob, bs_update_corrupted_blob, ctx); 3741 } else { 3742 /* Power failure occurred after updating clone (snapshot delete case) 3743 * or before updating clone (creating snapshot case) - remove snapshot */ 3744 spdk_blob_close(blob, bs_delete_corrupted_blob, ctx); 3745 } 3746 } 3747 3748 static void 3749 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 3750 { 3751 struct spdk_bs_load_ctx *ctx = arg; 3752 const void *value; 3753 size_t len; 3754 int rc = 0; 3755 3756 if (bserrno == 0) { 3757 /* Examine blob if it is corrupted after power failure. Fix 3758 * the ones that can be fixed and remove any other corrupted 3759 * ones. If it is not corrupted just process it */ 3760 rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 3761 if (rc != 0) { 3762 rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 3763 if (rc != 0) { 3764 /* Not corrupted - process it and continue with iterating through blobs */ 3765 if (ctx->iter_cb_fn) { 3766 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 3767 } 3768 bs_blob_list_add(blob); 3769 spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx); 3770 return; 3771 } 3772 3773 } 3774 3775 assert(len == sizeof(spdk_blob_id)); 3776 3777 ctx->blob = blob; 3778 3779 /* Open clone to check if we are able to fix this blob or should we remove it */ 3780 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx); 3781 return; 3782 } else if (bserrno == -ENOENT) { 3783 bserrno = 0; 3784 } else { 3785 /* 3786 * This case needs to be looked at further. Same problem 3787 * exists with applications that rely on explicit blob 3788 * iteration. We should just skip the blob that failed 3789 * to load and continue on to the next one. 
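* As written, however, any error other than -ENOENT falls through,
* aborts the iteration, and fails the whole load with that error.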
3790 */ 3791 SPDK_ERRLOG("Error in iterating blobs\n"); 3792 } 3793 3794 ctx->iter_cb_fn = NULL; 3795 3796 spdk_free(ctx->super); 3797 spdk_free(ctx->mask); 3798 bs_sequence_finish(ctx->seq, bserrno); 3799 free(ctx); 3800 } 3801 3802 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 3803 3804 static void 3805 bs_load_complete(struct spdk_bs_load_ctx *ctx) 3806 { 3807 ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters); 3808 if (ctx->dumping) { 3809 bs_dump_read_md_page(ctx->seq, ctx); 3810 return; 3811 } 3812 spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx); 3813 } 3814 3815 static void 3816 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3817 { 3818 struct spdk_bs_load_ctx *ctx = cb_arg; 3819 int rc; 3820 3821 /* The type must be correct */ 3822 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 3823 3824 /* The length of the mask (in bits) must not be greater than 3825 * the length of the buffer (converted to bits) */ 3826 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 3827 3828 /* The length of the mask must be exactly equal to the size 3829 * (in pages) of the metadata region */ 3830 assert(ctx->mask->length == ctx->super->md_len); 3831 3832 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 3833 if (rc < 0) { 3834 spdk_free(ctx->mask); 3835 bs_load_ctx_fail(ctx, rc); 3836 return; 3837 } 3838 3839 spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask); 3840 bs_load_complete(ctx); 3841 } 3842 3843 static void 3844 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3845 { 3846 struct spdk_bs_load_ctx *ctx = cb_arg; 3847 uint64_t lba, lba_count, mask_size; 3848 int rc; 3849 3850 if (bserrno != 0) { 3851 bs_load_ctx_fail(ctx, bserrno); 3852 return; 3853 } 3854 3855 /* The type must be correct */ 3856 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 3857 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 3858 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 3859 struct spdk_blob_md_page) * 8)); 3860 /* The length of the mask must be exactly equal to the total number of clusters */ 3861 assert(ctx->mask->length == ctx->bs->total_clusters); 3862 3863 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length); 3864 if (rc < 0) { 3865 spdk_free(ctx->mask); 3866 bs_load_ctx_fail(ctx, rc); 3867 return; 3868 } 3869 3870 spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask); 3871 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters); 3872 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 3873 3874 spdk_free(ctx->mask); 3875 3876 /* Read the used blobids mask */ 3877 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3878 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3879 SPDK_MALLOC_DMA); 3880 if (!ctx->mask) { 3881 bs_load_ctx_fail(ctx, -ENOMEM); 3882 return; 3883 } 3884 lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3885 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3886 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 3887 bs_load_used_blobids_cpl, ctx); 3888 } 3889 3890 static void 3891 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3892 { 3893 struct spdk_bs_load_ctx *ctx = cb_arg; 3894 uint64_t lba, lba_count, mask_size; 3895 int rc; 3896 3897 if (bserrno 
!= 0) { 3898 bs_load_ctx_fail(ctx, bserrno); 3899 return; 3900 } 3901 3902 /* The type must be correct */ 3903 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 3904 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 3905 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 3906 8)); 3907 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 3908 if (ctx->mask->length != ctx->super->md_len) { 3909 SPDK_ERRLOG("mismatched md_len in used_pages mask: " 3910 "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n", 3911 ctx->mask->length, ctx->super->md_len); 3912 assert(false); 3913 } 3914 3915 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 3916 if (rc < 0) { 3917 spdk_free(ctx->mask); 3918 bs_load_ctx_fail(ctx, rc); 3919 return; 3920 } 3921 3922 spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask); 3923 spdk_free(ctx->mask); 3924 3925 /* Read the used clusters mask */ 3926 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3927 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3928 SPDK_MALLOC_DMA); 3929 if (!ctx->mask) { 3930 bs_load_ctx_fail(ctx, -ENOMEM); 3931 return; 3932 } 3933 lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3934 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3935 bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 3936 bs_load_used_clusters_cpl, ctx); 3937 } 3938 3939 static void 3940 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 3941 { 3942 uint64_t lba, lba_count, mask_size; 3943 3944 /* Read the used pages mask */ 3945 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3946 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3947 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3948 if (!ctx->mask) { 3949 bs_load_ctx_fail(ctx, -ENOMEM); 3950 return; 3951 } 3952 3953 lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3954 lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3955 bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 3956 bs_load_used_pages_cpl, ctx); 3957 } 3958 3959 static int 3960 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 3961 { 3962 struct spdk_blob_store *bs = ctx->bs; 3963 struct spdk_blob_md_descriptor *desc; 3964 size_t cur_desc = 0; 3965 3966 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3967 while (cur_desc < sizeof(page->descriptors)) { 3968 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 3969 if (desc->length == 0) { 3970 /* If padding and length are 0, this terminates the page */ 3971 break; 3972 } 3973 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 3974 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 3975 unsigned int i, j; 3976 unsigned int cluster_count = 0; 3977 uint32_t cluster_idx; 3978 3979 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 3980 3981 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 3982 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 3983 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 3984 /* 3985 * cluster_idx = 0 means an unallocated cluster - don't mark that 3986 * in the used cluster map. 
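 * (Worked example: an RLE extent { cluster_idx = 10, length = 3 } walks
 * j = 0..2 and sets used-cluster bits 10, 11 and 12 below; a run with
 * cluster_idx = 0 only advances cluster_count.)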
3987 */ 3988 if (cluster_idx != 0) { 3989 SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j); 3990 spdk_bit_array_set(ctx->used_clusters, cluster_idx + j); 3991 if (bs->num_free_clusters == 0) { 3992 return -ENOSPC; 3993 } 3994 bs->num_free_clusters--; 3995 } 3996 cluster_count++; 3997 } 3998 } 3999 if (cluster_count == 0) { 4000 return -EINVAL; 4001 } 4002 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4003 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4004 uint32_t i; 4005 uint32_t cluster_count = 0; 4006 uint32_t cluster_idx; 4007 size_t cluster_idx_length; 4008 4009 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4010 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 4011 4012 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 4013 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 4014 return -EINVAL; 4015 } 4016 4017 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 4018 cluster_idx = desc_extent->cluster_idx[i]; 4019 /* 4020 * cluster_idx = 0 means an unallocated cluster - don't mark that 4021 * in the used cluster map. 4022 */ 4023 if (cluster_idx != 0) { 4024 if (cluster_idx < desc_extent->start_cluster_idx && 4025 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 4026 return -EINVAL; 4027 } 4028 spdk_bit_array_set(ctx->used_clusters, cluster_idx); 4029 if (bs->num_free_clusters == 0) { 4030 return -ENOSPC; 4031 } 4032 bs->num_free_clusters--; 4033 } 4034 cluster_count++; 4035 } 4036 4037 if (cluster_count == 0) { 4038 return -EINVAL; 4039 } 4040 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4041 /* Skip this item */ 4042 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4043 /* Skip this item */ 4044 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4045 /* Skip this item */ 4046 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4047 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 4048 uint32_t num_extent_pages = ctx->num_extent_pages; 4049 uint32_t i; 4050 size_t extent_pages_length; 4051 void *tmp; 4052 4053 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 4054 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 4055 4056 if (desc_extent_table->length == 0 || 4057 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 4058 return -EINVAL; 4059 } 4060 4061 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4062 if (desc_extent_table->extent_page[i].page_idx != 0) { 4063 if (desc_extent_table->extent_page[i].num_pages != 1) { 4064 return -EINVAL; 4065 } 4066 num_extent_pages += 1; 4067 } 4068 } 4069 4070 if (num_extent_pages > 0) { 4071 tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t)); 4072 if (tmp == NULL) { 4073 return -ENOMEM; 4074 } 4075 ctx->extent_page_num = tmp; 4076 4077 /* Extent table entries contain md page numbers for extent pages. 4078 * Zeroes represent unallocated extent pages, those are run-length-encoded. 
4079 */ 4080 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 4081 if (desc_extent_table->extent_page[i].page_idx != 0) { 4082 ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 4083 ctx->num_extent_pages += 1; 4084 } 4085 } 4086 } 4087 } else { 4088 /* Error */ 4089 return -EINVAL; 4090 } 4091 /* Advance to the next descriptor */ 4092 cur_desc += sizeof(*desc) + desc->length; 4093 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4094 break; 4095 } 4096 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4097 } 4098 return 0; 4099 } 4100 4101 static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 4102 { 4103 uint32_t crc; 4104 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4105 size_t desc_len; 4106 4107 crc = blob_md_page_calc_crc(page); 4108 if (crc != page->crc) { 4109 return false; 4110 } 4111 4112 /* Extent page should always be of sequence num 0. */ 4113 if (page->sequence_num != 0) { 4114 return false; 4115 } 4116 4117 /* Descriptor type must be EXTENT_PAGE. */ 4118 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4119 return false; 4120 } 4121 4122 /* Descriptor length cannot exceed the page. */ 4123 desc_len = sizeof(*desc) + desc->length; 4124 if (desc_len > sizeof(page->descriptors)) { 4125 return false; 4126 } 4127 4128 /* It has to be the only descriptor in the page. */ 4129 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 4130 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 4131 if (desc->length != 0) { 4132 return false; 4133 } 4134 } 4135 4136 return true; 4137 } 4138 4139 static bool bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 4140 { 4141 uint32_t crc; 4142 struct spdk_blob_md_page *page = ctx->page; 4143 4144 crc = blob_md_page_calc_crc(page); 4145 if (crc != page->crc) { 4146 return false; 4147 } 4148 4149 /* First page of a sequence should match the blobid. 
*/ 4150 if (page->sequence_num == 0 && 4151 bs_page_to_blobid(ctx->cur_page) != page->id) { 4152 return false; 4153 } 4154 assert(bs_load_cur_extent_page_valid(page) == false); 4155 4156 return true; 4157 } 4158 4159 static void 4160 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx); 4161 4162 static void 4163 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4164 { 4165 struct spdk_bs_load_ctx *ctx = cb_arg; 4166 4167 if (bserrno != 0) { 4168 bs_load_ctx_fail(ctx, bserrno); 4169 return; 4170 } 4171 4172 bs_load_complete(ctx); 4173 } 4174 4175 static void 4176 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4177 { 4178 struct spdk_bs_load_ctx *ctx = cb_arg; 4179 4180 spdk_free(ctx->mask); 4181 ctx->mask = NULL; 4182 4183 if (bserrno != 0) { 4184 bs_load_ctx_fail(ctx, bserrno); 4185 return; 4186 } 4187 4188 bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl); 4189 } 4190 4191 static void 4192 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4193 { 4194 struct spdk_bs_load_ctx *ctx = cb_arg; 4195 4196 spdk_free(ctx->mask); 4197 ctx->mask = NULL; 4198 4199 if (bserrno != 0) { 4200 bs_load_ctx_fail(ctx, bserrno); 4201 return; 4202 } 4203 4204 bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl); 4205 } 4206 4207 static void 4208 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx) 4209 { 4210 bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl); 4211 } 4212 4213 static void 4214 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx) 4215 { 4216 uint64_t num_md_clusters; 4217 uint64_t i; 4218 4219 ctx->in_page_chain = false; 4220 4221 do { 4222 ctx->page_index++; 4223 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 4224 4225 if (ctx->page_index < ctx->super->md_len) { 4226 ctx->cur_page = ctx->page_index; 4227 bs_load_replay_cur_md_page(ctx); 4228 } else { 4229 /* Claim all of the clusters used by the metadata */ 4230 num_md_clusters = spdk_divide_round_up( 4231 ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster); 4232 for (i = 0; i < num_md_clusters; i++) { 4233 spdk_bit_array_set(ctx->used_clusters, i); 4234 } 4235 ctx->bs->num_free_clusters -= num_md_clusters; 4236 spdk_free(ctx->page); 4237 bs_load_write_used_md(ctx); 4238 } 4239 } 4240 4241 static void 4242 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4243 { 4244 struct spdk_bs_load_ctx *ctx = cb_arg; 4245 uint32_t page_num; 4246 uint64_t i; 4247 4248 if (bserrno != 0) { 4249 spdk_free(ctx->extent_pages); 4250 bs_load_ctx_fail(ctx, bserrno); 4251 return; 4252 } 4253 4254 for (i = 0; i < ctx->num_extent_pages; i++) { 4255 /* Extent pages are only read when present within in chain md. 4256 * Integrity of md is not right if that page was not a valid extent page. 
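 * (bs_load_cur_extent_page_valid() re-validates the CRC, requires
 * sequence_num == 0 and accepts only a single EXTENT_PAGE descriptor, so a
 * torn or repurposed page fails the load with -EILSEQ below.)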
*/ 4257 if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) { 4258 spdk_free(ctx->extent_pages); 4259 bs_load_ctx_fail(ctx, -EILSEQ); 4260 return; 4261 } 4262 4263 page_num = ctx->extent_page_num[i]; 4264 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 4265 if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) { 4266 spdk_free(ctx->extent_pages); 4267 bs_load_ctx_fail(ctx, -EILSEQ); 4268 return; 4269 } 4270 } 4271 4272 spdk_free(ctx->extent_pages); 4273 free(ctx->extent_page_num); 4274 ctx->extent_page_num = NULL; 4275 ctx->num_extent_pages = 0; 4276 4277 bs_load_replay_md_chain_cpl(ctx); 4278 } 4279 4280 static void 4281 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx) 4282 { 4283 spdk_bs_batch_t *batch; 4284 uint32_t page; 4285 uint64_t lba; 4286 uint64_t i; 4287 4288 ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0, 4289 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4290 if (!ctx->extent_pages) { 4291 bs_load_ctx_fail(ctx, -ENOMEM); 4292 return; 4293 } 4294 4295 batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx); 4296 4297 for (i = 0; i < ctx->num_extent_pages; i++) { 4298 page = ctx->extent_page_num[i]; 4299 assert(page < ctx->super->md_len); 4300 lba = bs_md_page_to_lba(ctx->bs, page); 4301 bs_batch_read_dev(batch, &ctx->extent_pages[i], lba, 4302 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE)); 4303 } 4304 4305 bs_batch_close(batch); 4306 } 4307 4308 static void 4309 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4310 { 4311 struct spdk_bs_load_ctx *ctx = cb_arg; 4312 uint32_t page_num; 4313 struct spdk_blob_md_page *page; 4314 4315 if (bserrno != 0) { 4316 bs_load_ctx_fail(ctx, bserrno); 4317 return; 4318 } 4319 4320 page_num = ctx->cur_page; 4321 page = ctx->page; 4322 if (bs_load_cur_md_page_valid(ctx) == true) { 4323 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 4324 bs_claim_md_page(ctx->bs, page_num); 4325 if (page->sequence_num == 0) { 4326 SPDK_NOTICELOG("Recover: blob %" PRIu32 "\n", page_num); 4327 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 4328 } 4329 if (bs_load_replay_md_parse_page(ctx, page)) { 4330 bs_load_ctx_fail(ctx, -EILSEQ); 4331 return; 4332 } 4333 if (page->next != SPDK_INVALID_MD_PAGE) { 4334 ctx->in_page_chain = true; 4335 ctx->cur_page = page->next; 4336 bs_load_replay_cur_md_page(ctx); 4337 return; 4338 } 4339 if (ctx->num_extent_pages != 0) { 4340 bs_load_replay_extent_pages(ctx); 4341 return; 4342 } 4343 } 4344 } 4345 bs_load_replay_md_chain_cpl(ctx); 4346 } 4347 4348 static void 4349 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 4350 { 4351 uint64_t lba; 4352 4353 assert(ctx->cur_page < ctx->super->md_len); 4354 lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page); 4355 bs_sequence_read_dev(ctx->seq, ctx->page, lba, 4356 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4357 bs_load_replay_md_cpl, ctx); 4358 } 4359 4360 static void 4361 bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 4362 { 4363 ctx->page_index = 0; 4364 ctx->cur_page = 0; 4365 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4366 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4367 if (!ctx->page) { 4368 bs_load_ctx_fail(ctx, -ENOMEM); 4369 return; 4370 } 4371 bs_load_replay_cur_md_page(ctx); 4372 } 4373 4374 static void 4375 bs_recover(struct spdk_bs_load_ctx *ctx) 4376 { 4377 int rc; 4378 4379 SPDK_NOTICELOG("Performing recovery on blobstore\n"); 4380 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 4381 if (rc < 0) { 4382 
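		/* A resize failure here can only be an allocation failure,
		 * hence -ENOMEM. */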
bs_load_ctx_fail(ctx, -ENOMEM); 4383 return; 4384 } 4385 4386 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 4387 if (rc < 0) { 4388 bs_load_ctx_fail(ctx, -ENOMEM); 4389 return; 4390 } 4391 4392 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4393 if (rc < 0) { 4394 bs_load_ctx_fail(ctx, -ENOMEM); 4395 return; 4396 } 4397 4398 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len); 4399 if (rc < 0) { 4400 bs_load_ctx_fail(ctx, -ENOMEM); 4401 return; 4402 } 4403 4404 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4405 bs_load_replay_md(ctx); 4406 } 4407 4408 static int 4409 bs_parse_super(struct spdk_bs_load_ctx *ctx) 4410 { 4411 int rc; 4412 4413 if (ctx->super->size == 0) { 4414 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4415 } 4416 4417 if (ctx->super->io_unit_size == 0) { 4418 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4419 } 4420 4421 ctx->bs->clean = 1; 4422 ctx->bs->cluster_sz = ctx->super->cluster_size; 4423 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4424 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4425 if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) { 4426 ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster); 4427 } 4428 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4429 rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters); 4430 if (rc < 0) { 4431 return -ENOMEM; 4432 } 4433 ctx->bs->md_start = ctx->super->md_start; 4434 ctx->bs->md_len = ctx->super->md_len; 4435 rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len); 4436 if (rc < 0) { 4437 return -ENOMEM; 4438 } 4439 4440 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4441 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4442 ctx->bs->super_blob = ctx->super->super_blob; 4443 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4444 4445 return 0; 4446 } 4447 4448 static void 4449 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4450 { 4451 struct spdk_bs_load_ctx *ctx = cb_arg; 4452 uint32_t crc; 4453 int rc; 4454 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH]; 4455 4456 if (ctx->super->version > SPDK_BS_VERSION || 4457 ctx->super->version < SPDK_BS_INITIAL_VERSION) { 4458 bs_load_ctx_fail(ctx, -EILSEQ); 4459 return; 4460 } 4461 4462 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 4463 sizeof(ctx->super->signature)) != 0) { 4464 bs_load_ctx_fail(ctx, -EILSEQ); 4465 return; 4466 } 4467 4468 crc = blob_md_page_calc_crc(ctx->super); 4469 if (crc != ctx->super->crc) { 4470 bs_load_ctx_fail(ctx, -EILSEQ); 4471 return; 4472 } 4473 4474 if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 4475 SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n"); 4476 } else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 4477 SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless bstype\n"); 4478 } else { 4479 SPDK_DEBUGLOG(blob, "Unexpected bstype\n"); 4480 SPDK_LOGDUMP(blob, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 4481 SPDK_LOGDUMP(blob, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 4482 bs_load_ctx_fail(ctx, -ENXIO); 4483 return; 4484 } 4485 4486 if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) { 4487 SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 
"\n", 4488 ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size); 4489 bs_load_ctx_fail(ctx, -EILSEQ); 4490 return; 4491 } 4492 4493 rc = bs_parse_super(ctx); 4494 if (rc < 0) { 4495 bs_load_ctx_fail(ctx, rc); 4496 return; 4497 } 4498 4499 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) { 4500 bs_recover(ctx); 4501 } else { 4502 bs_load_read_used_pages(ctx); 4503 } 4504 } 4505 4506 static inline int 4507 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst) 4508 { 4509 4510 if (!src->opts_size) { 4511 SPDK_ERRLOG("opts_size should not be zero value\n"); 4512 return -1; 4513 } 4514 4515 #define FIELD_OK(field) \ 4516 offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size 4517 4518 #define SET_FIELD(field) \ 4519 if (FIELD_OK(field)) { \ 4520 dst->field = src->field; \ 4521 } \ 4522 4523 SET_FIELD(cluster_sz); 4524 SET_FIELD(num_md_pages); 4525 SET_FIELD(max_md_ops); 4526 SET_FIELD(max_channel_ops); 4527 SET_FIELD(clear_method); 4528 4529 if (FIELD_OK(bstype)) { 4530 memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype)); 4531 } 4532 SET_FIELD(iter_cb_fn); 4533 SET_FIELD(iter_cb_arg); 4534 SET_FIELD(force_recover); 4535 4536 dst->opts_size = src->opts_size; 4537 4538 /* You should not remove this statement, but need to update the assert statement 4539 * if you add a new field, and also add a corresponding SET_FIELD statement */ 4540 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 72, "Incorrect size"); 4541 4542 #undef FIELD_OK 4543 #undef SET_FIELD 4544 4545 return 0; 4546 } 4547 4548 void 4549 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4550 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4551 { 4552 struct spdk_blob_store *bs; 4553 struct spdk_bs_cpl cpl; 4554 struct spdk_bs_load_ctx *ctx; 4555 struct spdk_bs_opts opts = {}; 4556 int err; 4557 4558 SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev); 4559 4560 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4561 SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen); 4562 dev->destroy(dev); 4563 cb_fn(cb_arg, NULL, -EINVAL); 4564 return; 4565 } 4566 4567 spdk_bs_opts_init(&opts, sizeof(opts)); 4568 if (o) { 4569 if (bs_opts_copy(o, &opts)) { 4570 return; 4571 } 4572 } 4573 4574 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 4575 dev->destroy(dev); 4576 cb_fn(cb_arg, NULL, -EINVAL); 4577 return; 4578 } 4579 4580 err = bs_alloc(dev, &opts, &bs, &ctx); 4581 if (err) { 4582 dev->destroy(dev); 4583 cb_fn(cb_arg, NULL, err); 4584 return; 4585 } 4586 4587 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4588 cpl.u.bs_handle.cb_fn = cb_fn; 4589 cpl.u.bs_handle.cb_arg = cb_arg; 4590 cpl.u.bs_handle.bs = bs; 4591 4592 ctx->seq = bs_sequence_start(bs->md_channel, &cpl); 4593 if (!ctx->seq) { 4594 spdk_free(ctx->super); 4595 free(ctx); 4596 bs_free(bs); 4597 cb_fn(cb_arg, NULL, -ENOMEM); 4598 return; 4599 } 4600 4601 /* Read the super block */ 4602 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 4603 bs_byte_to_lba(bs, sizeof(*ctx->super)), 4604 bs_load_super_cpl, ctx); 4605 } 4606 4607 /* END spdk_bs_load */ 4608 4609 /* START spdk_bs_dump */ 4610 4611 static void 4612 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 4613 { 4614 spdk_free(ctx->super); 4615 4616 /* 4617 * We need to defer calling bs_call_cpl() until after 4618 * dev destruction, so tuck these away for later use. 
4619 */ 4620 ctx->bs->unload_err = bserrno; 4621 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 4622 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 4623 4624 bs_sequence_finish(seq, 0); 4625 bs_free(ctx->bs); 4626 free(ctx); 4627 } 4628 4629 static void 4630 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4631 { 4632 struct spdk_blob_md_descriptor_xattr *desc_xattr; 4633 uint32_t i; 4634 const char *type; 4635 4636 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 4637 4638 if (desc_xattr->length != 4639 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 4640 desc_xattr->name_length + desc_xattr->value_length) { 4641 } 4642 4643 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 4644 ctx->xattr_name[desc_xattr->name_length] = '\0'; 4645 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4646 type = "XATTR"; 4647 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4648 type = "XATTR_INTERNAL"; 4649 } else { 4650 assert(false); 4651 type = "XATTR_?"; 4652 } 4653 fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name); 4654 fprintf(ctx->fp, " value = \""); 4655 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 4656 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 4657 desc_xattr->value_length); 4658 fprintf(ctx->fp, "\"\n"); 4659 for (i = 0; i < desc_xattr->value_length; i++) { 4660 if (i % 16 == 0) { 4661 fprintf(ctx->fp, " "); 4662 } 4663 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 4664 if ((i + 1) % 16 == 0) { 4665 fprintf(ctx->fp, "\n"); 4666 } 4667 } 4668 if (i % 16 != 0) { 4669 fprintf(ctx->fp, "\n"); 4670 } 4671 } 4672 4673 struct type_flag_desc { 4674 uint64_t mask; 4675 uint64_t val; 4676 const char *name; 4677 }; 4678 4679 static void 4680 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags, 4681 struct type_flag_desc *desc, size_t numflags) 4682 { 4683 uint64_t covered = 0; 4684 size_t i; 4685 4686 for (i = 0; i < numflags; i++) { 4687 if ((desc[i].mask & flags) != desc[i].val) { 4688 continue; 4689 } 4690 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name); 4691 if (desc[i].mask != desc[i].val) { 4692 fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")", 4693 desc[i].mask, desc[i].val); 4694 } 4695 fprintf(ctx->fp, "\n"); 4696 covered |= desc[i].mask; 4697 } 4698 if ((flags & ~covered) != 0) { 4699 fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered); 4700 } 4701 } 4702 4703 static void 4704 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4705 { 4706 struct spdk_blob_md_descriptor_flags *type_desc; 4707 #define ADD_FLAG(f) { f, f, #f } 4708 #define ADD_MASK_VAL(m, v) { m, v, #v } 4709 static struct type_flag_desc invalid[] = { 4710 ADD_FLAG(SPDK_BLOB_THIN_PROV), 4711 ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR), 4712 ADD_FLAG(SPDK_BLOB_EXTENT_TABLE), 4713 }; 4714 static struct type_flag_desc data_ro[] = { 4715 ADD_FLAG(SPDK_BLOB_READ_ONLY), 4716 }; 4717 static struct type_flag_desc md_ro[] = { 4718 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT), 4719 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE), 4720 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP), 4721 ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES), 4722 }; 4723 #undef ADD_FLAG 4724 #undef ADD_MASK_VAL 4725 4726 type_desc = (struct 
spdk_blob_md_descriptor_flags *)desc; 4727 fprintf(ctx->fp, "Flags:\n"); 4728 fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags); 4729 bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid, 4730 SPDK_COUNTOF(invalid)); 4731 fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags); 4732 bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro, 4733 SPDK_COUNTOF(data_ro)); 4734 fprintf(ctx->fp, "\t md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags); 4735 bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro, 4736 SPDK_COUNTOF(md_ro)); 4737 } 4738 4739 static void 4740 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc) 4741 { 4742 struct spdk_blob_md_descriptor_extent_table *et_desc; 4743 uint64_t num_extent_pages; 4744 uint32_t et_idx; 4745 4746 et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc; 4747 num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) / 4748 sizeof(et_desc->extent_page[0]); 4749 4750 fprintf(ctx->fp, "Extent table:\n"); 4751 for (et_idx = 0; et_idx < num_extent_pages; et_idx++) { 4752 if (et_desc->extent_page[et_idx].page_idx == 0) { 4753 /* Zeroes represent unallocated extent pages. */ 4754 continue; 4755 } 4756 fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32 4757 " at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx, 4758 et_desc->extent_page[et_idx].num_pages, 4759 bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx)); 4760 } 4761 } 4762 4763 static void 4764 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx) 4765 { 4766 uint32_t page_idx = ctx->cur_page; 4767 struct spdk_blob_md_page *page = ctx->page; 4768 struct spdk_blob_md_descriptor *desc; 4769 size_t cur_desc = 0; 4770 uint32_t crc; 4771 4772 fprintf(ctx->fp, "=========\n"); 4773 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 4774 fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx)); 4775 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 4776 fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num); 4777 if (page->next == SPDK_INVALID_MD_PAGE) { 4778 fprintf(ctx->fp, "Next: None\n"); 4779 } else { 4780 fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next); 4781 } 4782 fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)"); 4783 if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) { 4784 fprintf(ctx->fp, " md"); 4785 } 4786 if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) { 4787 fprintf(ctx->fp, " blob"); 4788 } 4789 fprintf(ctx->fp, "\n"); 4790 4791 crc = blob_md_page_calc_crc(page); 4792 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 4793 4794 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4795 while (cur_desc < sizeof(page->descriptors)) { 4796 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4797 if (desc->length == 0) { 4798 /* If padding and length are 0, this terminates the page */ 4799 break; 4800 } 4801 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4802 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4803 unsigned int i; 4804 4805 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4806 4807 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4808 if (desc_extent_rle->extents[i].cluster_idx != 0) { 4809 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 4810 desc_extent_rle->extents[i].cluster_idx); 4811 } else { 4812 fprintf(ctx->fp, "Unallocated Extent - "); 4813 } 4814 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 4815 fprintf(ctx->fp, "\n"); 4816 } 4817 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4818 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4819 unsigned int i; 4820 4821 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4822 4823 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 4824 if (desc_extent->cluster_idx[i] != 0) { 4825 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 4826 desc_extent->cluster_idx[i]); 4827 } else { 4828 fprintf(ctx->fp, "Unallocated Extent"); 4829 } 4830 fprintf(ctx->fp, "\n"); 4831 } 4832 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4833 bs_dump_print_xattr(ctx, desc); 4834 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4835 bs_dump_print_xattr(ctx, desc); 4836 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4837 bs_dump_print_type_flags(ctx, desc); 4838 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 4839 bs_dump_print_extent_table(ctx, desc); 4840 } else { 4841 /* Error */ 4842 fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type); 4843 } 4844 /* Advance to the next descriptor */ 4845 cur_desc += sizeof(*desc) + desc->length; 4846 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4847 break; 4848 } 4849 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4850 } 4851 } 4852 4853 static void 4854 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4855 { 4856 struct spdk_bs_load_ctx *ctx = cb_arg; 4857 4858 if (bserrno != 0) { 4859 bs_dump_finish(seq, ctx, bserrno); 4860 return; 4861 } 4862 4863 if (ctx->page->id != 0) { 4864 bs_dump_print_md_page(ctx); 4865 } 4866 4867 ctx->cur_page++; 4868 4869 if (ctx->cur_page < ctx->super->md_len) { 4870 bs_dump_read_md_page(seq, ctx); 4871 } else { 4872 spdk_free(ctx->page); 4873 bs_dump_finish(seq, ctx, 0); 4874 } 4875 } 4876 4877 static void 4878 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 4879 { 4880 struct spdk_bs_load_ctx *ctx = cb_arg; 4881 uint64_t lba; 4882 4883 assert(ctx->cur_page < ctx->super->md_len); 4884 lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 4885 bs_sequence_read_dev(seq, ctx->page, lba, 4886 bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4887 bs_dump_read_md_page_cpl, ctx); 4888 } 4889 4890 static void 4891 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4892 { 4893 struct spdk_bs_load_ctx *ctx = cb_arg; 4894 int rc; 4895 4896 fprintf(ctx->fp, "Signature: \"%.8s\" ", 
ctx->super->signature); 4897 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 4898 sizeof(ctx->super->signature)) != 0) { 4899 fprintf(ctx->fp, "(Mismatch)\n"); 4900 bs_dump_finish(seq, ctx, bserrno); 4901 return; 4902 } else { 4903 fprintf(ctx->fp, "(OK)\n"); 4904 } 4905 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 4906 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 4907 (ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 4908 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 4909 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 4910 fprintf(ctx->fp, "Super Blob ID: "); 4911 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 4912 fprintf(ctx->fp, "(None)\n"); 4913 } else { 4914 fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob); 4915 } 4916 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 4917 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 4918 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 4919 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 4920 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 4921 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 4922 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 4923 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 4924 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 4925 4926 ctx->cur_page = 0; 4927 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, 4928 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4929 if (!ctx->page) { 4930 bs_dump_finish(seq, ctx, -ENOMEM); 4931 return; 4932 } 4933 4934 rc = bs_parse_super(ctx); 4935 if (rc < 0) { 4936 bs_load_ctx_fail(ctx, rc); 4937 return; 4938 } 4939 4940 bs_load_read_used_pages(ctx); 4941 } 4942 4943 void 4944 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 4945 spdk_bs_op_complete cb_fn, void *cb_arg) 4946 { 4947 struct spdk_blob_store *bs; 4948 struct spdk_bs_cpl cpl; 4949 struct spdk_bs_load_ctx *ctx; 4950 struct spdk_bs_opts opts = {}; 4951 int err; 4952 4953 SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev); 4954 4955 spdk_bs_opts_init(&opts, sizeof(opts)); 4956 4957 err = bs_alloc(dev, &opts, &bs, &ctx); 4958 if (err) { 4959 dev->destroy(dev); 4960 cb_fn(cb_arg, err); 4961 return; 4962 } 4963 4964 ctx->dumping = true; 4965 ctx->fp = fp; 4966 ctx->print_xattr_fn = print_xattr_fn; 4967 4968 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 4969 cpl.u.bs_basic.cb_fn = cb_fn; 4970 cpl.u.bs_basic.cb_arg = cb_arg; 4971 4972 ctx->seq = bs_sequence_start(bs->md_channel, &cpl); 4973 if (!ctx->seq) { 4974 spdk_free(ctx->super); 4975 free(ctx); 4976 bs_free(bs); 4977 cb_fn(cb_arg, -ENOMEM); 4978 return; 4979 } 4980 4981 /* Read the super block */ 4982 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 4983 bs_byte_to_lba(bs, sizeof(*ctx->super)), 4984 bs_dump_super_cpl, ctx); 4985 } 4986 4987 /* END spdk_bs_dump */ 4988 4989 /* START spdk_bs_init */ 4990 4991 static void 4992 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4993 { 4994 struct spdk_bs_load_ctx *ctx = cb_arg; 4995 4996 ctx->bs->used_clusters = 
spdk_bit_pool_create_from_array(ctx->used_clusters); 4997 spdk_free(ctx->super); 4998 free(ctx); 4999 5000 bs_sequence_finish(seq, bserrno); 5001 } 5002 5003 static void 5004 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5005 { 5006 struct spdk_bs_load_ctx *ctx = cb_arg; 5007 5008 /* Write super block */ 5009 bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0), 5010 bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 5011 bs_init_persist_super_cpl, ctx); 5012 } 5013 5014 void 5015 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 5016 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 5017 { 5018 struct spdk_bs_load_ctx *ctx; 5019 struct spdk_blob_store *bs; 5020 struct spdk_bs_cpl cpl; 5021 spdk_bs_sequence_t *seq; 5022 spdk_bs_batch_t *batch; 5023 uint64_t num_md_lba; 5024 uint64_t num_md_pages; 5025 uint64_t num_md_clusters; 5026 uint32_t i; 5027 struct spdk_bs_opts opts = {}; 5028 int rc; 5029 uint64_t lba, lba_count; 5030 5031 SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev); 5032 5033 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 5034 SPDK_ERRLOG("unsupported dev block length of %d\n", 5035 dev->blocklen); 5036 dev->destroy(dev); 5037 cb_fn(cb_arg, NULL, -EINVAL); 5038 return; 5039 } 5040 5041 spdk_bs_opts_init(&opts, sizeof(opts)); 5042 if (o) { 5043 if (bs_opts_copy(o, &opts)) { 5044 return; 5045 } 5046 } 5047 5048 if (bs_opts_verify(&opts) != 0) { 5049 dev->destroy(dev); 5050 cb_fn(cb_arg, NULL, -EINVAL); 5051 return; 5052 } 5053 5054 rc = bs_alloc(dev, &opts, &bs, &ctx); 5055 if (rc) { 5056 dev->destroy(dev); 5057 cb_fn(cb_arg, NULL, rc); 5058 return; 5059 } 5060 5061 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 5062 /* By default, allocate 1 page per cluster. 5063 * Technically, this over-allocates metadata 5064 * because more metadata will reduce the number 5065 * of usable clusters. This can be addressed with 5066 * more complex math in the future. 5067 */ 5068 bs->md_len = bs->total_clusters; 5069 } else { 5070 bs->md_len = opts.num_md_pages; 5071 } 5072 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 5073 if (rc < 0) { 5074 spdk_free(ctx->super); 5075 free(ctx); 5076 bs_free(bs); 5077 cb_fn(cb_arg, NULL, -ENOMEM); 5078 return; 5079 } 5080 5081 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 5082 if (rc < 0) { 5083 spdk_free(ctx->super); 5084 free(ctx); 5085 bs_free(bs); 5086 cb_fn(cb_arg, NULL, -ENOMEM); 5087 return; 5088 } 5089 5090 rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len); 5091 if (rc < 0) { 5092 spdk_free(ctx->super); 5093 free(ctx); 5094 bs_free(bs); 5095 cb_fn(cb_arg, NULL, -ENOMEM); 5096 return; 5097 } 5098 5099 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 5100 sizeof(ctx->super->signature)); 5101 ctx->super->version = SPDK_BS_VERSION; 5102 ctx->super->length = sizeof(*ctx->super); 5103 ctx->super->super_blob = bs->super_blob; 5104 ctx->super->clean = 0; 5105 ctx->super->cluster_size = bs->cluster_sz; 5106 ctx->super->io_unit_size = bs->io_unit_size; 5107 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 5108 5109 /* Calculate how many pages the metadata consumes at the front 5110 * of the disk. 5111 */ 5112 5113 /* The super block uses 1 page */ 5114 num_md_pages = 1; 5115 5116 /* The used_md_pages mask requires 1 bit per metadata page, rounded 5117 * up to the nearest page, plus a header. 
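 * (Worked example with illustrative numbers: md_len = 8192 md pages needs
 * 8192 / 8 = 1024 mask bytes; adding the struct spdk_bs_md_mask header and
 * rounding up to the 4 KiB SPDK_BS_PAGE_SIZE gives used_page_mask_len = 1.)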
5118 */ 5119 ctx->super->used_page_mask_start = num_md_pages; 5120 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5121 spdk_divide_round_up(bs->md_len, 8), 5122 SPDK_BS_PAGE_SIZE); 5123 num_md_pages += ctx->super->used_page_mask_len; 5124 5125 /* The used_clusters mask requires 1 bit per cluster, rounded 5126 * up to the nearest page, plus a header. 5127 */ 5128 ctx->super->used_cluster_mask_start = num_md_pages; 5129 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5130 spdk_divide_round_up(bs->total_clusters, 8), 5131 SPDK_BS_PAGE_SIZE); 5132 num_md_pages += ctx->super->used_cluster_mask_len; 5133 5134 /* The used_blobids mask requires 1 bit per metadata page, rounded 5135 * up to the nearest page, plus a header. 5136 */ 5137 ctx->super->used_blobid_mask_start = num_md_pages; 5138 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 5139 spdk_divide_round_up(bs->md_len, 8), 5140 SPDK_BS_PAGE_SIZE); 5141 num_md_pages += ctx->super->used_blobid_mask_len; 5142 5143 /* The metadata region size was chosen above */ 5144 ctx->super->md_start = bs->md_start = num_md_pages; 5145 ctx->super->md_len = bs->md_len; 5146 num_md_pages += bs->md_len; 5147 5148 num_md_lba = bs_page_to_lba(bs, num_md_pages); 5149 5150 ctx->super->size = dev->blockcnt * dev->blocklen; 5151 5152 ctx->super->crc = blob_md_page_calc_crc(ctx->super); 5153 5154 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 5155 if (num_md_clusters > bs->total_clusters) { 5156 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 5157 "please decrease number of pages reserved for metadata " 5158 "or increase cluster size.\n"); 5159 spdk_free(ctx->super); 5160 spdk_bit_array_free(&ctx->used_clusters); 5161 free(ctx); 5162 bs_free(bs); 5163 cb_fn(cb_arg, NULL, -ENOMEM); 5164 return; 5165 } 5166 /* Claim all of the clusters used by the metadata */ 5167 for (i = 0; i < num_md_clusters; i++) { 5168 spdk_bit_array_set(ctx->used_clusters, i); 5169 } 5170 5171 bs->num_free_clusters -= num_md_clusters; 5172 bs->total_data_clusters = bs->num_free_clusters; 5173 5174 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 5175 cpl.u.bs_handle.cb_fn = cb_fn; 5176 cpl.u.bs_handle.cb_arg = cb_arg; 5177 cpl.u.bs_handle.bs = bs; 5178 5179 seq = bs_sequence_start(bs->md_channel, &cpl); 5180 if (!seq) { 5181 spdk_free(ctx->super); 5182 free(ctx); 5183 bs_free(bs); 5184 cb_fn(cb_arg, NULL, -ENOMEM); 5185 return; 5186 } 5187 5188 batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx); 5189 5190 /* Clear metadata space */ 5191 bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 5192 5193 lba = num_md_lba; 5194 lba_count = ctx->bs->dev->blockcnt - lba; 5195 switch (opts.clear_method) { 5196 case BS_CLEAR_WITH_UNMAP: 5197 /* Trim data clusters */ 5198 bs_batch_unmap_dev(batch, lba, lba_count); 5199 break; 5200 case BS_CLEAR_WITH_WRITE_ZEROES: 5201 /* Write_zeroes to data clusters */ 5202 bs_batch_write_zeroes_dev(batch, lba, lba_count); 5203 break; 5204 case BS_CLEAR_WITH_NONE: 5205 default: 5206 break; 5207 } 5208 5209 bs_batch_close(batch); 5210 } 5211 5212 /* END spdk_bs_init */ 5213 5214 /* START spdk_bs_destroy */ 5215 5216 static void 5217 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5218 { 5219 struct spdk_bs_load_ctx *ctx = cb_arg; 5220 struct spdk_blob_store *bs = ctx->bs; 5221 5222 /* 5223 * We need to defer calling bs_call_cpl() until after 5224 * dev destruction, so tuck these 
away for later use. 5225 */ 5226 bs->unload_err = bserrno; 5227 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5228 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5229 5230 bs_sequence_finish(seq, bserrno); 5231 5232 bs_free(bs); 5233 free(ctx); 5234 } 5235 5236 void 5237 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 5238 void *cb_arg) 5239 { 5240 struct spdk_bs_cpl cpl; 5241 spdk_bs_sequence_t *seq; 5242 struct spdk_bs_load_ctx *ctx; 5243 5244 SPDK_DEBUGLOG(blob, "Destroying blobstore\n"); 5245 5246 if (!RB_EMPTY(&bs->open_blobs)) { 5247 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5248 cb_fn(cb_arg, -EBUSY); 5249 return; 5250 } 5251 5252 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5253 cpl.u.bs_basic.cb_fn = cb_fn; 5254 cpl.u.bs_basic.cb_arg = cb_arg; 5255 5256 ctx = calloc(1, sizeof(*ctx)); 5257 if (!ctx) { 5258 cb_fn(cb_arg, -ENOMEM); 5259 return; 5260 } 5261 5262 ctx->bs = bs; 5263 5264 seq = bs_sequence_start(bs->md_channel, &cpl); 5265 if (!seq) { 5266 free(ctx); 5267 cb_fn(cb_arg, -ENOMEM); 5268 return; 5269 } 5270 5271 /* Write zeroes to the super block */ 5272 bs_sequence_write_zeroes_dev(seq, 5273 bs_page_to_lba(bs, 0), 5274 bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 5275 bs_destroy_trim_cpl, ctx); 5276 } 5277 5278 /* END spdk_bs_destroy */ 5279 5280 /* START spdk_bs_unload */ 5281 5282 static void 5283 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 5284 { 5285 spdk_bs_sequence_t *seq = ctx->seq; 5286 5287 spdk_free(ctx->super); 5288 5289 /* 5290 * We need to defer calling bs_call_cpl() until after 5291 * dev destruction, so tuck these away for later use. 5292 */ 5293 ctx->bs->unload_err = bserrno; 5294 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 5295 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 5296 5297 bs_sequence_finish(seq, bserrno); 5298 5299 bs_free(ctx->bs); 5300 free(ctx); 5301 } 5302 5303 static void 5304 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5305 { 5306 struct spdk_bs_load_ctx *ctx = cb_arg; 5307 5308 bs_unload_finish(ctx, bserrno); 5309 } 5310 5311 static void 5312 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5313 { 5314 struct spdk_bs_load_ctx *ctx = cb_arg; 5315 5316 spdk_free(ctx->mask); 5317 5318 if (bserrno != 0) { 5319 bs_unload_finish(ctx, bserrno); 5320 return; 5321 } 5322 5323 ctx->super->clean = 1; 5324 5325 bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx); 5326 } 5327 5328 static void 5329 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5330 { 5331 struct spdk_bs_load_ctx *ctx = cb_arg; 5332 5333 spdk_free(ctx->mask); 5334 ctx->mask = NULL; 5335 5336 if (bserrno != 0) { 5337 bs_unload_finish(ctx, bserrno); 5338 return; 5339 } 5340 5341 bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl); 5342 } 5343 5344 static void 5345 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5346 { 5347 struct spdk_bs_load_ctx *ctx = cb_arg; 5348 5349 spdk_free(ctx->mask); 5350 ctx->mask = NULL; 5351 5352 if (bserrno != 0) { 5353 bs_unload_finish(ctx, bserrno); 5354 return; 5355 } 5356 5357 bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl); 5358 } 5359 5360 static void 5361 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5362 { 5363 struct spdk_bs_load_ctx *ctx = cb_arg; 5364 5365 if (bserrno != 0) { 5366 bs_unload_finish(ctx, bserrno); 5367 return; 5368 } 5369 5370 
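	/* Persist state in a fixed chain: used_md_pages first, then
	 * used_blobids, then used_clusters, and finally the super block with
	 * clean = 1 (see the three completion callbacks above). */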
bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl); 5371 } 5372 5373 void 5374 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 5375 { 5376 struct spdk_bs_cpl cpl; 5377 struct spdk_bs_load_ctx *ctx; 5378 5379 SPDK_DEBUGLOG(blob, "Syncing blobstore\n"); 5380 5381 if (!RB_EMPTY(&bs->open_blobs)) { 5382 SPDK_ERRLOG("Blobstore still has open blobs\n"); 5383 cb_fn(cb_arg, -EBUSY); 5384 return; 5385 } 5386 5387 ctx = calloc(1, sizeof(*ctx)); 5388 if (!ctx) { 5389 cb_fn(cb_arg, -ENOMEM); 5390 return; 5391 } 5392 5393 ctx->bs = bs; 5394 5395 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5396 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5397 if (!ctx->super) { 5398 free(ctx); 5399 cb_fn(cb_arg, -ENOMEM); 5400 return; 5401 } 5402 5403 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5404 cpl.u.bs_basic.cb_fn = cb_fn; 5405 cpl.u.bs_basic.cb_arg = cb_arg; 5406 5407 ctx->seq = bs_sequence_start(bs->md_channel, &cpl); 5408 if (!ctx->seq) { 5409 spdk_free(ctx->super); 5410 free(ctx); 5411 cb_fn(cb_arg, -ENOMEM); 5412 return; 5413 } 5414 5415 /* Read super block */ 5416 bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0), 5417 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5418 bs_unload_read_super_cpl, ctx); 5419 } 5420 5421 /* END spdk_bs_unload */ 5422 5423 /* START spdk_bs_set_super */ 5424 5425 struct spdk_bs_set_super_ctx { 5426 struct spdk_blob_store *bs; 5427 struct spdk_bs_super_block *super; 5428 }; 5429 5430 static void 5431 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5432 { 5433 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5434 5435 if (bserrno != 0) { 5436 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 5437 } 5438 5439 spdk_free(ctx->super); 5440 5441 bs_sequence_finish(seq, bserrno); 5442 5443 free(ctx); 5444 } 5445 5446 static void 5447 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5448 { 5449 struct spdk_bs_set_super_ctx *ctx = cb_arg; 5450 5451 if (bserrno != 0) { 5452 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 5453 spdk_free(ctx->super); 5454 bs_sequence_finish(seq, bserrno); 5455 free(ctx); 5456 return; 5457 } 5458 5459 bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx); 5460 } 5461 5462 void 5463 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 5464 spdk_bs_op_complete cb_fn, void *cb_arg) 5465 { 5466 struct spdk_bs_cpl cpl; 5467 spdk_bs_sequence_t *seq; 5468 struct spdk_bs_set_super_ctx *ctx; 5469 5470 SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n"); 5471 5472 ctx = calloc(1, sizeof(*ctx)); 5473 if (!ctx) { 5474 cb_fn(cb_arg, -ENOMEM); 5475 return; 5476 } 5477 5478 ctx->bs = bs; 5479 5480 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 5481 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5482 if (!ctx->super) { 5483 free(ctx); 5484 cb_fn(cb_arg, -ENOMEM); 5485 return; 5486 } 5487 5488 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5489 cpl.u.bs_basic.cb_fn = cb_fn; 5490 cpl.u.bs_basic.cb_arg = cb_arg; 5491 5492 seq = bs_sequence_start(bs->md_channel, &cpl); 5493 if (!seq) { 5494 spdk_free(ctx->super); 5495 free(ctx); 5496 cb_fn(cb_arg, -ENOMEM); 5497 return; 5498 } 5499 5500 bs->super_blob = blobid; 5501 5502 /* Read super block */ 5503 bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0), 5504 bs_byte_to_lba(bs, sizeof(*ctx->super)), 5505 bs_set_super_read_cpl, ctx); 5506 } 5507 5508 /* END spdk_bs_set_super */ 5509 5510 void 5511 spdk_bs_get_super(struct spdk_blob_store *bs, 5512 
spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5513 { 5514 if (bs->super_blob == SPDK_BLOBID_INVALID) { 5515 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 5516 } else { 5517 cb_fn(cb_arg, bs->super_blob, 0); 5518 } 5519 } 5520 5521 uint64_t 5522 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 5523 { 5524 return bs->cluster_sz; 5525 } 5526 5527 uint64_t 5528 spdk_bs_get_page_size(struct spdk_blob_store *bs) 5529 { 5530 return SPDK_BS_PAGE_SIZE; 5531 } 5532 5533 uint64_t 5534 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 5535 { 5536 return bs->io_unit_size; 5537 } 5538 5539 uint64_t 5540 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 5541 { 5542 return bs->num_free_clusters; 5543 } 5544 5545 uint64_t 5546 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 5547 { 5548 return bs->total_data_clusters; 5549 } 5550 5551 static int 5552 bs_register_md_thread(struct spdk_blob_store *bs) 5553 { 5554 bs->md_channel = spdk_get_io_channel(bs); 5555 if (!bs->md_channel) { 5556 SPDK_ERRLOG("Failed to get IO channel.\n"); 5557 return -1; 5558 } 5559 5560 return 0; 5561 } 5562 5563 static int 5564 bs_unregister_md_thread(struct spdk_blob_store *bs) 5565 { 5566 spdk_put_io_channel(bs->md_channel); 5567 5568 return 0; 5569 } 5570 5571 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 5572 { 5573 assert(blob != NULL); 5574 5575 return blob->id; 5576 } 5577 5578 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 5579 { 5580 assert(blob != NULL); 5581 5582 return bs_cluster_to_page(blob->bs, blob->active.num_clusters); 5583 } 5584 5585 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob) 5586 { 5587 assert(blob != NULL); 5588 5589 return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs); 5590 } 5591 5592 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 5593 { 5594 assert(blob != NULL); 5595 5596 return blob->active.num_clusters; 5597 } 5598 5599 /* START spdk_bs_create_blob */ 5600 5601 static void 5602 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5603 { 5604 struct spdk_blob *blob = cb_arg; 5605 uint32_t page_idx = bs_blobid_to_page(blob->id); 5606 5607 if (bserrno != 0) { 5608 spdk_bit_array_clear(blob->bs->used_blobids, page_idx); 5609 bs_release_md_page(blob->bs, page_idx); 5610 } 5611 5612 blob_free(blob); 5613 5614 bs_sequence_finish(seq, bserrno); 5615 } 5616 5617 static int 5618 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 5619 bool internal) 5620 { 5621 uint64_t i; 5622 size_t value_len = 0; 5623 int rc; 5624 const void *value = NULL; 5625 if (xattrs->count > 0 && xattrs->get_value == NULL) { 5626 return -EINVAL; 5627 } 5628 for (i = 0; i < xattrs->count; i++) { 5629 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 5630 if (value == NULL || value_len == 0) { 5631 return -EINVAL; 5632 } 5633 rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 5634 if (rc < 0) { 5635 return rc; 5636 } 5637 } 5638 return 0; 5639 } 5640 5641 static void 5642 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst) 5643 { 5644 #define FIELD_OK(field) \ 5645 offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size 5646 5647 #define SET_FIELD(field) \ 5648 if (FIELD_OK(field)) { \ 5649 dst->field = src->field; \ 5650 } \ 5651 5652 SET_FIELD(num_clusters); 5653 SET_FIELD(thin_provision); 5654 SET_FIELD(clear_method); 5655 5656 if (FIELD_OK(xattrs)) { 5657 memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs)); 
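		/* xattrs is an embedded struct rather than a scalar, so copy it
		 * wholesale instead of assigning through SET_FIELD(). */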
5658 } 5659 5660 SET_FIELD(use_extent_table); 5661 5662 dst->opts_size = src->opts_size; 5663 5664 /* You should not remove this statement, but need to update the assert statement 5665 * if you add a new field, and also add a corresponding SET_FIELD statement */ 5666 SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 64, "Incorrect size"); 5667 5668 #undef FIELD_OK 5669 #undef SET_FIELD 5670 } 5671 5672 static void 5673 bs_create_blob(struct spdk_blob_store *bs, 5674 const struct spdk_blob_opts *opts, 5675 const struct spdk_blob_xattr_opts *internal_xattrs, 5676 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5677 { 5678 struct spdk_blob *blob; 5679 uint32_t page_idx; 5680 struct spdk_bs_cpl cpl; 5681 struct spdk_blob_opts opts_local; 5682 struct spdk_blob_xattr_opts internal_xattrs_default; 5683 spdk_bs_sequence_t *seq; 5684 spdk_blob_id id; 5685 int rc; 5686 5687 assert(spdk_get_thread() == bs->md_thread); 5688 5689 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 5690 if (page_idx == UINT32_MAX) { 5691 cb_fn(cb_arg, 0, -ENOMEM); 5692 return; 5693 } 5694 spdk_bit_array_set(bs->used_blobids, page_idx); 5695 bs_claim_md_page(bs, page_idx); 5696 5697 id = bs_page_to_blobid(page_idx); 5698 5699 SPDK_DEBUGLOG(blob, "Creating blob with id %" PRIu64 " at page %u\n", id, page_idx); 5700 5701 blob = blob_alloc(bs, id); 5702 if (!blob) { 5703 spdk_bit_array_clear(bs->used_blobids, page_idx); 5704 bs_release_md_page(bs, page_idx); 5705 cb_fn(cb_arg, 0, -ENOMEM); 5706 return; 5707 } 5708 5709 spdk_blob_opts_init(&opts_local, sizeof(opts_local)); 5710 if (opts) { 5711 blob_opts_copy(opts, &opts_local); 5712 } 5713 5714 blob->use_extent_table = opts_local.use_extent_table; 5715 if (blob->use_extent_table) { 5716 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 5717 } 5718 5719 if (!internal_xattrs) { 5720 blob_xattrs_init(&internal_xattrs_default); 5721 internal_xattrs = &internal_xattrs_default; 5722 } 5723 5724 rc = blob_set_xattrs(blob, &opts_local.xattrs, false); 5725 if (rc < 0) { 5726 blob_free(blob); 5727 spdk_bit_array_clear(bs->used_blobids, page_idx); 5728 bs_release_md_page(bs, page_idx); 5729 cb_fn(cb_arg, 0, rc); 5730 return; 5731 } 5732 5733 rc = blob_set_xattrs(blob, internal_xattrs, true); 5734 if (rc < 0) { 5735 blob_free(blob); 5736 spdk_bit_array_clear(bs->used_blobids, page_idx); 5737 bs_release_md_page(bs, page_idx); 5738 cb_fn(cb_arg, 0, rc); 5739 return; 5740 } 5741 5742 if (opts_local.thin_provision) { 5743 blob_set_thin_provision(blob); 5744 } 5745 5746 blob_set_clear_method(blob, opts_local.clear_method); 5747 5748 rc = blob_resize(blob, opts_local.num_clusters); 5749 if (rc < 0) { 5750 blob_free(blob); 5751 spdk_bit_array_clear(bs->used_blobids, page_idx); 5752 bs_release_md_page(bs, page_idx); 5753 cb_fn(cb_arg, 0, rc); 5754 return; 5755 } 5756 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 5757 cpl.u.blobid.cb_fn = cb_fn; 5758 cpl.u.blobid.cb_arg = cb_arg; 5759 cpl.u.blobid.blobid = blob->id; 5760 5761 seq = bs_sequence_start(bs->md_channel, &cpl); 5762 if (!seq) { 5763 blob_free(blob); 5764 spdk_bit_array_clear(bs->used_blobids, page_idx); 5765 bs_release_md_page(bs, page_idx); 5766 cb_fn(cb_arg, 0, -ENOMEM); 5767 return; 5768 } 5769 5770 blob_persist(seq, blob, bs_create_blob_cpl, blob); 5771 } 5772 5773 void spdk_bs_create_blob(struct spdk_blob_store *bs, 5774 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5775 { 5776 bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 5777 } 5778 5779 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct 
			     spdk_blob_opts *opts,
			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
}

/* END spdk_bs_create_blob */

/* START blob_cleanup */

struct spdk_clone_snapshot_ctx {
	struct spdk_bs_cpl cpl;
	int bserrno;
	bool frozen;

	struct spdk_io_channel *channel;

	/* Current cluster for inflate operation */
	uint64_t cluster;

	/* For inflation, force allocation of all unallocated clusters and remove
	 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */
	bool allocate_all;

	struct {
		spdk_blob_id id;
		struct spdk_blob *blob;
		bool md_ro;
	} original;
	struct {
		spdk_blob_id id;
		struct spdk_blob *blob;
	} new;

	/* xattrs specified for snapshot/clones only. They have no impact on
	 * the original blob's xattrs. */
	const struct spdk_blob_xattr_opts *xattrs;
};

static void
bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
	struct spdk_bs_cpl *cpl = &ctx->cpl;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	switch (cpl->type) {
	case SPDK_BS_CPL_TYPE_BLOBID:
		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
		break;
	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
		break;
	default:
		SPDK_UNREACHABLE();
		break;
	}

	free(ctx);
}

static void
bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	ctx->original.id = origblob->id;
	origblob->locked_operation_in_progress = false;

	/* Revert md_ro to original state */
	origblob->md_ro = ctx->original.md_ro;

	spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx);
}

static void
bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	if (ctx->frozen) {
		/* Unfreeze any outstanding I/O */
		blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx);
	} else {
		bs_snapshot_unfreeze_cpl(ctx, 0);
	}

}

static void
bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno)
{
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		if (ctx->bserrno != 0) {
			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
		} else {
			ctx->bserrno = bserrno;
		}
	}

	ctx->new.id = newblob->id;
	spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
}

/* END blob_cleanup */

/* START spdk_bs_create_snapshot */
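/*
 * Snapshot creation walks a fixed callback chain on the metadata thread:
 * open the origin blob, create the snapshot blob (thin-provisioned, same
 * cluster count, marked with the internal SNAPSHOT_IN_PROGRESS xattr),
 * freeze I/O on the origin, swap the two cluster maps, then sync both
 * blobs' metadata and unfreeze.
 *
 * Illustrative caller-side sketch (the callback name and its argument are
 * hypothetical, not part of this file):
 *
 *	static void
 *	snapshot_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			SPDK_NOTICELOG("snapshot blob %" PRIu64 " created\n", snapshot_id);
 *		}
 *	}
 *
 *	spdk_bs_create_snapshot(bs, blobid, NULL, snapshot_done, NULL);
 */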
static void
bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	uint64_t *cluster_temp;
	uint32_t *extent_page_temp;

	cluster_temp = blob1->active.clusters;
	blob1->active.clusters = blob2->active.clusters;
	blob2->active.clusters = cluster_temp;

	extent_page_temp = blob1->active.extent_pages;
	blob1->active.extent_pages = blob2->active.extent_pages;
	blob2->active.extent_pages = extent_page_temp;
}

static void
bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		bs_snapshot_swap_cluster_maps(newblob, origblob);
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
	bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	bs_blob_list_add(ctx->original.blob);

	spdk_blob_set_read_only(newblob);

	/* sync snapshot metadata */
	spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		/* return cluster map back to original */
		bs_snapshot_swap_cluster_maps(newblob, origblob);

		/* Newblob md sync failed. Valid clusters are only present in origblob.
		 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster
		 * map should have occurred. Newblob needs to be reverted to its
		 * thin_provisioned state at creation to properly close.
*/ 5971 blob_set_thin_provision(newblob); 5972 assert(spdk_mem_all_zero(newblob->active.clusters, 5973 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 5974 assert(spdk_mem_all_zero(newblob->active.extent_pages, 5975 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 5976 5977 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 5978 return; 5979 } 5980 5981 /* Set internal xattr for snapshot id */ 5982 bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 5983 if (bserrno != 0) { 5984 /* return cluster map back to original */ 5985 bs_snapshot_swap_cluster_maps(newblob, origblob); 5986 blob_set_thin_provision(newblob); 5987 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 5988 return; 5989 } 5990 5991 /* Create new back_bs_dev for snapshot */ 5992 origblob->back_bs_dev = bs_create_blob_bs_dev(newblob); 5993 if (origblob->back_bs_dev == NULL) { 5994 /* return cluster map back to original */ 5995 bs_snapshot_swap_cluster_maps(newblob, origblob); 5996 blob_set_thin_provision(newblob); 5997 bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 5998 return; 5999 } 6000 6001 bs_blob_list_remove(origblob); 6002 origblob->parent_id = newblob->id; 6003 /* set clone blob as thin provisioned */ 6004 blob_set_thin_provision(origblob); 6005 6006 bs_blob_list_add(newblob); 6007 6008 /* sync clone metadata */ 6009 spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx); 6010 } 6011 6012 static void 6013 bs_snapshot_freeze_cpl(void *cb_arg, int rc) 6014 { 6015 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6016 struct spdk_blob *origblob = ctx->original.blob; 6017 struct spdk_blob *newblob = ctx->new.blob; 6018 int bserrno; 6019 6020 if (rc != 0) { 6021 bs_clone_snapshot_newblob_cleanup(ctx, rc); 6022 return; 6023 } 6024 6025 ctx->frozen = true; 6026 6027 if (newblob->back_bs_dev) { 6028 newblob->back_bs_dev->destroy(newblob->back_bs_dev); 6029 } 6030 /* set new back_bs_dev for snapshot */ 6031 newblob->back_bs_dev = origblob->back_bs_dev; 6032 /* Set invalid flags from origblob */ 6033 newblob->invalid_flags = origblob->invalid_flags; 6034 6035 /* inherit parent from original blob if set */ 6036 newblob->parent_id = origblob->parent_id; 6037 if (origblob->parent_id != SPDK_BLOBID_INVALID) { 6038 /* Set internal xattr for snapshot id */ 6039 bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT, 6040 &origblob->parent_id, sizeof(spdk_blob_id), true); 6041 if (bserrno != 0) { 6042 bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 6043 return; 6044 } 6045 } 6046 6047 /* swap cluster maps */ 6048 bs_snapshot_swap_cluster_maps(newblob, origblob); 6049 6050 /* Set the clear method on the new blob to match the original. 
*/ 6051 blob_set_clear_method(newblob, origblob->clear_method); 6052 6053 /* sync snapshot metadata */ 6054 spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx); 6055 } 6056 6057 static void 6058 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6059 { 6060 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6061 struct spdk_blob *origblob = ctx->original.blob; 6062 struct spdk_blob *newblob = _blob; 6063 6064 if (bserrno != 0) { 6065 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6066 return; 6067 } 6068 6069 ctx->new.blob = newblob; 6070 assert(spdk_blob_is_thin_provisioned(newblob)); 6071 assert(spdk_mem_all_zero(newblob->active.clusters, 6072 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 6073 assert(spdk_mem_all_zero(newblob->active.extent_pages, 6074 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 6075 6076 blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx); 6077 } 6078 6079 static void 6080 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6081 { 6082 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6083 struct spdk_blob *origblob = ctx->original.blob; 6084 6085 if (bserrno != 0) { 6086 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6087 return; 6088 } 6089 6090 ctx->new.id = blobid; 6091 ctx->cpl.u.blobid.blobid = blobid; 6092 6093 spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx); 6094 } 6095 6096 6097 static void 6098 bs_xattr_snapshot(void *arg, const char *name, 6099 const void **value, size_t *value_len) 6100 { 6101 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 6102 6103 struct spdk_blob *blob = (struct spdk_blob *)arg; 6104 *value = &blob->id; 6105 *value_len = sizeof(blob->id); 6106 } 6107 6108 static void 6109 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6110 { 6111 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6112 struct spdk_blob_opts opts; 6113 struct spdk_blob_xattr_opts internal_xattrs; 6114 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 6115 6116 if (bserrno != 0) { 6117 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6118 return; 6119 } 6120 6121 ctx->original.blob = _blob; 6122 6123 if (_blob->data_ro || _blob->md_ro) { 6124 SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id %" PRIu64 "\n", 6125 _blob->id); 6126 ctx->bserrno = -EINVAL; 6127 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6128 return; 6129 } 6130 6131 if (_blob->locked_operation_in_progress) { 6132 SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n"); 6133 ctx->bserrno = -EBUSY; 6134 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6135 return; 6136 } 6137 6138 _blob->locked_operation_in_progress = true; 6139 6140 spdk_blob_opts_init(&opts, sizeof(opts)); 6141 blob_xattrs_init(&internal_xattrs); 6142 6143 /* Change the size of new blob to the same as in original blob, 6144 * but do not allocate clusters */ 6145 opts.thin_provision = true; 6146 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6147 opts.use_extent_table = _blob->use_extent_table; 6148 6149 /* If there are any xattrs specified for snapshot, set them now */ 6150 if (ctx->xattrs) { 6151 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6152 } 6153 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 6154 internal_xattrs.count = 1; 6155 
internal_xattrs.ctx = _blob; 6156 internal_xattrs.names = xattrs_names; 6157 internal_xattrs.get_value = bs_xattr_snapshot; 6158 6159 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6160 bs_snapshot_newblob_create_cpl, ctx); 6161 } 6162 6163 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 6164 const struct spdk_blob_xattr_opts *snapshot_xattrs, 6165 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6166 { 6167 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6168 6169 if (!ctx) { 6170 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6171 return; 6172 } 6173 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6174 ctx->cpl.u.blobid.cb_fn = cb_fn; 6175 ctx->cpl.u.blobid.cb_arg = cb_arg; 6176 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6177 ctx->bserrno = 0; 6178 ctx->frozen = false; 6179 ctx->original.id = blobid; 6180 ctx->xattrs = snapshot_xattrs; 6181 6182 spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx); 6183 } 6184 /* END spdk_bs_create_snapshot */ 6185 6186 /* START spdk_bs_create_clone */ 6187 6188 static void 6189 bs_xattr_clone(void *arg, const char *name, 6190 const void **value, size_t *value_len) 6191 { 6192 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 6193 6194 struct spdk_blob *blob = (struct spdk_blob *)arg; 6195 *value = &blob->id; 6196 *value_len = sizeof(blob->id); 6197 } 6198 6199 static void 6200 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6201 { 6202 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6203 struct spdk_blob *clone = _blob; 6204 6205 ctx->new.blob = clone; 6206 bs_blob_list_add(clone); 6207 6208 spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx); 6209 } 6210 6211 static void 6212 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 6213 { 6214 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6215 6216 ctx->cpl.u.blobid.blobid = blobid; 6217 spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx); 6218 } 6219 6220 static void 6221 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6222 { 6223 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6224 struct spdk_blob_opts opts; 6225 struct spdk_blob_xattr_opts internal_xattrs; 6226 char *xattr_names[] = { BLOB_SNAPSHOT }; 6227 6228 if (bserrno != 0) { 6229 bs_clone_snapshot_cleanup_finish(ctx, bserrno); 6230 return; 6231 } 6232 6233 ctx->original.blob = _blob; 6234 ctx->original.md_ro = _blob->md_ro; 6235 6236 if (!_blob->data_ro || !_blob->md_ro) { 6237 SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n"); 6238 ctx->bserrno = -EINVAL; 6239 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6240 return; 6241 } 6242 6243 if (_blob->locked_operation_in_progress) { 6244 SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n"); 6245 ctx->bserrno = -EBUSY; 6246 spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx); 6247 return; 6248 } 6249 6250 _blob->locked_operation_in_progress = true; 6251 6252 spdk_blob_opts_init(&opts, sizeof(opts)); 6253 blob_xattrs_init(&internal_xattrs); 6254 6255 opts.thin_provision = true; 6256 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 6257 opts.use_extent_table = _blob->use_extent_table; 6258 if (ctx->xattrs) { 6259 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 6260 } 6261 6262 /* Set internal xattr BLOB_SNAPSHOT */ 6263 
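	/* The clone records its parent snapshot's ID in this internal xattr, so the
	 * parent/child relationship can be re-established when the blobstore is loaded. */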
internal_xattrs.count = 1; 6264 internal_xattrs.ctx = _blob; 6265 internal_xattrs.names = xattr_names; 6266 internal_xattrs.get_value = bs_xattr_clone; 6267 6268 bs_create_blob(_blob->bs, &opts, &internal_xattrs, 6269 bs_clone_newblob_create_cpl, ctx); 6270 } 6271 6272 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 6273 const struct spdk_blob_xattr_opts *clone_xattrs, 6274 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 6275 { 6276 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6277 6278 if (!ctx) { 6279 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 6280 return; 6281 } 6282 6283 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 6284 ctx->cpl.u.blobid.cb_fn = cb_fn; 6285 ctx->cpl.u.blobid.cb_arg = cb_arg; 6286 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 6287 ctx->bserrno = 0; 6288 ctx->xattrs = clone_xattrs; 6289 ctx->original.id = blobid; 6290 6291 spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx); 6292 } 6293 6294 /* END spdk_bs_create_clone */ 6295 6296 /* START spdk_bs_inflate_blob */ 6297 6298 static void 6299 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 6300 { 6301 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 6302 struct spdk_blob *_blob = ctx->original.blob; 6303 6304 if (bserrno != 0) { 6305 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6306 return; 6307 } 6308 6309 /* Temporarily override md_ro flag for MD modification */ 6310 _blob->md_ro = false; 6311 6312 bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true); 6313 if (bserrno != 0) { 6314 bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 6315 return; 6316 } 6317 6318 assert(_parent != NULL); 6319 6320 bs_blob_list_remove(_blob); 6321 _blob->parent_id = _parent->id; 6322 6323 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 6324 _blob->back_bs_dev = bs_create_blob_bs_dev(_parent); 6325 bs_blob_list_add(_blob); 6326 6327 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6328 } 6329 6330 static void 6331 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx) 6332 { 6333 struct spdk_blob *_blob = ctx->original.blob; 6334 struct spdk_blob *_parent; 6335 6336 if (ctx->allocate_all) { 6337 /* remove thin provisioning */ 6338 bs_blob_list_remove(_blob); 6339 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 6340 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 6341 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 6342 _blob->back_bs_dev = NULL; 6343 _blob->parent_id = SPDK_BLOBID_INVALID; 6344 } else { 6345 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob; 6346 if (_parent->parent_id != SPDK_BLOBID_INVALID) { 6347 /* We must change the parent of the inflated blob */ 6348 spdk_bs_open_blob(_blob->bs, _parent->parent_id, 6349 bs_inflate_blob_set_parent_cpl, ctx); 6350 return; 6351 } 6352 6353 bs_blob_list_remove(_blob); 6354 blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 6355 _blob->parent_id = SPDK_BLOBID_INVALID; 6356 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 6357 _blob->back_bs_dev = bs_create_zeroes_dev(); 6358 } 6359 6360 /* Temporarily override md_ro flag for MD modification */ 6361 _blob->md_ro = false; 6362 _blob->state = SPDK_BLOB_STATE_DIRTY; 6363 6364 spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx); 6365 } 6366 6367 /* Check if cluster needs allocation */ 6368 static inline bool 6369 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all) 6370 
{
	struct spdk_blob_bs_dev *b;

	assert(blob != NULL);

	if (blob->active.clusters[cluster] != 0) {
		/* Cluster is already allocated */
		return false;
	}

	if (blob->parent_id == SPDK_BLOBID_INVALID) {
		/* Blob has no parent blob */
		return allocate_all;
	}

	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	return (allocate_all || b->blob->active.clusters[cluster] != 0);
}

static void
bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;
	struct spdk_bs_cpl cpl;
	spdk_bs_user_op_t *op;
	uint64_t offset;

	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
		if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
			break;
		}
	}

	if (ctx->cluster < _blob->active.num_clusters) {
		offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);

		/* We may safely increment a cluster before copying */
		ctx->cluster++;

		/* Use a dummy 0B read as a context for cluster copy */
		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
		cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
		cpl.u.blob_basic.cb_arg = ctx;

		op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
				      NULL, 0, offset, 0);
		if (!op) {
			bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
			return;
		}

		bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
	} else {
		bs_inflate_blob_done(ctx);
	}
}

static void
bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	uint64_t clusters_needed;
	uint64_t i;

	if (bserrno != 0) {
		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;
	ctx->original.md_ro = _blob->md_ro;

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) {
		/* This blob has no parent, so we cannot decouple it. */
		SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
		bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
		return;
	}

	if (spdk_blob_is_thin_provisioned(_blob) == false) {
		/* This is not a thin provisioned blob. No need to inflate. */
		bs_clone_snapshot_origblob_cleanup(ctx, 0);
		return;
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and another to actually claim them.
	 */
	clusters_needed = 0;
	for (i = 0; i < _blob->active.num_clusters; i++) {
		if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
			clusters_needed++;
		}
	}

	if (clusters_needed > _blob->bs->num_free_clusters) {
		/* Not enough free clusters. Cannot satisfy the request.
*/ 6482 bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 6483 return; 6484 } 6485 6486 ctx->cluster = 0; 6487 bs_inflate_blob_touch_next(ctx, 0); 6488 } 6489 6490 static void 6491 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6492 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 6493 { 6494 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 6495 6496 if (!ctx) { 6497 cb_fn(cb_arg, -ENOMEM); 6498 return; 6499 } 6500 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 6501 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 6502 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 6503 ctx->bserrno = 0; 6504 ctx->original.id = blobid; 6505 ctx->channel = channel; 6506 ctx->allocate_all = allocate_all; 6507 6508 spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx); 6509 } 6510 6511 void 6512 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6513 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 6514 { 6515 bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 6516 } 6517 6518 void 6519 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 6520 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 6521 { 6522 bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 6523 } 6524 /* END spdk_bs_inflate_blob */ 6525 6526 /* START spdk_blob_resize */ 6527 struct spdk_bs_resize_ctx { 6528 spdk_blob_op_complete cb_fn; 6529 void *cb_arg; 6530 struct spdk_blob *blob; 6531 uint64_t sz; 6532 int rc; 6533 }; 6534 6535 static void 6536 bs_resize_unfreeze_cpl(void *cb_arg, int rc) 6537 { 6538 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 6539 6540 if (rc != 0) { 6541 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 6542 } 6543 6544 if (ctx->rc != 0) { 6545 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 6546 rc = ctx->rc; 6547 } 6548 6549 ctx->blob->locked_operation_in_progress = false; 6550 6551 ctx->cb_fn(ctx->cb_arg, rc); 6552 free(ctx); 6553 } 6554 6555 static void 6556 bs_resize_freeze_cpl(void *cb_arg, int rc) 6557 { 6558 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 6559 6560 if (rc != 0) { 6561 ctx->blob->locked_operation_in_progress = false; 6562 ctx->cb_fn(ctx->cb_arg, rc); 6563 free(ctx); 6564 return; 6565 } 6566 6567 ctx->rc = blob_resize(ctx->blob, ctx->sz); 6568 6569 blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx); 6570 } 6571 6572 void 6573 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 6574 { 6575 struct spdk_bs_resize_ctx *ctx; 6576 6577 blob_verify_md_op(blob); 6578 6579 SPDK_DEBUGLOG(blob, "Resizing blob %" PRIu64 " to %" PRIu64 " clusters\n", blob->id, sz); 6580 6581 if (blob->md_ro) { 6582 cb_fn(cb_arg, -EPERM); 6583 return; 6584 } 6585 6586 if (sz == blob->active.num_clusters) { 6587 cb_fn(cb_arg, 0); 6588 return; 6589 } 6590 6591 if (blob->locked_operation_in_progress) { 6592 cb_fn(cb_arg, -EBUSY); 6593 return; 6594 } 6595 6596 ctx = calloc(1, sizeof(*ctx)); 6597 if (!ctx) { 6598 cb_fn(cb_arg, -ENOMEM); 6599 return; 6600 } 6601 6602 blob->locked_operation_in_progress = true; 6603 ctx->cb_fn = cb_fn; 6604 ctx->cb_arg = cb_arg; 6605 ctx->blob = blob; 6606 ctx->sz = sz; 6607 blob_freeze_io(blob, bs_resize_freeze_cpl, ctx); 6608 } 6609 6610 /* END spdk_blob_resize */ 6611 6612 6613 /* START spdk_bs_delete_blob */ 6614 6615 static void 6616 bs_delete_close_cpl(void *cb_arg, int bserrno) 6617 { 6618 spdk_bs_sequence_t *seq = 
cb_arg; 6619 6620 bs_sequence_finish(seq, bserrno); 6621 } 6622 6623 static void 6624 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6625 { 6626 struct spdk_blob *blob = cb_arg; 6627 6628 if (bserrno != 0) { 6629 /* 6630 * We already removed this blob from the blobstore tailq, so 6631 * we need to free it here since this is the last reference 6632 * to it. 6633 */ 6634 blob_free(blob); 6635 bs_delete_close_cpl(seq, bserrno); 6636 return; 6637 } 6638 6639 /* 6640 * This will immediately decrement the ref_count and call 6641 * the completion routine since the metadata state is clean. 6642 * By calling spdk_blob_close, we reduce the number of call 6643 * points into code that touches the blob->open_ref count 6644 * and the blobstore's blob list. 6645 */ 6646 spdk_blob_close(blob, bs_delete_close_cpl, seq); 6647 } 6648 6649 struct delete_snapshot_ctx { 6650 struct spdk_blob_list *parent_snapshot_entry; 6651 struct spdk_blob *snapshot; 6652 struct spdk_blob_md_page *page; 6653 bool snapshot_md_ro; 6654 struct spdk_blob *clone; 6655 bool clone_md_ro; 6656 spdk_blob_op_with_handle_complete cb_fn; 6657 void *cb_arg; 6658 int bserrno; 6659 uint32_t next_extent_page; 6660 }; 6661 6662 static void 6663 delete_blob_cleanup_finish(void *cb_arg, int bserrno) 6664 { 6665 struct delete_snapshot_ctx *ctx = cb_arg; 6666 6667 if (bserrno != 0) { 6668 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 6669 } 6670 6671 assert(ctx != NULL); 6672 6673 if (bserrno != 0 && ctx->bserrno == 0) { 6674 ctx->bserrno = bserrno; 6675 } 6676 6677 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 6678 spdk_free(ctx->page); 6679 free(ctx); 6680 } 6681 6682 static void 6683 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 6684 { 6685 struct delete_snapshot_ctx *ctx = cb_arg; 6686 6687 if (bserrno != 0) { 6688 ctx->bserrno = bserrno; 6689 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 6690 } 6691 6692 if (ctx->bserrno != 0) { 6693 assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL); 6694 RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot); 6695 spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id); 6696 } 6697 6698 ctx->snapshot->locked_operation_in_progress = false; 6699 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 6700 6701 spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx); 6702 } 6703 6704 static void 6705 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 6706 { 6707 struct delete_snapshot_ctx *ctx = cb_arg; 6708 6709 ctx->clone->locked_operation_in_progress = false; 6710 ctx->clone->md_ro = ctx->clone_md_ro; 6711 6712 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 6713 } 6714 6715 static void 6716 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6717 { 6718 struct delete_snapshot_ctx *ctx = cb_arg; 6719 6720 if (bserrno) { 6721 ctx->bserrno = bserrno; 6722 delete_snapshot_cleanup_clone(ctx, 0); 6723 return; 6724 } 6725 6726 ctx->clone->locked_operation_in_progress = false; 6727 spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx); 6728 } 6729 6730 static void 6731 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 6732 { 6733 struct delete_snapshot_ctx *ctx = cb_arg; 6734 struct spdk_blob_list *parent_snapshot_entry = NULL; 6735 struct spdk_blob_list *snapshot_entry = NULL; 6736 struct spdk_blob_list *clone_entry = NULL; 6737 struct spdk_blob_list *snapshot_clone_entry = NULL; 6738 6739 if (bserrno) { 6740 SPDK_ERRLOG("Failed to sync MD on blob\n"); 6741 
ctx->bserrno = bserrno; 6742 delete_snapshot_cleanup_clone(ctx, 0); 6743 return; 6744 } 6745 6746 /* Get snapshot entry for the snapshot we want to remove */ 6747 snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 6748 6749 assert(snapshot_entry != NULL); 6750 6751 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 6752 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 6753 assert(clone_entry != NULL); 6754 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 6755 snapshot_entry->clone_count--; 6756 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 6757 6758 if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) { 6759 /* This snapshot is at the same time a clone of another snapshot - we need to 6760 * update parent snapshot (remove current clone, add new one inherited from 6761 * the snapshot that is being removed) */ 6762 6763 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 6764 * snapshot that we are removing */ 6765 blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 6766 &snapshot_clone_entry); 6767 6768 /* Switch clone entry in parent snapshot */ 6769 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 6770 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 6771 free(snapshot_clone_entry); 6772 } else { 6773 /* No parent snapshot - just remove clone entry */ 6774 free(clone_entry); 6775 } 6776 6777 /* Restore md_ro flags */ 6778 ctx->clone->md_ro = ctx->clone_md_ro; 6779 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 6780 6781 blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx); 6782 } 6783 6784 static void 6785 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 6786 { 6787 struct delete_snapshot_ctx *ctx = cb_arg; 6788 uint64_t i; 6789 6790 ctx->snapshot->md_ro = false; 6791 6792 if (bserrno) { 6793 SPDK_ERRLOG("Failed to sync MD on clone\n"); 6794 ctx->bserrno = bserrno; 6795 6796 /* Restore snapshot to previous state */ 6797 bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 6798 if (bserrno != 0) { 6799 delete_snapshot_cleanup_clone(ctx, bserrno); 6800 return; 6801 } 6802 6803 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx); 6804 return; 6805 } 6806 6807 /* Clear cluster map entries for snapshot */ 6808 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 6809 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 6810 ctx->snapshot->active.clusters[i] = 0; 6811 } 6812 } 6813 for (i = 0; i < ctx->snapshot->active.num_extent_pages && 6814 i < ctx->clone->active.num_extent_pages; i++) { 6815 if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) { 6816 ctx->snapshot->active.extent_pages[i] = 0; 6817 } 6818 } 6819 6820 blob_set_thin_provision(ctx->snapshot); 6821 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 6822 6823 if (ctx->parent_snapshot_entry != NULL) { 6824 ctx->snapshot->back_bs_dev = NULL; 6825 } 6826 6827 spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx); 6828 } 6829 6830 static void 6831 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx) 6832 { 6833 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 6834 ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev); 6835 6836 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... 
*/ 6837 if (ctx->parent_snapshot_entry != NULL) { 6838 /* ...to parent snapshot */ 6839 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 6840 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 6841 blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 6842 sizeof(spdk_blob_id), 6843 true); 6844 } else { 6845 /* ...to blobid invalid and zeroes dev */ 6846 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 6847 ctx->clone->back_bs_dev = bs_create_zeroes_dev(); 6848 blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 6849 } 6850 6851 spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx); 6852 } 6853 6854 static void 6855 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno) 6856 { 6857 struct delete_snapshot_ctx *ctx = cb_arg; 6858 uint32_t *extent_page; 6859 uint64_t i; 6860 6861 for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages && 6862 i < ctx->clone->active.num_extent_pages; i++) { 6863 if (ctx->snapshot->active.extent_pages[i] == 0) { 6864 /* No extent page to use from snapshot */ 6865 continue; 6866 } 6867 6868 extent_page = &ctx->clone->active.extent_pages[i]; 6869 if (*extent_page == 0) { 6870 /* Copy extent page from snapshot when clone did not have a matching one */ 6871 *extent_page = ctx->snapshot->active.extent_pages[i]; 6872 continue; 6873 } 6874 6875 /* Clone and snapshot both contain partially filled matching extent pages. 6876 * Update the clone extent page in place with cluster map containing the mix of both. */ 6877 ctx->next_extent_page = i + 1; 6878 memset(ctx->page, 0, SPDK_BS_PAGE_SIZE); 6879 6880 blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page, 6881 delete_snapshot_update_extent_pages, ctx); 6882 return; 6883 } 6884 delete_snapshot_update_extent_pages_cpl(ctx); 6885 } 6886 6887 static void 6888 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 6889 { 6890 struct delete_snapshot_ctx *ctx = cb_arg; 6891 uint64_t i; 6892 6893 /* Temporarily override md_ro flag for clone for MD modification */ 6894 ctx->clone_md_ro = ctx->clone->md_ro; 6895 ctx->clone->md_ro = false; 6896 6897 if (bserrno) { 6898 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 6899 ctx->bserrno = bserrno; 6900 delete_snapshot_cleanup_clone(ctx, 0); 6901 return; 6902 } 6903 6904 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 6905 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 6906 if (ctx->clone->active.clusters[i] == 0) { 6907 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 6908 } 6909 } 6910 ctx->next_extent_page = 0; 6911 delete_snapshot_update_extent_pages(ctx, 0); 6912 } 6913 6914 static void 6915 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 6916 { 6917 struct delete_snapshot_ctx *ctx = cb_arg; 6918 6919 if (bserrno) { 6920 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 6921 ctx->bserrno = bserrno; 6922 delete_snapshot_cleanup_clone(ctx, 0); 6923 return; 6924 } 6925 6926 /* Temporarily override md_ro flag for snapshot for MD modification */ 6927 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 6928 ctx->snapshot->md_ro = false; 6929 6930 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 6931 ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 6932 sizeof(spdk_blob_id), true); 6933 if (ctx->bserrno != 0) { 6934 delete_snapshot_cleanup_clone(ctx, 0); 6935 return; 6936 } 6937 6938 
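	/* Persist the SNAPSHOT_PENDING_REMOVAL marker to disk before the clone is
	 * modified; if power is lost part-way through, recovery can use it to finish
	 * or unwind the deletion instead of leaving the chain inconsistent. */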
spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx); 6939 } 6940 6941 static void 6942 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 6943 { 6944 struct delete_snapshot_ctx *ctx = cb_arg; 6945 6946 if (bserrno) { 6947 SPDK_ERRLOG("Failed to open clone\n"); 6948 ctx->bserrno = bserrno; 6949 delete_snapshot_cleanup_snapshot(ctx, 0); 6950 return; 6951 } 6952 6953 ctx->clone = clone; 6954 6955 if (clone->locked_operation_in_progress) { 6956 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n"); 6957 ctx->bserrno = -EBUSY; 6958 spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx); 6959 return; 6960 } 6961 6962 clone->locked_operation_in_progress = true; 6963 6964 blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx); 6965 } 6966 6967 static void 6968 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 6969 { 6970 struct spdk_blob_list *snapshot_entry = NULL; 6971 struct spdk_blob_list *clone_entry = NULL; 6972 struct spdk_blob_list *snapshot_clone_entry = NULL; 6973 6974 /* Get snapshot entry for the snapshot we want to remove */ 6975 snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id); 6976 6977 assert(snapshot_entry != NULL); 6978 6979 /* Get clone of the snapshot (at this point there can be only one clone) */ 6980 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 6981 assert(snapshot_entry->clone_count == 1); 6982 assert(clone_entry != NULL); 6983 6984 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 6985 * snapshot that we are removing */ 6986 blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 6987 &snapshot_clone_entry); 6988 6989 spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx); 6990 } 6991 6992 static void 6993 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 6994 { 6995 spdk_bs_sequence_t *seq = cb_arg; 6996 struct spdk_blob_list *snapshot_entry = NULL; 6997 uint32_t page_num; 6998 6999 if (bserrno) { 7000 SPDK_ERRLOG("Failed to remove blob\n"); 7001 bs_sequence_finish(seq, bserrno); 7002 return; 7003 } 7004 7005 /* Remove snapshot from the list */ 7006 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 7007 if (snapshot_entry != NULL) { 7008 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 7009 free(snapshot_entry); 7010 } 7011 7012 page_num = bs_blobid_to_page(blob->id); 7013 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 7014 blob->state = SPDK_BLOB_STATE_DIRTY; 7015 blob->active.num_pages = 0; 7016 blob_resize(blob, 0); 7017 7018 blob_persist(seq, blob, bs_delete_persist_cpl, blob); 7019 } 7020 7021 static int 7022 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 7023 { 7024 struct spdk_blob_list *snapshot_entry = NULL; 7025 struct spdk_blob_list *clone_entry = NULL; 7026 struct spdk_blob *clone = NULL; 7027 bool has_one_clone = false; 7028 7029 /* Check if this is a snapshot with clones */ 7030 snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id); 7031 if (snapshot_entry != NULL) { 7032 if (snapshot_entry->clone_count > 1) { 7033 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 7034 return -EBUSY; 7035 } else if (snapshot_entry->clone_count == 1) { 7036 has_one_clone = true; 7037 } 7038 } 7039 7040 /* Check if someone has this blob open (besides this delete context): 7041 * - open_ref = 1 - only this context opened blob, so it is 
ok to remove it 7042 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 7043 * and that is ok, because we will update it accordingly */ 7044 if (blob->open_ref <= 2 && has_one_clone) { 7045 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 7046 assert(clone_entry != NULL); 7047 clone = blob_lookup(blob->bs, clone_entry->id); 7048 7049 if (blob->open_ref == 2 && clone == NULL) { 7050 /* Clone is closed and someone else opened this blob */ 7051 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7052 return -EBUSY; 7053 } 7054 7055 *update_clone = true; 7056 return 0; 7057 } 7058 7059 if (blob->open_ref > 1) { 7060 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 7061 return -EBUSY; 7062 } 7063 7064 assert(has_one_clone == false); 7065 *update_clone = false; 7066 return 0; 7067 } 7068 7069 static void 7070 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 7071 { 7072 spdk_bs_sequence_t *seq = cb_arg; 7073 7074 bs_sequence_finish(seq, -ENOMEM); 7075 } 7076 7077 static void 7078 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 7079 { 7080 spdk_bs_sequence_t *seq = cb_arg; 7081 struct delete_snapshot_ctx *ctx; 7082 bool update_clone = false; 7083 7084 if (bserrno != 0) { 7085 bs_sequence_finish(seq, bserrno); 7086 return; 7087 } 7088 7089 blob_verify_md_op(blob); 7090 7091 ctx = calloc(1, sizeof(*ctx)); 7092 if (ctx == NULL) { 7093 spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq); 7094 return; 7095 } 7096 7097 ctx->snapshot = blob; 7098 ctx->cb_fn = bs_delete_blob_finish; 7099 ctx->cb_arg = seq; 7100 7101 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 7102 ctx->bserrno = bs_is_blob_deletable(blob, &update_clone); 7103 if (ctx->bserrno) { 7104 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7105 return; 7106 } 7107 7108 if (blob->locked_operation_in_progress) { 7109 SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n"); 7110 ctx->bserrno = -EBUSY; 7111 spdk_blob_close(blob, delete_blob_cleanup_finish, ctx); 7112 return; 7113 } 7114 7115 blob->locked_operation_in_progress = true; 7116 7117 /* 7118 * Remove the blob from the blob_store list now, to ensure it does not 7119 * get returned after this point by blob_lookup(). 
	 */
	spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
	RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);

	if (update_clone) {
		ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->page) {
			ctx->bserrno = -ENOMEM;
			spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
			return;
		}
		/* This blob is a snapshot with active clone - update clone first */
		update_clone_on_snapshot_deletion(blob, ctx);
	} else {
		/* This blob does not have any clones - just remove it */
		bs_blob_list_remove(blob);
		bs_delete_blob_finish(seq, blob, 0);
		free(ctx);
	}
}

void
spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
		    spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	SPDK_DEBUGLOG(blob, "Deleting blob %" PRIu64 "\n", blobid);

	assert(spdk_get_thread() == bs->md_thread);

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq);
}

/* END spdk_bs_delete_blob */

/* START spdk_bs_open_blob */

static void
bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	struct spdk_blob *existing;

	if (bserrno != 0) {
		blob_free(blob);
		seq->cpl.u.blob_handle.blob = NULL;
		bs_sequence_finish(seq, bserrno);
		return;
	}

	existing = blob_lookup(blob->bs, blob->id);
	if (existing) {
		blob_free(blob);
		existing->open_ref++;
		seq->cpl.u.blob_handle.blob = existing;
		bs_sequence_finish(seq, 0);
		return;
	}

	blob->open_ref++;

	spdk_bit_array_set(blob->bs->open_blobids, blob->id);
	RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob);

	bs_sequence_finish(seq, bserrno);
}

static inline void
blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst)
{
#define FIELD_OK(field) \
	offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		dst->field = src->field; \
	} \

	SET_FIELD(clear_method);

	dst->opts_size = src->opts_size;

	/* You should not remove this statement, but need to update the assert statement
	 * if you add a new field, and also add a corresponding SET_FIELD statement */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 16, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

static void
bs_open_blob(struct spdk_blob_store *bs,
	     spdk_blob_id blobid,
	     struct spdk_blob_open_opts *opts,
	     spdk_blob_op_with_handle_complete cb_fn,
	     void *cb_arg)
{
	struct spdk_blob *blob;
	struct spdk_bs_cpl cpl;
	struct spdk_blob_open_opts opts_local;
	spdk_bs_sequence_t *seq;
	uint32_t page_num;

	SPDK_DEBUGLOG(blob, "Opening blob %" PRIu64 "\n", blobid);
	assert(spdk_get_thread() == bs->md_thread);

	page_num = bs_blobid_to_page(blobid);
	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
		/* Invalid blobid */
cb_fn(cb_arg, NULL, -ENOENT); 7242 return; 7243 } 7244 7245 blob = blob_lookup(bs, blobid); 7246 if (blob) { 7247 blob->open_ref++; 7248 cb_fn(cb_arg, blob, 0); 7249 return; 7250 } 7251 7252 blob = blob_alloc(bs, blobid); 7253 if (!blob) { 7254 cb_fn(cb_arg, NULL, -ENOMEM); 7255 return; 7256 } 7257 7258 spdk_blob_open_opts_init(&opts_local, sizeof(opts_local)); 7259 if (opts) { 7260 blob_open_opts_copy(opts, &opts_local); 7261 } 7262 7263 blob->clear_method = opts_local.clear_method; 7264 7265 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 7266 cpl.u.blob_handle.cb_fn = cb_fn; 7267 cpl.u.blob_handle.cb_arg = cb_arg; 7268 cpl.u.blob_handle.blob = blob; 7269 7270 seq = bs_sequence_start(bs->md_channel, &cpl); 7271 if (!seq) { 7272 blob_free(blob); 7273 cb_fn(cb_arg, NULL, -ENOMEM); 7274 return; 7275 } 7276 7277 blob_load(seq, blob, bs_open_blob_cpl, blob); 7278 } 7279 7280 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 7281 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7282 { 7283 bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 7284 } 7285 7286 void spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 7287 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7288 { 7289 bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 7290 } 7291 7292 /* END spdk_bs_open_blob */ 7293 7294 /* START spdk_blob_set_read_only */ 7295 int spdk_blob_set_read_only(struct spdk_blob *blob) 7296 { 7297 blob_verify_md_op(blob); 7298 7299 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 7300 7301 blob->state = SPDK_BLOB_STATE_DIRTY; 7302 return 0; 7303 } 7304 /* END spdk_blob_set_read_only */ 7305 7306 /* START spdk_blob_sync_md */ 7307 7308 static void 7309 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7310 { 7311 struct spdk_blob *blob = cb_arg; 7312 7313 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 7314 blob->data_ro = true; 7315 blob->md_ro = true; 7316 } 7317 7318 bs_sequence_finish(seq, bserrno); 7319 } 7320 7321 static void 7322 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7323 { 7324 struct spdk_bs_cpl cpl; 7325 spdk_bs_sequence_t *seq; 7326 7327 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7328 cpl.u.blob_basic.cb_fn = cb_fn; 7329 cpl.u.blob_basic.cb_arg = cb_arg; 7330 7331 seq = bs_sequence_start(blob->bs->md_channel, &cpl); 7332 if (!seq) { 7333 cb_fn(cb_arg, -ENOMEM); 7334 return; 7335 } 7336 7337 blob_persist(seq, blob, blob_sync_md_cpl, blob); 7338 } 7339 7340 void 7341 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7342 { 7343 blob_verify_md_op(blob); 7344 7345 SPDK_DEBUGLOG(blob, "Syncing blob %" PRIu64 "\n", blob->id); 7346 7347 if (blob->md_ro) { 7348 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 7349 cb_fn(cb_arg, 0); 7350 return; 7351 } 7352 7353 blob_sync_md(blob, cb_fn, cb_arg); 7354 } 7355 7356 /* END spdk_blob_sync_md */ 7357 7358 struct spdk_blob_insert_cluster_ctx { 7359 struct spdk_thread *thread; 7360 struct spdk_blob *blob; 7361 uint32_t cluster_num; /* cluster index in blob */ 7362 uint32_t cluster; /* cluster on disk */ 7363 uint32_t extent_page; /* extent page on disk */ 7364 struct spdk_blob_md_page *page; /* preallocated extent page */ 7365 int rc; 7366 spdk_blob_op_complete cb_fn; 7367 void *cb_arg; 7368 }; 7369 7370 static void 7371 blob_insert_cluster_msg_cpl(void *arg) 7372 { 7373 struct spdk_blob_insert_cluster_ctx *ctx = arg; 7374 7375 ctx->cb_fn(ctx->cb_arg, ctx->rc); 7376 free(ctx); 7377 } 7378 
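/*
 * Cluster insertion is a cross-thread handshake: a thread (typically an I/O
 * thread) that claimed a cluster calls blob_insert_cluster_on_md_thread(),
 * which forwards the request to the blobstore's metadata thread with
 * spdk_thread_send_msg(). The metadata thread updates the cluster map and
 * persists it (blob_insert_cluster_msg below), then messages the originating
 * thread back so the completion callback runs on the thread that started the
 * operation (blob_insert_cluster_msg_cpl above).
 */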
static void
blob_insert_cluster_msg_cb(void *arg, int bserrno)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;

	ctx->rc = bserrno;
	spdk_thread_send_msg(ctx->thread, blob_insert_cluster_msg_cpl, ctx);
}

static void
blob_insert_new_ep_cb(void *arg, int bserrno)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;
	uint32_t *extent_page;

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
	*extent_page = ctx->extent_page;
	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
	blob_sync_md(ctx->blob, blob_insert_cluster_msg_cb, ctx);
}

static void
blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	bs_sequence_finish(seq, bserrno);
}

static void
blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
		       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	spdk_bs_sequence_t *seq;
	struct spdk_bs_cpl cpl;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = bs_sequence_start(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	assert(page);
	page->next = SPDK_INVALID_MD_PAGE;
	page->id = blob->id;
	page->sequence_num = 0;

	blob_serialize_extent_page(blob, cluster_num, page);

	page->crc = blob_md_page_calc_crc(page);

	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);

	bs_sequence_write_dev(seq, page, bs_md_page_to_lba(blob->bs, extent),
			      bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
			      blob_persist_extent_page_cpl, page);
}

static void
blob_insert_cluster_msg(void *arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;
	uint32_t *extent_page;

	ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
	if (ctx->rc != 0) {
		spdk_thread_send_msg(ctx->thread, blob_insert_cluster_msg_cpl, ctx);
		return;
	}

	if (ctx->blob->use_extent_table == false) {
		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
		blob_sync_md(ctx->blob, blob_insert_cluster_msg_cb, ctx);
		return;
	}

	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
	if (*extent_page == 0) {
		/* Extent page requires allocation.
		 * It was already claimed in the used_md_pages map and placed in ctx. */
		assert(ctx->extent_page != 0);
		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
				       blob_insert_new_ep_cb, ctx);
	} else {
		/* It is possible for the original thread to have allocated an extent page
		 * for a different cluster in the same extent page. In that case proceed
		 * with updating the existing extent page, but release the additional one. */
		if (ctx->extent_page != 0) {
			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
			bs_release_md_page(ctx->blob->bs, ctx->extent_page);
			ctx->extent_page = 0;
		}
		/* Extent page already allocated.
		 * Every cluster allocation requires just an update of a single extent page.
*/ 7477 blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page, 7478 blob_insert_cluster_msg_cb, ctx); 7479 } 7480 } 7481 7482 static void 7483 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 7484 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page, 7485 spdk_blob_op_complete cb_fn, void *cb_arg) 7486 { 7487 struct spdk_blob_insert_cluster_ctx *ctx; 7488 7489 ctx = calloc(1, sizeof(*ctx)); 7490 if (ctx == NULL) { 7491 cb_fn(cb_arg, -ENOMEM); 7492 return; 7493 } 7494 7495 ctx->thread = spdk_get_thread(); 7496 ctx->blob = blob; 7497 ctx->cluster_num = cluster_num; 7498 ctx->cluster = cluster; 7499 ctx->extent_page = extent_page; 7500 ctx->page = page; 7501 ctx->cb_fn = cb_fn; 7502 ctx->cb_arg = cb_arg; 7503 7504 spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx); 7505 } 7506 7507 /* START spdk_blob_close */ 7508 7509 static void 7510 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 7511 { 7512 struct spdk_blob *blob = cb_arg; 7513 7514 if (bserrno == 0) { 7515 blob->open_ref--; 7516 if (blob->open_ref == 0) { 7517 /* 7518 * Blobs with active.num_pages == 0 are deleted blobs. 7519 * these blobs are removed from the blob_store list 7520 * when the deletion process starts - so don't try to 7521 * remove them again. 7522 */ 7523 if (blob->active.num_pages > 0) { 7524 spdk_bit_array_clear(blob->bs->open_blobids, blob->id); 7525 RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob); 7526 } 7527 blob_free(blob); 7528 } 7529 } 7530 7531 bs_sequence_finish(seq, bserrno); 7532 } 7533 7534 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 7535 { 7536 struct spdk_bs_cpl cpl; 7537 spdk_bs_sequence_t *seq; 7538 7539 blob_verify_md_op(blob); 7540 7541 SPDK_DEBUGLOG(blob, "Closing blob %" PRIu64 "\n", blob->id); 7542 7543 if (blob->open_ref == 0) { 7544 cb_fn(cb_arg, -EBADF); 7545 return; 7546 } 7547 7548 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 7549 cpl.u.blob_basic.cb_fn = cb_fn; 7550 cpl.u.blob_basic.cb_arg = cb_arg; 7551 7552 seq = bs_sequence_start(blob->bs->md_channel, &cpl); 7553 if (!seq) { 7554 cb_fn(cb_arg, -ENOMEM); 7555 return; 7556 } 7557 7558 /* Sync metadata */ 7559 blob_persist(seq, blob, blob_close_cpl, blob); 7560 } 7561 7562 /* END spdk_blob_close */ 7563 7564 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 7565 { 7566 return spdk_get_io_channel(bs); 7567 } 7568 7569 void spdk_bs_free_io_channel(struct spdk_io_channel *channel) 7570 { 7571 spdk_put_io_channel(channel); 7572 } 7573 7574 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 7575 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 7576 { 7577 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 7578 SPDK_BLOB_UNMAP); 7579 } 7580 7581 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 7582 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 7583 { 7584 blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 7585 SPDK_BLOB_WRITE_ZEROES); 7586 } 7587 7588 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 7589 void *payload, uint64_t offset, uint64_t length, 7590 spdk_blob_op_complete cb_fn, void *cb_arg) 7591 { 7592 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 7593 SPDK_BLOB_WRITE); 7594 } 7595 7596 void spdk_blob_io_read(struct spdk_blob 
*blob, struct spdk_io_channel *channel, 7597 void *payload, uint64_t offset, uint64_t length, 7598 spdk_blob_op_complete cb_fn, void *cb_arg) 7599 { 7600 blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 7601 SPDK_BLOB_READ); 7602 } 7603 7604 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 7605 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 7606 spdk_blob_op_complete cb_fn, void *cb_arg) 7607 { 7608 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL); 7609 } 7610 7611 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 7612 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 7613 spdk_blob_op_complete cb_fn, void *cb_arg) 7614 { 7615 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL); 7616 } 7617 7618 void 7619 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 7620 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 7621 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 7622 { 7623 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, 7624 io_opts); 7625 } 7626 7627 void 7628 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel, 7629 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 7630 spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts) 7631 { 7632 blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, 7633 io_opts); 7634 } 7635 7636 struct spdk_bs_iter_ctx { 7637 int64_t page_num; 7638 struct spdk_blob_store *bs; 7639 7640 spdk_blob_op_with_handle_complete cb_fn; 7641 void *cb_arg; 7642 }; 7643 7644 static void 7645 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 7646 { 7647 struct spdk_bs_iter_ctx *ctx = cb_arg; 7648 struct spdk_blob_store *bs = ctx->bs; 7649 spdk_blob_id id; 7650 7651 if (bserrno == 0) { 7652 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 7653 free(ctx); 7654 return; 7655 } 7656 7657 ctx->page_num++; 7658 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 7659 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 7660 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 7661 free(ctx); 7662 return; 7663 } 7664 7665 id = bs_page_to_blobid(ctx->page_num); 7666 7667 spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx); 7668 } 7669 7670 void 7671 spdk_bs_iter_first(struct spdk_blob_store *bs, 7672 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7673 { 7674 struct spdk_bs_iter_ctx *ctx; 7675 7676 ctx = calloc(1, sizeof(*ctx)); 7677 if (!ctx) { 7678 cb_fn(cb_arg, NULL, -ENOMEM); 7679 return; 7680 } 7681 7682 ctx->page_num = -1; 7683 ctx->bs = bs; 7684 ctx->cb_fn = cb_fn; 7685 ctx->cb_arg = cb_arg; 7686 7687 bs_iter_cpl(ctx, NULL, -1); 7688 } 7689 7690 static void 7691 bs_iter_close_cpl(void *cb_arg, int bserrno) 7692 { 7693 struct spdk_bs_iter_ctx *ctx = cb_arg; 7694 7695 bs_iter_cpl(ctx, NULL, -1); 7696 } 7697 7698 void 7699 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 7700 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 7701 { 7702 struct spdk_bs_iter_ctx *ctx; 7703 7704 assert(blob != NULL); 7705 7706 ctx = calloc(1, sizeof(*ctx)); 7707 if (!ctx) { 7708 cb_fn(cb_arg, NULL, -ENOMEM); 7709 return; 7710 } 7711 7712 ctx->page_num = bs_blobid_to_page(blob->id); 7713 ctx->bs = 

struct spdk_bs_iter_ctx {
	int64_t page_num;
	struct spdk_blob_store *bs;

	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
};

static void
bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	spdk_blob_id id;

	if (bserrno == 0) {
		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
		free(ctx);
		return;
	}

	ctx->page_num++;
	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
		free(ctx);
		return;
	}

	id = bs_page_to_blobid(ctx->page_num);

	spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx);
}

void
spdk_bs_iter_first(struct spdk_blob_store *bs,
		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = -1;
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	bs_iter_cpl(ctx, NULL, -1);
}

static void
bs_iter_close_cpl(void *cb_arg, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;

	bs_iter_cpl(ctx, NULL, -1);
}

void
spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = bs_blobid_to_page(blob->id);
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Close the existing blob */
	spdk_blob_close(blob, bs_iter_close_cpl, ctx);
}
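
/*
 * Usage sketch: blob iteration is asynchronous and chains through the
 * completion callback; iter_cb below is an illustrative name, not part of
 * this file. The iteration reports -ENOENT once every blob has been visited:
 *
 *	static void
 *	iter_cb(void *arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = arg;
 *
 *		if (bserrno != 0) {
 *			return;
 *		}
 *		... inspect blob ...
 *		spdk_bs_iter_next(bs, blob, iter_cb, bs);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, bs);
 *
 * Note that spdk_bs_iter_next() closes the blob handed to it before opening
 * the next one, so the previous handle must not be used afterwards.
 */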

static int
blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
	       uint16_t value_len, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;
	size_t desc_size;
	void *tmp;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}

	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page %zu\n", name,
			      desc_size, SPDK_BS_MAX_DESC_SIZE);
		return -ENOMEM;
	}

	if (internal) {
		xattrs = &blob->xattrs_internal;
		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
	} else {
		xattrs = &blob->xattrs;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}

			free(xattr->value);
			xattr->value_len = value_len;
			xattr->value = tmp;
			memcpy(xattr->value, value, value_len);

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}

	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(xattrs, xattr, link);

	blob->state = SPDK_BLOB_STATE_DIRTY;

	return 0;
}

int
spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		    uint16_t value_len)
{
	return blob_set_xattr(blob, name, value, value_len, false);
}

static int
blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;

	blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}
	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			TAILQ_REMOVE(xattrs, xattr, link);
			free(xattr->value);
			free(xattr->name);
			free(xattr);

			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
			}
			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	return -ENOENT;
}

int
spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
{
	return blob_remove_xattr(blob, name, false);
}

static int
blob_get_xattr_value(struct spdk_blob *blob, const char *name,
		     const void **value, size_t *value_len, bool internal)
{
	struct spdk_xattr *xattr;
	struct spdk_xattr_tailq *xattrs;

	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			*value = xattr->value;
			*value_len = xattr->value_len;
			return 0;
		}
	}
	return -ENOENT;
}

int
spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
			  const void **value, size_t *value_len)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_value(blob, name, value, value_len, false);
}

struct spdk_xattr_names {
	uint32_t count;
	const char *names[0];
};

static int
blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
{
	struct spdk_xattr *xattr;
	int count = 0;

	TAILQ_FOREACH(xattr, xattrs, link) {
		count++;
	}

	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
	if (*names == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		(*names)->names[(*names)->count++] = xattr->name;
	}

	return 0;
}

int
spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
{
	blob_verify_md_op(blob);

	return blob_get_xattr_names(&blob->xattrs, names);
}

uint32_t
spdk_xattr_names_get_count(struct spdk_xattr_names *names)
{
	assert(names != NULL);

	return names->count;
}

const char *
spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
{
	if (index >= names->count) {
		return NULL;
	}

	return names->names[index];
}

void
spdk_xattr_names_free(struct spdk_xattr_names *names)
{
	free(names);
}
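
/*
 * Usage sketch for the xattr name enumeration API; the returned structure
 * must be freed by the caller:
 *
 *	struct spdk_xattr_names *names;
 *	uint32_t i;
 *
 *	if (spdk_blob_get_xattr_names(blob, &names) == 0) {
 *		for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *			printf("xattr: %s\n", spdk_xattr_names_get_name(names, i));
 *		}
 *		spdk_xattr_names_free(names);
 *	}
 *
 * The name pointers reference the blob's in-memory xattr list directly, so
 * they are only valid while the blob stays open and its xattrs are unchanged.
 */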

struct spdk_bs_type
spdk_bs_get_bstype(struct spdk_blob_store *bs)
{
	return bs->bstype;
}

void
spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
{
	memcpy(&bs->bstype, &bstype, sizeof(bstype));
}

bool
spdk_blob_is_read_only(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return (blob->data_ro || blob->md_ro);
}

bool
spdk_blob_is_snapshot(struct spdk_blob *blob)
{
	struct spdk_blob_list *snapshot_entry;

	assert(blob != NULL);

	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry == NULL) {
		return false;
	}

	return true;
}

bool
spdk_blob_is_clone(struct spdk_blob *blob)
{
	assert(blob != NULL);

	if (blob->parent_id != SPDK_BLOBID_INVALID) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return true;
	}

	return false;
}

bool
spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
}

static void
blob_update_clear_method(struct spdk_blob *blob)
{
	enum blob_clear_method stored_cm;

	assert(blob != NULL);

	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
	 * in metadata previously. If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
	 */
	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);

	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
		blob->clear_method = stored_cm;
	} else if (blob->clear_method != stored_cm) {
		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
			     blob->clear_method, stored_cm);
	}
}

spdk_blob_id
spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob_id) {
				return snapshot_entry->id;
			}
		}
	}

	return SPDK_BLOBID_INVALID;
}

int
spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
		     size_t *count)
{
	struct spdk_blob_list *snapshot_entry, *clone_entry;
	size_t n;

	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
	if (snapshot_entry == NULL) {
		*count = 0;
		return 0;
	}

	if (ids == NULL || *count < snapshot_entry->clone_count) {
		*count = snapshot_entry->clone_count;
		return -ENOMEM;
	}
	*count = snapshot_entry->clone_count;

	n = 0;
	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
		ids[n++] = clone_entry->id;
	}

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(blob)
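
/*
 * Usage sketch for the spdk_blob_get_clones() sizing protocol: when the
 * snapshot has clones, a first call with ids == NULL (or a too-small *count)
 * sets *count to the clone count and returns -ENOMEM; the caller then retries
 * with a large enough array. clone_ids and clone_count are illustrative
 * caller-side names:
 *
 *	size_t clone_count = 0;
 *	spdk_blob_id *clone_ids;
 *
 *	spdk_blob_get_clones(bs, snapshot_id, NULL, &clone_count);
 *	clone_ids = calloc(clone_count, sizeof(*clone_ids));
 *	if (clone_ids != NULL &&
 *	    spdk_blob_get_clones(bs, snapshot_id, clone_ids, &clone_count) == 0) {
 *		... clone_count entries of clone_ids are valid ...
 *	}
 *	free(clone_ids);
 */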