1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk/blob.h" 37 #include "spdk/crc32.h" 38 #include "spdk/env.h" 39 #include "spdk/queue.h" 40 #include "spdk/thread.h" 41 #include "spdk/bit_array.h" 42 #include "spdk/likely.h" 43 44 #include "spdk_internal/assert.h" 45 #include "spdk_internal/log.h" 46 47 #include "blobstore.h" 48 49 #define BLOB_CRC32C_INITIAL 0xffffffffUL 50 51 static int spdk_bs_register_md_thread(struct spdk_blob_store *bs); 52 static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs); 53 static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno); 54 static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 55 uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg); 56 57 static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 58 uint16_t value_len, bool internal); 59 static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 60 const void **value, size_t *value_len, bool internal); 61 static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal); 62 63 static void 64 _spdk_blob_verify_md_op(struct spdk_blob *blob) 65 { 66 assert(blob != NULL); 67 assert(spdk_get_thread() == blob->bs->md_thread); 68 assert(blob->state != SPDK_BLOB_STATE_LOADING); 69 } 70 71 static inline size_t 72 divide_round_up(size_t num, size_t divisor) 73 { 74 return (num + divisor - 1) / divisor; 75 } 76 77 static void 78 _spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num) 79 { 80 assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters)); 81 assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false); 82 assert(bs->num_free_clusters > 0); 83 84 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num); 85 86 spdk_bit_array_set(bs->used_clusters, cluster_num); 87 bs->num_free_clusters--; 88 } 89 90 static int 91 _spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster) 92 { 93 uint64_t *cluster_lba = &blob->active.clusters[cluster_num]; 94 95 _spdk_blob_verify_md_op(blob); 96 97 if (*cluster_lba != 0) { 98 return -EEXIST; 99 } 100 101 *cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster); 102 return 0; 103 } 104 105 static int 106 _spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num, 107 uint64_t *lowest_free_cluster, bool update_map) 108 { 109 pthread_mutex_lock(&blob->bs->used_clusters_mutex); 110 *lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters, 111 *lowest_free_cluster); 112 if (*lowest_free_cluster >= blob->bs->total_clusters) { 113 /* No more free clusters. Cannot satisfy the request */ 114 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 115 return -ENOSPC; 116 } 117 118 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id); 119 _spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster); 120 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 121 122 if (update_map) { 123 _spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster); 124 } 125 126 return 0; 127 } 128 129 static void 130 _spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num) 131 { 132 assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters)); 133 assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true); 134 assert(bs->num_free_clusters < bs->total_clusters); 135 136 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num); 137 138 pthread_mutex_lock(&bs->used_clusters_mutex); 139 spdk_bit_array_clear(bs->used_clusters, cluster_num); 140 bs->num_free_clusters++; 141 pthread_mutex_unlock(&bs->used_clusters_mutex); 142 } 143 144 static void 145 _spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs) 146 { 147 xattrs->count = 0; 148 xattrs->names = NULL; 149 xattrs->ctx = NULL; 150 xattrs->get_value = NULL; 151 } 152 153 void 154 spdk_blob_opts_init(struct spdk_blob_opts *opts) 155 { 156 opts->num_clusters = 0; 157 opts->thin_provision = false; 158 _spdk_blob_xattrs_init(&opts->xattrs); 159 } 160 161 static struct spdk_blob * 162 _spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id) 163 { 164 struct spdk_blob *blob; 165 166 blob = calloc(1, sizeof(*blob)); 167 if (!blob) { 168 return NULL; 169 } 170 171 blob->id = id; 172 blob->bs = bs; 173 174 blob->parent_id = SPDK_BLOBID_INVALID; 175 176 blob->state = SPDK_BLOB_STATE_DIRTY; 177 blob->active.num_pages = 1; 178 blob->active.pages = calloc(1, sizeof(*blob->active.pages)); 179 if (!blob->active.pages) { 180 free(blob); 181 return NULL; 182 } 183 184 blob->active.pages[0] = _spdk_bs_blobid_to_page(id); 185 186 TAILQ_INIT(&blob->xattrs); 187 TAILQ_INIT(&blob->xattrs_internal); 188 189 return blob; 190 } 191 192 static void 193 _spdk_xattrs_free(struct spdk_xattr_tailq *xattrs) 194 { 195 struct spdk_xattr *xattr, *xattr_tmp; 196 197 TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) { 198 TAILQ_REMOVE(xattrs, xattr, link); 199 free(xattr->name); 200 free(xattr->value); 201 free(xattr); 202 } 203 } 204 205 static void 206 _spdk_blob_free(struct spdk_blob *blob) 207 { 208 assert(blob != NULL); 209 210 free(blob->active.clusters); 211 free(blob->clean.clusters); 212 free(blob->active.pages); 213 free(blob->clean.pages); 214 215 _spdk_xattrs_free(&blob->xattrs); 216 _spdk_xattrs_free(&blob->xattrs_internal); 217 218 if (blob->back_bs_dev) { 219 blob->back_bs_dev->destroy(blob->back_bs_dev); 220 } 221 222 free(blob); 223 } 224 225 struct freeze_io_ctx { 226 struct spdk_bs_cpl cpl; 227 struct spdk_blob *blob; 228 }; 229 230 static void 231 _spdk_blob_io_sync(struct spdk_io_channel_iter *i) 232 { 233 spdk_for_each_channel_continue(i, 0); 234 } 235 236 static void 237 _spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i) 238 { 239 struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i); 240 struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch); 241 struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 242 struct spdk_bs_request_set *set; 243 struct spdk_bs_user_op_args *args; 244 spdk_bs_user_op_t *op, *tmp; 245 246 TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) { 247 set = (struct spdk_bs_request_set *)op; 248 args = &set->u.user_op; 249 250 if (args->blob == ctx->blob) { 251 TAILQ_REMOVE(&ch->queued_io, op, link); 252 spdk_bs_user_op_execute(op); 253 } 254 } 255 256 spdk_for_each_channel_continue(i, 0); 257 } 258 259 static void 260 _spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status) 261 { 262 struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 263 264 ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0); 265 266 free(ctx); 267 } 268 269 static void 270 _spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 271 { 272 struct freeze_io_ctx *ctx; 273 274 ctx = calloc(1, sizeof(*ctx)); 275 if (!ctx) { 276 cb_fn(cb_arg, -ENOMEM); 277 return; 278 } 279 280 ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 281 ctx->cpl.u.blob_basic.cb_fn = cb_fn; 282 ctx->cpl.u.blob_basic.cb_arg = cb_arg; 283 ctx->blob = blob; 284 285 /* Freeze I/O on blob */ 286 blob->frozen_refcnt++; 287 288 if (blob->frozen_refcnt == 1) { 289 spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl); 290 } else { 291 cb_fn(cb_arg, 0); 292 free(ctx); 293 } 294 } 295 296 static void 297 _spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 298 { 299 struct freeze_io_ctx *ctx; 300 301 ctx = calloc(1, sizeof(*ctx)); 302 if (!ctx) { 303 cb_fn(cb_arg, -ENOMEM); 304 return; 305 } 306 307 ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 308 ctx->cpl.u.blob_basic.cb_fn = cb_fn; 309 ctx->cpl.u.blob_basic.cb_arg = cb_arg; 310 ctx->blob = blob; 311 312 assert(blob->frozen_refcnt > 0); 313 314 blob->frozen_refcnt--; 315 316 if (blob->frozen_refcnt == 0) { 317 spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl); 318 } else { 319 cb_fn(cb_arg, 0); 320 free(ctx); 321 } 322 } 323 324 static int 325 _spdk_blob_mark_clean(struct spdk_blob *blob) 326 { 327 uint64_t *clusters = NULL; 328 uint32_t *pages = NULL; 329 330 assert(blob != NULL); 331 332 if (blob->active.num_clusters) { 333 assert(blob->active.clusters); 334 clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters)); 335 if (!clusters) { 336 return -ENOMEM; 337 } 338 memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*clusters)); 339 } 340 341 if (blob->active.num_pages) { 342 assert(blob->active.pages); 343 pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages)); 344 if (!pages) { 345 free(clusters); 346 return -ENOMEM; 347 } 348 memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*pages)); 349 } 350 351 free(blob->clean.clusters); 352 free(blob->clean.pages); 353 354 blob->clean.num_clusters = blob->active.num_clusters; 355 blob->clean.clusters = blob->active.clusters; 356 blob->clean.num_pages = blob->active.num_pages; 357 blob->clean.pages = blob->active.pages; 358 359 blob->active.clusters = clusters; 360 blob->active.pages = pages; 361 362 /* If the metadata was dirtied again while the metadata was being written to disk, 363 * we do not want to revert the DIRTY state back to CLEAN here. 364 */ 365 if (blob->state == SPDK_BLOB_STATE_LOADING) { 366 blob->state = SPDK_BLOB_STATE_CLEAN; 367 } 368 369 return 0; 370 } 371 372 static int 373 _spdk_blob_deserialize_xattr(struct spdk_blob *blob, 374 struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal) 375 { 376 struct spdk_xattr *xattr; 377 378 if (desc_xattr->length != sizeof(desc_xattr->name_length) + 379 sizeof(desc_xattr->value_length) + 380 desc_xattr->name_length + desc_xattr->value_length) { 381 return -EINVAL; 382 } 383 384 xattr = calloc(1, sizeof(*xattr)); 385 if (xattr == NULL) { 386 return -ENOMEM; 387 } 388 389 xattr->name = malloc(desc_xattr->name_length + 1); 390 if (xattr->name == NULL) { 391 free(xattr); 392 return -ENOMEM; 393 } 394 memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length); 395 xattr->name[desc_xattr->name_length] = '\0'; 396 397 xattr->value = malloc(desc_xattr->value_length); 398 if (xattr->value == NULL) { 399 free(xattr->name); 400 free(xattr); 401 return -ENOMEM; 402 } 403 xattr->value_len = desc_xattr->value_length; 404 memcpy(xattr->value, 405 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 406 desc_xattr->value_length); 407 408 TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link); 409 410 return 0; 411 } 412 413 414 static int 415 _spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob) 416 { 417 struct spdk_blob_md_descriptor *desc; 418 size_t cur_desc = 0; 419 void *tmp; 420 421 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 422 while (cur_desc < sizeof(page->descriptors)) { 423 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 424 if (desc->length == 0) { 425 /* If padding and length are 0, this terminates the page */ 426 break; 427 } 428 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 429 struct spdk_blob_md_descriptor_flags *desc_flags; 430 431 desc_flags = (struct spdk_blob_md_descriptor_flags *)desc; 432 433 if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) { 434 return -EINVAL; 435 } 436 437 if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) != 438 SPDK_BLOB_INVALID_FLAGS_MASK) { 439 return -EINVAL; 440 } 441 442 if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) != 443 SPDK_BLOB_DATA_RO_FLAGS_MASK) { 444 blob->data_ro = true; 445 blob->md_ro = true; 446 } 447 448 if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) != 449 SPDK_BLOB_MD_RO_FLAGS_MASK) { 450 blob->md_ro = true; 451 } 452 453 if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 454 blob->data_ro = true; 455 blob->md_ro = true; 456 } 457 458 blob->invalid_flags = desc_flags->invalid_flags; 459 blob->data_ro_flags = desc_flags->data_ro_flags; 460 blob->md_ro_flags = desc_flags->md_ro_flags; 461 462 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 463 struct spdk_blob_md_descriptor_extent *desc_extent; 464 unsigned int i, j; 465 unsigned int cluster_count = blob->active.num_clusters; 466 467 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 468 469 if (desc_extent->length == 0 || 470 (desc_extent->length % sizeof(desc_extent->extents[0]) != 0)) { 471 return -EINVAL; 472 } 473 474 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 475 for (j = 0; j < desc_extent->extents[i].length; j++) { 476 if (!spdk_bit_array_get(blob->bs->used_clusters, 477 desc_extent->extents[i].cluster_idx + j)) { 478 return -EINVAL; 479 } 480 cluster_count++; 481 } 482 } 483 484 if (cluster_count == 0) { 485 return -EINVAL; 486 } 487 tmp = realloc(blob->active.clusters, cluster_count * sizeof(uint64_t)); 488 if (tmp == NULL) { 489 return -ENOMEM; 490 } 491 blob->active.clusters = tmp; 492 blob->active.cluster_array_size = cluster_count; 493 494 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 495 for (j = 0; j < desc_extent->extents[i].length; j++) { 496 if (desc_extent->extents[i].cluster_idx != 0) { 497 blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs, 498 desc_extent->extents[i].cluster_idx + j); 499 } else if (spdk_blob_is_thin_provisioned(blob)) { 500 blob->active.clusters[blob->active.num_clusters++] = 0; 501 } else { 502 return -EINVAL; 503 } 504 } 505 } 506 507 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 508 int rc; 509 510 rc = _spdk_blob_deserialize_xattr(blob, 511 (struct spdk_blob_md_descriptor_xattr *) desc, false); 512 if (rc != 0) { 513 return rc; 514 } 515 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 516 int rc; 517 518 rc = _spdk_blob_deserialize_xattr(blob, 519 (struct spdk_blob_md_descriptor_xattr *) desc, true); 520 if (rc != 0) { 521 return rc; 522 } 523 } else { 524 /* Unrecognized descriptor type. Do not fail - just continue to the 525 * next descriptor. If this descriptor is associated with some feature 526 * defined in a newer version of blobstore, that version of blobstore 527 * should create and set an associated feature flag to specify if this 528 * blob can be loaded or not. 529 */ 530 } 531 532 /* Advance to the next descriptor */ 533 cur_desc += sizeof(*desc) + desc->length; 534 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 535 break; 536 } 537 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 538 } 539 540 return 0; 541 } 542 543 static int 544 _spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count, 545 struct spdk_blob *blob) 546 { 547 const struct spdk_blob_md_page *page; 548 uint32_t i; 549 int rc; 550 551 assert(page_count > 0); 552 assert(pages[0].sequence_num == 0); 553 assert(blob != NULL); 554 assert(blob->state == SPDK_BLOB_STATE_LOADING); 555 assert(blob->active.clusters == NULL); 556 557 /* The blobid provided doesn't match what's in the MD, this can 558 * happen for example if a bogus blobid is passed in through open. 559 */ 560 if (blob->id != pages[0].id) { 561 SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n", 562 blob->id, pages[0].id); 563 return -ENOENT; 564 } 565 566 for (i = 0; i < page_count; i++) { 567 page = &pages[i]; 568 569 assert(page->id == blob->id); 570 assert(page->sequence_num == i); 571 572 rc = _spdk_blob_parse_page(page, blob); 573 if (rc != 0) { 574 return rc; 575 } 576 } 577 578 return 0; 579 } 580 581 static int 582 _spdk_blob_serialize_add_page(const struct spdk_blob *blob, 583 struct spdk_blob_md_page **pages, 584 uint32_t *page_count, 585 struct spdk_blob_md_page **last_page) 586 { 587 struct spdk_blob_md_page *page; 588 589 assert(pages != NULL); 590 assert(page_count != NULL); 591 592 if (*page_count == 0) { 593 assert(*pages == NULL); 594 *page_count = 1; 595 *pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE, 596 SPDK_BS_PAGE_SIZE, 597 NULL); 598 } else { 599 assert(*pages != NULL); 600 (*page_count)++; 601 *pages = spdk_dma_realloc(*pages, 602 SPDK_BS_PAGE_SIZE * (*page_count), 603 SPDK_BS_PAGE_SIZE, 604 NULL); 605 } 606 607 if (*pages == NULL) { 608 *page_count = 0; 609 *last_page = NULL; 610 return -ENOMEM; 611 } 612 613 page = &(*pages)[*page_count - 1]; 614 memset(page, 0, sizeof(*page)); 615 page->id = blob->id; 616 page->sequence_num = *page_count - 1; 617 page->next = SPDK_INVALID_MD_PAGE; 618 *last_page = page; 619 620 return 0; 621 } 622 623 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor. 624 * Update required_sz on both success and failure. 625 * 626 */ 627 static int 628 _spdk_blob_serialize_xattr(const struct spdk_xattr *xattr, 629 uint8_t *buf, size_t buf_sz, 630 size_t *required_sz, bool internal) 631 { 632 struct spdk_blob_md_descriptor_xattr *desc; 633 634 *required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) + 635 strlen(xattr->name) + 636 xattr->value_len; 637 638 if (buf_sz < *required_sz) { 639 return -1; 640 } 641 642 desc = (struct spdk_blob_md_descriptor_xattr *)buf; 643 644 desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR; 645 desc->length = sizeof(desc->name_length) + 646 sizeof(desc->value_length) + 647 strlen(xattr->name) + 648 xattr->value_len; 649 desc->name_length = strlen(xattr->name); 650 desc->value_length = xattr->value_len; 651 652 memcpy(desc->name, xattr->name, desc->name_length); 653 memcpy((void *)((uintptr_t)desc->name + desc->name_length), 654 xattr->value, 655 desc->value_length); 656 657 return 0; 658 } 659 660 static void 661 _spdk_blob_serialize_extent(const struct spdk_blob *blob, 662 uint64_t start_cluster, uint64_t *next_cluster, 663 uint8_t *buf, size_t buf_sz) 664 { 665 struct spdk_blob_md_descriptor_extent *desc; 666 size_t cur_sz; 667 uint64_t i, extent_idx; 668 uint32_t lba, lba_per_cluster, lba_count; 669 670 /* The buffer must have room for at least one extent */ 671 cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]); 672 if (buf_sz < cur_sz) { 673 *next_cluster = start_cluster; 674 return; 675 } 676 677 desc = (struct spdk_blob_md_descriptor_extent *)buf; 678 desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT; 679 680 lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1); 681 682 lba = blob->active.clusters[start_cluster]; 683 lba_count = lba_per_cluster; 684 extent_idx = 0; 685 for (i = start_cluster + 1; i < blob->active.num_clusters; i++) { 686 if ((lba + lba_count) == blob->active.clusters[i]) { 687 lba_count += lba_per_cluster; 688 continue; 689 } 690 desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 691 desc->extents[extent_idx].length = lba_count / lba_per_cluster; 692 extent_idx++; 693 694 cur_sz += sizeof(desc->extents[extent_idx]); 695 696 if (buf_sz < cur_sz) { 697 /* If we ran out of buffer space, return */ 698 desc->length = sizeof(desc->extents[0]) * extent_idx; 699 *next_cluster = i; 700 return; 701 } 702 703 lba = blob->active.clusters[i]; 704 lba_count = lba_per_cluster; 705 } 706 707 desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 708 desc->extents[extent_idx].length = lba_count / lba_per_cluster; 709 extent_idx++; 710 711 desc->length = sizeof(desc->extents[0]) * extent_idx; 712 *next_cluster = blob->active.num_clusters; 713 714 return; 715 } 716 717 static void 718 _spdk_blob_serialize_flags(const struct spdk_blob *blob, 719 uint8_t *buf, size_t *buf_sz) 720 { 721 struct spdk_blob_md_descriptor_flags *desc; 722 723 /* 724 * Flags get serialized first, so we should always have room for the flags 725 * descriptor. 726 */ 727 assert(*buf_sz >= sizeof(*desc)); 728 729 desc = (struct spdk_blob_md_descriptor_flags *)buf; 730 desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS; 731 desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor); 732 desc->invalid_flags = blob->invalid_flags; 733 desc->data_ro_flags = blob->data_ro_flags; 734 desc->md_ro_flags = blob->md_ro_flags; 735 736 *buf_sz -= sizeof(*desc); 737 } 738 739 static int 740 _spdk_blob_serialize_xattrs(const struct spdk_blob *blob, 741 const struct spdk_xattr_tailq *xattrs, bool internal, 742 struct spdk_blob_md_page **pages, 743 struct spdk_blob_md_page *cur_page, 744 uint32_t *page_count, uint8_t **buf, 745 size_t *remaining_sz) 746 { 747 const struct spdk_xattr *xattr; 748 int rc; 749 750 TAILQ_FOREACH(xattr, xattrs, link) { 751 size_t required_sz = 0; 752 753 rc = _spdk_blob_serialize_xattr(xattr, 754 *buf, *remaining_sz, 755 &required_sz, internal); 756 if (rc < 0) { 757 /* Need to add a new page to the chain */ 758 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, 759 &cur_page); 760 if (rc < 0) { 761 spdk_dma_free(*pages); 762 *pages = NULL; 763 *page_count = 0; 764 return rc; 765 } 766 767 *buf = (uint8_t *)cur_page->descriptors; 768 *remaining_sz = sizeof(cur_page->descriptors); 769 770 /* Try again */ 771 required_sz = 0; 772 rc = _spdk_blob_serialize_xattr(xattr, 773 *buf, *remaining_sz, 774 &required_sz, internal); 775 776 if (rc < 0) { 777 spdk_dma_free(*pages); 778 *pages = NULL; 779 *page_count = 0; 780 return rc; 781 } 782 } 783 784 *remaining_sz -= required_sz; 785 *buf += required_sz; 786 } 787 788 return 0; 789 } 790 791 static int 792 _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages, 793 uint32_t *page_count) 794 { 795 struct spdk_blob_md_page *cur_page; 796 int rc; 797 uint8_t *buf; 798 size_t remaining_sz; 799 uint64_t last_cluster; 800 801 assert(pages != NULL); 802 assert(page_count != NULL); 803 assert(blob != NULL); 804 assert(blob->state == SPDK_BLOB_STATE_DIRTY); 805 806 *pages = NULL; 807 *page_count = 0; 808 809 /* A blob always has at least 1 page, even if it has no descriptors */ 810 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page); 811 if (rc < 0) { 812 return rc; 813 } 814 815 buf = (uint8_t *)cur_page->descriptors; 816 remaining_sz = sizeof(cur_page->descriptors); 817 818 /* Serialize flags */ 819 _spdk_blob_serialize_flags(blob, buf, &remaining_sz); 820 buf += sizeof(struct spdk_blob_md_descriptor_flags); 821 822 /* Serialize xattrs */ 823 rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false, 824 pages, cur_page, page_count, &buf, &remaining_sz); 825 if (rc < 0) { 826 return rc; 827 } 828 829 /* Serialize internal xattrs */ 830 rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true, 831 pages, cur_page, page_count, &buf, &remaining_sz); 832 if (rc < 0) { 833 return rc; 834 } 835 836 /* Serialize extents */ 837 last_cluster = 0; 838 while (last_cluster < blob->active.num_clusters) { 839 _spdk_blob_serialize_extent(blob, last_cluster, &last_cluster, 840 buf, remaining_sz); 841 842 if (last_cluster == blob->active.num_clusters) { 843 break; 844 } 845 846 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, 847 &cur_page); 848 if (rc < 0) { 849 return rc; 850 } 851 852 buf = (uint8_t *)cur_page->descriptors; 853 remaining_sz = sizeof(cur_page->descriptors); 854 } 855 856 return 0; 857 } 858 859 struct spdk_blob_load_ctx { 860 struct spdk_blob *blob; 861 862 struct spdk_blob_md_page *pages; 863 uint32_t num_pages; 864 spdk_bs_sequence_t *seq; 865 866 spdk_bs_sequence_cpl cb_fn; 867 void *cb_arg; 868 }; 869 870 static uint32_t 871 _spdk_blob_md_page_calc_crc(void *page) 872 { 873 uint32_t crc; 874 875 crc = BLOB_CRC32C_INITIAL; 876 crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc); 877 crc ^= BLOB_CRC32C_INITIAL; 878 879 return crc; 880 881 } 882 883 static void 884 _spdk_blob_load_final(void *cb_arg, int bserrno) 885 { 886 struct spdk_blob_load_ctx *ctx = cb_arg; 887 struct spdk_blob *blob = ctx->blob; 888 889 _spdk_blob_mark_clean(blob); 890 891 ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno); 892 893 /* Free the memory */ 894 spdk_dma_free(ctx->pages); 895 free(ctx); 896 } 897 898 static void 899 _spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno) 900 { 901 struct spdk_blob_load_ctx *ctx = cb_arg; 902 struct spdk_blob *blob = ctx->blob; 903 904 if (bserrno != 0) { 905 goto error; 906 } 907 908 blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot); 909 910 if (blob->back_bs_dev == NULL) { 911 bserrno = -ENOMEM; 912 goto error; 913 } 914 915 _spdk_blob_load_final(ctx, bserrno); 916 return; 917 918 error: 919 SPDK_ERRLOG("Snapshot fail\n"); 920 _spdk_blob_free(blob); 921 ctx->cb_fn(ctx->seq, NULL, bserrno); 922 spdk_dma_free(ctx->pages); 923 free(ctx); 924 } 925 926 static void 927 _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 928 { 929 struct spdk_blob_load_ctx *ctx = cb_arg; 930 struct spdk_blob *blob = ctx->blob; 931 struct spdk_blob_md_page *page; 932 const void *value; 933 size_t len; 934 int rc; 935 uint32_t crc; 936 937 page = &ctx->pages[ctx->num_pages - 1]; 938 crc = _spdk_blob_md_page_calc_crc(page); 939 if (crc != page->crc) { 940 SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages); 941 _spdk_blob_free(blob); 942 ctx->cb_fn(seq, NULL, -EINVAL); 943 spdk_dma_free(ctx->pages); 944 free(ctx); 945 return; 946 } 947 948 if (page->next != SPDK_INVALID_MD_PAGE) { 949 uint32_t next_page = page->next; 950 uint64_t next_lba = _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + next_page); 951 952 953 assert(next_lba < (blob->bs->md_start + blob->bs->md_len)); 954 955 /* Read the next page */ 956 ctx->num_pages++; 957 ctx->pages = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages), 958 sizeof(*page), NULL); 959 if (ctx->pages == NULL) { 960 ctx->cb_fn(seq, ctx->cb_arg, -ENOMEM); 961 free(ctx); 962 return; 963 } 964 965 spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1], 966 next_lba, 967 _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)), 968 _spdk_blob_load_cpl, ctx); 969 return; 970 } 971 972 /* Parse the pages */ 973 rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob); 974 if (rc) { 975 _spdk_blob_free(blob); 976 ctx->cb_fn(seq, NULL, rc); 977 spdk_dma_free(ctx->pages); 978 free(ctx); 979 return; 980 } 981 ctx->seq = seq; 982 983 984 if (spdk_blob_is_thin_provisioned(blob)) { 985 rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true); 986 if (rc == 0) { 987 if (len != sizeof(spdk_blob_id)) { 988 _spdk_blob_free(blob); 989 ctx->cb_fn(seq, NULL, -EINVAL); 990 spdk_dma_free(ctx->pages); 991 free(ctx); 992 return; 993 } 994 /* open snapshot blob and continue in the callback function */ 995 blob->parent_id = *(spdk_blob_id *)value; 996 spdk_bs_open_blob(blob->bs, blob->parent_id, 997 _spdk_blob_load_snapshot_cpl, ctx); 998 return; 999 } else { 1000 /* add zeroes_dev for thin provisioned blob */ 1001 blob->back_bs_dev = spdk_bs_create_zeroes_dev(); 1002 } 1003 } else { 1004 /* standard blob */ 1005 blob->back_bs_dev = NULL; 1006 } 1007 _spdk_blob_load_final(ctx, bserrno); 1008 } 1009 1010 /* Load a blob from disk given a blobid */ 1011 static void 1012 _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 1013 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 1014 { 1015 struct spdk_blob_load_ctx *ctx; 1016 struct spdk_blob_store *bs; 1017 uint32_t page_num; 1018 uint64_t lba; 1019 1020 _spdk_blob_verify_md_op(blob); 1021 1022 bs = blob->bs; 1023 1024 ctx = calloc(1, sizeof(*ctx)); 1025 if (!ctx) { 1026 cb_fn(seq, cb_arg, -ENOMEM); 1027 return; 1028 } 1029 1030 ctx->blob = blob; 1031 ctx->pages = spdk_dma_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 1032 SPDK_BS_PAGE_SIZE, NULL); 1033 if (!ctx->pages) { 1034 free(ctx); 1035 cb_fn(seq, cb_arg, -ENOMEM); 1036 return; 1037 } 1038 ctx->num_pages = 1; 1039 ctx->cb_fn = cb_fn; 1040 ctx->cb_arg = cb_arg; 1041 1042 page_num = _spdk_bs_blobid_to_page(blob->id); 1043 lba = _spdk_bs_page_to_lba(blob->bs, bs->md_start + page_num); 1044 1045 blob->state = SPDK_BLOB_STATE_LOADING; 1046 1047 spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba, 1048 _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE), 1049 _spdk_blob_load_cpl, ctx); 1050 } 1051 1052 struct spdk_blob_persist_ctx { 1053 struct spdk_blob *blob; 1054 1055 struct spdk_bs_super_block *super; 1056 1057 struct spdk_blob_md_page *pages; 1058 1059 uint64_t idx; 1060 1061 spdk_bs_sequence_t *seq; 1062 spdk_bs_sequence_cpl cb_fn; 1063 void *cb_arg; 1064 }; 1065 1066 static void 1067 _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1068 { 1069 struct spdk_blob_persist_ctx *ctx = cb_arg; 1070 struct spdk_blob *blob = ctx->blob; 1071 1072 if (bserrno == 0) { 1073 _spdk_blob_mark_clean(blob); 1074 } 1075 1076 /* Call user callback */ 1077 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 1078 1079 /* Free the memory */ 1080 spdk_dma_free(ctx->pages); 1081 free(ctx); 1082 } 1083 1084 static void 1085 _spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1086 { 1087 struct spdk_blob_persist_ctx *ctx = cb_arg; 1088 struct spdk_blob *blob = ctx->blob; 1089 struct spdk_blob_store *bs = blob->bs; 1090 void *tmp; 1091 size_t i; 1092 1093 /* Release all clusters that were truncated */ 1094 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1095 uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]); 1096 1097 /* Nothing to release if it was not allocated */ 1098 if (blob->active.clusters[i] != 0) { 1099 _spdk_bs_release_cluster(bs, cluster_num); 1100 } 1101 } 1102 1103 if (blob->active.num_clusters == 0) { 1104 free(blob->active.clusters); 1105 blob->active.clusters = NULL; 1106 blob->active.cluster_array_size = 0; 1107 } else { 1108 tmp = realloc(blob->active.clusters, sizeof(uint64_t) * blob->active.num_clusters); 1109 assert(tmp != NULL); 1110 blob->active.clusters = tmp; 1111 blob->active.cluster_array_size = blob->active.num_clusters; 1112 } 1113 1114 _spdk_blob_persist_complete(seq, ctx, bserrno); 1115 } 1116 1117 static void 1118 _spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1119 { 1120 struct spdk_blob_persist_ctx *ctx = cb_arg; 1121 struct spdk_blob *blob = ctx->blob; 1122 struct spdk_blob_store *bs = blob->bs; 1123 spdk_bs_batch_t *batch; 1124 size_t i; 1125 uint64_t lba; 1126 uint32_t lba_count; 1127 1128 /* Clusters don't move around in blobs. The list shrinks or grows 1129 * at the end, but no changes ever occur in the middle of the list. 1130 */ 1131 1132 batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_clusters_cpl, ctx); 1133 1134 /* Unmap all clusters that were truncated */ 1135 lba = 0; 1136 lba_count = 0; 1137 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1138 uint64_t next_lba = blob->active.clusters[i]; 1139 uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1); 1140 1141 if (next_lba > 0 && (lba + lba_count) == next_lba) { 1142 /* This cluster is contiguous with the previous one. */ 1143 lba_count += next_lba_count; 1144 continue; 1145 } 1146 1147 /* This cluster is not contiguous with the previous one. */ 1148 1149 /* If a run of LBAs previously existing, send them 1150 * as an unmap. 1151 */ 1152 if (lba_count > 0) { 1153 spdk_bs_batch_unmap_dev(batch, lba, lba_count); 1154 } 1155 1156 /* Start building the next batch */ 1157 lba = next_lba; 1158 if (next_lba > 0) { 1159 lba_count = next_lba_count; 1160 } else { 1161 lba_count = 0; 1162 } 1163 } 1164 1165 /* If we ended with a contiguous set of LBAs, send the unmap now */ 1166 if (lba_count > 0) { 1167 spdk_bs_batch_unmap_dev(batch, lba, lba_count); 1168 } 1169 1170 spdk_bs_batch_close(batch); 1171 } 1172 1173 static void 1174 _spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1175 { 1176 struct spdk_blob_persist_ctx *ctx = cb_arg; 1177 struct spdk_blob *blob = ctx->blob; 1178 struct spdk_blob_store *bs = blob->bs; 1179 size_t i; 1180 1181 /* This loop starts at 1 because the first page is special and handled 1182 * below. The pages (except the first) are never written in place, 1183 * so any pages in the clean list must be zeroed. 1184 */ 1185 for (i = 1; i < blob->clean.num_pages; i++) { 1186 spdk_bit_array_clear(bs->used_md_pages, blob->clean.pages[i]); 1187 } 1188 1189 if (blob->active.num_pages == 0) { 1190 uint32_t page_num; 1191 1192 page_num = _spdk_bs_blobid_to_page(blob->id); 1193 spdk_bit_array_clear(bs->used_md_pages, page_num); 1194 } 1195 1196 /* Move on to unmapping clusters */ 1197 _spdk_blob_persist_unmap_clusters(seq, ctx, 0); 1198 } 1199 1200 static void 1201 _spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1202 { 1203 struct spdk_blob_persist_ctx *ctx = cb_arg; 1204 struct spdk_blob *blob = ctx->blob; 1205 struct spdk_blob_store *bs = blob->bs; 1206 uint64_t lba; 1207 uint32_t lba_count; 1208 spdk_bs_batch_t *batch; 1209 size_t i; 1210 1211 batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx); 1212 1213 lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE); 1214 1215 /* This loop starts at 1 because the first page is special and handled 1216 * below. The pages (except the first) are never written in place, 1217 * so any pages in the clean list must be zeroed. 1218 */ 1219 for (i = 1; i < blob->clean.num_pages; i++) { 1220 lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->clean.pages[i]); 1221 1222 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 1223 } 1224 1225 /* The first page will only be zeroed if this is a delete. */ 1226 if (blob->active.num_pages == 0) { 1227 uint32_t page_num; 1228 1229 /* The first page in the metadata goes where the blobid indicates */ 1230 page_num = _spdk_bs_blobid_to_page(blob->id); 1231 lba = _spdk_bs_page_to_lba(bs, bs->md_start + page_num); 1232 1233 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 1234 } 1235 1236 spdk_bs_batch_close(batch); 1237 } 1238 1239 static void 1240 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1241 { 1242 struct spdk_blob_persist_ctx *ctx = cb_arg; 1243 struct spdk_blob *blob = ctx->blob; 1244 struct spdk_blob_store *bs = blob->bs; 1245 uint64_t lba; 1246 uint32_t lba_count; 1247 struct spdk_blob_md_page *page; 1248 1249 if (blob->active.num_pages == 0) { 1250 /* Move on to the next step */ 1251 _spdk_blob_persist_zero_pages(seq, ctx, 0); 1252 return; 1253 } 1254 1255 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page)); 1256 1257 page = &ctx->pages[0]; 1258 /* The first page in the metadata goes where the blobid indicates */ 1259 lba = _spdk_bs_page_to_lba(bs, bs->md_start + _spdk_bs_blobid_to_page(blob->id)); 1260 1261 spdk_bs_sequence_write_dev(seq, page, lba, lba_count, 1262 _spdk_blob_persist_zero_pages, ctx); 1263 } 1264 1265 static void 1266 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1267 { 1268 struct spdk_blob_persist_ctx *ctx = cb_arg; 1269 struct spdk_blob *blob = ctx->blob; 1270 struct spdk_blob_store *bs = blob->bs; 1271 uint64_t lba; 1272 uint32_t lba_count; 1273 struct spdk_blob_md_page *page; 1274 spdk_bs_batch_t *batch; 1275 size_t i; 1276 1277 /* Clusters don't move around in blobs. The list shrinks or grows 1278 * at the end, but no changes ever occur in the middle of the list. 1279 */ 1280 1281 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page)); 1282 1283 batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx); 1284 1285 /* This starts at 1. The root page is not written until 1286 * all of the others are finished 1287 */ 1288 for (i = 1; i < blob->active.num_pages; i++) { 1289 page = &ctx->pages[i]; 1290 assert(page->sequence_num == i); 1291 1292 lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->active.pages[i]); 1293 1294 spdk_bs_batch_write_dev(batch, page, lba, lba_count); 1295 } 1296 1297 spdk_bs_batch_close(batch); 1298 } 1299 1300 static int 1301 _spdk_blob_resize(struct spdk_blob *blob, uint64_t sz) 1302 { 1303 uint64_t i; 1304 uint64_t *tmp; 1305 uint64_t lfc; /* lowest free cluster */ 1306 uint64_t num_clusters; 1307 struct spdk_blob_store *bs; 1308 1309 bs = blob->bs; 1310 1311 _spdk_blob_verify_md_op(blob); 1312 1313 if (blob->active.num_clusters == sz) { 1314 return 0; 1315 } 1316 1317 if (blob->active.num_clusters < blob->active.cluster_array_size) { 1318 /* If this blob was resized to be larger, then smaller, then 1319 * larger without syncing, then the cluster array already 1320 * contains spare assigned clusters we can use. 1321 */ 1322 num_clusters = spdk_min(blob->active.cluster_array_size, 1323 sz); 1324 } else { 1325 num_clusters = blob->active.num_clusters; 1326 } 1327 1328 /* Do two passes - one to verify that we can obtain enough clusters 1329 * and another to actually claim them. 1330 */ 1331 1332 if (spdk_blob_is_thin_provisioned(blob) == false) { 1333 lfc = 0; 1334 for (i = num_clusters; i < sz; i++) { 1335 lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc); 1336 if (lfc >= bs->total_clusters) { 1337 /* No more free clusters. Cannot satisfy the request */ 1338 return -ENOSPC; 1339 } 1340 lfc++; 1341 } 1342 } 1343 1344 if (sz > num_clusters) { 1345 /* Expand the cluster array if necessary. 1346 * We only shrink the array when persisting. 1347 */ 1348 tmp = realloc(blob->active.clusters, sizeof(uint64_t) * sz); 1349 if (sz > 0 && tmp == NULL) { 1350 return -ENOMEM; 1351 } 1352 memset(tmp + blob->active.cluster_array_size, 0, 1353 sizeof(uint64_t) * (sz - blob->active.cluster_array_size)); 1354 blob->active.clusters = tmp; 1355 blob->active.cluster_array_size = sz; 1356 } 1357 1358 blob->state = SPDK_BLOB_STATE_DIRTY; 1359 1360 if (spdk_blob_is_thin_provisioned(blob) == false) { 1361 lfc = 0; 1362 for (i = num_clusters; i < sz; i++) { 1363 _spdk_bs_allocate_cluster(blob, i, &lfc, true); 1364 lfc++; 1365 } 1366 } 1367 1368 blob->active.num_clusters = sz; 1369 1370 return 0; 1371 } 1372 1373 static void 1374 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx) 1375 { 1376 spdk_bs_sequence_t *seq = ctx->seq; 1377 struct spdk_blob *blob = ctx->blob; 1378 struct spdk_blob_store *bs = blob->bs; 1379 uint64_t i; 1380 uint32_t page_num; 1381 void *tmp; 1382 int rc; 1383 1384 if (blob->active.num_pages == 0) { 1385 /* This is the signal that the blob should be deleted. 1386 * Immediately jump to the clean up routine. */ 1387 assert(blob->clean.num_pages > 0); 1388 ctx->idx = blob->clean.num_pages - 1; 1389 blob->state = SPDK_BLOB_STATE_CLEAN; 1390 _spdk_blob_persist_zero_pages(seq, ctx, 0); 1391 return; 1392 1393 } 1394 1395 /* Generate the new metadata */ 1396 rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 1397 if (rc < 0) { 1398 _spdk_blob_persist_complete(seq, ctx, rc); 1399 return; 1400 } 1401 1402 assert(blob->active.num_pages >= 1); 1403 1404 /* Resize the cache of page indices */ 1405 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 1406 if (!tmp) { 1407 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1408 return; 1409 } 1410 blob->active.pages = tmp; 1411 1412 /* Assign this metadata to pages. This requires two passes - 1413 * one to verify that there are enough pages and a second 1414 * to actually claim them. */ 1415 page_num = 0; 1416 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 1417 for (i = 1; i < blob->active.num_pages; i++) { 1418 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1419 if (page_num >= spdk_bit_array_capacity(bs->used_md_pages)) { 1420 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1421 return; 1422 } 1423 page_num++; 1424 } 1425 1426 page_num = 0; 1427 blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id); 1428 for (i = 1; i < blob->active.num_pages; i++) { 1429 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1430 ctx->pages[i - 1].next = page_num; 1431 /* Now that previous metadata page is complete, calculate the crc for it. */ 1432 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1433 blob->active.pages[i] = page_num; 1434 spdk_bit_array_set(bs->used_md_pages, page_num); 1435 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id); 1436 page_num++; 1437 } 1438 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1439 /* Start writing the metadata from last page to first */ 1440 ctx->idx = blob->active.num_pages - 1; 1441 blob->state = SPDK_BLOB_STATE_CLEAN; 1442 _spdk_blob_persist_write_page_chain(seq, ctx, 0); 1443 } 1444 1445 static void 1446 _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1447 { 1448 struct spdk_blob_persist_ctx *ctx = cb_arg; 1449 1450 ctx->blob->bs->clean = 0; 1451 1452 spdk_dma_free(ctx->super); 1453 1454 _spdk_blob_persist_start(ctx); 1455 } 1456 1457 static void 1458 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 1459 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 1460 1461 1462 static void 1463 _spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1464 { 1465 struct spdk_blob_persist_ctx *ctx = cb_arg; 1466 1467 ctx->super->clean = 0; 1468 1469 _spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx); 1470 } 1471 1472 1473 /* Write a blob to disk */ 1474 static void 1475 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 1476 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 1477 { 1478 struct spdk_blob_persist_ctx *ctx; 1479 1480 _spdk_blob_verify_md_op(blob); 1481 1482 if (blob->state == SPDK_BLOB_STATE_CLEAN) { 1483 cb_fn(seq, cb_arg, 0); 1484 return; 1485 } 1486 1487 ctx = calloc(1, sizeof(*ctx)); 1488 if (!ctx) { 1489 cb_fn(seq, cb_arg, -ENOMEM); 1490 return; 1491 } 1492 ctx->blob = blob; 1493 ctx->seq = seq; 1494 ctx->cb_fn = cb_fn; 1495 ctx->cb_arg = cb_arg; 1496 1497 if (blob->bs->clean) { 1498 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 1499 if (!ctx->super) { 1500 cb_fn(seq, cb_arg, -ENOMEM); 1501 free(ctx); 1502 return; 1503 } 1504 1505 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(blob->bs, 0), 1506 _spdk_bs_byte_to_lba(blob->bs, sizeof(*ctx->super)), 1507 _spdk_blob_persist_dirty, ctx); 1508 } else { 1509 _spdk_blob_persist_start(ctx); 1510 } 1511 } 1512 1513 struct spdk_blob_copy_cluster_ctx { 1514 struct spdk_blob *blob; 1515 uint8_t *buf; 1516 uint64_t page; 1517 uint64_t new_cluster; 1518 spdk_bs_sequence_t *seq; 1519 }; 1520 1521 static void 1522 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 1523 { 1524 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1525 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 1526 TAILQ_HEAD(, spdk_bs_request_set) requests; 1527 spdk_bs_user_op_t *op; 1528 1529 TAILQ_INIT(&requests); 1530 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 1531 1532 while (!TAILQ_EMPTY(&requests)) { 1533 op = TAILQ_FIRST(&requests); 1534 TAILQ_REMOVE(&requests, op, link); 1535 if (bserrno == 0) { 1536 spdk_bs_user_op_execute(op); 1537 } else { 1538 spdk_bs_user_op_abort(op); 1539 } 1540 } 1541 1542 spdk_dma_free(ctx->buf); 1543 free(ctx); 1544 } 1545 1546 static void 1547 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno) 1548 { 1549 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1550 1551 if (bserrno) { 1552 uint32_t cluster_number; 1553 1554 if (bserrno == -EEXIST) { 1555 /* The metadata insert failed because another thread 1556 * allocated the cluster first. Free our cluster 1557 * but continue without error. */ 1558 bserrno = 0; 1559 } 1560 1561 cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page); 1562 _spdk_bs_release_cluster(ctx->blob->bs, cluster_number); 1563 } 1564 1565 spdk_bs_sequence_finish(ctx->seq, bserrno); 1566 } 1567 1568 static void 1569 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1570 { 1571 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1572 uint32_t cluster_number; 1573 1574 if (bserrno) { 1575 /* The write failed, so jump to the final completion handler */ 1576 spdk_bs_sequence_finish(seq, bserrno); 1577 return; 1578 } 1579 1580 cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page); 1581 1582 _spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 1583 _spdk_blob_insert_cluster_cpl, ctx); 1584 } 1585 1586 static void 1587 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1588 { 1589 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1590 1591 if (bserrno != 0) { 1592 /* The read failed, so jump to the final completion handler */ 1593 spdk_bs_sequence_finish(seq, bserrno); 1594 return; 1595 } 1596 1597 /* Write whole cluster */ 1598 spdk_bs_sequence_write_dev(seq, ctx->buf, 1599 _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 1600 _spdk_bs_cluster_to_lba(ctx->blob->bs, 1), 1601 _spdk_blob_write_copy_cpl, ctx); 1602 } 1603 1604 static void 1605 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob, 1606 struct spdk_io_channel *_ch, 1607 uint64_t offset, spdk_bs_user_op_t *op) 1608 { 1609 struct spdk_bs_cpl cpl; 1610 struct spdk_bs_channel *ch; 1611 struct spdk_blob_copy_cluster_ctx *ctx; 1612 uint32_t cluster_start_page; 1613 uint32_t cluster_number; 1614 int rc; 1615 1616 ch = spdk_io_channel_get_ctx(_ch); 1617 1618 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 1619 /* There are already operations pending. Queue this user op 1620 * and return because it will be re-executed when the outstanding 1621 * cluster allocation completes. */ 1622 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 1623 return; 1624 } 1625 1626 /* Round the page offset down to the first page in the cluster */ 1627 cluster_start_page = _spdk_bs_page_to_cluster_start(blob, offset); 1628 1629 /* Calculate which index in the metadata cluster array the corresponding 1630 * cluster is supposed to be at. */ 1631 cluster_number = _spdk_bs_page_to_cluster(blob->bs, cluster_start_page); 1632 1633 ctx = calloc(1, sizeof(*ctx)); 1634 if (!ctx) { 1635 spdk_bs_user_op_abort(op); 1636 return; 1637 } 1638 1639 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 1640 1641 ctx->blob = blob; 1642 ctx->page = cluster_start_page; 1643 1644 ctx->buf = spdk_dma_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, NULL); 1645 if (!ctx->buf) { 1646 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 1647 blob->bs->cluster_sz); 1648 free(ctx); 1649 spdk_bs_user_op_abort(op); 1650 return; 1651 } 1652 1653 rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, false); 1654 if (rc != 0) { 1655 spdk_dma_free(ctx->buf); 1656 free(ctx); 1657 spdk_bs_user_op_abort(op); 1658 return; 1659 } 1660 1661 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1662 cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl; 1663 cpl.u.blob_basic.cb_arg = ctx; 1664 1665 ctx->seq = spdk_bs_sequence_start(_ch, &cpl); 1666 if (!ctx->seq) { 1667 _spdk_bs_release_cluster(blob->bs, ctx->new_cluster); 1668 spdk_dma_free(ctx->buf); 1669 free(ctx); 1670 spdk_bs_user_op_abort(op); 1671 return; 1672 } 1673 1674 /* Queue the user op to block other incoming operations */ 1675 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 1676 1677 /* Read cluster from backing device */ 1678 spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 1679 _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 1680 _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 1681 _spdk_blob_write_copy, ctx); 1682 } 1683 1684 static void 1685 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t page, uint64_t length, 1686 uint64_t *lba, uint32_t *lba_count) 1687 { 1688 *lba_count = _spdk_bs_page_to_lba(blob->bs, length); 1689 1690 if (!_spdk_bs_page_is_allocated(blob, page)) { 1691 assert(blob->back_bs_dev != NULL); 1692 *lba = _spdk_bs_dev_page_to_lba(blob->back_bs_dev, page); 1693 *lba_count = _spdk_bs_blob_lba_to_back_dev_lba(blob, *lba_count); 1694 } else { 1695 *lba = _spdk_bs_blob_page_to_lba(blob, page); 1696 } 1697 } 1698 1699 struct op_split_ctx { 1700 struct spdk_blob *blob; 1701 struct spdk_io_channel *channel; 1702 uint64_t page_offset; 1703 uint64_t pages_remaining; 1704 void *curr_payload; 1705 enum spdk_blob_op_type op_type; 1706 spdk_bs_sequence_t *seq; 1707 }; 1708 1709 static void 1710 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno) 1711 { 1712 struct op_split_ctx *ctx = cb_arg; 1713 struct spdk_blob *blob = ctx->blob; 1714 struct spdk_io_channel *ch = ctx->channel; 1715 enum spdk_blob_op_type op_type = ctx->op_type; 1716 uint8_t *buf = ctx->curr_payload; 1717 uint64_t offset = ctx->page_offset; 1718 uint64_t length = ctx->pages_remaining; 1719 uint64_t op_length; 1720 1721 if (bserrno != 0 || ctx->pages_remaining == 0) { 1722 spdk_bs_sequence_finish(ctx->seq, bserrno); 1723 free(ctx); 1724 return; 1725 } 1726 1727 op_length = spdk_min(length, _spdk_bs_num_pages_to_cluster_boundary(blob, offset)); 1728 1729 /* Update length and payload for next operation */ 1730 ctx->pages_remaining -= op_length; 1731 ctx->page_offset += op_length; 1732 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 1733 ctx->curr_payload += (op_length * SPDK_BS_PAGE_SIZE); 1734 } 1735 1736 switch (op_type) { 1737 case SPDK_BLOB_READ: 1738 spdk_blob_io_read(blob, ch, buf, offset, op_length, 1739 _spdk_blob_request_submit_op_split_next, ctx); 1740 break; 1741 case SPDK_BLOB_WRITE: 1742 spdk_blob_io_write(blob, ch, buf, offset, op_length, 1743 _spdk_blob_request_submit_op_split_next, ctx); 1744 break; 1745 case SPDK_BLOB_UNMAP: 1746 spdk_blob_io_unmap(blob, ch, offset, op_length, 1747 _spdk_blob_request_submit_op_split_next, ctx); 1748 break; 1749 case SPDK_BLOB_WRITE_ZEROES: 1750 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 1751 _spdk_blob_request_submit_op_split_next, ctx); 1752 break; 1753 case SPDK_BLOB_READV: 1754 case SPDK_BLOB_WRITEV: 1755 SPDK_ERRLOG("readv/write not valid for %s\n", __func__); 1756 spdk_bs_sequence_finish(ctx->seq, -EINVAL); 1757 free(ctx); 1758 break; 1759 } 1760 } 1761 1762 static void 1763 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 1764 void *payload, uint64_t offset, uint64_t length, 1765 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 1766 { 1767 struct op_split_ctx *ctx; 1768 spdk_bs_sequence_t *seq; 1769 struct spdk_bs_cpl cpl; 1770 1771 assert(blob != NULL); 1772 1773 ctx = calloc(1, sizeof(struct op_split_ctx)); 1774 if (ctx == NULL) { 1775 cb_fn(cb_arg, -ENOMEM); 1776 return; 1777 } 1778 1779 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1780 cpl.u.blob_basic.cb_fn = cb_fn; 1781 cpl.u.blob_basic.cb_arg = cb_arg; 1782 1783 seq = spdk_bs_sequence_start(ch, &cpl); 1784 if (!seq) { 1785 free(ctx); 1786 cb_fn(cb_arg, -ENOMEM); 1787 return; 1788 } 1789 1790 ctx->blob = blob; 1791 ctx->channel = ch; 1792 ctx->curr_payload = payload; 1793 ctx->page_offset = offset; 1794 ctx->pages_remaining = length; 1795 ctx->op_type = op_type; 1796 ctx->seq = seq; 1797 1798 _spdk_blob_request_submit_op_split_next(ctx, 0); 1799 } 1800 1801 static void 1802 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 1803 void *payload, uint64_t offset, uint64_t length, 1804 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 1805 { 1806 struct spdk_bs_cpl cpl; 1807 uint64_t lba; 1808 uint32_t lba_count; 1809 1810 assert(blob != NULL); 1811 1812 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1813 cpl.u.blob_basic.cb_fn = cb_fn; 1814 cpl.u.blob_basic.cb_arg = cb_arg; 1815 1816 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 1817 1818 if (blob->frozen_refcnt) { 1819 /* This blob I/O is frozen */ 1820 spdk_bs_user_op_t *op; 1821 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 1822 1823 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 1824 if (!op) { 1825 cb_fn(cb_arg, -ENOMEM); 1826 return; 1827 } 1828 1829 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 1830 1831 return; 1832 } 1833 1834 switch (op_type) { 1835 case SPDK_BLOB_READ: { 1836 spdk_bs_batch_t *batch; 1837 1838 batch = spdk_bs_batch_open(_ch, &cpl); 1839 if (!batch) { 1840 cb_fn(cb_arg, -ENOMEM); 1841 return; 1842 } 1843 1844 if (_spdk_bs_page_is_allocated(blob, offset)) { 1845 /* Read from the blob */ 1846 spdk_bs_batch_read_dev(batch, payload, lba, lba_count); 1847 } else { 1848 /* Read from the backing block device */ 1849 spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 1850 } 1851 1852 spdk_bs_batch_close(batch); 1853 break; 1854 } 1855 case SPDK_BLOB_WRITE: 1856 case SPDK_BLOB_WRITE_ZEROES: { 1857 if (_spdk_bs_page_is_allocated(blob, offset)) { 1858 /* Write to the blob */ 1859 spdk_bs_batch_t *batch; 1860 1861 if (lba_count == 0) { 1862 cb_fn(cb_arg, 0); 1863 return; 1864 } 1865 1866 batch = spdk_bs_batch_open(_ch, &cpl); 1867 if (!batch) { 1868 cb_fn(cb_arg, -ENOMEM); 1869 return; 1870 } 1871 1872 if (op_type == SPDK_BLOB_WRITE) { 1873 spdk_bs_batch_write_dev(batch, payload, lba, lba_count); 1874 } else { 1875 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 1876 } 1877 1878 spdk_bs_batch_close(batch); 1879 } else { 1880 /* Queue this operation and allocate the cluster */ 1881 spdk_bs_user_op_t *op; 1882 1883 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 1884 if (!op) { 1885 cb_fn(cb_arg, -ENOMEM); 1886 return; 1887 } 1888 1889 _spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op); 1890 } 1891 break; 1892 } 1893 case SPDK_BLOB_UNMAP: { 1894 spdk_bs_batch_t *batch; 1895 1896 batch = spdk_bs_batch_open(_ch, &cpl); 1897 if (!batch) { 1898 cb_fn(cb_arg, -ENOMEM); 1899 return; 1900 } 1901 1902 if (_spdk_bs_page_is_allocated(blob, offset)) { 1903 spdk_bs_batch_unmap_dev(batch, lba, lba_count); 1904 } 1905 1906 spdk_bs_batch_close(batch); 1907 break; 1908 } 1909 case SPDK_BLOB_READV: 1910 case SPDK_BLOB_WRITEV: 1911 SPDK_ERRLOG("readv/write not valid\n"); 1912 cb_fn(cb_arg, -EINVAL); 1913 break; 1914 } 1915 } 1916 1917 static void 1918 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 1919 void *payload, uint64_t offset, uint64_t length, 1920 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 1921 { 1922 assert(blob != NULL); 1923 1924 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 1925 cb_fn(cb_arg, -EPERM); 1926 return; 1927 } 1928 1929 if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) { 1930 cb_fn(cb_arg, -EINVAL); 1931 return; 1932 } 1933 1934 if (length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset)) { 1935 _spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length, 1936 cb_fn, cb_arg, op_type); 1937 } else { 1938 _spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length, 1939 cb_fn, cb_arg, op_type); 1940 } 1941 } 1942 1943 struct rw_iov_ctx { 1944 struct spdk_blob *blob; 1945 struct spdk_io_channel *channel; 1946 spdk_blob_op_complete cb_fn; 1947 void *cb_arg; 1948 bool read; 1949 int iovcnt; 1950 struct iovec *orig_iov; 1951 uint64_t page_offset; 1952 uint64_t pages_remaining; 1953 uint64_t pages_done; 1954 struct iovec iov[0]; 1955 }; 1956 1957 static void 1958 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1959 { 1960 assert(cb_arg == NULL); 1961 spdk_bs_sequence_finish(seq, bserrno); 1962 } 1963 1964 static void 1965 _spdk_rw_iov_split_next(void *cb_arg, int bserrno) 1966 { 1967 struct rw_iov_ctx *ctx = cb_arg; 1968 struct spdk_blob *blob = ctx->blob; 1969 struct iovec *iov, *orig_iov; 1970 int iovcnt; 1971 size_t orig_iovoff; 1972 uint64_t page_count, pages_to_boundary, page_offset; 1973 uint64_t byte_count; 1974 1975 if (bserrno != 0 || ctx->pages_remaining == 0) { 1976 ctx->cb_fn(ctx->cb_arg, bserrno); 1977 free(ctx); 1978 return; 1979 } 1980 1981 page_offset = ctx->page_offset; 1982 pages_to_boundary = _spdk_bs_num_pages_to_cluster_boundary(blob, page_offset); 1983 page_count = spdk_min(ctx->pages_remaining, pages_to_boundary); 1984 1985 /* 1986 * Get index and offset into the original iov array for our current position in the I/O sequence. 1987 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 1988 * point to the current position in the I/O sequence. 1989 */ 1990 byte_count = ctx->pages_done * sizeof(struct spdk_blob_md_page); 1991 orig_iov = &ctx->orig_iov[0]; 1992 orig_iovoff = 0; 1993 while (byte_count > 0) { 1994 if (byte_count >= orig_iov->iov_len) { 1995 byte_count -= orig_iov->iov_len; 1996 orig_iov++; 1997 } else { 1998 orig_iovoff = byte_count; 1999 byte_count = 0; 2000 } 2001 } 2002 2003 /* 2004 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 2005 * bytes of this next I/O remain to be accounted for in the new iov array. 2006 */ 2007 byte_count = page_count * sizeof(struct spdk_blob_md_page); 2008 iov = &ctx->iov[0]; 2009 iovcnt = 0; 2010 while (byte_count > 0) { 2011 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 2012 iov->iov_base = orig_iov->iov_base + orig_iovoff; 2013 byte_count -= iov->iov_len; 2014 orig_iovoff = 0; 2015 orig_iov++; 2016 iov++; 2017 iovcnt++; 2018 } 2019 2020 ctx->page_offset += page_count; 2021 ctx->pages_done += page_count; 2022 ctx->pages_remaining -= page_count; 2023 iov = &ctx->iov[0]; 2024 2025 if (ctx->read) { 2026 spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, page_offset, 2027 page_count, _spdk_rw_iov_split_next, ctx); 2028 } else { 2029 spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, page_offset, 2030 page_count, _spdk_rw_iov_split_next, ctx); 2031 } 2032 } 2033 2034 static void 2035 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2036 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 2037 spdk_blob_op_complete cb_fn, void *cb_arg, bool read) 2038 { 2039 struct spdk_bs_cpl cpl; 2040 2041 assert(blob != NULL); 2042 2043 if (!read && blob->data_ro) { 2044 cb_fn(cb_arg, -EPERM); 2045 return; 2046 } 2047 2048 if (length == 0) { 2049 cb_fn(cb_arg, 0); 2050 return; 2051 } 2052 2053 if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) { 2054 cb_fn(cb_arg, -EINVAL); 2055 return; 2056 } 2057 2058 /* 2059 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 2060 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 2061 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 2062 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 2063 * to allocate a separate iov array and split the I/O such that none of the resulting 2064 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 2065 * but since this case happens very infrequently, any performance impact will be negligible. 2066 * 2067 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 2068 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 2069 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 2070 * when the batch was completed, to allow for freeing the memory for the iov arrays. 2071 */ 2072 if (spdk_likely(length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset))) { 2073 uint32_t lba_count; 2074 uint64_t lba; 2075 2076 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2077 2078 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2079 cpl.u.blob_basic.cb_fn = cb_fn; 2080 cpl.u.blob_basic.cb_arg = cb_arg; 2081 if (blob->frozen_refcnt) { 2082 /* This blob I/O is frozen */ 2083 spdk_bs_user_op_t *op; 2084 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 2085 2086 op = spdk_bs_user_op_alloc(_channel, &cpl, read, blob, iov, iovcnt, offset, length); 2087 if (!op) { 2088 cb_fn(cb_arg, -ENOMEM); 2089 return; 2090 } 2091 2092 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2093 2094 return; 2095 } 2096 2097 if (read) { 2098 spdk_bs_sequence_t *seq; 2099 2100 seq = spdk_bs_sequence_start(_channel, &cpl); 2101 if (!seq) { 2102 cb_fn(cb_arg, -ENOMEM); 2103 return; 2104 } 2105 2106 if (_spdk_bs_page_is_allocated(blob, offset)) { 2107 spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2108 } else { 2109 spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 2110 _spdk_rw_iov_done, NULL); 2111 } 2112 } else { 2113 if (_spdk_bs_page_is_allocated(blob, offset)) { 2114 spdk_bs_sequence_t *seq; 2115 2116 seq = spdk_bs_sequence_start(_channel, &cpl); 2117 if (!seq) { 2118 cb_fn(cb_arg, -ENOMEM); 2119 return; 2120 } 2121 2122 spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2123 } else { 2124 /* Queue this operation and allocate the cluster */ 2125 spdk_bs_user_op_t *op; 2126 2127 op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, length); 2128 if (!op) { 2129 cb_fn(cb_arg, -ENOMEM); 2130 return; 2131 } 2132 2133 _spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op); 2134 } 2135 } 2136 } else { 2137 struct rw_iov_ctx *ctx; 2138 2139 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 2140 if (ctx == NULL) { 2141 cb_fn(cb_arg, -ENOMEM); 2142 return; 2143 } 2144 2145 ctx->blob = blob; 2146 ctx->channel = _channel; 2147 ctx->cb_fn = cb_fn; 2148 ctx->cb_arg = cb_arg; 2149 ctx->read = read; 2150 ctx->orig_iov = iov; 2151 ctx->iovcnt = iovcnt; 2152 ctx->page_offset = offset; 2153 ctx->pages_remaining = length; 2154 ctx->pages_done = 0; 2155 2156 _spdk_rw_iov_split_next(ctx, 0); 2157 } 2158 } 2159 2160 static struct spdk_blob * 2161 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 2162 { 2163 struct spdk_blob *blob; 2164 2165 TAILQ_FOREACH(blob, &bs->blobs, link) { 2166 if (blob->id == blobid) { 2167 return blob; 2168 } 2169 } 2170 2171 return NULL; 2172 } 2173 2174 static int 2175 _spdk_bs_channel_create(void *io_device, void *ctx_buf) 2176 { 2177 struct spdk_blob_store *bs = io_device; 2178 struct spdk_bs_channel *channel = ctx_buf; 2179 struct spdk_bs_dev *dev; 2180 uint32_t max_ops = bs->max_channel_ops; 2181 uint32_t i; 2182 2183 dev = bs->dev; 2184 2185 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 2186 if (!channel->req_mem) { 2187 return -1; 2188 } 2189 2190 TAILQ_INIT(&channel->reqs); 2191 2192 for (i = 0; i < max_ops; i++) { 2193 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 2194 } 2195 2196 channel->bs = bs; 2197 channel->dev = dev; 2198 channel->dev_channel = dev->create_channel(dev); 2199 2200 if (!channel->dev_channel) { 2201 SPDK_ERRLOG("Failed to create device channel.\n"); 2202 free(channel->req_mem); 2203 return -1; 2204 } 2205 2206 TAILQ_INIT(&channel->need_cluster_alloc); 2207 TAILQ_INIT(&channel->queued_io); 2208 2209 return 0; 2210 } 2211 2212 static void 2213 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf) 2214 { 2215 struct spdk_bs_channel *channel = ctx_buf; 2216 spdk_bs_user_op_t *op; 2217 2218 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 2219 op = TAILQ_FIRST(&channel->need_cluster_alloc); 2220 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 2221 spdk_bs_user_op_abort(op); 2222 } 2223 2224 while (!TAILQ_EMPTY(&channel->queued_io)) { 2225 op = TAILQ_FIRST(&channel->queued_io); 2226 TAILQ_REMOVE(&channel->queued_io, op, link); 2227 spdk_bs_user_op_abort(op); 2228 } 2229 2230 free(channel->req_mem); 2231 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 2232 } 2233 2234 static void 2235 _spdk_bs_dev_destroy(void *io_device) 2236 { 2237 struct spdk_blob_store *bs = io_device; 2238 struct spdk_blob *blob, *blob_tmp; 2239 2240 bs->dev->destroy(bs->dev); 2241 2242 TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) { 2243 TAILQ_REMOVE(&bs->blobs, blob, link); 2244 _spdk_blob_free(blob); 2245 } 2246 2247 pthread_mutex_destroy(&bs->used_clusters_mutex); 2248 2249 spdk_bit_array_free(&bs->used_blobids); 2250 spdk_bit_array_free(&bs->used_md_pages); 2251 spdk_bit_array_free(&bs->used_clusters); 2252 /* 2253 * If this function is called for any reason except a successful unload, 2254 * the unload_cpl type will be NONE and this will be a nop. 2255 */ 2256 spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err); 2257 2258 free(bs); 2259 } 2260 2261 static int 2262 _spdk_bs_blob_list_add(struct spdk_blob *blob) 2263 { 2264 spdk_blob_id snapshot_id; 2265 struct spdk_blob_list *snapshot_entry = NULL; 2266 struct spdk_blob_list *clone_entry = NULL; 2267 2268 assert(blob != NULL); 2269 2270 snapshot_id = blob->parent_id; 2271 if (snapshot_id == SPDK_BLOBID_INVALID) { 2272 return 0; 2273 } 2274 2275 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 2276 if (snapshot_entry->id == snapshot_id) { 2277 break; 2278 } 2279 } 2280 2281 if (snapshot_entry == NULL) { 2282 /* Snapshot not found */ 2283 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 2284 if (snapshot_entry == NULL) { 2285 return -ENOMEM; 2286 } 2287 snapshot_entry->id = snapshot_id; 2288 TAILQ_INIT(&snapshot_entry->clones); 2289 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 2290 } else { 2291 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2292 if (clone_entry->id == blob->id) { 2293 break; 2294 } 2295 } 2296 } 2297 2298 if (clone_entry == NULL) { 2299 /* Clone not found */ 2300 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 2301 if (clone_entry == NULL) { 2302 return -ENOMEM; 2303 } 2304 clone_entry->id = blob->id; 2305 TAILQ_INIT(&clone_entry->clones); 2306 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 2307 snapshot_entry->clone_count++; 2308 } 2309 2310 return 0; 2311 } 2312 2313 static int 2314 _spdk_bs_blob_list_remove(struct spdk_blob *blob) 2315 { 2316 struct spdk_blob_list *snapshot_entry = NULL; 2317 struct spdk_blob_list *clone_entry = NULL; 2318 spdk_blob_id snapshot_id; 2319 2320 assert(blob != NULL); 2321 2322 snapshot_id = blob->parent_id; 2323 if (snapshot_id == SPDK_BLOBID_INVALID) { 2324 return 0; 2325 } 2326 2327 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 2328 if (snapshot_entry->id == snapshot_id) { 2329 break; 2330 } 2331 } 2332 2333 assert(snapshot_entry != NULL); 2334 2335 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2336 if (clone_entry->id == blob->id) { 2337 break; 2338 } 2339 } 2340 2341 assert(clone_entry != NULL); 2342 2343 blob->parent_id = SPDK_BLOBID_INVALID; 2344 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2345 free(clone_entry); 2346 2347 snapshot_entry->clone_count--; 2348 if (snapshot_entry->clone_count == 0) { 2349 /* Snapshot have no more clones */ 2350 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 2351 free(snapshot_entry); 2352 } 2353 2354 return 0; 2355 } 2356 2357 static int 2358 _spdk_bs_blob_list_free(struct spdk_blob_store *bs) 2359 { 2360 struct spdk_blob_list *snapshot_entry; 2361 struct spdk_blob_list *snapshot_entry_tmp; 2362 struct spdk_blob_list *clone_entry; 2363 struct spdk_blob_list *clone_entry_tmp; 2364 2365 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 2366 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 2367 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2368 free(clone_entry); 2369 } 2370 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 2371 free(snapshot_entry); 2372 } 2373 2374 return 0; 2375 } 2376 2377 static void 2378 _spdk_bs_free(struct spdk_blob_store *bs) 2379 { 2380 _spdk_bs_blob_list_free(bs); 2381 2382 spdk_bs_unregister_md_thread(bs); 2383 spdk_io_device_unregister(bs, _spdk_bs_dev_destroy); 2384 } 2385 2386 void 2387 spdk_bs_opts_init(struct spdk_bs_opts *opts) 2388 { 2389 opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ; 2390 opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES; 2391 opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS; 2392 opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS; 2393 memset(&opts->bstype, 0, sizeof(opts->bstype)); 2394 opts->iter_cb_fn = NULL; 2395 opts->iter_cb_arg = NULL; 2396 } 2397 2398 static int 2399 _spdk_bs_opts_verify(struct spdk_bs_opts *opts) 2400 { 2401 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 2402 opts->max_channel_ops == 0) { 2403 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 2404 return -1; 2405 } 2406 2407 return 0; 2408 } 2409 2410 static struct spdk_blob_store * 2411 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts) 2412 { 2413 struct spdk_blob_store *bs; 2414 uint64_t dev_size; 2415 int rc; 2416 2417 dev_size = dev->blocklen * dev->blockcnt; 2418 if (dev_size < opts->cluster_sz) { 2419 /* Device size cannot be smaller than cluster size of blobstore */ 2420 SPDK_ERRLOG("Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 2421 dev_size, opts->cluster_sz); 2422 return NULL; 2423 } 2424 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 2425 /* Cluster size cannot be smaller than page size */ 2426 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 2427 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 2428 return NULL; 2429 } 2430 bs = calloc(1, sizeof(struct spdk_blob_store)); 2431 if (!bs) { 2432 return NULL; 2433 } 2434 2435 TAILQ_INIT(&bs->blobs); 2436 TAILQ_INIT(&bs->snapshots); 2437 bs->dev = dev; 2438 bs->md_thread = spdk_get_thread(); 2439 assert(bs->md_thread != NULL); 2440 2441 /* 2442 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an 2443 * even multiple of the cluster size. 2444 */ 2445 bs->cluster_sz = opts->cluster_sz; 2446 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 2447 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 2448 bs->num_free_clusters = bs->total_clusters; 2449 bs->used_clusters = spdk_bit_array_create(bs->total_clusters); 2450 if (bs->used_clusters == NULL) { 2451 free(bs); 2452 return NULL; 2453 } 2454 2455 bs->max_channel_ops = opts->max_channel_ops; 2456 bs->super_blob = SPDK_BLOBID_INVALID; 2457 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 2458 2459 /* The metadata is assumed to be at least 1 page */ 2460 bs->used_md_pages = spdk_bit_array_create(1); 2461 bs->used_blobids = spdk_bit_array_create(0); 2462 2463 pthread_mutex_init(&bs->used_clusters_mutex, NULL); 2464 2465 spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy, 2466 sizeof(struct spdk_bs_channel)); 2467 rc = spdk_bs_register_md_thread(bs); 2468 if (rc == -1) { 2469 spdk_io_device_unregister(bs, NULL); 2470 pthread_mutex_destroy(&bs->used_clusters_mutex); 2471 spdk_bit_array_free(&bs->used_blobids); 2472 spdk_bit_array_free(&bs->used_md_pages); 2473 spdk_bit_array_free(&bs->used_clusters); 2474 free(bs); 2475 return NULL; 2476 } 2477 2478 return bs; 2479 } 2480 2481 /* START spdk_bs_load, spdk_bs_load_ctx will used for both load and unload. */ 2482 2483 struct spdk_bs_load_ctx { 2484 struct spdk_blob_store *bs; 2485 struct spdk_bs_super_block *super; 2486 2487 struct spdk_bs_md_mask *mask; 2488 bool in_page_chain; 2489 uint32_t page_index; 2490 uint32_t cur_page; 2491 struct spdk_blob_md_page *page; 2492 bool is_load; 2493 2494 spdk_bs_sequence_t *seq; 2495 spdk_blob_op_with_handle_complete iter_cb_fn; 2496 void *iter_cb_arg; 2497 }; 2498 2499 static void 2500 _spdk_bs_load_ctx_fail(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 2501 { 2502 assert(bserrno != 0); 2503 2504 spdk_dma_free(ctx->super); 2505 spdk_bs_sequence_finish(seq, bserrno); 2506 /* 2507 * Only free the blobstore when a load fails. If an unload fails (for some reason) 2508 * we want to keep the blobstore in case the caller wants to try again. 2509 */ 2510 if (ctx->is_load) { 2511 _spdk_bs_free(ctx->bs); 2512 } 2513 free(ctx); 2514 } 2515 2516 static void 2517 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask) 2518 { 2519 uint32_t i = 0; 2520 2521 while (true) { 2522 i = spdk_bit_array_find_first_set(array, i); 2523 if (i >= mask->length) { 2524 break; 2525 } 2526 mask->mask[i / 8] |= 1U << (i % 8); 2527 i++; 2528 } 2529 } 2530 2531 static void 2532 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2533 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2534 { 2535 /* Update the values in the super block */ 2536 super->super_blob = bs->super_blob; 2537 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 2538 super->crc = _spdk_blob_md_page_calc_crc(super); 2539 spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0), 2540 _spdk_bs_byte_to_lba(bs, sizeof(*super)), 2541 cb_fn, cb_arg); 2542 } 2543 2544 static void 2545 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2546 { 2547 struct spdk_bs_load_ctx *ctx = arg; 2548 uint64_t mask_size, lba, lba_count; 2549 2550 /* Write out the used clusters mask */ 2551 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 2552 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2553 if (!ctx->mask) { 2554 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2555 return; 2556 } 2557 2558 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 2559 ctx->mask->length = ctx->bs->total_clusters; 2560 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters)); 2561 2562 _spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask); 2563 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 2564 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 2565 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2566 } 2567 2568 static void 2569 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2570 { 2571 struct spdk_bs_load_ctx *ctx = arg; 2572 uint64_t mask_size, lba, lba_count; 2573 2574 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 2575 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2576 if (!ctx->mask) { 2577 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2578 return; 2579 } 2580 2581 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 2582 ctx->mask->length = ctx->super->md_len; 2583 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 2584 2585 _spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask); 2586 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 2587 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 2588 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2589 } 2590 2591 static void 2592 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2593 { 2594 struct spdk_bs_load_ctx *ctx = arg; 2595 uint64_t mask_size, lba, lba_count; 2596 2597 if (ctx->super->used_blobid_mask_len == 0) { 2598 /* 2599 * This is a pre-v3 on-disk format where the blobid mask does not get 2600 * written to disk. 2601 */ 2602 cb_fn(seq, arg, 0); 2603 return; 2604 } 2605 2606 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 2607 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2608 if (!ctx->mask) { 2609 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2610 return; 2611 } 2612 2613 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 2614 ctx->mask->length = ctx->super->md_len; 2615 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 2616 2617 _spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask); 2618 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 2619 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 2620 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2621 } 2622 2623 static void 2624 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 2625 { 2626 struct spdk_bs_load_ctx *ctx = arg; 2627 2628 if (bserrno == 0) { 2629 if (ctx->iter_cb_fn) { 2630 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 2631 } 2632 _spdk_bs_blob_list_add(blob); 2633 spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx); 2634 return; 2635 } 2636 2637 if (bserrno == -ENOENT) { 2638 bserrno = 0; 2639 } else { 2640 /* 2641 * This case needs to be looked at further. Same problem 2642 * exists with applications that rely on explicit blob 2643 * iteration. We should just skip the blob that failed 2644 * to load and coontinue on to the next one. 2645 */ 2646 SPDK_ERRLOG("Error in iterating blobs\n"); 2647 } 2648 2649 ctx->iter_cb_fn = NULL; 2650 2651 spdk_dma_free(ctx->super); 2652 spdk_dma_free(ctx->mask); 2653 spdk_bs_sequence_finish(ctx->seq, bserrno); 2654 free(ctx); 2655 } 2656 2657 static void 2658 _spdk_bs_load_complete(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 2659 { 2660 ctx->seq = seq; 2661 spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx); 2662 } 2663 2664 static void 2665 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2666 { 2667 struct spdk_bs_load_ctx *ctx = cb_arg; 2668 uint32_t i, j; 2669 int rc; 2670 2671 /* The type must be correct */ 2672 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 2673 2674 /* The length of the mask (in bits) must not be greater than 2675 * the length of the buffer (converted to bits) */ 2676 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 2677 2678 /* The length of the mask must be exactly equal to the size 2679 * (in pages) of the metadata region */ 2680 assert(ctx->mask->length == ctx->super->md_len); 2681 2682 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length); 2683 if (rc < 0) { 2684 spdk_dma_free(ctx->mask); 2685 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2686 return; 2687 } 2688 2689 for (i = 0; i < ctx->mask->length / 8; i++) { 2690 uint8_t segment = ctx->mask->mask[i]; 2691 for (j = 0; segment; j++) { 2692 if (segment & 1U) { 2693 spdk_bit_array_set(ctx->bs->used_blobids, (i * 8) + j); 2694 } 2695 segment >>= 1U; 2696 } 2697 } 2698 2699 _spdk_bs_load_complete(seq, ctx, bserrno); 2700 } 2701 2702 static void 2703 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2704 { 2705 struct spdk_bs_load_ctx *ctx = cb_arg; 2706 uint64_t lba, lba_count, mask_size; 2707 uint32_t i, j; 2708 int rc; 2709 2710 /* The type must be correct */ 2711 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 2712 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 2713 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 2714 struct spdk_blob_md_page) * 8)); 2715 /* The length of the mask must be exactly equal to the total number of clusters */ 2716 assert(ctx->mask->length == ctx->bs->total_clusters); 2717 2718 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 2719 if (rc < 0) { 2720 spdk_dma_free(ctx->mask); 2721 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2722 return; 2723 } 2724 2725 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 2726 for (i = 0; i < ctx->mask->length / 8; i++) { 2727 uint8_t segment = ctx->mask->mask[i]; 2728 for (j = 0; segment && (j < 8); j++) { 2729 if (segment & 1U) { 2730 spdk_bit_array_set(ctx->bs->used_clusters, (i * 8) + j); 2731 assert(ctx->bs->num_free_clusters > 0); 2732 ctx->bs->num_free_clusters--; 2733 } 2734 segment >>= 1U; 2735 } 2736 } 2737 2738 spdk_dma_free(ctx->mask); 2739 2740 /* Read the used blobids mask */ 2741 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 2742 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2743 if (!ctx->mask) { 2744 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2745 return; 2746 } 2747 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 2748 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 2749 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2750 _spdk_bs_load_used_blobids_cpl, ctx); 2751 } 2752 2753 static void 2754 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2755 { 2756 struct spdk_bs_load_ctx *ctx = cb_arg; 2757 uint64_t lba, lba_count, mask_size; 2758 uint32_t i, j; 2759 int rc; 2760 2761 /* The type must be correct */ 2762 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 2763 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 2764 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 2765 8)); 2766 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 2767 assert(ctx->mask->length == ctx->super->md_len); 2768 2769 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 2770 if (rc < 0) { 2771 spdk_dma_free(ctx->mask); 2772 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2773 return; 2774 } 2775 2776 for (i = 0; i < ctx->mask->length / 8; i++) { 2777 uint8_t segment = ctx->mask->mask[i]; 2778 for (j = 0; segment && (j < 8); j++) { 2779 if (segment & 1U) { 2780 spdk_bit_array_set(ctx->bs->used_md_pages, (i * 8) + j); 2781 } 2782 segment >>= 1U; 2783 } 2784 } 2785 spdk_dma_free(ctx->mask); 2786 2787 /* Read the used clusters mask */ 2788 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 2789 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2790 if (!ctx->mask) { 2791 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2792 return; 2793 } 2794 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 2795 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 2796 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2797 _spdk_bs_load_used_clusters_cpl, ctx); 2798 } 2799 2800 static void 2801 _spdk_bs_load_read_used_pages(spdk_bs_sequence_t *seq, void *cb_arg) 2802 { 2803 struct spdk_bs_load_ctx *ctx = cb_arg; 2804 uint64_t lba, lba_count, mask_size; 2805 2806 /* Read the used pages mask */ 2807 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 2808 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2809 if (!ctx->mask) { 2810 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2811 return; 2812 } 2813 2814 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 2815 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 2816 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2817 _spdk_bs_load_used_pages_cpl, ctx); 2818 } 2819 2820 static int 2821 _spdk_bs_load_replay_md_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_store *bs) 2822 { 2823 struct spdk_blob_md_descriptor *desc; 2824 size_t cur_desc = 0; 2825 2826 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 2827 while (cur_desc < sizeof(page->descriptors)) { 2828 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 2829 if (desc->length == 0) { 2830 /* If padding and length are 0, this terminates the page */ 2831 break; 2832 } 2833 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 2834 struct spdk_blob_md_descriptor_extent *desc_extent; 2835 unsigned int i, j; 2836 unsigned int cluster_count = 0; 2837 uint32_t cluster_idx; 2838 2839 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 2840 2841 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 2842 for (j = 0; j < desc_extent->extents[i].length; j++) { 2843 cluster_idx = desc_extent->extents[i].cluster_idx; 2844 /* 2845 * cluster_idx = 0 means an unallocated cluster - don't mark that 2846 * in the used cluster map. 2847 */ 2848 if (cluster_idx != 0) { 2849 spdk_bit_array_set(bs->used_clusters, cluster_idx + j); 2850 if (bs->num_free_clusters == 0) { 2851 return -ENOSPC; 2852 } 2853 bs->num_free_clusters--; 2854 } 2855 cluster_count++; 2856 } 2857 } 2858 if (cluster_count == 0) { 2859 return -EINVAL; 2860 } 2861 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 2862 /* Skip this item */ 2863 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 2864 /* Skip this item */ 2865 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 2866 /* Skip this item */ 2867 } else { 2868 /* Error */ 2869 return -EINVAL; 2870 } 2871 /* Advance to the next descriptor */ 2872 cur_desc += sizeof(*desc) + desc->length; 2873 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 2874 break; 2875 } 2876 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 2877 } 2878 return 0; 2879 } 2880 2881 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 2882 { 2883 uint32_t crc; 2884 2885 crc = _spdk_blob_md_page_calc_crc(ctx->page); 2886 if (crc != ctx->page->crc) { 2887 return false; 2888 } 2889 2890 if (_spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) { 2891 return false; 2892 } 2893 return true; 2894 } 2895 2896 static void 2897 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 2898 2899 static void 2900 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2901 { 2902 struct spdk_bs_load_ctx *ctx = cb_arg; 2903 2904 _spdk_bs_load_complete(seq, ctx, bserrno); 2905 } 2906 2907 static void 2908 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2909 { 2910 struct spdk_bs_load_ctx *ctx = cb_arg; 2911 2912 spdk_dma_free(ctx->mask); 2913 ctx->mask = NULL; 2914 2915 _spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_load_write_used_clusters_cpl); 2916 } 2917 2918 static void 2919 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2920 { 2921 struct spdk_bs_load_ctx *ctx = cb_arg; 2922 2923 spdk_dma_free(ctx->mask); 2924 ctx->mask = NULL; 2925 2926 _spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_load_write_used_blobids_cpl); 2927 } 2928 2929 static void 2930 _spdk_bs_load_write_used_md(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2931 { 2932 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_load_write_used_pages_cpl); 2933 } 2934 2935 static void 2936 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2937 { 2938 struct spdk_bs_load_ctx *ctx = cb_arg; 2939 uint64_t num_md_clusters; 2940 uint64_t i; 2941 uint32_t page_num; 2942 2943 if (bserrno != 0) { 2944 _spdk_bs_load_ctx_fail(seq, ctx, bserrno); 2945 return; 2946 } 2947 2948 page_num = ctx->cur_page; 2949 if (_spdk_bs_load_cur_md_page_valid(ctx) == true) { 2950 if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) { 2951 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 2952 if (ctx->page->sequence_num == 0) { 2953 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 2954 } 2955 if (_spdk_bs_load_replay_md_parse_page(ctx->page, ctx->bs)) { 2956 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 2957 return; 2958 } 2959 if (ctx->page->next != SPDK_INVALID_MD_PAGE) { 2960 ctx->in_page_chain = true; 2961 ctx->cur_page = ctx->page->next; 2962 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 2963 return; 2964 } 2965 } 2966 } 2967 2968 ctx->in_page_chain = false; 2969 2970 do { 2971 ctx->page_index++; 2972 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 2973 2974 if (ctx->page_index < ctx->super->md_len) { 2975 ctx->cur_page = ctx->page_index; 2976 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 2977 } else { 2978 /* Claim all of the clusters used by the metadata */ 2979 num_md_clusters = divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster); 2980 for (i = 0; i < num_md_clusters; i++) { 2981 _spdk_bs_claim_cluster(ctx->bs, i); 2982 } 2983 spdk_dma_free(ctx->page); 2984 _spdk_bs_load_write_used_md(seq, ctx, bserrno); 2985 } 2986 } 2987 2988 static void 2989 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 2990 { 2991 struct spdk_bs_load_ctx *ctx = cb_arg; 2992 uint64_t lba; 2993 2994 assert(ctx->cur_page < ctx->super->md_len); 2995 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 2996 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 2997 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 2998 _spdk_bs_load_replay_md_cpl, ctx); 2999 } 3000 3001 static void 3002 _spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg) 3003 { 3004 struct spdk_bs_load_ctx *ctx = cb_arg; 3005 3006 ctx->page_index = 0; 3007 ctx->cur_page = 0; 3008 ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE, 3009 SPDK_BS_PAGE_SIZE, 3010 NULL); 3011 if (!ctx->page) { 3012 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3013 return; 3014 } 3015 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 3016 } 3017 3018 static void 3019 _spdk_bs_recover(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3020 { 3021 struct spdk_bs_load_ctx *ctx = cb_arg; 3022 int rc; 3023 3024 if (bserrno != 0) { 3025 _spdk_bs_load_ctx_fail(seq, ctx, -EIO); 3026 return; 3027 } 3028 3029 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 3030 if (rc < 0) { 3031 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3032 return; 3033 } 3034 3035 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 3036 if (rc < 0) { 3037 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3038 return; 3039 } 3040 3041 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 3042 if (rc < 0) { 3043 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3044 return; 3045 } 3046 3047 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 3048 _spdk_bs_load_replay_md(seq, cb_arg); 3049 } 3050 3051 static void 3052 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3053 { 3054 struct spdk_bs_load_ctx *ctx = cb_arg; 3055 uint32_t crc; 3056 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH]; 3057 3058 if (ctx->super->version > SPDK_BS_VERSION || 3059 ctx->super->version < SPDK_BS_INITIAL_VERSION) { 3060 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3061 return; 3062 } 3063 3064 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3065 sizeof(ctx->super->signature)) != 0) { 3066 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3067 return; 3068 } 3069 3070 crc = _spdk_blob_md_page_calc_crc(ctx->super); 3071 if (crc != ctx->super->crc) { 3072 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3073 return; 3074 } 3075 3076 if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 3077 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n"); 3078 } else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 3079 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless bstype\n"); 3080 } else { 3081 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n"); 3082 SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 3083 SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 3084 _spdk_bs_load_ctx_fail(seq, ctx, -ENXIO); 3085 return; 3086 } 3087 3088 /* Parse the super block */ 3089 ctx->bs->clean = 1; 3090 ctx->bs->cluster_sz = ctx->super->cluster_size; 3091 ctx->bs->total_clusters = ctx->bs->dev->blockcnt / (ctx->bs->cluster_sz / ctx->bs->dev->blocklen); 3092 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3093 ctx->bs->md_start = ctx->super->md_start; 3094 ctx->bs->md_len = ctx->super->md_len; 3095 ctx->bs->total_data_clusters = ctx->bs->total_clusters - divide_round_up( 3096 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 3097 ctx->bs->super_blob = ctx->super->super_blob; 3098 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 3099 3100 if (ctx->super->clean == 0) { 3101 _spdk_bs_recover(seq, ctx, 0); 3102 } else if (ctx->super->used_blobid_mask_len == 0) { 3103 /* 3104 * Metadata is clean, but this is an old metadata format without 3105 * a blobid mask. Clear the clean bit and then build the masks 3106 * using _spdk_bs_recover. 3107 */ 3108 ctx->super->clean = 0; 3109 ctx->bs->clean = 0; 3110 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_recover, ctx); 3111 } else { 3112 _spdk_bs_load_read_used_pages(seq, ctx); 3113 } 3114 } 3115 3116 void 3117 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 3118 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 3119 { 3120 struct spdk_blob_store *bs; 3121 struct spdk_bs_cpl cpl; 3122 spdk_bs_sequence_t *seq; 3123 struct spdk_bs_load_ctx *ctx; 3124 struct spdk_bs_opts opts = {}; 3125 3126 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev); 3127 3128 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 3129 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen); 3130 dev->destroy(dev); 3131 cb_fn(cb_arg, NULL, -EINVAL); 3132 return; 3133 } 3134 3135 if (o) { 3136 opts = *o; 3137 } else { 3138 spdk_bs_opts_init(&opts); 3139 } 3140 3141 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 3142 dev->destroy(dev); 3143 cb_fn(cb_arg, NULL, -EINVAL); 3144 return; 3145 } 3146 3147 bs = _spdk_bs_alloc(dev, &opts); 3148 if (!bs) { 3149 dev->destroy(dev); 3150 cb_fn(cb_arg, NULL, -ENOMEM); 3151 return; 3152 } 3153 3154 ctx = calloc(1, sizeof(*ctx)); 3155 if (!ctx) { 3156 _spdk_bs_free(bs); 3157 cb_fn(cb_arg, NULL, -ENOMEM); 3158 return; 3159 } 3160 3161 ctx->bs = bs; 3162 ctx->is_load = true; 3163 ctx->iter_cb_fn = opts.iter_cb_fn; 3164 ctx->iter_cb_arg = opts.iter_cb_arg; 3165 3166 /* Allocate memory for the super block */ 3167 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3168 if (!ctx->super) { 3169 free(ctx); 3170 _spdk_bs_free(bs); 3171 cb_fn(cb_arg, NULL, -ENOMEM); 3172 return; 3173 } 3174 3175 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 3176 cpl.u.bs_handle.cb_fn = cb_fn; 3177 cpl.u.bs_handle.cb_arg = cb_arg; 3178 cpl.u.bs_handle.bs = bs; 3179 3180 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3181 if (!seq) { 3182 spdk_dma_free(ctx->super); 3183 free(ctx); 3184 _spdk_bs_free(bs); 3185 cb_fn(cb_arg, NULL, -ENOMEM); 3186 return; 3187 } 3188 3189 /* Read the super block */ 3190 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3191 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3192 _spdk_bs_load_super_cpl, ctx); 3193 } 3194 3195 /* END spdk_bs_load */ 3196 3197 /* START spdk_bs_dump */ 3198 3199 struct spdk_bs_dump_ctx { 3200 struct spdk_blob_store *bs; 3201 struct spdk_bs_super_block *super; 3202 uint32_t cur_page; 3203 struct spdk_blob_md_page *page; 3204 spdk_bs_sequence_t *seq; 3205 FILE *fp; 3206 spdk_bs_dump_print_xattr print_xattr_fn; 3207 char xattr_name[4096]; 3208 }; 3209 3210 static void 3211 _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno) 3212 { 3213 spdk_dma_free(ctx->super); 3214 3215 /* 3216 * We need to defer calling spdk_bs_call_cpl() until after 3217 * dev destuction, so tuck these away for later use. 3218 */ 3219 ctx->bs->unload_err = bserrno; 3220 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3221 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3222 3223 spdk_bs_sequence_finish(seq, 0); 3224 _spdk_bs_free(ctx->bs); 3225 free(ctx); 3226 } 3227 3228 static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 3229 3230 static void 3231 _spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx) 3232 { 3233 uint32_t page_idx = ctx->cur_page; 3234 struct spdk_blob_md_page *page = ctx->page; 3235 struct spdk_blob_md_descriptor *desc; 3236 size_t cur_desc = 0; 3237 uint32_t crc; 3238 3239 fprintf(ctx->fp, "=========\n"); 3240 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 3241 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 3242 3243 crc = _spdk_blob_md_page_calc_crc(page); 3244 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch"); 3245 3246 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3247 while (cur_desc < sizeof(page->descriptors)) { 3248 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 3249 if (desc->length == 0) { 3250 /* If padding and length are 0, this terminates the page */ 3251 break; 3252 } 3253 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 3254 struct spdk_blob_md_descriptor_extent *desc_extent; 3255 unsigned int i; 3256 3257 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 3258 3259 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 3260 if (desc_extent->extents[i].cluster_idx != 0) { 3261 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 3262 desc_extent->extents[i].cluster_idx); 3263 } else { 3264 fprintf(ctx->fp, "Unallocated Extent - "); 3265 } 3266 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent->extents[i].length); 3267 fprintf(ctx->fp, "\n"); 3268 } 3269 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 3270 struct spdk_blob_md_descriptor_xattr *desc_xattr; 3271 uint32_t i; 3272 3273 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 3274 3275 if (desc_xattr->length != 3276 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 3277 desc_xattr->name_length + desc_xattr->value_length) { 3278 } 3279 3280 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 3281 ctx->xattr_name[desc_xattr->name_length] = '\0'; 3282 fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name); 3283 fprintf(ctx->fp, " value = \""); 3284 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 3285 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 3286 desc_xattr->value_length); 3287 fprintf(ctx->fp, "\"\n"); 3288 for (i = 0; i < desc_xattr->value_length; i++) { 3289 if (i % 16 == 0) { 3290 fprintf(ctx->fp, " "); 3291 } 3292 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 3293 if ((i + 1) % 16 == 0) { 3294 fprintf(ctx->fp, "\n"); 3295 } 3296 } 3297 if (i % 16 != 0) { 3298 fprintf(ctx->fp, "\n"); 3299 } 3300 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 3301 /* TODO */ 3302 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 3303 /* TODO */ 3304 } else { 3305 /* Error */ 3306 } 3307 /* Advance to the next descriptor */ 3308 cur_desc += sizeof(*desc) + desc->length; 3309 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 3310 break; 3311 } 3312 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 3313 } 3314 } 3315 3316 static void 3317 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3318 { 3319 struct spdk_bs_dump_ctx *ctx = cb_arg; 3320 3321 if (bserrno != 0) { 3322 _spdk_bs_dump_finish(seq, ctx, bserrno); 3323 return; 3324 } 3325 3326 if (ctx->page->id != 0) { 3327 _spdk_bs_dump_print_md_page(ctx); 3328 } 3329 3330 ctx->cur_page++; 3331 3332 if (ctx->cur_page < ctx->super->md_len) { 3333 _spdk_bs_dump_read_md_page(seq, cb_arg); 3334 } else { 3335 spdk_dma_free(ctx->page); 3336 _spdk_bs_dump_finish(seq, ctx, 0); 3337 } 3338 } 3339 3340 static void 3341 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 3342 { 3343 struct spdk_bs_dump_ctx *ctx = cb_arg; 3344 uint64_t lba; 3345 3346 assert(ctx->cur_page < ctx->super->md_len); 3347 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 3348 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 3349 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 3350 _spdk_bs_dump_read_md_page_cpl, ctx); 3351 } 3352 3353 static void 3354 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3355 { 3356 struct spdk_bs_dump_ctx *ctx = cb_arg; 3357 3358 fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature); 3359 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3360 sizeof(ctx->super->signature)) != 0) { 3361 fprintf(ctx->fp, "(Mismatch)\n"); 3362 _spdk_bs_dump_finish(seq, ctx, bserrno); 3363 return; 3364 } else { 3365 fprintf(ctx->fp, "(OK)\n"); 3366 } 3367 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 3368 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 3369 (ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 3370 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 3371 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 3372 fprintf(ctx->fp, "Super Blob ID: "); 3373 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 3374 fprintf(ctx->fp, "(None)\n"); 3375 } else { 3376 fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob); 3377 } 3378 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 3379 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 3380 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 3381 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 3382 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 3383 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 3384 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 3385 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 3386 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 3387 3388 ctx->cur_page = 0; 3389 ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE, 3390 SPDK_BS_PAGE_SIZE, 3391 NULL); 3392 if (!ctx->page) { 3393 _spdk_bs_dump_finish(seq, ctx, -ENOMEM); 3394 return; 3395 } 3396 _spdk_bs_dump_read_md_page(seq, cb_arg); 3397 } 3398 3399 void 3400 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 3401 spdk_bs_op_complete cb_fn, void *cb_arg) 3402 { 3403 struct spdk_blob_store *bs; 3404 struct spdk_bs_cpl cpl; 3405 spdk_bs_sequence_t *seq; 3406 struct spdk_bs_dump_ctx *ctx; 3407 struct spdk_bs_opts opts = {}; 3408 3409 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev); 3410 3411 spdk_bs_opts_init(&opts); 3412 3413 bs = _spdk_bs_alloc(dev, &opts); 3414 if (!bs) { 3415 dev->destroy(dev); 3416 cb_fn(cb_arg, -ENOMEM); 3417 return; 3418 } 3419 3420 ctx = calloc(1, sizeof(*ctx)); 3421 if (!ctx) { 3422 _spdk_bs_free(bs); 3423 cb_fn(cb_arg, -ENOMEM); 3424 return; 3425 } 3426 3427 ctx->bs = bs; 3428 ctx->fp = fp; 3429 ctx->print_xattr_fn = print_xattr_fn; 3430 3431 /* Allocate memory for the super block */ 3432 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3433 if (!ctx->super) { 3434 free(ctx); 3435 _spdk_bs_free(bs); 3436 cb_fn(cb_arg, -ENOMEM); 3437 return; 3438 } 3439 3440 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3441 cpl.u.bs_basic.cb_fn = cb_fn; 3442 cpl.u.bs_basic.cb_arg = cb_arg; 3443 3444 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3445 if (!seq) { 3446 spdk_dma_free(ctx->super); 3447 free(ctx); 3448 _spdk_bs_free(bs); 3449 cb_fn(cb_arg, -ENOMEM); 3450 return; 3451 } 3452 3453 /* Read the super block */ 3454 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3455 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3456 _spdk_bs_dump_super_cpl, ctx); 3457 } 3458 3459 /* END spdk_bs_dump */ 3460 3461 /* START spdk_bs_init */ 3462 3463 struct spdk_bs_init_ctx { 3464 struct spdk_blob_store *bs; 3465 struct spdk_bs_super_block *super; 3466 }; 3467 3468 static void 3469 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3470 { 3471 struct spdk_bs_init_ctx *ctx = cb_arg; 3472 3473 spdk_dma_free(ctx->super); 3474 free(ctx); 3475 3476 spdk_bs_sequence_finish(seq, bserrno); 3477 } 3478 3479 static void 3480 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3481 { 3482 struct spdk_bs_init_ctx *ctx = cb_arg; 3483 3484 /* Write super block */ 3485 spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0), 3486 _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 3487 _spdk_bs_init_persist_super_cpl, ctx); 3488 } 3489 3490 void 3491 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 3492 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 3493 { 3494 struct spdk_bs_init_ctx *ctx; 3495 struct spdk_blob_store *bs; 3496 struct spdk_bs_cpl cpl; 3497 spdk_bs_sequence_t *seq; 3498 spdk_bs_batch_t *batch; 3499 uint64_t num_md_lba; 3500 uint64_t num_md_pages; 3501 uint64_t num_md_clusters; 3502 uint32_t i; 3503 struct spdk_bs_opts opts = {}; 3504 int rc; 3505 3506 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev); 3507 3508 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 3509 SPDK_ERRLOG("unsupported dev block length of %d\n", 3510 dev->blocklen); 3511 dev->destroy(dev); 3512 cb_fn(cb_arg, NULL, -EINVAL); 3513 return; 3514 } 3515 3516 if (o) { 3517 opts = *o; 3518 } else { 3519 spdk_bs_opts_init(&opts); 3520 } 3521 3522 if (_spdk_bs_opts_verify(&opts) != 0) { 3523 dev->destroy(dev); 3524 cb_fn(cb_arg, NULL, -EINVAL); 3525 return; 3526 } 3527 3528 bs = _spdk_bs_alloc(dev, &opts); 3529 if (!bs) { 3530 dev->destroy(dev); 3531 cb_fn(cb_arg, NULL, -ENOMEM); 3532 return; 3533 } 3534 3535 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 3536 /* By default, allocate 1 page per cluster. 3537 * Technically, this over-allocates metadata 3538 * because more metadata will reduce the number 3539 * of usable clusters. This can be addressed with 3540 * more complex math in the future. 3541 */ 3542 bs->md_len = bs->total_clusters; 3543 } else { 3544 bs->md_len = opts.num_md_pages; 3545 } 3546 3547 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 3548 if (rc < 0) { 3549 _spdk_bs_free(bs); 3550 cb_fn(cb_arg, NULL, -ENOMEM); 3551 return; 3552 } 3553 3554 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 3555 if (rc < 0) { 3556 _spdk_bs_free(bs); 3557 cb_fn(cb_arg, NULL, -ENOMEM); 3558 return; 3559 } 3560 3561 ctx = calloc(1, sizeof(*ctx)); 3562 if (!ctx) { 3563 _spdk_bs_free(bs); 3564 cb_fn(cb_arg, NULL, -ENOMEM); 3565 return; 3566 } 3567 3568 ctx->bs = bs; 3569 3570 /* Allocate memory for the super block */ 3571 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3572 if (!ctx->super) { 3573 free(ctx); 3574 _spdk_bs_free(bs); 3575 cb_fn(cb_arg, NULL, -ENOMEM); 3576 return; 3577 } 3578 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3579 sizeof(ctx->super->signature)); 3580 ctx->super->version = SPDK_BS_VERSION; 3581 ctx->super->length = sizeof(*ctx->super); 3582 ctx->super->super_blob = bs->super_blob; 3583 ctx->super->clean = 0; 3584 ctx->super->cluster_size = bs->cluster_sz; 3585 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 3586 3587 /* Calculate how many pages the metadata consumes at the front 3588 * of the disk. 3589 */ 3590 3591 /* The super block uses 1 page */ 3592 num_md_pages = 1; 3593 3594 /* The used_md_pages mask requires 1 bit per metadata page, rounded 3595 * up to the nearest page, plus a header. 3596 */ 3597 ctx->super->used_page_mask_start = num_md_pages; 3598 ctx->super->used_page_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) + 3599 divide_round_up(bs->md_len, 8), 3600 SPDK_BS_PAGE_SIZE); 3601 num_md_pages += ctx->super->used_page_mask_len; 3602 3603 /* The used_clusters mask requires 1 bit per cluster, rounded 3604 * up to the nearest page, plus a header. 3605 */ 3606 ctx->super->used_cluster_mask_start = num_md_pages; 3607 ctx->super->used_cluster_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) + 3608 divide_round_up(bs->total_clusters, 8), 3609 SPDK_BS_PAGE_SIZE); 3610 num_md_pages += ctx->super->used_cluster_mask_len; 3611 3612 /* The used_blobids mask requires 1 bit per metadata page, rounded 3613 * up to the nearest page, plus a header. 3614 */ 3615 ctx->super->used_blobid_mask_start = num_md_pages; 3616 ctx->super->used_blobid_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) + 3617 divide_round_up(bs->md_len, 8), 3618 SPDK_BS_PAGE_SIZE); 3619 num_md_pages += ctx->super->used_blobid_mask_len; 3620 3621 /* The metadata region size was chosen above */ 3622 ctx->super->md_start = bs->md_start = num_md_pages; 3623 ctx->super->md_len = bs->md_len; 3624 num_md_pages += bs->md_len; 3625 3626 num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages); 3627 3628 ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super); 3629 3630 num_md_clusters = divide_round_up(num_md_pages, bs->pages_per_cluster); 3631 if (num_md_clusters > bs->total_clusters) { 3632 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 3633 "please decrease number of pages reserved for metadata " 3634 "or increase cluster size.\n"); 3635 spdk_dma_free(ctx->super); 3636 free(ctx); 3637 _spdk_bs_free(bs); 3638 cb_fn(cb_arg, NULL, -ENOMEM); 3639 return; 3640 } 3641 /* Claim all of the clusters used by the metadata */ 3642 for (i = 0; i < num_md_clusters; i++) { 3643 _spdk_bs_claim_cluster(bs, i); 3644 } 3645 3646 bs->total_data_clusters = bs->num_free_clusters; 3647 3648 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 3649 cpl.u.bs_handle.cb_fn = cb_fn; 3650 cpl.u.bs_handle.cb_arg = cb_arg; 3651 cpl.u.bs_handle.bs = bs; 3652 3653 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3654 if (!seq) { 3655 spdk_dma_free(ctx->super); 3656 free(ctx); 3657 _spdk_bs_free(bs); 3658 cb_fn(cb_arg, NULL, -ENOMEM); 3659 return; 3660 } 3661 3662 batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx); 3663 3664 /* Clear metadata space */ 3665 spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 3666 /* Trim data clusters */ 3667 spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba); 3668 3669 spdk_bs_batch_close(batch); 3670 } 3671 3672 /* END spdk_bs_init */ 3673 3674 /* START spdk_bs_destroy */ 3675 3676 static void 3677 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3678 { 3679 struct spdk_bs_init_ctx *ctx = cb_arg; 3680 struct spdk_blob_store *bs = ctx->bs; 3681 3682 /* 3683 * We need to defer calling spdk_bs_call_cpl() until after 3684 * dev destruction, so tuck these away for later use. 3685 */ 3686 bs->unload_err = bserrno; 3687 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3688 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3689 3690 spdk_bs_sequence_finish(seq, bserrno); 3691 3692 _spdk_bs_free(bs); 3693 free(ctx); 3694 } 3695 3696 void 3697 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 3698 void *cb_arg) 3699 { 3700 struct spdk_bs_cpl cpl; 3701 spdk_bs_sequence_t *seq; 3702 struct spdk_bs_init_ctx *ctx; 3703 3704 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n"); 3705 3706 if (!TAILQ_EMPTY(&bs->blobs)) { 3707 SPDK_ERRLOG("Blobstore still has open blobs\n"); 3708 cb_fn(cb_arg, -EBUSY); 3709 return; 3710 } 3711 3712 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3713 cpl.u.bs_basic.cb_fn = cb_fn; 3714 cpl.u.bs_basic.cb_arg = cb_arg; 3715 3716 ctx = calloc(1, sizeof(*ctx)); 3717 if (!ctx) { 3718 cb_fn(cb_arg, -ENOMEM); 3719 return; 3720 } 3721 3722 ctx->bs = bs; 3723 3724 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3725 if (!seq) { 3726 free(ctx); 3727 cb_fn(cb_arg, -ENOMEM); 3728 return; 3729 } 3730 3731 /* Write zeroes to the super block */ 3732 spdk_bs_sequence_write_zeroes_dev(seq, 3733 _spdk_bs_page_to_lba(bs, 0), 3734 _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 3735 _spdk_bs_destroy_trim_cpl, ctx); 3736 } 3737 3738 /* END spdk_bs_destroy */ 3739 3740 /* START spdk_bs_unload */ 3741 3742 static void 3743 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3744 { 3745 struct spdk_bs_load_ctx *ctx = cb_arg; 3746 3747 spdk_dma_free(ctx->super); 3748 3749 /* 3750 * We need to defer calling spdk_bs_call_cpl() until after 3751 * dev destuction, so tuck these away for later use. 3752 */ 3753 ctx->bs->unload_err = bserrno; 3754 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3755 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3756 3757 spdk_bs_sequence_finish(seq, bserrno); 3758 3759 _spdk_bs_free(ctx->bs); 3760 free(ctx); 3761 } 3762 3763 static void 3764 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3765 { 3766 struct spdk_bs_load_ctx *ctx = cb_arg; 3767 3768 spdk_dma_free(ctx->mask); 3769 ctx->super->clean = 1; 3770 3771 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx); 3772 } 3773 3774 static void 3775 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3776 { 3777 struct spdk_bs_load_ctx *ctx = cb_arg; 3778 3779 spdk_dma_free(ctx->mask); 3780 ctx->mask = NULL; 3781 3782 _spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_unload_write_used_clusters_cpl); 3783 } 3784 3785 static void 3786 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3787 { 3788 struct spdk_bs_load_ctx *ctx = cb_arg; 3789 3790 spdk_dma_free(ctx->mask); 3791 ctx->mask = NULL; 3792 3793 _spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_unload_write_used_blobids_cpl); 3794 } 3795 3796 static void 3797 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3798 { 3799 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl); 3800 } 3801 3802 void 3803 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 3804 { 3805 struct spdk_bs_cpl cpl; 3806 spdk_bs_sequence_t *seq; 3807 struct spdk_bs_load_ctx *ctx; 3808 3809 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n"); 3810 3811 if (!TAILQ_EMPTY(&bs->blobs)) { 3812 SPDK_ERRLOG("Blobstore still has open blobs\n"); 3813 cb_fn(cb_arg, -EBUSY); 3814 return; 3815 } 3816 3817 ctx = calloc(1, sizeof(*ctx)); 3818 if (!ctx) { 3819 cb_fn(cb_arg, -ENOMEM); 3820 return; 3821 } 3822 3823 ctx->bs = bs; 3824 ctx->is_load = false; 3825 3826 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3827 if (!ctx->super) { 3828 free(ctx); 3829 cb_fn(cb_arg, -ENOMEM); 3830 return; 3831 } 3832 3833 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3834 cpl.u.bs_basic.cb_fn = cb_fn; 3835 cpl.u.bs_basic.cb_arg = cb_arg; 3836 3837 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3838 if (!seq) { 3839 spdk_dma_free(ctx->super); 3840 free(ctx); 3841 cb_fn(cb_arg, -ENOMEM); 3842 return; 3843 } 3844 3845 /* Read super block */ 3846 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3847 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3848 _spdk_bs_unload_read_super_cpl, ctx); 3849 } 3850 3851 /* END spdk_bs_unload */ 3852 3853 /* START spdk_bs_set_super */ 3854 3855 struct spdk_bs_set_super_ctx { 3856 struct spdk_blob_store *bs; 3857 struct spdk_bs_super_block *super; 3858 }; 3859 3860 static void 3861 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3862 { 3863 struct spdk_bs_set_super_ctx *ctx = cb_arg; 3864 3865 if (bserrno != 0) { 3866 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 3867 } 3868 3869 spdk_dma_free(ctx->super); 3870 3871 spdk_bs_sequence_finish(seq, bserrno); 3872 3873 free(ctx); 3874 } 3875 3876 static void 3877 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3878 { 3879 struct spdk_bs_set_super_ctx *ctx = cb_arg; 3880 3881 if (bserrno != 0) { 3882 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 3883 spdk_dma_free(ctx->super); 3884 spdk_bs_sequence_finish(seq, bserrno); 3885 free(ctx); 3886 return; 3887 } 3888 3889 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx); 3890 } 3891 3892 void 3893 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 3894 spdk_bs_op_complete cb_fn, void *cb_arg) 3895 { 3896 struct spdk_bs_cpl cpl; 3897 spdk_bs_sequence_t *seq; 3898 struct spdk_bs_set_super_ctx *ctx; 3899 3900 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n"); 3901 3902 ctx = calloc(1, sizeof(*ctx)); 3903 if (!ctx) { 3904 cb_fn(cb_arg, -ENOMEM); 3905 return; 3906 } 3907 3908 ctx->bs = bs; 3909 3910 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3911 if (!ctx->super) { 3912 free(ctx); 3913 cb_fn(cb_arg, -ENOMEM); 3914 return; 3915 } 3916 3917 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3918 cpl.u.bs_basic.cb_fn = cb_fn; 3919 cpl.u.bs_basic.cb_arg = cb_arg; 3920 3921 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3922 if (!seq) { 3923 spdk_dma_free(ctx->super); 3924 free(ctx); 3925 cb_fn(cb_arg, -ENOMEM); 3926 return; 3927 } 3928 3929 bs->super_blob = blobid; 3930 3931 /* Read super block */ 3932 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3933 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3934 _spdk_bs_set_super_read_cpl, ctx); 3935 } 3936 3937 /* END spdk_bs_set_super */ 3938 3939 void 3940 spdk_bs_get_super(struct spdk_blob_store *bs, 3941 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 3942 { 3943 if (bs->super_blob == SPDK_BLOBID_INVALID) { 3944 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 3945 } else { 3946 cb_fn(cb_arg, bs->super_blob, 0); 3947 } 3948 } 3949 3950 uint64_t 3951 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 3952 { 3953 return bs->cluster_sz; 3954 } 3955 3956 uint64_t 3957 spdk_bs_get_page_size(struct spdk_blob_store *bs) 3958 { 3959 return SPDK_BS_PAGE_SIZE; 3960 } 3961 3962 uint64_t 3963 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 3964 { 3965 return bs->num_free_clusters; 3966 } 3967 3968 uint64_t 3969 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 3970 { 3971 return bs->total_data_clusters; 3972 } 3973 3974 static int 3975 spdk_bs_register_md_thread(struct spdk_blob_store *bs) 3976 { 3977 bs->md_channel = spdk_get_io_channel(bs); 3978 if (!bs->md_channel) { 3979 SPDK_ERRLOG("Failed to get IO channel.\n"); 3980 return -1; 3981 } 3982 3983 return 0; 3984 } 3985 3986 static int 3987 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs) 3988 { 3989 spdk_put_io_channel(bs->md_channel); 3990 3991 return 0; 3992 } 3993 3994 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 3995 { 3996 assert(blob != NULL); 3997 3998 return blob->id; 3999 } 4000 4001 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 4002 { 4003 assert(blob != NULL); 4004 4005 return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters); 4006 } 4007 4008 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 4009 { 4010 assert(blob != NULL); 4011 4012 return blob->active.num_clusters; 4013 } 4014 4015 /* START spdk_bs_create_blob */ 4016 4017 static void 4018 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4019 { 4020 struct spdk_blob *blob = cb_arg; 4021 4022 _spdk_blob_free(blob); 4023 4024 spdk_bs_sequence_finish(seq, bserrno); 4025 } 4026 4027 static int 4028 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 4029 bool internal) 4030 { 4031 uint64_t i; 4032 size_t value_len = 0; 4033 int rc; 4034 const void *value = NULL; 4035 if (xattrs->count > 0 && xattrs->get_value == NULL) { 4036 return -EINVAL; 4037 } 4038 for (i = 0; i < xattrs->count; i++) { 4039 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 4040 if (value == NULL || value_len == 0) { 4041 return -EINVAL; 4042 } 4043 rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 4044 if (rc < 0) { 4045 return rc; 4046 } 4047 } 4048 return 0; 4049 } 4050 4051 static void 4052 _spdk_blob_set_thin_provision(struct spdk_blob *blob) 4053 { 4054 _spdk_blob_verify_md_op(blob); 4055 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4056 blob->state = SPDK_BLOB_STATE_DIRTY; 4057 } 4058 4059 static void 4060 _spdk_bs_create_blob(struct spdk_blob_store *bs, 4061 const struct spdk_blob_opts *opts, 4062 const struct spdk_blob_xattr_opts *internal_xattrs, 4063 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4064 { 4065 struct spdk_blob *blob; 4066 uint32_t page_idx; 4067 struct spdk_bs_cpl cpl; 4068 struct spdk_blob_opts opts_default; 4069 struct spdk_blob_xattr_opts internal_xattrs_default; 4070 spdk_bs_sequence_t *seq; 4071 spdk_blob_id id; 4072 int rc; 4073 4074 assert(spdk_get_thread() == bs->md_thread); 4075 4076 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 4077 if (page_idx >= spdk_bit_array_capacity(bs->used_md_pages)) { 4078 cb_fn(cb_arg, 0, -ENOMEM); 4079 return; 4080 } 4081 spdk_bit_array_set(bs->used_blobids, page_idx); 4082 spdk_bit_array_set(bs->used_md_pages, page_idx); 4083 4084 id = _spdk_bs_page_to_blobid(page_idx); 4085 4086 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx); 4087 4088 blob = _spdk_blob_alloc(bs, id); 4089 if (!blob) { 4090 cb_fn(cb_arg, 0, -ENOMEM); 4091 return; 4092 } 4093 4094 if (!opts) { 4095 spdk_blob_opts_init(&opts_default); 4096 opts = &opts_default; 4097 } 4098 if (!internal_xattrs) { 4099 _spdk_blob_xattrs_init(&internal_xattrs_default); 4100 internal_xattrs = &internal_xattrs_default; 4101 } 4102 4103 rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false); 4104 if (rc < 0) { 4105 _spdk_blob_free(blob); 4106 cb_fn(cb_arg, 0, rc); 4107 return; 4108 } 4109 4110 rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true); 4111 if (rc < 0) { 4112 _spdk_blob_free(blob); 4113 cb_fn(cb_arg, 0, rc); 4114 return; 4115 } 4116 4117 if (opts->thin_provision) { 4118 _spdk_blob_set_thin_provision(blob); 4119 } 4120 4121 rc = _spdk_blob_resize(blob, opts->num_clusters); 4122 if (rc < 0) { 4123 _spdk_blob_free(blob); 4124 cb_fn(cb_arg, 0, rc); 4125 return; 4126 } 4127 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4128 cpl.u.blobid.cb_fn = cb_fn; 4129 cpl.u.blobid.cb_arg = cb_arg; 4130 cpl.u.blobid.blobid = blob->id; 4131 4132 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4133 if (!seq) { 4134 _spdk_blob_free(blob); 4135 cb_fn(cb_arg, 0, -ENOMEM); 4136 return; 4137 } 4138 4139 _spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob); 4140 } 4141 4142 void spdk_bs_create_blob(struct spdk_blob_store *bs, 4143 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4144 { 4145 _spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 4146 } 4147 4148 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts, 4149 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4150 { 4151 _spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 4152 } 4153 4154 /* END spdk_bs_create_blob */ 4155 4156 /* START blob_cleanup */ 4157 4158 struct spdk_clone_snapshot_ctx { 4159 struct spdk_bs_cpl cpl; 4160 int bserrno; 4161 bool frozen; 4162 4163 struct spdk_io_channel *channel; 4164 4165 /* Current cluster for inflate operation */ 4166 uint64_t cluster; 4167 4168 struct { 4169 spdk_blob_id id; 4170 struct spdk_blob *blob; 4171 } original; 4172 struct { 4173 spdk_blob_id id; 4174 struct spdk_blob *blob; 4175 } new; 4176 4177 /* xattrs specified for snapshot/clones only. They have no impact on 4178 * the original blobs xattrs. */ 4179 const struct spdk_blob_xattr_opts *xattrs; 4180 }; 4181 4182 static void 4183 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 4184 { 4185 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 4186 struct spdk_bs_cpl *cpl = &ctx->cpl; 4187 4188 if (bserrno != 0) { 4189 if (ctx->bserrno != 0) { 4190 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4191 } else { 4192 ctx->bserrno = bserrno; 4193 } 4194 } 4195 4196 switch (cpl->type) { 4197 case SPDK_BS_CPL_TYPE_BLOBID: 4198 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 4199 break; 4200 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 4201 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 4202 break; 4203 default: 4204 SPDK_UNREACHABLE(); 4205 break; 4206 } 4207 4208 free(ctx); 4209 } 4210 4211 static void 4212 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 4213 { 4214 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4215 struct spdk_blob *origblob = ctx->original.blob; 4216 4217 if (bserrno != 0) { 4218 if (ctx->bserrno != 0) { 4219 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 4220 } else { 4221 ctx->bserrno = bserrno; 4222 } 4223 } 4224 4225 ctx->original.id = origblob->id; 4226 spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 4227 } 4228 4229 static void 4230 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 4231 { 4232 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4233 struct spdk_blob *origblob = ctx->original.blob; 4234 4235 if (bserrno != 0) { 4236 if (ctx->bserrno != 0) { 4237 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4238 } else { 4239 ctx->bserrno = bserrno; 4240 } 4241 } 4242 4243 if (ctx->frozen) { 4244 /* Unfreeze any outstanding I/O */ 4245 _spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx); 4246 } else { 4247 _spdk_bs_snapshot_unfreeze_cpl(ctx, 0); 4248 } 4249 4250 } 4251 4252 static void 4253 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno) 4254 { 4255 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4256 struct spdk_blob *newblob = ctx->new.blob; 4257 4258 if (bserrno != 0) { 4259 if (ctx->bserrno != 0) { 4260 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4261 } else { 4262 ctx->bserrno = bserrno; 4263 } 4264 } 4265 4266 ctx->new.id = newblob->id; 4267 spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4268 } 4269 4270 /* END blob_cleanup */ 4271 4272 /* START spdk_bs_create_snapshot */ 4273 4274 static void 4275 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 4276 { 4277 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4278 struct spdk_blob *newblob = ctx->new.blob; 4279 4280 if (bserrno != 0) { 4281 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4282 return; 4283 } 4284 4285 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 4286 bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 4287 if (bserrno != 0) { 4288 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4289 return; 4290 } 4291 4292 _spdk_bs_blob_list_add(ctx->original.blob); 4293 4294 spdk_blob_set_read_only(newblob); 4295 4296 /* sync snapshot metadata */ 4297 spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, cb_arg); 4298 } 4299 4300 static void 4301 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 4302 { 4303 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4304 struct spdk_blob *origblob = ctx->original.blob; 4305 struct spdk_blob *newblob = ctx->new.blob; 4306 4307 if (bserrno != 0) { 4308 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4309 return; 4310 } 4311 4312 /* Set internal xattr for snapshot id */ 4313 bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 4314 if (bserrno != 0) { 4315 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4316 return; 4317 } 4318 origblob->parent_id = newblob->id; 4319 4320 /* Create new back_bs_dev for snapshot */ 4321 origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob); 4322 if (origblob->back_bs_dev == NULL) { 4323 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 4324 return; 4325 } 4326 4327 /* set clone blob as thin provisioned */ 4328 _spdk_blob_set_thin_provision(origblob); 4329 4330 _spdk_bs_blob_list_add(newblob); 4331 4332 /* Zero out origblob cluster map */ 4333 memset(origblob->active.clusters, 0, 4334 origblob->active.num_clusters * sizeof(origblob->active.clusters)); 4335 4336 /* sync clone metadata */ 4337 spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx); 4338 } 4339 4340 static void 4341 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc) 4342 { 4343 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4344 struct spdk_blob *origblob = ctx->original.blob; 4345 struct spdk_blob *newblob = ctx->new.blob; 4346 4347 ctx->frozen = true; 4348 4349 /* set new back_bs_dev for snapshot */ 4350 newblob->back_bs_dev = origblob->back_bs_dev; 4351 /* Set invalid flags from origblob */ 4352 newblob->invalid_flags = origblob->invalid_flags; 4353 4354 /* Copy cluster map to snapshot */ 4355 memcpy(newblob->active.clusters, origblob->active.clusters, 4356 origblob->active.num_clusters * sizeof(origblob->active.clusters)); 4357 4358 /* sync snapshot metadata */ 4359 spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx); 4360 } 4361 4362 static void 4363 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4364 { 4365 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4366 struct spdk_blob *origblob = ctx->original.blob; 4367 struct spdk_blob *newblob = _blob; 4368 4369 if (bserrno != 0) { 4370 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4371 return; 4372 } 4373 4374 ctx->new.blob = newblob; 4375 4376 _spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx); 4377 } 4378 4379 static void 4380 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 4381 { 4382 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4383 struct spdk_blob *origblob = ctx->original.blob; 4384 4385 if (bserrno != 0) { 4386 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4387 return; 4388 } 4389 4390 ctx->new.id = blobid; 4391 ctx->cpl.u.blobid.blobid = blobid; 4392 4393 spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx); 4394 } 4395 4396 4397 static void 4398 _spdk_bs_xattr_snapshot(void *arg, const char *name, 4399 const void **value, size_t *value_len) 4400 { 4401 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 4402 4403 struct spdk_blob *blob = (struct spdk_blob *)arg; 4404 *value = &blob->id; 4405 *value_len = sizeof(blob->id); 4406 } 4407 4408 static void 4409 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4410 { 4411 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4412 struct spdk_blob_opts opts; 4413 struct spdk_blob_xattr_opts internal_xattrs; 4414 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 4415 4416 if (bserrno != 0) { 4417 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4418 return; 4419 } 4420 4421 ctx->original.blob = _blob; 4422 4423 if (_blob->data_ro || _blob->md_ro) { 4424 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read only blob with id %lu\n", 4425 _blob->id); 4426 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4427 return; 4428 } 4429 4430 spdk_blob_opts_init(&opts); 4431 _spdk_blob_xattrs_init(&internal_xattrs); 4432 4433 /* Change the size of new blob to the same as in original blob, 4434 * but do not allocate clusters */ 4435 opts.thin_provision = true; 4436 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 4437 4438 /* If there are any xattrs specified for snapshot, set them now */ 4439 if (ctx->xattrs) { 4440 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 4441 } 4442 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 4443 internal_xattrs.count = 1; 4444 internal_xattrs.ctx = _blob; 4445 internal_xattrs.names = xattrs_names; 4446 internal_xattrs.get_value = _spdk_bs_xattr_snapshot; 4447 4448 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 4449 _spdk_bs_snapshot_newblob_create_cpl, ctx); 4450 } 4451 4452 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 4453 const struct spdk_blob_xattr_opts *snapshot_xattrs, 4454 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4455 { 4456 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4457 4458 if (!ctx) { 4459 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 4460 return; 4461 } 4462 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4463 ctx->cpl.u.blobid.cb_fn = cb_fn; 4464 ctx->cpl.u.blobid.cb_arg = cb_arg; 4465 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 4466 ctx->bserrno = 0; 4467 ctx->frozen = false; 4468 ctx->original.id = blobid; 4469 ctx->xattrs = snapshot_xattrs; 4470 4471 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx); 4472 } 4473 /* END spdk_bs_create_snapshot */ 4474 4475 /* START spdk_bs_create_clone */ 4476 4477 static void 4478 _spdk_bs_xattr_clone(void *arg, const char *name, 4479 const void **value, size_t *value_len) 4480 { 4481 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 4482 4483 struct spdk_blob *blob = (struct spdk_blob *)arg; 4484 *value = &blob->id; 4485 *value_len = sizeof(blob->id); 4486 } 4487 4488 static void 4489 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4490 { 4491 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4492 struct spdk_blob *clone = _blob; 4493 4494 ctx->new.blob = clone; 4495 _spdk_bs_blob_list_add(clone); 4496 4497 spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4498 } 4499 4500 static void 4501 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 4502 { 4503 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4504 4505 ctx->cpl.u.blobid.blobid = blobid; 4506 spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx); 4507 } 4508 4509 static void 4510 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4511 { 4512 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4513 struct spdk_blob_opts opts; 4514 struct spdk_blob_xattr_opts internal_xattrs; 4515 char *xattr_names[] = { BLOB_SNAPSHOT }; 4516 4517 if (bserrno != 0) { 4518 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4519 return; 4520 } 4521 4522 ctx->original.blob = _blob; 4523 4524 if (!_blob->data_ro || !_blob->md_ro) { 4525 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Clone not from read-only blob\n"); 4526 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4527 return; 4528 } 4529 4530 spdk_blob_opts_init(&opts); 4531 _spdk_blob_xattrs_init(&internal_xattrs); 4532 4533 opts.thin_provision = true; 4534 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 4535 if (ctx->xattrs) { 4536 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 4537 } 4538 4539 /* Set internal xattr BLOB_SNAPSHOT */ 4540 internal_xattrs.count = 1; 4541 internal_xattrs.ctx = _blob; 4542 internal_xattrs.names = xattr_names; 4543 internal_xattrs.get_value = _spdk_bs_xattr_clone; 4544 4545 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 4546 _spdk_bs_clone_newblob_create_cpl, ctx); 4547 } 4548 4549 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 4550 const struct spdk_blob_xattr_opts *clone_xattrs, 4551 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4552 { 4553 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4554 4555 if (!ctx) { 4556 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 4557 return; 4558 } 4559 4560 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4561 ctx->cpl.u.blobid.cb_fn = cb_fn; 4562 ctx->cpl.u.blobid.cb_arg = cb_arg; 4563 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 4564 ctx->bserrno = 0; 4565 ctx->xattrs = clone_xattrs; 4566 ctx->original.id = blobid; 4567 4568 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx); 4569 } 4570 4571 /* END spdk_bs_create_clone */ 4572 4573 /* START spdk_bs_inflate_blob */ 4574 4575 static void 4576 _spdk_bs_inflate_blob_sync(void *cb_arg, int bserrno) 4577 { 4578 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4579 struct spdk_blob *_blob = ctx->original.blob; 4580 4581 if (bserrno != 0) { 4582 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4583 return; 4584 } 4585 4586 /* Destroy back_bs_dev */ 4587 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 4588 _blob->back_bs_dev = NULL; 4589 4590 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0); 4591 } 4592 4593 static void 4594 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno) 4595 { 4596 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4597 struct spdk_blob *_blob = ctx->original.blob; 4598 4599 if (bserrno != 0) { 4600 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4601 return; 4602 } 4603 4604 _spdk_bs_blob_list_remove(_blob); 4605 4606 _spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 4607 4608 /* Unset thin provision */ 4609 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 4610 _blob->state = SPDK_BLOB_STATE_DIRTY; 4611 4612 spdk_blob_sync_md(_blob, _spdk_bs_inflate_blob_sync, ctx); 4613 } 4614 4615 static void 4616 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno) 4617 { 4618 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4619 struct spdk_blob *_blob = ctx->original.blob; 4620 uint64_t offset; 4621 4622 if (bserrno != 0) { 4623 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4624 return; 4625 } 4626 4627 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) { 4628 if (_blob->active.clusters[ctx->cluster] == 0) { 4629 break; 4630 } 4631 } 4632 4633 if (ctx->cluster < _blob->active.num_clusters) { 4634 offset = _spdk_bs_cluster_to_page(_blob->bs, ctx->cluster); 4635 4636 /* We may safely increment a cluster before write */ 4637 ctx->cluster++; 4638 4639 /* Use zero length write to touch a cluster */ 4640 spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0, 4641 _spdk_bs_inflate_blob_touch_next, ctx); 4642 } else { 4643 _spdk_bs_inflate_blob_done(cb_arg, bserrno); 4644 } 4645 } 4646 4647 static void 4648 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4649 { 4650 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4651 uint64_t lfc; /* lowest free cluster */ 4652 uint64_t i; 4653 4654 if (bserrno != 0) { 4655 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4656 return; 4657 } 4658 ctx->original.blob = _blob; 4659 4660 if (spdk_blob_is_thin_provisioned(_blob) == false) { 4661 /* This is not thin provisioned blob. No need to inflate. */ 4662 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0); 4663 return; 4664 } 4665 4666 /* Do two passes - one to verify that we can obtain enough clusters 4667 * and another to actually claim them. 4668 */ 4669 lfc = 0; 4670 for (i = 0; i < _blob->active.num_clusters; i++) { 4671 if (_blob->active.clusters[i] == 0) { 4672 lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc); 4673 if (lfc >= _blob->bs->total_clusters) { 4674 /* No more free clusters. Cannot satisfy the request */ 4675 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 4676 return; 4677 } 4678 lfc++; 4679 } 4680 } 4681 4682 ctx->cluster = 0; 4683 _spdk_bs_inflate_blob_touch_next(ctx, 0); 4684 } 4685 4686 void spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 4687 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 4688 { 4689 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4690 4691 if (!ctx) { 4692 cb_fn(cb_arg, -ENOMEM); 4693 return; 4694 } 4695 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 4696 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 4697 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 4698 ctx->bserrno = 0; 4699 ctx->original.id = blobid; 4700 ctx->channel = channel; 4701 4702 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx); 4703 } 4704 4705 /* END spdk_bs_inflate_blob */ 4706 4707 /* START spdk_blob_resize */ 4708 struct spdk_bs_resize_ctx { 4709 spdk_blob_op_complete cb_fn; 4710 void *cb_arg; 4711 struct spdk_blob *blob; 4712 uint64_t sz; 4713 int rc; 4714 }; 4715 4716 static void 4717 _spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc) 4718 { 4719 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 4720 4721 if (rc != 0) { 4722 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 4723 } 4724 4725 if (ctx->rc != 0) { 4726 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 4727 rc = ctx->rc; 4728 } 4729 4730 ctx->blob->resize_in_progress = false; 4731 4732 ctx->cb_fn(ctx->cb_arg, rc); 4733 free(ctx); 4734 } 4735 4736 static void 4737 _spdk_bs_resize_freeze_cpl(void *cb_arg, int rc) 4738 { 4739 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 4740 4741 if (rc != 0) { 4742 ctx->blob->resize_in_progress = false; 4743 ctx->cb_fn(ctx->cb_arg, rc); 4744 free(ctx); 4745 return; 4746 } 4747 4748 ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz); 4749 4750 _spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx); 4751 } 4752 4753 void 4754 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 4755 { 4756 struct spdk_bs_resize_ctx *ctx; 4757 4758 _spdk_blob_verify_md_op(blob); 4759 4760 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz); 4761 4762 if (blob->md_ro) { 4763 cb_fn(cb_arg, -EPERM); 4764 return; 4765 } 4766 4767 if (sz == blob->active.num_clusters) { 4768 cb_fn(cb_arg, 0); 4769 return; 4770 } 4771 4772 if (blob->resize_in_progress) { 4773 cb_fn(cb_arg, -EBUSY); 4774 return; 4775 } 4776 4777 ctx = calloc(1, sizeof(*ctx)); 4778 if (!ctx) { 4779 cb_fn(cb_arg, -ENOMEM); 4780 return; 4781 } 4782 4783 blob->resize_in_progress = true; 4784 ctx->cb_fn = cb_fn; 4785 ctx->cb_arg = cb_arg; 4786 ctx->blob = blob; 4787 ctx->sz = sz; 4788 _spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx); 4789 } 4790 4791 /* END spdk_blob_resize */ 4792 4793 4794 /* START spdk_bs_delete_blob */ 4795 4796 static void 4797 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno) 4798 { 4799 spdk_bs_sequence_t *seq = cb_arg; 4800 4801 spdk_bs_sequence_finish(seq, bserrno); 4802 } 4803 4804 static void 4805 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4806 { 4807 struct spdk_blob *blob = cb_arg; 4808 4809 if (bserrno != 0) { 4810 /* 4811 * We already removed this blob from the blobstore tailq, so 4812 * we need to free it here since this is the last reference 4813 * to it. 4814 */ 4815 _spdk_blob_free(blob); 4816 _spdk_bs_delete_close_cpl(seq, bserrno); 4817 return; 4818 } 4819 4820 /* 4821 * This will immediately decrement the ref_count and call 4822 * the completion routine since the metadata state is clean. 4823 * By calling spdk_blob_close, we reduce the number of call 4824 * points into code that touches the blob->open_ref count 4825 * and the blobstore's blob list. 4826 */ 4827 spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq); 4828 } 4829 4830 static void 4831 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 4832 { 4833 spdk_bs_sequence_t *seq = cb_arg; 4834 uint32_t page_num; 4835 4836 if (bserrno != 0) { 4837 spdk_bs_sequence_finish(seq, bserrno); 4838 return; 4839 } 4840 4841 _spdk_blob_verify_md_op(blob); 4842 4843 if (blob->open_ref > 1) { 4844 /* 4845 * Someone has this blob open (besides this delete context). 4846 * Decrement the ref count directly and return -EBUSY. 4847 */ 4848 blob->open_ref--; 4849 spdk_bs_sequence_finish(seq, -EBUSY); 4850 return; 4851 } 4852 4853 bserrno = _spdk_bs_blob_list_remove(blob); 4854 if (bserrno != 0) { 4855 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Remove blob #%" PRIu64 " from a list\n", blob->id); 4856 spdk_bs_sequence_finish(seq, bserrno); 4857 return; 4858 } 4859 4860 /* 4861 * Remove the blob from the blob_store list now, to ensure it does not 4862 * get returned after this point by _spdk_blob_lookup(). 4863 */ 4864 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 4865 page_num = _spdk_bs_blobid_to_page(blob->id); 4866 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 4867 blob->state = SPDK_BLOB_STATE_DIRTY; 4868 blob->active.num_pages = 0; 4869 _spdk_blob_resize(blob, 0); 4870 4871 _spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob); 4872 } 4873 4874 void 4875 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 4876 spdk_blob_op_complete cb_fn, void *cb_arg) 4877 { 4878 struct spdk_bs_cpl cpl; 4879 spdk_bs_sequence_t *seq; 4880 struct spdk_blob_list *snapshot_entry = NULL; 4881 4882 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid); 4883 4884 assert(spdk_get_thread() == bs->md_thread); 4885 4886 /* Check if this is a snapshot with clones */ 4887 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 4888 if (snapshot_entry->id == blobid) { 4889 break; 4890 } 4891 } 4892 if (snapshot_entry != NULL) { 4893 /* If snapshot have clones, we cannot remove it */ 4894 if (!TAILQ_EMPTY(&snapshot_entry->clones)) { 4895 SPDK_ERRLOG("Cannot remove snapshot with clones\n"); 4896 cb_fn(cb_arg, -EBUSY); 4897 return; 4898 } 4899 } 4900 4901 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 4902 cpl.u.blob_basic.cb_fn = cb_fn; 4903 cpl.u.blob_basic.cb_arg = cb_arg; 4904 4905 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4906 if (!seq) { 4907 cb_fn(cb_arg, -ENOMEM); 4908 return; 4909 } 4910 4911 spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq); 4912 } 4913 4914 /* END spdk_bs_delete_blob */ 4915 4916 /* START spdk_bs_open_blob */ 4917 4918 static void 4919 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4920 { 4921 struct spdk_blob *blob = cb_arg; 4922 4923 /* If the blob have crc error, we just return NULL. */ 4924 if (blob == NULL) { 4925 seq->cpl.u.blob_handle.blob = NULL; 4926 spdk_bs_sequence_finish(seq, bserrno); 4927 return; 4928 } 4929 4930 blob->open_ref++; 4931 4932 TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link); 4933 4934 spdk_bs_sequence_finish(seq, bserrno); 4935 } 4936 4937 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 4938 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 4939 { 4940 struct spdk_blob *blob; 4941 struct spdk_bs_cpl cpl; 4942 spdk_bs_sequence_t *seq; 4943 uint32_t page_num; 4944 4945 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid); 4946 assert(spdk_get_thread() == bs->md_thread); 4947 4948 page_num = _spdk_bs_blobid_to_page(blobid); 4949 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 4950 /* Invalid blobid */ 4951 cb_fn(cb_arg, NULL, -ENOENT); 4952 return; 4953 } 4954 4955 blob = _spdk_blob_lookup(bs, blobid); 4956 if (blob) { 4957 blob->open_ref++; 4958 cb_fn(cb_arg, blob, 0); 4959 return; 4960 } 4961 4962 blob = _spdk_blob_alloc(bs, blobid); 4963 if (!blob) { 4964 cb_fn(cb_arg, NULL, -ENOMEM); 4965 return; 4966 } 4967 4968 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 4969 cpl.u.blob_handle.cb_fn = cb_fn; 4970 cpl.u.blob_handle.cb_arg = cb_arg; 4971 cpl.u.blob_handle.blob = blob; 4972 4973 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4974 if (!seq) { 4975 _spdk_blob_free(blob); 4976 cb_fn(cb_arg, NULL, -ENOMEM); 4977 return; 4978 } 4979 4980 _spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob); 4981 } 4982 /* END spdk_bs_open_blob */ 4983 4984 /* START spdk_blob_set_read_only */ 4985 int spdk_blob_set_read_only(struct spdk_blob *blob) 4986 { 4987 _spdk_blob_verify_md_op(blob); 4988 4989 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 4990 4991 blob->state = SPDK_BLOB_STATE_DIRTY; 4992 return 0; 4993 } 4994 /* END spdk_blob_set_read_only */ 4995 4996 /* START spdk_blob_sync_md */ 4997 4998 static void 4999 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5000 { 5001 struct spdk_blob *blob = cb_arg; 5002 5003 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 5004 blob->data_ro = true; 5005 blob->md_ro = true; 5006 } 5007 5008 spdk_bs_sequence_finish(seq, bserrno); 5009 } 5010 5011 static void 5012 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5013 { 5014 struct spdk_bs_cpl cpl; 5015 spdk_bs_sequence_t *seq; 5016 5017 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5018 cpl.u.blob_basic.cb_fn = cb_fn; 5019 cpl.u.blob_basic.cb_arg = cb_arg; 5020 5021 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 5022 if (!seq) { 5023 cb_fn(cb_arg, -ENOMEM); 5024 return; 5025 } 5026 5027 _spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob); 5028 } 5029 5030 void 5031 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5032 { 5033 _spdk_blob_verify_md_op(blob); 5034 5035 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id); 5036 5037 if (blob->md_ro) { 5038 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 5039 cb_fn(cb_arg, 0); 5040 return; 5041 } 5042 5043 _spdk_blob_sync_md(blob, cb_fn, cb_arg); 5044 } 5045 5046 /* END spdk_blob_sync_md */ 5047 5048 struct spdk_blob_insert_cluster_ctx { 5049 struct spdk_thread *thread; 5050 struct spdk_blob *blob; 5051 uint32_t cluster_num; /* cluster index in blob */ 5052 uint32_t cluster; /* cluster on disk */ 5053 int rc; 5054 spdk_blob_op_complete cb_fn; 5055 void *cb_arg; 5056 }; 5057 5058 static void 5059 _spdk_blob_insert_cluster_msg_cpl(void *arg) 5060 { 5061 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5062 5063 ctx->cb_fn(ctx->cb_arg, ctx->rc); 5064 free(ctx); 5065 } 5066 5067 static void 5068 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno) 5069 { 5070 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5071 5072 ctx->rc = bserrno; 5073 spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx); 5074 } 5075 5076 static void 5077 _spdk_blob_insert_cluster_msg(void *arg) 5078 { 5079 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5080 5081 ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 5082 if (ctx->rc != 0) { 5083 spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx); 5084 return; 5085 } 5086 5087 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 5088 _spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx); 5089 } 5090 5091 static void 5092 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 5093 uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg) 5094 { 5095 struct spdk_blob_insert_cluster_ctx *ctx; 5096 5097 ctx = calloc(1, sizeof(*ctx)); 5098 if (ctx == NULL) { 5099 cb_fn(cb_arg, -ENOMEM); 5100 return; 5101 } 5102 5103 ctx->thread = spdk_get_thread(); 5104 ctx->blob = blob; 5105 ctx->cluster_num = cluster_num; 5106 ctx->cluster = cluster; 5107 ctx->cb_fn = cb_fn; 5108 ctx->cb_arg = cb_arg; 5109 5110 spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx); 5111 } 5112 5113 /* START spdk_blob_close */ 5114 5115 static void 5116 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5117 { 5118 struct spdk_blob *blob = cb_arg; 5119 5120 if (bserrno == 0) { 5121 blob->open_ref--; 5122 if (blob->open_ref == 0) { 5123 /* 5124 * Blobs with active.num_pages == 0 are deleted blobs. 5125 * these blobs are removed from the blob_store list 5126 * when the deletion process starts - so don't try to 5127 * remove them again. 5128 */ 5129 if (blob->active.num_pages > 0) { 5130 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 5131 } 5132 _spdk_blob_free(blob); 5133 } 5134 } 5135 5136 spdk_bs_sequence_finish(seq, bserrno); 5137 } 5138 5139 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5140 { 5141 struct spdk_bs_cpl cpl; 5142 spdk_bs_sequence_t *seq; 5143 5144 _spdk_blob_verify_md_op(blob); 5145 5146 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id); 5147 5148 if (blob->open_ref == 0) { 5149 cb_fn(cb_arg, -EBADF); 5150 return; 5151 } 5152 5153 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5154 cpl.u.blob_basic.cb_fn = cb_fn; 5155 cpl.u.blob_basic.cb_arg = cb_arg; 5156 5157 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 5158 if (!seq) { 5159 cb_fn(cb_arg, -ENOMEM); 5160 return; 5161 } 5162 5163 /* Sync metadata */ 5164 _spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob); 5165 } 5166 5167 /* END spdk_blob_close */ 5168 5169 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 5170 { 5171 return spdk_get_io_channel(bs); 5172 } 5173 5174 void spdk_bs_free_io_channel(struct spdk_io_channel *channel) 5175 { 5176 spdk_put_io_channel(channel); 5177 } 5178 5179 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 5180 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 5181 { 5182 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 5183 SPDK_BLOB_UNMAP); 5184 } 5185 5186 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 5187 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 5188 { 5189 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 5190 SPDK_BLOB_WRITE_ZEROES); 5191 } 5192 5193 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 5194 void *payload, uint64_t offset, uint64_t length, 5195 spdk_blob_op_complete cb_fn, void *cb_arg) 5196 { 5197 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 5198 SPDK_BLOB_WRITE); 5199 } 5200 5201 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 5202 void *payload, uint64_t offset, uint64_t length, 5203 spdk_blob_op_complete cb_fn, void *cb_arg) 5204 { 5205 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 5206 SPDK_BLOB_READ); 5207 } 5208 5209 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 5210 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 5211 spdk_blob_op_complete cb_fn, void *cb_arg) 5212 { 5213 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false); 5214 } 5215 5216 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 5217 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 5218 spdk_blob_op_complete cb_fn, void *cb_arg) 5219 { 5220 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true); 5221 } 5222 5223 struct spdk_bs_iter_ctx { 5224 int64_t page_num; 5225 struct spdk_blob_store *bs; 5226 5227 spdk_blob_op_with_handle_complete cb_fn; 5228 void *cb_arg; 5229 }; 5230 5231 static void 5232 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5233 { 5234 struct spdk_bs_iter_ctx *ctx = cb_arg; 5235 struct spdk_blob_store *bs = ctx->bs; 5236 spdk_blob_id id; 5237 5238 if (bserrno == 0) { 5239 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 5240 free(ctx); 5241 return; 5242 } 5243 5244 ctx->page_num++; 5245 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 5246 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 5247 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 5248 free(ctx); 5249 return; 5250 } 5251 5252 id = _spdk_bs_page_to_blobid(ctx->page_num); 5253 5254 spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx); 5255 } 5256 5257 void 5258 spdk_bs_iter_first(struct spdk_blob_store *bs, 5259 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5260 { 5261 struct spdk_bs_iter_ctx *ctx; 5262 5263 ctx = calloc(1, sizeof(*ctx)); 5264 if (!ctx) { 5265 cb_fn(cb_arg, NULL, -ENOMEM); 5266 return; 5267 } 5268 5269 ctx->page_num = -1; 5270 ctx->bs = bs; 5271 ctx->cb_fn = cb_fn; 5272 ctx->cb_arg = cb_arg; 5273 5274 _spdk_bs_iter_cpl(ctx, NULL, -1); 5275 } 5276 5277 static void 5278 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno) 5279 { 5280 struct spdk_bs_iter_ctx *ctx = cb_arg; 5281 5282 _spdk_bs_iter_cpl(ctx, NULL, -1); 5283 } 5284 5285 void 5286 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 5287 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5288 { 5289 struct spdk_bs_iter_ctx *ctx; 5290 5291 assert(blob != NULL); 5292 5293 ctx = calloc(1, sizeof(*ctx)); 5294 if (!ctx) { 5295 cb_fn(cb_arg, NULL, -ENOMEM); 5296 return; 5297 } 5298 5299 ctx->page_num = _spdk_bs_blobid_to_page(blob->id); 5300 ctx->bs = bs; 5301 ctx->cb_fn = cb_fn; 5302 ctx->cb_arg = cb_arg; 5303 5304 /* Close the existing blob */ 5305 spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx); 5306 } 5307 5308 static int 5309 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 5310 uint16_t value_len, bool internal) 5311 { 5312 struct spdk_xattr_tailq *xattrs; 5313 struct spdk_xattr *xattr; 5314 5315 _spdk_blob_verify_md_op(blob); 5316 5317 if (blob->md_ro) { 5318 return -EPERM; 5319 } 5320 5321 if (internal) { 5322 xattrs = &blob->xattrs_internal; 5323 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR; 5324 } else { 5325 xattrs = &blob->xattrs; 5326 } 5327 5328 TAILQ_FOREACH(xattr, xattrs, link) { 5329 if (!strcmp(name, xattr->name)) { 5330 free(xattr->value); 5331 xattr->value_len = value_len; 5332 xattr->value = malloc(value_len); 5333 memcpy(xattr->value, value, value_len); 5334 5335 blob->state = SPDK_BLOB_STATE_DIRTY; 5336 5337 return 0; 5338 } 5339 } 5340 5341 xattr = calloc(1, sizeof(*xattr)); 5342 if (!xattr) { 5343 return -ENOMEM; 5344 } 5345 xattr->name = strdup(name); 5346 xattr->value_len = value_len; 5347 xattr->value = malloc(value_len); 5348 memcpy(xattr->value, value, value_len); 5349 TAILQ_INSERT_TAIL(xattrs, xattr, link); 5350 5351 blob->state = SPDK_BLOB_STATE_DIRTY; 5352 5353 return 0; 5354 } 5355 5356 int 5357 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 5358 uint16_t value_len) 5359 { 5360 return _spdk_blob_set_xattr(blob, name, value, value_len, false); 5361 } 5362 5363 static int 5364 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal) 5365 { 5366 struct spdk_xattr_tailq *xattrs; 5367 struct spdk_xattr *xattr; 5368 5369 _spdk_blob_verify_md_op(blob); 5370 5371 if (blob->md_ro) { 5372 return -EPERM; 5373 } 5374 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 5375 5376 TAILQ_FOREACH(xattr, xattrs, link) { 5377 if (!strcmp(name, xattr->name)) { 5378 TAILQ_REMOVE(xattrs, xattr, link); 5379 free(xattr->value); 5380 free(xattr->name); 5381 free(xattr); 5382 5383 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 5384 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 5385 } 5386 blob->state = SPDK_BLOB_STATE_DIRTY; 5387 5388 return 0; 5389 } 5390 } 5391 5392 return -ENOENT; 5393 } 5394 5395 int 5396 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 5397 { 5398 return _spdk_blob_remove_xattr(blob, name, false); 5399 } 5400 5401 static int 5402 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 5403 const void **value, size_t *value_len, bool internal) 5404 { 5405 struct spdk_xattr *xattr; 5406 struct spdk_xattr_tailq *xattrs; 5407 5408 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 5409 5410 TAILQ_FOREACH(xattr, xattrs, link) { 5411 if (!strcmp(name, xattr->name)) { 5412 *value = xattr->value; 5413 *value_len = xattr->value_len; 5414 return 0; 5415 } 5416 } 5417 return -ENOENT; 5418 } 5419 5420 int 5421 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 5422 const void **value, size_t *value_len) 5423 { 5424 _spdk_blob_verify_md_op(blob); 5425 5426 return _spdk_blob_get_xattr_value(blob, name, value, value_len, false); 5427 } 5428 5429 struct spdk_xattr_names { 5430 uint32_t count; 5431 const char *names[0]; 5432 }; 5433 5434 static int 5435 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 5436 { 5437 struct spdk_xattr *xattr; 5438 int count = 0; 5439 5440 TAILQ_FOREACH(xattr, xattrs, link) { 5441 count++; 5442 } 5443 5444 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 5445 if (*names == NULL) { 5446 return -ENOMEM; 5447 } 5448 5449 TAILQ_FOREACH(xattr, xattrs, link) { 5450 (*names)->names[(*names)->count++] = xattr->name; 5451 } 5452 5453 return 0; 5454 } 5455 5456 int 5457 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 5458 { 5459 _spdk_blob_verify_md_op(blob); 5460 5461 return _spdk_blob_get_xattr_names(&blob->xattrs, names); 5462 } 5463 5464 uint32_t 5465 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 5466 { 5467 assert(names != NULL); 5468 5469 return names->count; 5470 } 5471 5472 const char * 5473 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 5474 { 5475 if (index >= names->count) { 5476 return NULL; 5477 } 5478 5479 return names->names[index]; 5480 } 5481 5482 void 5483 spdk_xattr_names_free(struct spdk_xattr_names *names) 5484 { 5485 free(names); 5486 } 5487 5488 struct spdk_bs_type 5489 spdk_bs_get_bstype(struct spdk_blob_store *bs) 5490 { 5491 return bs->bstype; 5492 } 5493 5494 void 5495 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 5496 { 5497 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 5498 } 5499 5500 bool 5501 spdk_blob_is_read_only(struct spdk_blob *blob) 5502 { 5503 assert(blob != NULL); 5504 return (blob->data_ro || blob->md_ro); 5505 } 5506 5507 bool 5508 spdk_blob_is_snapshot(struct spdk_blob *blob) 5509 { 5510 struct spdk_blob_list *snapshot_entry; 5511 5512 assert(blob != NULL); 5513 5514 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 5515 if (snapshot_entry->id == blob->id) { 5516 break; 5517 } 5518 } 5519 5520 if (snapshot_entry == NULL) { 5521 return false; 5522 } 5523 5524 return true; 5525 } 5526 5527 bool 5528 spdk_blob_is_clone(struct spdk_blob *blob) 5529 { 5530 assert(blob != NULL); 5531 5532 if (blob->parent_id != SPDK_BLOBID_INVALID) { 5533 assert(spdk_blob_is_thin_provisioned(blob)); 5534 return true; 5535 } 5536 5537 return false; 5538 } 5539 5540 bool 5541 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 5542 { 5543 assert(blob != NULL); 5544 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 5545 } 5546 5547 spdk_blob_id 5548 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 5549 { 5550 struct spdk_blob_list *snapshot_entry = NULL; 5551 struct spdk_blob_list *clone_entry = NULL; 5552 5553 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5554 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 5555 if (clone_entry->id == blob_id) { 5556 return snapshot_entry->id; 5557 } 5558 } 5559 } 5560 5561 return SPDK_BLOBID_INVALID; 5562 } 5563 5564 int 5565 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 5566 size_t *count) 5567 { 5568 struct spdk_blob_list *snapshot_entry, *clone_entry; 5569 size_t n; 5570 5571 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5572 if (snapshot_entry->id == blobid) { 5573 break; 5574 } 5575 } 5576 if (snapshot_entry == NULL) { 5577 *count = 0; 5578 return 0; 5579 } 5580 5581 if (ids == NULL || *count < snapshot_entry->clone_count) { 5582 *count = snapshot_entry->clone_count; 5583 return -ENOMEM; 5584 } 5585 *count = snapshot_entry->clone_count; 5586 5587 n = 0; 5588 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 5589 ids[n++] = clone_entry->id; 5590 } 5591 5592 return 0; 5593 } 5594 5595 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB) 5596