/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/likely.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL    0xffffffffUL

static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg);

static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
				uint16_t value_len, bool internal);
static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				      const void **value, size_t *value_len, bool internal);
static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void
_spdk_blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static inline size_t
divide_round_up(size_t num, size_t divisor)
{
	return (num + divisor - 1) / divisor;
}

static void
_spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
	assert(bs->num_free_clusters > 0);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);

	spdk_bit_array_set(bs->used_clusters, cluster_num);
	bs->num_free_clusters--;
}

static int
_spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	_spdk_blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

static int
_spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
			  uint64_t *lowest_free_cluster, bool update_map)
{
	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
			       *lowest_free_cluster);
	if (*lowest_free_cluster >= blob->bs->total_clusters) {
		/* No more free clusters. Cannot satisfy the request */
		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
		return -ENOSPC;
	}

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);

	if (update_map) {
		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
	}

	return 0;
}

static void
_spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);

	pthread_mutex_lock(&bs->used_clusters_mutex);
	spdk_bit_array_clear(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
	pthread_mutex_unlock(&bs->used_clusters_mutex);
}

static void
_spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	opts->num_clusters = 0;
	opts->thin_provision = false;
	_spdk_blob_xattrs_init(&opts->xattrs);
}

static struct spdk_blob *
_spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);

	return blob;
}

static void
_spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
_spdk_blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);

	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	_spdk_xattrs_free(&blob->xattrs);
	_spdk_xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
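		/* Destroy the backing bs_dev (the zeroes dev or snapshot-backed dev
		 * attached when the blob was loaded) before freeing the blob itself.
		 */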
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
_spdk_blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			spdk_bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
_spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static void
_spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static int
_spdk_blob_mark_clean(struct spdk_blob *blob)
{
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*pages));
	}

	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
_spdk_blob_deserialize_xattr(struct spdk_blob *blob,
			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}


static int
_spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
			struct spdk_blob_md_descriptor_extent *desc_extent;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;

			if (desc_extent->length == 0 ||
			    (desc_extent->length % sizeof(desc_extent->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent->length /
sizeof(desc_extent->extents[0]); i++) { 475 for (j = 0; j < desc_extent->extents[i].length; j++) { 476 if (!spdk_bit_array_get(blob->bs->used_clusters, 477 desc_extent->extents[i].cluster_idx + j)) { 478 return -EINVAL; 479 } 480 cluster_count++; 481 } 482 } 483 484 if (cluster_count == 0) { 485 return -EINVAL; 486 } 487 tmp = realloc(blob->active.clusters, cluster_count * sizeof(uint64_t)); 488 if (tmp == NULL) { 489 return -ENOMEM; 490 } 491 blob->active.clusters = tmp; 492 blob->active.cluster_array_size = cluster_count; 493 494 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 495 for (j = 0; j < desc_extent->extents[i].length; j++) { 496 if (desc_extent->extents[i].cluster_idx != 0) { 497 blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs, 498 desc_extent->extents[i].cluster_idx + j); 499 } else if (spdk_blob_is_thin_provisioned(blob)) { 500 blob->active.clusters[blob->active.num_clusters++] = 0; 501 } else { 502 return -EINVAL; 503 } 504 } 505 } 506 507 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 508 int rc; 509 510 rc = _spdk_blob_deserialize_xattr(blob, 511 (struct spdk_blob_md_descriptor_xattr *) desc, false); 512 if (rc != 0) { 513 return rc; 514 } 515 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 516 int rc; 517 518 rc = _spdk_blob_deserialize_xattr(blob, 519 (struct spdk_blob_md_descriptor_xattr *) desc, true); 520 if (rc != 0) { 521 return rc; 522 } 523 } else { 524 /* Unrecognized descriptor type. Do not fail - just continue to the 525 * next descriptor. If this descriptor is associated with some feature 526 * defined in a newer version of blobstore, that version of blobstore 527 * should create and set an associated feature flag to specify if this 528 * blob can be loaded or not. 529 */ 530 } 531 532 /* Advance to the next descriptor */ 533 cur_desc += sizeof(*desc) + desc->length; 534 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 535 break; 536 } 537 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 538 } 539 540 return 0; 541 } 542 543 static int 544 _spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count, 545 struct spdk_blob *blob) 546 { 547 const struct spdk_blob_md_page *page; 548 uint32_t i; 549 int rc; 550 551 assert(page_count > 0); 552 assert(pages[0].sequence_num == 0); 553 assert(blob != NULL); 554 assert(blob->state == SPDK_BLOB_STATE_LOADING); 555 assert(blob->active.clusters == NULL); 556 557 /* The blobid provided doesn't match what's in the MD, this can 558 * happen for example if a bogus blobid is passed in through open. 
559 */ 560 if (blob->id != pages[0].id) { 561 SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n", 562 blob->id, pages[0].id); 563 return -ENOENT; 564 } 565 566 for (i = 0; i < page_count; i++) { 567 page = &pages[i]; 568 569 assert(page->id == blob->id); 570 assert(page->sequence_num == i); 571 572 rc = _spdk_blob_parse_page(page, blob); 573 if (rc != 0) { 574 return rc; 575 } 576 } 577 578 return 0; 579 } 580 581 static int 582 _spdk_blob_serialize_add_page(const struct spdk_blob *blob, 583 struct spdk_blob_md_page **pages, 584 uint32_t *page_count, 585 struct spdk_blob_md_page **last_page) 586 { 587 struct spdk_blob_md_page *page; 588 589 assert(pages != NULL); 590 assert(page_count != NULL); 591 592 if (*page_count == 0) { 593 assert(*pages == NULL); 594 *page_count = 1; 595 *pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE, 596 SPDK_BS_PAGE_SIZE, 597 NULL); 598 } else { 599 assert(*pages != NULL); 600 (*page_count)++; 601 *pages = spdk_dma_realloc(*pages, 602 SPDK_BS_PAGE_SIZE * (*page_count), 603 SPDK_BS_PAGE_SIZE, 604 NULL); 605 } 606 607 if (*pages == NULL) { 608 *page_count = 0; 609 *last_page = NULL; 610 return -ENOMEM; 611 } 612 613 page = &(*pages)[*page_count - 1]; 614 memset(page, 0, sizeof(*page)); 615 page->id = blob->id; 616 page->sequence_num = *page_count - 1; 617 page->next = SPDK_INVALID_MD_PAGE; 618 *last_page = page; 619 620 return 0; 621 } 622 623 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor. 624 * Update required_sz on both success and failure. 625 * 626 */ 627 static int 628 _spdk_blob_serialize_xattr(const struct spdk_xattr *xattr, 629 uint8_t *buf, size_t buf_sz, 630 size_t *required_sz, bool internal) 631 { 632 struct spdk_blob_md_descriptor_xattr *desc; 633 634 *required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) + 635 strlen(xattr->name) + 636 xattr->value_len; 637 638 if (buf_sz < *required_sz) { 639 return -1; 640 } 641 642 desc = (struct spdk_blob_md_descriptor_xattr *)buf; 643 644 desc->type = internal ? 
SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR; 645 desc->length = sizeof(desc->name_length) + 646 sizeof(desc->value_length) + 647 strlen(xattr->name) + 648 xattr->value_len; 649 desc->name_length = strlen(xattr->name); 650 desc->value_length = xattr->value_len; 651 652 memcpy(desc->name, xattr->name, desc->name_length); 653 memcpy((void *)((uintptr_t)desc->name + desc->name_length), 654 xattr->value, 655 desc->value_length); 656 657 return 0; 658 } 659 660 static void 661 _spdk_blob_serialize_extent(const struct spdk_blob *blob, 662 uint64_t start_cluster, uint64_t *next_cluster, 663 uint8_t *buf, size_t buf_sz) 664 { 665 struct spdk_blob_md_descriptor_extent *desc; 666 size_t cur_sz; 667 uint64_t i, extent_idx; 668 uint64_t lba, lba_per_cluster, lba_count; 669 670 /* The buffer must have room for at least one extent */ 671 cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]); 672 if (buf_sz < cur_sz) { 673 *next_cluster = start_cluster; 674 return; 675 } 676 677 desc = (struct spdk_blob_md_descriptor_extent *)buf; 678 desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT; 679 680 lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1); 681 682 lba = blob->active.clusters[start_cluster]; 683 lba_count = lba_per_cluster; 684 extent_idx = 0; 685 for (i = start_cluster + 1; i < blob->active.num_clusters; i++) { 686 if ((lba + lba_count) == blob->active.clusters[i]) { 687 lba_count += lba_per_cluster; 688 continue; 689 } 690 desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 691 desc->extents[extent_idx].length = lba_count / lba_per_cluster; 692 extent_idx++; 693 694 cur_sz += sizeof(desc->extents[extent_idx]); 695 696 if (buf_sz < cur_sz) { 697 /* If we ran out of buffer space, return */ 698 desc->length = sizeof(desc->extents[0]) * extent_idx; 699 *next_cluster = i; 700 return; 701 } 702 703 lba = blob->active.clusters[i]; 704 lba_count = lba_per_cluster; 705 } 706 707 desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 708 desc->extents[extent_idx].length = lba_count / lba_per_cluster; 709 extent_idx++; 710 711 desc->length = sizeof(desc->extents[0]) * extent_idx; 712 *next_cluster = blob->active.num_clusters; 713 714 return; 715 } 716 717 static void 718 _spdk_blob_serialize_flags(const struct spdk_blob *blob, 719 uint8_t *buf, size_t *buf_sz) 720 { 721 struct spdk_blob_md_descriptor_flags *desc; 722 723 /* 724 * Flags get serialized first, so we should always have room for the flags 725 * descriptor. 
726 */ 727 assert(*buf_sz >= sizeof(*desc)); 728 729 desc = (struct spdk_blob_md_descriptor_flags *)buf; 730 desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS; 731 desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor); 732 desc->invalid_flags = blob->invalid_flags; 733 desc->data_ro_flags = blob->data_ro_flags; 734 desc->md_ro_flags = blob->md_ro_flags; 735 736 *buf_sz -= sizeof(*desc); 737 } 738 739 static int 740 _spdk_blob_serialize_xattrs(const struct spdk_blob *blob, 741 const struct spdk_xattr_tailq *xattrs, bool internal, 742 struct spdk_blob_md_page **pages, 743 struct spdk_blob_md_page *cur_page, 744 uint32_t *page_count, uint8_t **buf, 745 size_t *remaining_sz) 746 { 747 const struct spdk_xattr *xattr; 748 int rc; 749 750 TAILQ_FOREACH(xattr, xattrs, link) { 751 size_t required_sz = 0; 752 753 rc = _spdk_blob_serialize_xattr(xattr, 754 *buf, *remaining_sz, 755 &required_sz, internal); 756 if (rc < 0) { 757 /* Need to add a new page to the chain */ 758 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, 759 &cur_page); 760 if (rc < 0) { 761 spdk_dma_free(*pages); 762 *pages = NULL; 763 *page_count = 0; 764 return rc; 765 } 766 767 *buf = (uint8_t *)cur_page->descriptors; 768 *remaining_sz = sizeof(cur_page->descriptors); 769 770 /* Try again */ 771 required_sz = 0; 772 rc = _spdk_blob_serialize_xattr(xattr, 773 *buf, *remaining_sz, 774 &required_sz, internal); 775 776 if (rc < 0) { 777 spdk_dma_free(*pages); 778 *pages = NULL; 779 *page_count = 0; 780 return rc; 781 } 782 } 783 784 *remaining_sz -= required_sz; 785 *buf += required_sz; 786 } 787 788 return 0; 789 } 790 791 static int 792 _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages, 793 uint32_t *page_count) 794 { 795 struct spdk_blob_md_page *cur_page; 796 int rc; 797 uint8_t *buf; 798 size_t remaining_sz; 799 uint64_t last_cluster; 800 801 assert(pages != NULL); 802 assert(page_count != NULL); 803 assert(blob != NULL); 804 assert(blob->state == SPDK_BLOB_STATE_DIRTY); 805 806 *pages = NULL; 807 *page_count = 0; 808 809 /* A blob always has at least 1 page, even if it has no descriptors */ 810 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page); 811 if (rc < 0) { 812 return rc; 813 } 814 815 buf = (uint8_t *)cur_page->descriptors; 816 remaining_sz = sizeof(cur_page->descriptors); 817 818 /* Serialize flags */ 819 _spdk_blob_serialize_flags(blob, buf, &remaining_sz); 820 buf += sizeof(struct spdk_blob_md_descriptor_flags); 821 822 /* Serialize xattrs */ 823 rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false, 824 pages, cur_page, page_count, &buf, &remaining_sz); 825 if (rc < 0) { 826 return rc; 827 } 828 829 /* Serialize internal xattrs */ 830 rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true, 831 pages, cur_page, page_count, &buf, &remaining_sz); 832 if (rc < 0) { 833 return rc; 834 } 835 836 /* Serialize extents */ 837 last_cluster = 0; 838 while (last_cluster < blob->active.num_clusters) { 839 _spdk_blob_serialize_extent(blob, last_cluster, &last_cluster, 840 buf, remaining_sz); 841 842 if (last_cluster == blob->active.num_clusters) { 843 break; 844 } 845 846 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, 847 &cur_page); 848 if (rc < 0) { 849 return rc; 850 } 851 852 buf = (uint8_t *)cur_page->descriptors; 853 remaining_sz = sizeof(cur_page->descriptors); 854 } 855 856 return 0; 857 } 858 859 struct spdk_blob_load_ctx { 860 struct spdk_blob *blob; 861 862 struct spdk_blob_md_page *pages; 863 uint32_t num_pages; 864 
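	/* Sequence on which the metadata page reads are issued; saved so the
	 * final completion callback can be invoked on it.
	 */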
spdk_bs_sequence_t *seq; 865 866 spdk_bs_sequence_cpl cb_fn; 867 void *cb_arg; 868 }; 869 870 static uint32_t 871 _spdk_blob_md_page_calc_crc(void *page) 872 { 873 uint32_t crc; 874 875 crc = BLOB_CRC32C_INITIAL; 876 crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc); 877 crc ^= BLOB_CRC32C_INITIAL; 878 879 return crc; 880 881 } 882 883 static void 884 _spdk_blob_load_final(void *cb_arg, int bserrno) 885 { 886 struct spdk_blob_load_ctx *ctx = cb_arg; 887 struct spdk_blob *blob = ctx->blob; 888 889 _spdk_blob_mark_clean(blob); 890 891 ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno); 892 893 /* Free the memory */ 894 spdk_dma_free(ctx->pages); 895 free(ctx); 896 } 897 898 static void 899 _spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno) 900 { 901 struct spdk_blob_load_ctx *ctx = cb_arg; 902 struct spdk_blob *blob = ctx->blob; 903 904 if (bserrno != 0) { 905 goto error; 906 } 907 908 blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot); 909 910 if (blob->back_bs_dev == NULL) { 911 bserrno = -ENOMEM; 912 goto error; 913 } 914 915 _spdk_blob_load_final(ctx, bserrno); 916 return; 917 918 error: 919 SPDK_ERRLOG("Snapshot fail\n"); 920 _spdk_blob_free(blob); 921 ctx->cb_fn(ctx->seq, NULL, bserrno); 922 spdk_dma_free(ctx->pages); 923 free(ctx); 924 } 925 926 static void 927 _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 928 { 929 struct spdk_blob_load_ctx *ctx = cb_arg; 930 struct spdk_blob *blob = ctx->blob; 931 struct spdk_blob_md_page *page; 932 const void *value; 933 size_t len; 934 int rc; 935 uint32_t crc; 936 937 page = &ctx->pages[ctx->num_pages - 1]; 938 crc = _spdk_blob_md_page_calc_crc(page); 939 if (crc != page->crc) { 940 SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages); 941 _spdk_blob_free(blob); 942 ctx->cb_fn(seq, NULL, -EINVAL); 943 spdk_dma_free(ctx->pages); 944 free(ctx); 945 return; 946 } 947 948 if (page->next != SPDK_INVALID_MD_PAGE) { 949 uint32_t next_page = page->next; 950 uint64_t next_lba = _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + next_page); 951 952 953 assert(next_lba < (blob->bs->md_start + blob->bs->md_len)); 954 955 /* Read the next page */ 956 ctx->num_pages++; 957 ctx->pages = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages), 958 sizeof(*page), NULL); 959 if (ctx->pages == NULL) { 960 ctx->cb_fn(seq, ctx->cb_arg, -ENOMEM); 961 free(ctx); 962 return; 963 } 964 965 spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1], 966 next_lba, 967 _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)), 968 _spdk_blob_load_cpl, ctx); 969 return; 970 } 971 972 /* Parse the pages */ 973 rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob); 974 if (rc) { 975 _spdk_blob_free(blob); 976 ctx->cb_fn(seq, NULL, rc); 977 spdk_dma_free(ctx->pages); 978 free(ctx); 979 return; 980 } 981 ctx->seq = seq; 982 983 984 if (spdk_blob_is_thin_provisioned(blob)) { 985 rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true); 986 if (rc == 0) { 987 if (len != sizeof(spdk_blob_id)) { 988 _spdk_blob_free(blob); 989 ctx->cb_fn(seq, NULL, -EINVAL); 990 spdk_dma_free(ctx->pages); 991 free(ctx); 992 return; 993 } 994 /* open snapshot blob and continue in the callback function */ 995 blob->parent_id = *(spdk_blob_id *)value; 996 spdk_bs_open_blob(blob->bs, blob->parent_id, 997 _spdk_blob_load_snapshot_cpl, ctx); 998 return; 999 } else { 1000 /* add zeroes_dev for thin provisioned blob */ 1001 blob->back_bs_dev = spdk_bs_create_zeroes_dev(); 1002 } 1003 } else { 1004 /* standard blob */ 
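		/* Not thin provisioned: every cluster was allocated at resize time,
		 * so no backing device is needed for reads of this blob.
		 */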
1005 blob->back_bs_dev = NULL; 1006 } 1007 _spdk_blob_load_final(ctx, bserrno); 1008 } 1009 1010 /* Load a blob from disk given a blobid */ 1011 static void 1012 _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 1013 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 1014 { 1015 struct spdk_blob_load_ctx *ctx; 1016 struct spdk_blob_store *bs; 1017 uint32_t page_num; 1018 uint64_t lba; 1019 1020 _spdk_blob_verify_md_op(blob); 1021 1022 bs = blob->bs; 1023 1024 ctx = calloc(1, sizeof(*ctx)); 1025 if (!ctx) { 1026 cb_fn(seq, cb_arg, -ENOMEM); 1027 return; 1028 } 1029 1030 ctx->blob = blob; 1031 ctx->pages = spdk_dma_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 1032 SPDK_BS_PAGE_SIZE, NULL); 1033 if (!ctx->pages) { 1034 free(ctx); 1035 cb_fn(seq, cb_arg, -ENOMEM); 1036 return; 1037 } 1038 ctx->num_pages = 1; 1039 ctx->cb_fn = cb_fn; 1040 ctx->cb_arg = cb_arg; 1041 1042 page_num = _spdk_bs_blobid_to_page(blob->id); 1043 lba = _spdk_bs_page_to_lba(blob->bs, bs->md_start + page_num); 1044 1045 blob->state = SPDK_BLOB_STATE_LOADING; 1046 1047 spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba, 1048 _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE), 1049 _spdk_blob_load_cpl, ctx); 1050 } 1051 1052 struct spdk_blob_persist_ctx { 1053 struct spdk_blob *blob; 1054 1055 struct spdk_bs_super_block *super; 1056 1057 struct spdk_blob_md_page *pages; 1058 1059 uint64_t idx; 1060 1061 spdk_bs_sequence_t *seq; 1062 spdk_bs_sequence_cpl cb_fn; 1063 void *cb_arg; 1064 }; 1065 1066 static void 1067 _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1068 { 1069 struct spdk_blob_persist_ctx *ctx = cb_arg; 1070 struct spdk_blob *blob = ctx->blob; 1071 1072 if (bserrno == 0) { 1073 _spdk_blob_mark_clean(blob); 1074 } 1075 1076 /* Call user callback */ 1077 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 1078 1079 /* Free the memory */ 1080 spdk_dma_free(ctx->pages); 1081 free(ctx); 1082 } 1083 1084 static void 1085 _spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1086 { 1087 struct spdk_blob_persist_ctx *ctx = cb_arg; 1088 struct spdk_blob *blob = ctx->blob; 1089 struct spdk_blob_store *bs = blob->bs; 1090 void *tmp; 1091 size_t i; 1092 1093 /* Release all clusters that were truncated */ 1094 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1095 uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]); 1096 1097 /* Nothing to release if it was not allocated */ 1098 if (blob->active.clusters[i] != 0) { 1099 _spdk_bs_release_cluster(bs, cluster_num); 1100 } 1101 } 1102 1103 if (blob->active.num_clusters == 0) { 1104 free(blob->active.clusters); 1105 blob->active.clusters = NULL; 1106 blob->active.cluster_array_size = 0; 1107 } else { 1108 tmp = realloc(blob->active.clusters, sizeof(uint64_t) * blob->active.num_clusters); 1109 assert(tmp != NULL); 1110 blob->active.clusters = tmp; 1111 blob->active.cluster_array_size = blob->active.num_clusters; 1112 } 1113 1114 _spdk_blob_persist_complete(seq, ctx, bserrno); 1115 } 1116 1117 static void 1118 _spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1119 { 1120 struct spdk_blob_persist_ctx *ctx = cb_arg; 1121 struct spdk_blob *blob = ctx->blob; 1122 struct spdk_blob_store *bs = blob->bs; 1123 spdk_bs_batch_t *batch; 1124 size_t i; 1125 uint64_t lba; 1126 uint32_t lba_count; 1127 1128 /* Clusters don't move around in blobs. The list shrinks or grows 1129 * at the end, but no changes ever occur in the middle of the list. 
	 */

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_clusters_cpl, ctx);

	/* Unmap all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, send it
		 * as an unmap.
		 */
		if (lba_count > 0) {
			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, send the unmap now */
	if (lba_count > 0) {
		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static void
_spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		spdk_bit_array_clear(bs->used_md_pages, blob->clean.pages[i]);
	}

	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		page_num = _spdk_bs_blobid_to_page(blob->id);
		spdk_bit_array_clear(bs->used_md_pages, page_num);
	}

	/* Move on to unmapping clusters */
	_spdk_blob_persist_unmap_clusters(seq, ctx, 0);
}

static void
_spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	spdk_bs_batch_t *batch;
	size_t i;

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);

	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->clean.pages[i]);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	/* The first page will only be zeroed if this is a delete.
*/ 1226 if (blob->active.num_pages == 0) { 1227 uint32_t page_num; 1228 1229 /* The first page in the metadata goes where the blobid indicates */ 1230 page_num = _spdk_bs_blobid_to_page(blob->id); 1231 lba = _spdk_bs_page_to_lba(bs, bs->md_start + page_num); 1232 1233 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 1234 } 1235 1236 spdk_bs_batch_close(batch); 1237 } 1238 1239 static void 1240 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1241 { 1242 struct spdk_blob_persist_ctx *ctx = cb_arg; 1243 struct spdk_blob *blob = ctx->blob; 1244 struct spdk_blob_store *bs = blob->bs; 1245 uint64_t lba; 1246 uint32_t lba_count; 1247 struct spdk_blob_md_page *page; 1248 1249 if (blob->active.num_pages == 0) { 1250 /* Move on to the next step */ 1251 _spdk_blob_persist_zero_pages(seq, ctx, 0); 1252 return; 1253 } 1254 1255 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page)); 1256 1257 page = &ctx->pages[0]; 1258 /* The first page in the metadata goes where the blobid indicates */ 1259 lba = _spdk_bs_page_to_lba(bs, bs->md_start + _spdk_bs_blobid_to_page(blob->id)); 1260 1261 spdk_bs_sequence_write_dev(seq, page, lba, lba_count, 1262 _spdk_blob_persist_zero_pages, ctx); 1263 } 1264 1265 static void 1266 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1267 { 1268 struct spdk_blob_persist_ctx *ctx = cb_arg; 1269 struct spdk_blob *blob = ctx->blob; 1270 struct spdk_blob_store *bs = blob->bs; 1271 uint64_t lba; 1272 uint32_t lba_count; 1273 struct spdk_blob_md_page *page; 1274 spdk_bs_batch_t *batch; 1275 size_t i; 1276 1277 /* Clusters don't move around in blobs. The list shrinks or grows 1278 * at the end, but no changes ever occur in the middle of the list. 1279 */ 1280 1281 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page)); 1282 1283 batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx); 1284 1285 /* This starts at 1. The root page is not written until 1286 * all of the others are finished 1287 */ 1288 for (i = 1; i < blob->active.num_pages; i++) { 1289 page = &ctx->pages[i]; 1290 assert(page->sequence_num == i); 1291 1292 lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->active.pages[i]); 1293 1294 spdk_bs_batch_write_dev(batch, page, lba, lba_count); 1295 } 1296 1297 spdk_bs_batch_close(batch); 1298 } 1299 1300 static int 1301 _spdk_blob_resize(struct spdk_blob *blob, uint64_t sz) 1302 { 1303 uint64_t i; 1304 uint64_t *tmp; 1305 uint64_t lfc; /* lowest free cluster */ 1306 uint64_t num_clusters; 1307 struct spdk_blob_store *bs; 1308 1309 bs = blob->bs; 1310 1311 _spdk_blob_verify_md_op(blob); 1312 1313 if (blob->active.num_clusters == sz) { 1314 return 0; 1315 } 1316 1317 if (blob->active.num_clusters < blob->active.cluster_array_size) { 1318 /* If this blob was resized to be larger, then smaller, then 1319 * larger without syncing, then the cluster array already 1320 * contains spare assigned clusters we can use. 1321 */ 1322 num_clusters = spdk_min(blob->active.cluster_array_size, 1323 sz); 1324 } else { 1325 num_clusters = blob->active.num_clusters; 1326 } 1327 1328 /* Do two passes - one to verify that we can obtain enough clusters 1329 * and another to actually claim them. 1330 */ 1331 1332 if (spdk_blob_is_thin_provisioned(blob) == false) { 1333 lfc = 0; 1334 for (i = num_clusters; i < sz; i++) { 1335 lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc); 1336 if (lfc >= bs->total_clusters) { 1337 /* No more free clusters. 
Cannot satisfy the request */ 1338 return -ENOSPC; 1339 } 1340 lfc++; 1341 } 1342 } 1343 1344 if (sz > num_clusters) { 1345 /* Expand the cluster array if necessary. 1346 * We only shrink the array when persisting. 1347 */ 1348 tmp = realloc(blob->active.clusters, sizeof(uint64_t) * sz); 1349 if (sz > 0 && tmp == NULL) { 1350 return -ENOMEM; 1351 } 1352 memset(tmp + blob->active.cluster_array_size, 0, 1353 sizeof(uint64_t) * (sz - blob->active.cluster_array_size)); 1354 blob->active.clusters = tmp; 1355 blob->active.cluster_array_size = sz; 1356 } 1357 1358 blob->state = SPDK_BLOB_STATE_DIRTY; 1359 1360 if (spdk_blob_is_thin_provisioned(blob) == false) { 1361 lfc = 0; 1362 for (i = num_clusters; i < sz; i++) { 1363 _spdk_bs_allocate_cluster(blob, i, &lfc, true); 1364 lfc++; 1365 } 1366 } 1367 1368 blob->active.num_clusters = sz; 1369 1370 return 0; 1371 } 1372 1373 static void 1374 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx) 1375 { 1376 spdk_bs_sequence_t *seq = ctx->seq; 1377 struct spdk_blob *blob = ctx->blob; 1378 struct spdk_blob_store *bs = blob->bs; 1379 uint64_t i; 1380 uint32_t page_num; 1381 void *tmp; 1382 int rc; 1383 1384 if (blob->active.num_pages == 0) { 1385 /* This is the signal that the blob should be deleted. 1386 * Immediately jump to the clean up routine. */ 1387 assert(blob->clean.num_pages > 0); 1388 ctx->idx = blob->clean.num_pages - 1; 1389 blob->state = SPDK_BLOB_STATE_CLEAN; 1390 _spdk_blob_persist_zero_pages(seq, ctx, 0); 1391 return; 1392 1393 } 1394 1395 /* Generate the new metadata */ 1396 rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 1397 if (rc < 0) { 1398 _spdk_blob_persist_complete(seq, ctx, rc); 1399 return; 1400 } 1401 1402 assert(blob->active.num_pages >= 1); 1403 1404 /* Resize the cache of page indices */ 1405 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 1406 if (!tmp) { 1407 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1408 return; 1409 } 1410 blob->active.pages = tmp; 1411 1412 /* Assign this metadata to pages. This requires two passes - 1413 * one to verify that there are enough pages and a second 1414 * to actually claim them. */ 1415 page_num = 0; 1416 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 1417 for (i = 1; i < blob->active.num_pages; i++) { 1418 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1419 if (page_num >= spdk_bit_array_capacity(bs->used_md_pages)) { 1420 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1421 return; 1422 } 1423 page_num++; 1424 } 1425 1426 page_num = 0; 1427 blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id); 1428 for (i = 1; i < blob->active.num_pages; i++) { 1429 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1430 ctx->pages[i - 1].next = page_num; 1431 /* Now that previous metadata page is complete, calculate the crc for it. 
*/ 1432 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1433 blob->active.pages[i] = page_num; 1434 spdk_bit_array_set(bs->used_md_pages, page_num); 1435 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id); 1436 page_num++; 1437 } 1438 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1439 /* Start writing the metadata from last page to first */ 1440 ctx->idx = blob->active.num_pages - 1; 1441 blob->state = SPDK_BLOB_STATE_CLEAN; 1442 _spdk_blob_persist_write_page_chain(seq, ctx, 0); 1443 } 1444 1445 static void 1446 _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1447 { 1448 struct spdk_blob_persist_ctx *ctx = cb_arg; 1449 1450 ctx->blob->bs->clean = 0; 1451 1452 spdk_dma_free(ctx->super); 1453 1454 _spdk_blob_persist_start(ctx); 1455 } 1456 1457 static void 1458 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 1459 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 1460 1461 1462 static void 1463 _spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1464 { 1465 struct spdk_blob_persist_ctx *ctx = cb_arg; 1466 1467 ctx->super->clean = 0; 1468 1469 _spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx); 1470 } 1471 1472 1473 /* Write a blob to disk */ 1474 static void 1475 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 1476 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 1477 { 1478 struct spdk_blob_persist_ctx *ctx; 1479 1480 _spdk_blob_verify_md_op(blob); 1481 1482 if (blob->state == SPDK_BLOB_STATE_CLEAN) { 1483 cb_fn(seq, cb_arg, 0); 1484 return; 1485 } 1486 1487 ctx = calloc(1, sizeof(*ctx)); 1488 if (!ctx) { 1489 cb_fn(seq, cb_arg, -ENOMEM); 1490 return; 1491 } 1492 ctx->blob = blob; 1493 ctx->seq = seq; 1494 ctx->cb_fn = cb_fn; 1495 ctx->cb_arg = cb_arg; 1496 1497 if (blob->bs->clean) { 1498 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 1499 if (!ctx->super) { 1500 cb_fn(seq, cb_arg, -ENOMEM); 1501 free(ctx); 1502 return; 1503 } 1504 1505 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(blob->bs, 0), 1506 _spdk_bs_byte_to_lba(blob->bs, sizeof(*ctx->super)), 1507 _spdk_blob_persist_dirty, ctx); 1508 } else { 1509 _spdk_blob_persist_start(ctx); 1510 } 1511 } 1512 1513 struct spdk_blob_copy_cluster_ctx { 1514 struct spdk_blob *blob; 1515 uint8_t *buf; 1516 uint64_t page; 1517 uint64_t new_cluster; 1518 spdk_bs_sequence_t *seq; 1519 }; 1520 1521 static void 1522 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 1523 { 1524 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1525 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 1526 TAILQ_HEAD(, spdk_bs_request_set) requests; 1527 spdk_bs_user_op_t *op; 1528 1529 TAILQ_INIT(&requests); 1530 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 1531 1532 while (!TAILQ_EMPTY(&requests)) { 1533 op = TAILQ_FIRST(&requests); 1534 TAILQ_REMOVE(&requests, op, link); 1535 if (bserrno == 0) { 1536 spdk_bs_user_op_execute(op); 1537 } else { 1538 spdk_bs_user_op_abort(op); 1539 } 1540 } 1541 1542 spdk_dma_free(ctx->buf); 1543 free(ctx); 1544 } 1545 1546 static void 1547 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno) 1548 { 1549 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1550 1551 if (bserrno) { 1552 uint32_t cluster_number; 1553 1554 if (bserrno == -EEXIST) { 1555 /* The metadata insert failed 
because another thread 1556 * allocated the cluster first. Free our cluster 1557 * but continue without error. */ 1558 bserrno = 0; 1559 } 1560 1561 cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page); 1562 _spdk_bs_release_cluster(ctx->blob->bs, cluster_number); 1563 } 1564 1565 spdk_bs_sequence_finish(ctx->seq, bserrno); 1566 } 1567 1568 static void 1569 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1570 { 1571 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1572 uint32_t cluster_number; 1573 1574 if (bserrno) { 1575 /* The write failed, so jump to the final completion handler */ 1576 spdk_bs_sequence_finish(seq, bserrno); 1577 return; 1578 } 1579 1580 cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page); 1581 1582 _spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 1583 _spdk_blob_insert_cluster_cpl, ctx); 1584 } 1585 1586 static void 1587 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1588 { 1589 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1590 1591 if (bserrno != 0) { 1592 /* The read failed, so jump to the final completion handler */ 1593 spdk_bs_sequence_finish(seq, bserrno); 1594 return; 1595 } 1596 1597 /* Write whole cluster */ 1598 spdk_bs_sequence_write_dev(seq, ctx->buf, 1599 _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 1600 _spdk_bs_cluster_to_lba(ctx->blob->bs, 1), 1601 _spdk_blob_write_copy_cpl, ctx); 1602 } 1603 1604 static void 1605 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob, 1606 struct spdk_io_channel *_ch, 1607 uint64_t offset, spdk_bs_user_op_t *op) 1608 { 1609 struct spdk_bs_cpl cpl; 1610 struct spdk_bs_channel *ch; 1611 struct spdk_blob_copy_cluster_ctx *ctx; 1612 uint32_t cluster_start_page; 1613 uint32_t cluster_number; 1614 int rc; 1615 1616 ch = spdk_io_channel_get_ctx(_ch); 1617 1618 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 1619 /* There are already operations pending. Queue this user op 1620 * and return because it will be re-executed when the outstanding 1621 * cluster allocation completes. */ 1622 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 1623 return; 1624 } 1625 1626 /* Round the page offset down to the first page in the cluster */ 1627 cluster_start_page = _spdk_bs_page_to_cluster_start(blob, offset); 1628 1629 /* Calculate which index in the metadata cluster array the corresponding 1630 * cluster is supposed to be at. 
*/ 1631 cluster_number = _spdk_bs_page_to_cluster(blob->bs, cluster_start_page); 1632 1633 ctx = calloc(1, sizeof(*ctx)); 1634 if (!ctx) { 1635 spdk_bs_user_op_abort(op); 1636 return; 1637 } 1638 1639 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 1640 1641 ctx->blob = blob; 1642 ctx->page = cluster_start_page; 1643 1644 ctx->buf = spdk_dma_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, NULL); 1645 if (!ctx->buf) { 1646 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 1647 blob->bs->cluster_sz); 1648 free(ctx); 1649 spdk_bs_user_op_abort(op); 1650 return; 1651 } 1652 1653 rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, false); 1654 if (rc != 0) { 1655 spdk_dma_free(ctx->buf); 1656 free(ctx); 1657 spdk_bs_user_op_abort(op); 1658 return; 1659 } 1660 1661 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1662 cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl; 1663 cpl.u.blob_basic.cb_arg = ctx; 1664 1665 ctx->seq = spdk_bs_sequence_start(_ch, &cpl); 1666 if (!ctx->seq) { 1667 _spdk_bs_release_cluster(blob->bs, ctx->new_cluster); 1668 spdk_dma_free(ctx->buf); 1669 free(ctx); 1670 spdk_bs_user_op_abort(op); 1671 return; 1672 } 1673 1674 /* Queue the user op to block other incoming operations */ 1675 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 1676 1677 /* Read cluster from backing device */ 1678 spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 1679 _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 1680 _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 1681 _spdk_blob_write_copy, ctx); 1682 } 1683 1684 static void 1685 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t page, uint64_t length, 1686 uint64_t *lba, uint32_t *lba_count) 1687 { 1688 *lba_count = _spdk_bs_page_to_lba(blob->bs, length); 1689 1690 if (!_spdk_bs_page_is_allocated(blob, page)) { 1691 assert(blob->back_bs_dev != NULL); 1692 *lba = _spdk_bs_dev_page_to_lba(blob->back_bs_dev, page); 1693 *lba_count = _spdk_bs_blob_lba_to_back_dev_lba(blob, *lba_count); 1694 } else { 1695 *lba = _spdk_bs_blob_page_to_lba(blob, page); 1696 } 1697 } 1698 1699 struct op_split_ctx { 1700 struct spdk_blob *blob; 1701 struct spdk_io_channel *channel; 1702 uint64_t page_offset; 1703 uint64_t pages_remaining; 1704 void *curr_payload; 1705 enum spdk_blob_op_type op_type; 1706 spdk_bs_sequence_t *seq; 1707 }; 1708 1709 static void 1710 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno) 1711 { 1712 struct op_split_ctx *ctx = cb_arg; 1713 struct spdk_blob *blob = ctx->blob; 1714 struct spdk_io_channel *ch = ctx->channel; 1715 enum spdk_blob_op_type op_type = ctx->op_type; 1716 uint8_t *buf = ctx->curr_payload; 1717 uint64_t offset = ctx->page_offset; 1718 uint64_t length = ctx->pages_remaining; 1719 uint64_t op_length; 1720 1721 if (bserrno != 0 || ctx->pages_remaining == 0) { 1722 spdk_bs_sequence_finish(ctx->seq, bserrno); 1723 free(ctx); 1724 return; 1725 } 1726 1727 op_length = spdk_min(length, _spdk_bs_num_pages_to_cluster_boundary(blob, offset)); 1728 1729 /* Update length and payload for next operation */ 1730 ctx->pages_remaining -= op_length; 1731 ctx->page_offset += op_length; 1732 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 1733 ctx->curr_payload += (op_length * SPDK_BS_PAGE_SIZE); 1734 } 1735 1736 switch (op_type) { 1737 case SPDK_BLOB_READ: 1738 spdk_blob_io_read(blob, ch, buf, offset, op_length, 1739 _spdk_blob_request_submit_op_split_next, 
ctx); 1740 break; 1741 case SPDK_BLOB_WRITE: 1742 spdk_blob_io_write(blob, ch, buf, offset, op_length, 1743 _spdk_blob_request_submit_op_split_next, ctx); 1744 break; 1745 case SPDK_BLOB_UNMAP: 1746 spdk_blob_io_unmap(blob, ch, offset, op_length, 1747 _spdk_blob_request_submit_op_split_next, ctx); 1748 break; 1749 case SPDK_BLOB_WRITE_ZEROES: 1750 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 1751 _spdk_blob_request_submit_op_split_next, ctx); 1752 break; 1753 case SPDK_BLOB_READV: 1754 case SPDK_BLOB_WRITEV: 1755 SPDK_ERRLOG("readv/write not valid for %s\n", __func__); 1756 spdk_bs_sequence_finish(ctx->seq, -EINVAL); 1757 free(ctx); 1758 break; 1759 } 1760 } 1761 1762 static void 1763 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 1764 void *payload, uint64_t offset, uint64_t length, 1765 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 1766 { 1767 struct op_split_ctx *ctx; 1768 spdk_bs_sequence_t *seq; 1769 struct spdk_bs_cpl cpl; 1770 1771 assert(blob != NULL); 1772 1773 ctx = calloc(1, sizeof(struct op_split_ctx)); 1774 if (ctx == NULL) { 1775 cb_fn(cb_arg, -ENOMEM); 1776 return; 1777 } 1778 1779 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1780 cpl.u.blob_basic.cb_fn = cb_fn; 1781 cpl.u.blob_basic.cb_arg = cb_arg; 1782 1783 seq = spdk_bs_sequence_start(ch, &cpl); 1784 if (!seq) { 1785 free(ctx); 1786 cb_fn(cb_arg, -ENOMEM); 1787 return; 1788 } 1789 1790 ctx->blob = blob; 1791 ctx->channel = ch; 1792 ctx->curr_payload = payload; 1793 ctx->page_offset = offset; 1794 ctx->pages_remaining = length; 1795 ctx->op_type = op_type; 1796 ctx->seq = seq; 1797 1798 _spdk_blob_request_submit_op_split_next(ctx, 0); 1799 } 1800 1801 static void 1802 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 1803 void *payload, uint64_t offset, uint64_t length, 1804 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 1805 { 1806 struct spdk_bs_cpl cpl; 1807 uint64_t lba; 1808 uint32_t lba_count; 1809 1810 assert(blob != NULL); 1811 1812 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1813 cpl.u.blob_basic.cb_fn = cb_fn; 1814 cpl.u.blob_basic.cb_arg = cb_arg; 1815 1816 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 1817 1818 if (blob->frozen_refcnt) { 1819 /* This blob I/O is frozen */ 1820 spdk_bs_user_op_t *op; 1821 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 1822 1823 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 1824 if (!op) { 1825 cb_fn(cb_arg, -ENOMEM); 1826 return; 1827 } 1828 1829 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 1830 1831 return; 1832 } 1833 1834 switch (op_type) { 1835 case SPDK_BLOB_READ: { 1836 spdk_bs_batch_t *batch; 1837 1838 batch = spdk_bs_batch_open(_ch, &cpl); 1839 if (!batch) { 1840 cb_fn(cb_arg, -ENOMEM); 1841 return; 1842 } 1843 1844 if (_spdk_bs_page_is_allocated(blob, offset)) { 1845 /* Read from the blob */ 1846 spdk_bs_batch_read_dev(batch, payload, lba, lba_count); 1847 } else { 1848 /* Read from the backing block device */ 1849 spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 1850 } 1851 1852 spdk_bs_batch_close(batch); 1853 break; 1854 } 1855 case SPDK_BLOB_WRITE: 1856 case SPDK_BLOB_WRITE_ZEROES: { 1857 if (_spdk_bs_page_is_allocated(blob, offset)) { 1858 /* Write to the blob */ 1859 spdk_bs_batch_t *batch; 1860 1861 if (lba_count == 0) { 1862 cb_fn(cb_arg, 0); 1863 return; 1864 } 1865 1866 batch = 
spdk_bs_batch_open(_ch, &cpl); 1867 if (!batch) { 1868 cb_fn(cb_arg, -ENOMEM); 1869 return; 1870 } 1871 1872 if (op_type == SPDK_BLOB_WRITE) { 1873 spdk_bs_batch_write_dev(batch, payload, lba, lba_count); 1874 } else { 1875 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 1876 } 1877 1878 spdk_bs_batch_close(batch); 1879 } else { 1880 /* Queue this operation and allocate the cluster */ 1881 spdk_bs_user_op_t *op; 1882 1883 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 1884 if (!op) { 1885 cb_fn(cb_arg, -ENOMEM); 1886 return; 1887 } 1888 1889 _spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op); 1890 } 1891 break; 1892 } 1893 case SPDK_BLOB_UNMAP: { 1894 spdk_bs_batch_t *batch; 1895 1896 batch = spdk_bs_batch_open(_ch, &cpl); 1897 if (!batch) { 1898 cb_fn(cb_arg, -ENOMEM); 1899 return; 1900 } 1901 1902 if (_spdk_bs_page_is_allocated(blob, offset)) { 1903 spdk_bs_batch_unmap_dev(batch, lba, lba_count); 1904 } 1905 1906 spdk_bs_batch_close(batch); 1907 break; 1908 } 1909 case SPDK_BLOB_READV: 1910 case SPDK_BLOB_WRITEV: 1911 SPDK_ERRLOG("readv/write not valid\n"); 1912 cb_fn(cb_arg, -EINVAL); 1913 break; 1914 } 1915 } 1916 1917 static void 1918 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 1919 void *payload, uint64_t offset, uint64_t length, 1920 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 1921 { 1922 assert(blob != NULL); 1923 1924 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 1925 cb_fn(cb_arg, -EPERM); 1926 return; 1927 } 1928 1929 if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) { 1930 cb_fn(cb_arg, -EINVAL); 1931 return; 1932 } 1933 1934 if (length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset)) { 1935 _spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length, 1936 cb_fn, cb_arg, op_type); 1937 } else { 1938 _spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length, 1939 cb_fn, cb_arg, op_type); 1940 } 1941 } 1942 1943 struct rw_iov_ctx { 1944 struct spdk_blob *blob; 1945 struct spdk_io_channel *channel; 1946 spdk_blob_op_complete cb_fn; 1947 void *cb_arg; 1948 bool read; 1949 int iovcnt; 1950 struct iovec *orig_iov; 1951 uint64_t page_offset; 1952 uint64_t pages_remaining; 1953 uint64_t pages_done; 1954 struct iovec iov[0]; 1955 }; 1956 1957 static void 1958 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1959 { 1960 assert(cb_arg == NULL); 1961 spdk_bs_sequence_finish(seq, bserrno); 1962 } 1963 1964 static void 1965 _spdk_rw_iov_split_next(void *cb_arg, int bserrno) 1966 { 1967 struct rw_iov_ctx *ctx = cb_arg; 1968 struct spdk_blob *blob = ctx->blob; 1969 struct iovec *iov, *orig_iov; 1970 int iovcnt; 1971 size_t orig_iovoff; 1972 uint64_t page_count, pages_to_boundary, page_offset; 1973 uint64_t byte_count; 1974 1975 if (bserrno != 0 || ctx->pages_remaining == 0) { 1976 ctx->cb_fn(ctx->cb_arg, bserrno); 1977 free(ctx); 1978 return; 1979 } 1980 1981 page_offset = ctx->page_offset; 1982 pages_to_boundary = _spdk_bs_num_pages_to_cluster_boundary(blob, page_offset); 1983 page_count = spdk_min(ctx->pages_remaining, pages_to_boundary); 1984 1985 /* 1986 * Get index and offset into the original iov array for our current position in the I/O sequence. 1987 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 1988 * point to the current position in the I/O sequence. 
1989 */ 1990 byte_count = ctx->pages_done * sizeof(struct spdk_blob_md_page); 1991 orig_iov = &ctx->orig_iov[0]; 1992 orig_iovoff = 0; 1993 while (byte_count > 0) { 1994 if (byte_count >= orig_iov->iov_len) { 1995 byte_count -= orig_iov->iov_len; 1996 orig_iov++; 1997 } else { 1998 orig_iovoff = byte_count; 1999 byte_count = 0; 2000 } 2001 } 2002 2003 /* 2004 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 2005 * bytes of this next I/O remain to be accounted for in the new iov array. 2006 */ 2007 byte_count = page_count * sizeof(struct spdk_blob_md_page); 2008 iov = &ctx->iov[0]; 2009 iovcnt = 0; 2010 while (byte_count > 0) { 2011 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 2012 iov->iov_base = orig_iov->iov_base + orig_iovoff; 2013 byte_count -= iov->iov_len; 2014 orig_iovoff = 0; 2015 orig_iov++; 2016 iov++; 2017 iovcnt++; 2018 } 2019 2020 ctx->page_offset += page_count; 2021 ctx->pages_done += page_count; 2022 ctx->pages_remaining -= page_count; 2023 iov = &ctx->iov[0]; 2024 2025 if (ctx->read) { 2026 spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, page_offset, 2027 page_count, _spdk_rw_iov_split_next, ctx); 2028 } else { 2029 spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, page_offset, 2030 page_count, _spdk_rw_iov_split_next, ctx); 2031 } 2032 } 2033 2034 static void 2035 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2036 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 2037 spdk_blob_op_complete cb_fn, void *cb_arg, bool read) 2038 { 2039 struct spdk_bs_cpl cpl; 2040 2041 assert(blob != NULL); 2042 2043 if (!read && blob->data_ro) { 2044 cb_fn(cb_arg, -EPERM); 2045 return; 2046 } 2047 2048 if (length == 0) { 2049 cb_fn(cb_arg, 0); 2050 return; 2051 } 2052 2053 if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) { 2054 cb_fn(cb_arg, -EINVAL); 2055 return; 2056 } 2057 2058 /* 2059 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 2060 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 2061 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 2062 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 2063 * to allocate a separate iov array and split the I/O such that none of the resulting 2064 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 2065 * but since this case happens very infrequently, any performance impact will be negligible. 2066 * 2067 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 2068 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 2069 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 2070 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
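	 * As a concrete illustration (hypothetical geometry, assuming 1 MiB clusters and 4 KiB
	 * pages, i.e. 256 pages per cluster): a writev at page offset 250 with a length of 10
	 * pages is split into a 6-page I/O up to the cluster boundary followed by a 4-page I/O,
	 * each issued from _spdk_rw_iov_split_next() with an iov array rebuilt for that chunk.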
2071 */ 2072 if (spdk_likely(length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset))) { 2073 uint32_t lba_count; 2074 uint64_t lba; 2075 2076 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2077 2078 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2079 cpl.u.blob_basic.cb_fn = cb_fn; 2080 cpl.u.blob_basic.cb_arg = cb_arg; 2081 if (blob->frozen_refcnt) { 2082 /* This blob I/O is frozen */ 2083 spdk_bs_user_op_t *op; 2084 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 2085 2086 op = spdk_bs_user_op_alloc(_channel, &cpl, read, blob, iov, iovcnt, offset, length); 2087 if (!op) { 2088 cb_fn(cb_arg, -ENOMEM); 2089 return; 2090 } 2091 2092 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2093 2094 return; 2095 } 2096 2097 if (read) { 2098 spdk_bs_sequence_t *seq; 2099 2100 seq = spdk_bs_sequence_start(_channel, &cpl); 2101 if (!seq) { 2102 cb_fn(cb_arg, -ENOMEM); 2103 return; 2104 } 2105 2106 if (_spdk_bs_page_is_allocated(blob, offset)) { 2107 spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2108 } else { 2109 spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 2110 _spdk_rw_iov_done, NULL); 2111 } 2112 } else { 2113 if (_spdk_bs_page_is_allocated(blob, offset)) { 2114 spdk_bs_sequence_t *seq; 2115 2116 seq = spdk_bs_sequence_start(_channel, &cpl); 2117 if (!seq) { 2118 cb_fn(cb_arg, -ENOMEM); 2119 return; 2120 } 2121 2122 spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2123 } else { 2124 /* Queue this operation and allocate the cluster */ 2125 spdk_bs_user_op_t *op; 2126 2127 op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, length); 2128 if (!op) { 2129 cb_fn(cb_arg, -ENOMEM); 2130 return; 2131 } 2132 2133 _spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op); 2134 } 2135 } 2136 } else { 2137 struct rw_iov_ctx *ctx; 2138 2139 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 2140 if (ctx == NULL) { 2141 cb_fn(cb_arg, -ENOMEM); 2142 return; 2143 } 2144 2145 ctx->blob = blob; 2146 ctx->channel = _channel; 2147 ctx->cb_fn = cb_fn; 2148 ctx->cb_arg = cb_arg; 2149 ctx->read = read; 2150 ctx->orig_iov = iov; 2151 ctx->iovcnt = iovcnt; 2152 ctx->page_offset = offset; 2153 ctx->pages_remaining = length; 2154 ctx->pages_done = 0; 2155 2156 _spdk_rw_iov_split_next(ctx, 0); 2157 } 2158 } 2159 2160 static struct spdk_blob * 2161 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 2162 { 2163 struct spdk_blob *blob; 2164 2165 TAILQ_FOREACH(blob, &bs->blobs, link) { 2166 if (blob->id == blobid) { 2167 return blob; 2168 } 2169 } 2170 2171 return NULL; 2172 } 2173 2174 static int 2175 _spdk_bs_channel_create(void *io_device, void *ctx_buf) 2176 { 2177 struct spdk_blob_store *bs = io_device; 2178 struct spdk_bs_channel *channel = ctx_buf; 2179 struct spdk_bs_dev *dev; 2180 uint32_t max_ops = bs->max_channel_ops; 2181 uint32_t i; 2182 2183 dev = bs->dev; 2184 2185 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 2186 if (!channel->req_mem) { 2187 return -1; 2188 } 2189 2190 TAILQ_INIT(&channel->reqs); 2191 2192 for (i = 0; i < max_ops; i++) { 2193 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 2194 } 2195 2196 channel->bs = bs; 2197 channel->dev = dev; 2198 channel->dev_channel = dev->create_channel(dev); 2199 2200 if (!channel->dev_channel) { 2201 SPDK_ERRLOG("Failed to create device channel.\n"); 2202 
free(channel->req_mem); 2203 return -1; 2204 } 2205 2206 TAILQ_INIT(&channel->need_cluster_alloc); 2207 TAILQ_INIT(&channel->queued_io); 2208 2209 return 0; 2210 } 2211 2212 static void 2213 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf) 2214 { 2215 struct spdk_bs_channel *channel = ctx_buf; 2216 spdk_bs_user_op_t *op; 2217 2218 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 2219 op = TAILQ_FIRST(&channel->need_cluster_alloc); 2220 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 2221 spdk_bs_user_op_abort(op); 2222 } 2223 2224 while (!TAILQ_EMPTY(&channel->queued_io)) { 2225 op = TAILQ_FIRST(&channel->queued_io); 2226 TAILQ_REMOVE(&channel->queued_io, op, link); 2227 spdk_bs_user_op_abort(op); 2228 } 2229 2230 free(channel->req_mem); 2231 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 2232 } 2233 2234 static void 2235 _spdk_bs_dev_destroy(void *io_device) 2236 { 2237 struct spdk_blob_store *bs = io_device; 2238 struct spdk_blob *blob, *blob_tmp; 2239 2240 bs->dev->destroy(bs->dev); 2241 2242 TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) { 2243 TAILQ_REMOVE(&bs->blobs, blob, link); 2244 _spdk_blob_free(blob); 2245 } 2246 2247 pthread_mutex_destroy(&bs->used_clusters_mutex); 2248 2249 spdk_bit_array_free(&bs->used_blobids); 2250 spdk_bit_array_free(&bs->used_md_pages); 2251 spdk_bit_array_free(&bs->used_clusters); 2252 /* 2253 * If this function is called for any reason except a successful unload, 2254 * the unload_cpl type will be NONE and this will be a nop. 2255 */ 2256 spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err); 2257 2258 free(bs); 2259 } 2260 2261 static int 2262 _spdk_bs_blob_list_add(struct spdk_blob *blob) 2263 { 2264 spdk_blob_id snapshot_id; 2265 struct spdk_blob_list *snapshot_entry = NULL; 2266 struct spdk_blob_list *clone_entry = NULL; 2267 2268 assert(blob != NULL); 2269 2270 snapshot_id = blob->parent_id; 2271 if (snapshot_id == SPDK_BLOBID_INVALID) { 2272 return 0; 2273 } 2274 2275 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 2276 if (snapshot_entry->id == snapshot_id) { 2277 break; 2278 } 2279 } 2280 2281 if (snapshot_entry == NULL) { 2282 /* Snapshot not found */ 2283 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 2284 if (snapshot_entry == NULL) { 2285 return -ENOMEM; 2286 } 2287 snapshot_entry->id = snapshot_id; 2288 TAILQ_INIT(&snapshot_entry->clones); 2289 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 2290 } else { 2291 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2292 if (clone_entry->id == blob->id) { 2293 break; 2294 } 2295 } 2296 } 2297 2298 if (clone_entry == NULL) { 2299 /* Clone not found */ 2300 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 2301 if (clone_entry == NULL) { 2302 return -ENOMEM; 2303 } 2304 clone_entry->id = blob->id; 2305 TAILQ_INIT(&clone_entry->clones); 2306 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 2307 snapshot_entry->clone_count++; 2308 } 2309 2310 return 0; 2311 } 2312 2313 static int 2314 _spdk_bs_blob_list_remove(struct spdk_blob *blob) 2315 { 2316 struct spdk_blob_list *snapshot_entry = NULL; 2317 struct spdk_blob_list *clone_entry = NULL; 2318 spdk_blob_id snapshot_id; 2319 2320 assert(blob != NULL); 2321 2322 snapshot_id = blob->parent_id; 2323 if (snapshot_id == SPDK_BLOBID_INVALID) { 2324 return 0; 2325 } 2326 2327 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 2328 if (snapshot_entry->id == snapshot_id) { 2329 break; 2330 } 2331 } 2332 2333 assert(snapshot_entry != NULL); 2334 
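	/* The snapshot entry is expected to exist because _spdk_bs_blob_list_add() registered it
	 * when this clone was added; locate this blob's entry in the snapshot's clone list so it
	 * can be removed below. */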
2335 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2336 if (clone_entry->id == blob->id) { 2337 break; 2338 } 2339 } 2340 2341 assert(clone_entry != NULL); 2342 2343 blob->parent_id = SPDK_BLOBID_INVALID; 2344 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2345 free(clone_entry); 2346 2347 snapshot_entry->clone_count--; 2348 if (snapshot_entry->clone_count == 0) { 2349 /* Snapshot have no more clones */ 2350 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 2351 free(snapshot_entry); 2352 } 2353 2354 return 0; 2355 } 2356 2357 static int 2358 _spdk_bs_blob_list_free(struct spdk_blob_store *bs) 2359 { 2360 struct spdk_blob_list *snapshot_entry; 2361 struct spdk_blob_list *snapshot_entry_tmp; 2362 struct spdk_blob_list *clone_entry; 2363 struct spdk_blob_list *clone_entry_tmp; 2364 2365 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 2366 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 2367 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2368 free(clone_entry); 2369 } 2370 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 2371 free(snapshot_entry); 2372 } 2373 2374 return 0; 2375 } 2376 2377 static void 2378 _spdk_bs_free(struct spdk_blob_store *bs) 2379 { 2380 _spdk_bs_blob_list_free(bs); 2381 2382 spdk_bs_unregister_md_thread(bs); 2383 spdk_io_device_unregister(bs, _spdk_bs_dev_destroy); 2384 } 2385 2386 void 2387 spdk_bs_opts_init(struct spdk_bs_opts *opts) 2388 { 2389 opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ; 2390 opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES; 2391 opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS; 2392 opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS; 2393 memset(&opts->bstype, 0, sizeof(opts->bstype)); 2394 opts->iter_cb_fn = NULL; 2395 opts->iter_cb_arg = NULL; 2396 } 2397 2398 static int 2399 _spdk_bs_opts_verify(struct spdk_bs_opts *opts) 2400 { 2401 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 2402 opts->max_channel_ops == 0) { 2403 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 2404 return -1; 2405 } 2406 2407 return 0; 2408 } 2409 2410 static struct spdk_blob_store * 2411 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts) 2412 { 2413 struct spdk_blob_store *bs; 2414 uint64_t dev_size; 2415 int rc; 2416 2417 dev_size = dev->blocklen * dev->blockcnt; 2418 if (dev_size < opts->cluster_sz) { 2419 /* Device size cannot be smaller than cluster size of blobstore */ 2420 SPDK_ERRLOG("Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 2421 dev_size, opts->cluster_sz); 2422 return NULL; 2423 } 2424 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 2425 /* Cluster size cannot be smaller than page size */ 2426 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 2427 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 2428 return NULL; 2429 } 2430 bs = calloc(1, sizeof(struct spdk_blob_store)); 2431 if (!bs) { 2432 return NULL; 2433 } 2434 2435 TAILQ_INIT(&bs->blobs); 2436 TAILQ_INIT(&bs->snapshots); 2437 bs->dev = dev; 2438 bs->md_thread = spdk_get_thread(); 2439 assert(bs->md_thread != NULL); 2440 2441 /* 2442 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an 2443 * even multiple of the cluster size. 
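	 * For example (hypothetical device): 1,000,000 blocks of 512 bytes with a 1 MiB cluster
	 * size gives 2048 blocks per cluster, so total_clusters = 1,000,000 / 2048 = 488 and the
	 * trailing partial cluster is simply ignored.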
2444 */ 2445 bs->cluster_sz = opts->cluster_sz; 2446 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 2447 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 2448 bs->num_free_clusters = bs->total_clusters; 2449 bs->used_clusters = spdk_bit_array_create(bs->total_clusters); 2450 if (bs->used_clusters == NULL) { 2451 free(bs); 2452 return NULL; 2453 } 2454 2455 bs->max_channel_ops = opts->max_channel_ops; 2456 bs->super_blob = SPDK_BLOBID_INVALID; 2457 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 2458 2459 /* The metadata is assumed to be at least 1 page */ 2460 bs->used_md_pages = spdk_bit_array_create(1); 2461 bs->used_blobids = spdk_bit_array_create(0); 2462 2463 pthread_mutex_init(&bs->used_clusters_mutex, NULL); 2464 2465 spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy, 2466 sizeof(struct spdk_bs_channel)); 2467 rc = spdk_bs_register_md_thread(bs); 2468 if (rc == -1) { 2469 spdk_io_device_unregister(bs, NULL); 2470 pthread_mutex_destroy(&bs->used_clusters_mutex); 2471 spdk_bit_array_free(&bs->used_blobids); 2472 spdk_bit_array_free(&bs->used_md_pages); 2473 spdk_bit_array_free(&bs->used_clusters); 2474 free(bs); 2475 return NULL; 2476 } 2477 2478 return bs; 2479 } 2480 2481 /* START spdk_bs_load, spdk_bs_load_ctx will used for both load and unload. */ 2482 2483 struct spdk_bs_load_ctx { 2484 struct spdk_blob_store *bs; 2485 struct spdk_bs_super_block *super; 2486 2487 struct spdk_bs_md_mask *mask; 2488 bool in_page_chain; 2489 uint32_t page_index; 2490 uint32_t cur_page; 2491 struct spdk_blob_md_page *page; 2492 bool is_load; 2493 2494 spdk_bs_sequence_t *seq; 2495 spdk_blob_op_with_handle_complete iter_cb_fn; 2496 void *iter_cb_arg; 2497 }; 2498 2499 static void 2500 _spdk_bs_load_ctx_fail(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 2501 { 2502 assert(bserrno != 0); 2503 2504 spdk_dma_free(ctx->super); 2505 spdk_bs_sequence_finish(seq, bserrno); 2506 /* 2507 * Only free the blobstore when a load fails. If an unload fails (for some reason) 2508 * we want to keep the blobstore in case the caller wants to try again. 
2509 */ 2510 if (ctx->is_load) { 2511 _spdk_bs_free(ctx->bs); 2512 } 2513 free(ctx); 2514 } 2515 2516 static void 2517 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask) 2518 { 2519 uint32_t i = 0; 2520 2521 while (true) { 2522 i = spdk_bit_array_find_first_set(array, i); 2523 if (i >= mask->length) { 2524 break; 2525 } 2526 mask->mask[i / 8] |= 1U << (i % 8); 2527 i++; 2528 } 2529 } 2530 2531 static int 2532 _spdk_bs_load_mask(struct spdk_bit_array **array_ptr, struct spdk_bs_md_mask *mask) 2533 { 2534 struct spdk_bit_array *array; 2535 uint32_t i; 2536 2537 if (spdk_bit_array_resize(array_ptr, mask->length) < 0) { 2538 return -ENOMEM; 2539 } 2540 2541 array = *array_ptr; 2542 for (i = 0; i < mask->length; i++) { 2543 if (mask->mask[i / 8] & (1U << (i % 8))) { 2544 spdk_bit_array_set(array, i); 2545 } 2546 } 2547 2548 return 0; 2549 } 2550 2551 static void 2552 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2553 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2554 { 2555 /* Update the values in the super block */ 2556 super->super_blob = bs->super_blob; 2557 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 2558 super->crc = _spdk_blob_md_page_calc_crc(super); 2559 spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0), 2560 _spdk_bs_byte_to_lba(bs, sizeof(*super)), 2561 cb_fn, cb_arg); 2562 } 2563 2564 static void 2565 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2566 { 2567 struct spdk_bs_load_ctx *ctx = arg; 2568 uint64_t mask_size, lba, lba_count; 2569 2570 /* Write out the used clusters mask */ 2571 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 2572 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2573 if (!ctx->mask) { 2574 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2575 return; 2576 } 2577 2578 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 2579 ctx->mask->length = ctx->bs->total_clusters; 2580 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters)); 2581 2582 _spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask); 2583 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 2584 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 2585 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2586 } 2587 2588 static void 2589 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2590 { 2591 struct spdk_bs_load_ctx *ctx = arg; 2592 uint64_t mask_size, lba, lba_count; 2593 2594 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 2595 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2596 if (!ctx->mask) { 2597 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2598 return; 2599 } 2600 2601 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 2602 ctx->mask->length = ctx->super->md_len; 2603 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 2604 2605 _spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask); 2606 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 2607 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 2608 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2609 } 2610 2611 static void 2612 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2613 { 2614 struct spdk_bs_load_ctx *ctx = arg; 2615 uint64_t mask_size, lba, lba_count; 2616 2617 if 
(ctx->super->used_blobid_mask_len == 0) { 2618 /* 2619 * This is a pre-v3 on-disk format where the blobid mask does not get 2620 * written to disk. 2621 */ 2622 cb_fn(seq, arg, 0); 2623 return; 2624 } 2625 2626 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 2627 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2628 if (!ctx->mask) { 2629 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2630 return; 2631 } 2632 2633 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 2634 ctx->mask->length = ctx->super->md_len; 2635 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 2636 2637 _spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask); 2638 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 2639 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 2640 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2641 } 2642 2643 static void 2644 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 2645 { 2646 struct spdk_bs_load_ctx *ctx = arg; 2647 2648 if (bserrno == 0) { 2649 if (ctx->iter_cb_fn) { 2650 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 2651 } 2652 _spdk_bs_blob_list_add(blob); 2653 spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx); 2654 return; 2655 } 2656 2657 if (bserrno == -ENOENT) { 2658 bserrno = 0; 2659 } else { 2660 /* 2661 * This case needs to be looked at further. Same problem 2662 * exists with applications that rely on explicit blob 2663 * iteration. We should just skip the blob that failed 2664 * to load and coontinue on to the next one. 2665 */ 2666 SPDK_ERRLOG("Error in iterating blobs\n"); 2667 } 2668 2669 ctx->iter_cb_fn = NULL; 2670 2671 spdk_dma_free(ctx->super); 2672 spdk_dma_free(ctx->mask); 2673 spdk_bs_sequence_finish(ctx->seq, bserrno); 2674 free(ctx); 2675 } 2676 2677 static void 2678 _spdk_bs_load_complete(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 2679 { 2680 ctx->seq = seq; 2681 spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx); 2682 } 2683 2684 static void 2685 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2686 { 2687 struct spdk_bs_load_ctx *ctx = cb_arg; 2688 int rc; 2689 2690 /* The type must be correct */ 2691 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 2692 2693 /* The length of the mask (in bits) must not be greater than 2694 * the length of the buffer (converted to bits) */ 2695 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 2696 2697 /* The length of the mask must be exactly equal to the size 2698 * (in pages) of the metadata region */ 2699 assert(ctx->mask->length == ctx->super->md_len); 2700 2701 rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask); 2702 if (rc < 0) { 2703 spdk_dma_free(ctx->mask); 2704 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2705 return; 2706 } 2707 2708 _spdk_bs_load_complete(seq, ctx, bserrno); 2709 } 2710 2711 static void 2712 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2713 { 2714 struct spdk_bs_load_ctx *ctx = cb_arg; 2715 uint64_t lba, lba_count, mask_size; 2716 int rc; 2717 2718 /* The type must be correct */ 2719 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 2720 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 2721 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 2722 struct spdk_blob_md_page) * 8)); 2723 /* The length of the mask must be 
exactly equal to the total number of clusters */ 2724 assert(ctx->mask->length == ctx->bs->total_clusters); 2725 2726 rc = _spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask); 2727 if (rc < 0) { 2728 spdk_dma_free(ctx->mask); 2729 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2730 return; 2731 } 2732 2733 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters); 2734 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 2735 2736 spdk_dma_free(ctx->mask); 2737 2738 /* Read the used blobids mask */ 2739 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 2740 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2741 if (!ctx->mask) { 2742 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2743 return; 2744 } 2745 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 2746 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 2747 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2748 _spdk_bs_load_used_blobids_cpl, ctx); 2749 } 2750 2751 static void 2752 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2753 { 2754 struct spdk_bs_load_ctx *ctx = cb_arg; 2755 uint64_t lba, lba_count, mask_size; 2756 int rc; 2757 2758 /* The type must be correct */ 2759 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 2760 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 2761 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 2762 8)); 2763 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 2764 assert(ctx->mask->length == ctx->super->md_len); 2765 2766 rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask); 2767 if (rc < 0) { 2768 spdk_dma_free(ctx->mask); 2769 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2770 return; 2771 } 2772 2773 spdk_dma_free(ctx->mask); 2774 2775 /* Read the used clusters mask */ 2776 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 2777 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2778 if (!ctx->mask) { 2779 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2780 return; 2781 } 2782 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 2783 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 2784 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2785 _spdk_bs_load_used_clusters_cpl, ctx); 2786 } 2787 2788 static void 2789 _spdk_bs_load_read_used_pages(spdk_bs_sequence_t *seq, void *cb_arg) 2790 { 2791 struct spdk_bs_load_ctx *ctx = cb_arg; 2792 uint64_t lba, lba_count, mask_size; 2793 2794 /* Read the used pages mask */ 2795 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 2796 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2797 if (!ctx->mask) { 2798 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2799 return; 2800 } 2801 2802 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 2803 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 2804 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2805 _spdk_bs_load_used_pages_cpl, ctx); 2806 } 2807 2808 static int 2809 _spdk_bs_load_replay_md_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_store *bs) 2810 { 2811 struct spdk_blob_md_descriptor *desc; 2812 size_t cur_desc = 0; 2813 2814 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 2815 while (cur_desc < sizeof(page->descriptors)) { 2816 if (desc->type == 
SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 2817 if (desc->length == 0) { 2818 /* If padding and length are 0, this terminates the page */ 2819 break; 2820 } 2821 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 2822 struct spdk_blob_md_descriptor_extent *desc_extent; 2823 unsigned int i, j; 2824 unsigned int cluster_count = 0; 2825 uint32_t cluster_idx; 2826 2827 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 2828 2829 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 2830 for (j = 0; j < desc_extent->extents[i].length; j++) { 2831 cluster_idx = desc_extent->extents[i].cluster_idx; 2832 /* 2833 * cluster_idx = 0 means an unallocated cluster - don't mark that 2834 * in the used cluster map. 2835 */ 2836 if (cluster_idx != 0) { 2837 spdk_bit_array_set(bs->used_clusters, cluster_idx + j); 2838 if (bs->num_free_clusters == 0) { 2839 return -ENOSPC; 2840 } 2841 bs->num_free_clusters--; 2842 } 2843 cluster_count++; 2844 } 2845 } 2846 if (cluster_count == 0) { 2847 return -EINVAL; 2848 } 2849 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 2850 /* Skip this item */ 2851 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 2852 /* Skip this item */ 2853 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 2854 /* Skip this item */ 2855 } else { 2856 /* Error */ 2857 return -EINVAL; 2858 } 2859 /* Advance to the next descriptor */ 2860 cur_desc += sizeof(*desc) + desc->length; 2861 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 2862 break; 2863 } 2864 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 2865 } 2866 return 0; 2867 } 2868 2869 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 2870 { 2871 uint32_t crc; 2872 2873 crc = _spdk_blob_md_page_calc_crc(ctx->page); 2874 if (crc != ctx->page->crc) { 2875 return false; 2876 } 2877 2878 if (_spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) { 2879 return false; 2880 } 2881 return true; 2882 } 2883 2884 static void 2885 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 2886 2887 static void 2888 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2889 { 2890 struct spdk_bs_load_ctx *ctx = cb_arg; 2891 2892 _spdk_bs_load_complete(seq, ctx, bserrno); 2893 } 2894 2895 static void 2896 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2897 { 2898 struct spdk_bs_load_ctx *ctx = cb_arg; 2899 2900 spdk_dma_free(ctx->mask); 2901 ctx->mask = NULL; 2902 2903 _spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_load_write_used_clusters_cpl); 2904 } 2905 2906 static void 2907 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2908 { 2909 struct spdk_bs_load_ctx *ctx = cb_arg; 2910 2911 spdk_dma_free(ctx->mask); 2912 ctx->mask = NULL; 2913 2914 _spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_load_write_used_blobids_cpl); 2915 } 2916 2917 static void 2918 _spdk_bs_load_write_used_md(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2919 { 2920 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_load_write_used_pages_cpl); 2921 } 2922 2923 static void 2924 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2925 { 2926 struct spdk_bs_load_ctx *ctx = cb_arg; 2927 uint64_t num_md_clusters; 2928 uint64_t i; 2929 uint32_t page_num; 2930 2931 if (bserrno != 0) { 2932 _spdk_bs_load_ctx_fail(seq, ctx, bserrno); 2933 return; 2934 } 2935 2936 page_num = ctx->cur_page; 
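	/* Replay rule: a valid page with sequence_num == 0 starts a blob's metadata chain, so it
	 * marks both a used metadata page and a used blobid; pages reached through page->next
	 * belong to the same chain and only mark used metadata pages. */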
2937 if (_spdk_bs_load_cur_md_page_valid(ctx) == true) { 2938 if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) { 2939 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 2940 if (ctx->page->sequence_num == 0) { 2941 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 2942 } 2943 if (_spdk_bs_load_replay_md_parse_page(ctx->page, ctx->bs)) { 2944 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 2945 return; 2946 } 2947 if (ctx->page->next != SPDK_INVALID_MD_PAGE) { 2948 ctx->in_page_chain = true; 2949 ctx->cur_page = ctx->page->next; 2950 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 2951 return; 2952 } 2953 } 2954 } 2955 2956 ctx->in_page_chain = false; 2957 2958 do { 2959 ctx->page_index++; 2960 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 2961 2962 if (ctx->page_index < ctx->super->md_len) { 2963 ctx->cur_page = ctx->page_index; 2964 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 2965 } else { 2966 /* Claim all of the clusters used by the metadata */ 2967 num_md_clusters = divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster); 2968 for (i = 0; i < num_md_clusters; i++) { 2969 _spdk_bs_claim_cluster(ctx->bs, i); 2970 } 2971 spdk_dma_free(ctx->page); 2972 _spdk_bs_load_write_used_md(seq, ctx, bserrno); 2973 } 2974 } 2975 2976 static void 2977 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 2978 { 2979 struct spdk_bs_load_ctx *ctx = cb_arg; 2980 uint64_t lba; 2981 2982 assert(ctx->cur_page < ctx->super->md_len); 2983 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 2984 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 2985 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 2986 _spdk_bs_load_replay_md_cpl, ctx); 2987 } 2988 2989 static void 2990 _spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg) 2991 { 2992 struct spdk_bs_load_ctx *ctx = cb_arg; 2993 2994 ctx->page_index = 0; 2995 ctx->cur_page = 0; 2996 ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE, 2997 SPDK_BS_PAGE_SIZE, 2998 NULL); 2999 if (!ctx->page) { 3000 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3001 return; 3002 } 3003 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 3004 } 3005 3006 static void 3007 _spdk_bs_recover(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3008 { 3009 struct spdk_bs_load_ctx *ctx = cb_arg; 3010 int rc; 3011 3012 if (bserrno != 0) { 3013 _spdk_bs_load_ctx_fail(seq, ctx, -EIO); 3014 return; 3015 } 3016 3017 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 3018 if (rc < 0) { 3019 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3020 return; 3021 } 3022 3023 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 3024 if (rc < 0) { 3025 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3026 return; 3027 } 3028 3029 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 3030 if (rc < 0) { 3031 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3032 return; 3033 } 3034 3035 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 3036 _spdk_bs_load_replay_md(seq, cb_arg); 3037 } 3038 3039 static void 3040 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3041 { 3042 struct spdk_bs_load_ctx *ctx = cb_arg; 3043 uint32_t crc; 3044 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH]; 3045 3046 if (ctx->super->version > SPDK_BS_VERSION || 3047 ctx->super->version < SPDK_BS_INITIAL_VERSION) { 3048 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3049 return; 3050 } 3051 3052 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3053 
		   sizeof(ctx->super->signature)) != 0) {
		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
		return;
	}

	crc = _spdk_blob_md_page_calc_crc(ctx->super);
	if (crc != ctx->super->crc) {
		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
		return;
	}

	if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n");
		SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		_spdk_bs_load_ctx_fail(seq, ctx, -ENXIO);
		return;
	}

	/* Parse the super block */
	ctx->bs->clean = 1;
	ctx->bs->cluster_sz = ctx->super->cluster_size;
	ctx->bs->total_clusters = ctx->bs->dev->blockcnt / (ctx->bs->cluster_sz / ctx->bs->dev->blocklen);
	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
	ctx->bs->md_start = ctx->super->md_start;
	ctx->bs->md_len = ctx->super->md_len;
	ctx->bs->total_data_clusters = ctx->bs->total_clusters - divide_round_up(
					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
	ctx->bs->super_blob = ctx->super->super_blob;
	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));

	if (ctx->super->clean == 0) {
		_spdk_bs_recover(seq, ctx, 0);
	} else if (ctx->super->used_blobid_mask_len == 0) {
		/*
		 * Metadata is clean, but this is an old metadata format without
		 * a blobid mask. Clear the clean bit and then build the masks
		 * using _spdk_bs_recover.
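		 * (_spdk_bs_recover resizes the in-memory used_md_pages, used_blobids and
		 * used_clusters bitmaps and then replays every metadata page to rebuild them.)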
3095 */ 3096 ctx->super->clean = 0; 3097 ctx->bs->clean = 0; 3098 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_recover, ctx); 3099 } else { 3100 _spdk_bs_load_read_used_pages(seq, ctx); 3101 } 3102 } 3103 3104 void 3105 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 3106 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 3107 { 3108 struct spdk_blob_store *bs; 3109 struct spdk_bs_cpl cpl; 3110 spdk_bs_sequence_t *seq; 3111 struct spdk_bs_load_ctx *ctx; 3112 struct spdk_bs_opts opts = {}; 3113 3114 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev); 3115 3116 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 3117 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen); 3118 dev->destroy(dev); 3119 cb_fn(cb_arg, NULL, -EINVAL); 3120 return; 3121 } 3122 3123 if (o) { 3124 opts = *o; 3125 } else { 3126 spdk_bs_opts_init(&opts); 3127 } 3128 3129 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 3130 dev->destroy(dev); 3131 cb_fn(cb_arg, NULL, -EINVAL); 3132 return; 3133 } 3134 3135 bs = _spdk_bs_alloc(dev, &opts); 3136 if (!bs) { 3137 dev->destroy(dev); 3138 cb_fn(cb_arg, NULL, -ENOMEM); 3139 return; 3140 } 3141 3142 ctx = calloc(1, sizeof(*ctx)); 3143 if (!ctx) { 3144 _spdk_bs_free(bs); 3145 cb_fn(cb_arg, NULL, -ENOMEM); 3146 return; 3147 } 3148 3149 ctx->bs = bs; 3150 ctx->is_load = true; 3151 ctx->iter_cb_fn = opts.iter_cb_fn; 3152 ctx->iter_cb_arg = opts.iter_cb_arg; 3153 3154 /* Allocate memory for the super block */ 3155 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3156 if (!ctx->super) { 3157 free(ctx); 3158 _spdk_bs_free(bs); 3159 cb_fn(cb_arg, NULL, -ENOMEM); 3160 return; 3161 } 3162 3163 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 3164 cpl.u.bs_handle.cb_fn = cb_fn; 3165 cpl.u.bs_handle.cb_arg = cb_arg; 3166 cpl.u.bs_handle.bs = bs; 3167 3168 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3169 if (!seq) { 3170 spdk_dma_free(ctx->super); 3171 free(ctx); 3172 _spdk_bs_free(bs); 3173 cb_fn(cb_arg, NULL, -ENOMEM); 3174 return; 3175 } 3176 3177 /* Read the super block */ 3178 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3179 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3180 _spdk_bs_load_super_cpl, ctx); 3181 } 3182 3183 /* END spdk_bs_load */ 3184 3185 /* START spdk_bs_dump */ 3186 3187 struct spdk_bs_dump_ctx { 3188 struct spdk_blob_store *bs; 3189 struct spdk_bs_super_block *super; 3190 uint32_t cur_page; 3191 struct spdk_blob_md_page *page; 3192 spdk_bs_sequence_t *seq; 3193 FILE *fp; 3194 spdk_bs_dump_print_xattr print_xattr_fn; 3195 char xattr_name[4096]; 3196 }; 3197 3198 static void 3199 _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno) 3200 { 3201 spdk_dma_free(ctx->super); 3202 3203 /* 3204 * We need to defer calling spdk_bs_call_cpl() until after 3205 * dev destuction, so tuck these away for later use. 
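	 * (Sketch of the existing pattern, not new behavior: the completion stored in seq->cpl is
	 * copied into bs->unload_cpl, seq->cpl.type is set to SPDK_BS_CPL_TYPE_NONE so finishing
	 * the sequence does not invoke it, and _spdk_bs_dev_destroy() later calls
	 * spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err) once the device has been destroyed.)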
3206 */ 3207 ctx->bs->unload_err = bserrno; 3208 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3209 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3210 3211 spdk_bs_sequence_finish(seq, 0); 3212 _spdk_bs_free(ctx->bs); 3213 free(ctx); 3214 } 3215 3216 static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 3217 3218 static void 3219 _spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx) 3220 { 3221 uint32_t page_idx = ctx->cur_page; 3222 struct spdk_blob_md_page *page = ctx->page; 3223 struct spdk_blob_md_descriptor *desc; 3224 size_t cur_desc = 0; 3225 uint32_t crc; 3226 3227 fprintf(ctx->fp, "=========\n"); 3228 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 3229 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 3230 3231 crc = _spdk_blob_md_page_calc_crc(page); 3232 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch"); 3233 3234 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3235 while (cur_desc < sizeof(page->descriptors)) { 3236 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 3237 if (desc->length == 0) { 3238 /* If padding and length are 0, this terminates the page */ 3239 break; 3240 } 3241 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 3242 struct spdk_blob_md_descriptor_extent *desc_extent; 3243 unsigned int i; 3244 3245 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 3246 3247 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 3248 if (desc_extent->extents[i].cluster_idx != 0) { 3249 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 3250 desc_extent->extents[i].cluster_idx); 3251 } else { 3252 fprintf(ctx->fp, "Unallocated Extent - "); 3253 } 3254 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent->extents[i].length); 3255 fprintf(ctx->fp, "\n"); 3256 } 3257 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 3258 struct spdk_blob_md_descriptor_xattr *desc_xattr; 3259 uint32_t i; 3260 3261 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 3262 3263 if (desc_xattr->length != 3264 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 3265 desc_xattr->name_length + desc_xattr->value_length) { 3266 } 3267 3268 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 3269 ctx->xattr_name[desc_xattr->name_length] = '\0'; 3270 fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name); 3271 fprintf(ctx->fp, " value = \""); 3272 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 3273 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 3274 desc_xattr->value_length); 3275 fprintf(ctx->fp, "\"\n"); 3276 for (i = 0; i < desc_xattr->value_length; i++) { 3277 if (i % 16 == 0) { 3278 fprintf(ctx->fp, " "); 3279 } 3280 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 3281 if ((i + 1) % 16 == 0) { 3282 fprintf(ctx->fp, "\n"); 3283 } 3284 } 3285 if (i % 16 != 0) { 3286 fprintf(ctx->fp, "\n"); 3287 } 3288 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 3289 /* TODO */ 3290 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 3291 /* TODO */ 3292 } else { 3293 /* Error */ 3294 } 3295 /* Advance to the next descriptor */ 3296 cur_desc += sizeof(*desc) + desc->length; 3297 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 3298 break; 3299 } 3300 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 3301 } 3302 
} 3303 3304 static void 3305 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3306 { 3307 struct spdk_bs_dump_ctx *ctx = cb_arg; 3308 3309 if (bserrno != 0) { 3310 _spdk_bs_dump_finish(seq, ctx, bserrno); 3311 return; 3312 } 3313 3314 if (ctx->page->id != 0) { 3315 _spdk_bs_dump_print_md_page(ctx); 3316 } 3317 3318 ctx->cur_page++; 3319 3320 if (ctx->cur_page < ctx->super->md_len) { 3321 _spdk_bs_dump_read_md_page(seq, cb_arg); 3322 } else { 3323 spdk_dma_free(ctx->page); 3324 _spdk_bs_dump_finish(seq, ctx, 0); 3325 } 3326 } 3327 3328 static void 3329 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 3330 { 3331 struct spdk_bs_dump_ctx *ctx = cb_arg; 3332 uint64_t lba; 3333 3334 assert(ctx->cur_page < ctx->super->md_len); 3335 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 3336 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 3337 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 3338 _spdk_bs_dump_read_md_page_cpl, ctx); 3339 } 3340 3341 static void 3342 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3343 { 3344 struct spdk_bs_dump_ctx *ctx = cb_arg; 3345 3346 fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature); 3347 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3348 sizeof(ctx->super->signature)) != 0) { 3349 fprintf(ctx->fp, "(Mismatch)\n"); 3350 _spdk_bs_dump_finish(seq, ctx, bserrno); 3351 return; 3352 } else { 3353 fprintf(ctx->fp, "(OK)\n"); 3354 } 3355 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 3356 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 3357 (ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 3358 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 3359 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 3360 fprintf(ctx->fp, "Super Blob ID: "); 3361 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 3362 fprintf(ctx->fp, "(None)\n"); 3363 } else { 3364 fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob); 3365 } 3366 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 3367 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 3368 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 3369 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 3370 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 3371 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 3372 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 3373 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 3374 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 3375 3376 ctx->cur_page = 0; 3377 ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE, 3378 SPDK_BS_PAGE_SIZE, 3379 NULL); 3380 if (!ctx->page) { 3381 _spdk_bs_dump_finish(seq, ctx, -ENOMEM); 3382 return; 3383 } 3384 _spdk_bs_dump_read_md_page(seq, cb_arg); 3385 } 3386 3387 void 3388 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 3389 spdk_bs_op_complete cb_fn, void *cb_arg) 3390 { 3391 struct spdk_blob_store *bs; 3392 struct spdk_bs_cpl cpl; 3393 spdk_bs_sequence_t *seq; 3394 struct spdk_bs_dump_ctx *ctx; 3395 struct spdk_bs_opts opts = {}; 3396 
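	/* Illustrative caller sketch (hypothetical spdk_bs_dev and callback names, shown here only
	 * as a comment):
	 *
	 *     static void dump_done(void *cb_arg, int bserrno) { ... }
	 *     ...
	 *     spdk_bs_dump(dev, stdout, my_print_xattr, dump_done, NULL);
	 *
	 * The callback runs after every metadata page has been printed, or with the first error
	 * encountered while reading the super block or a metadata page. */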
3397 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev); 3398 3399 spdk_bs_opts_init(&opts); 3400 3401 bs = _spdk_bs_alloc(dev, &opts); 3402 if (!bs) { 3403 dev->destroy(dev); 3404 cb_fn(cb_arg, -ENOMEM); 3405 return; 3406 } 3407 3408 ctx = calloc(1, sizeof(*ctx)); 3409 if (!ctx) { 3410 _spdk_bs_free(bs); 3411 cb_fn(cb_arg, -ENOMEM); 3412 return; 3413 } 3414 3415 ctx->bs = bs; 3416 ctx->fp = fp; 3417 ctx->print_xattr_fn = print_xattr_fn; 3418 3419 /* Allocate memory for the super block */ 3420 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3421 if (!ctx->super) { 3422 free(ctx); 3423 _spdk_bs_free(bs); 3424 cb_fn(cb_arg, -ENOMEM); 3425 return; 3426 } 3427 3428 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3429 cpl.u.bs_basic.cb_fn = cb_fn; 3430 cpl.u.bs_basic.cb_arg = cb_arg; 3431 3432 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3433 if (!seq) { 3434 spdk_dma_free(ctx->super); 3435 free(ctx); 3436 _spdk_bs_free(bs); 3437 cb_fn(cb_arg, -ENOMEM); 3438 return; 3439 } 3440 3441 /* Read the super block */ 3442 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3443 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3444 _spdk_bs_dump_super_cpl, ctx); 3445 } 3446 3447 /* END spdk_bs_dump */ 3448 3449 /* START spdk_bs_init */ 3450 3451 struct spdk_bs_init_ctx { 3452 struct spdk_blob_store *bs; 3453 struct spdk_bs_super_block *super; 3454 }; 3455 3456 static void 3457 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3458 { 3459 struct spdk_bs_init_ctx *ctx = cb_arg; 3460 3461 spdk_dma_free(ctx->super); 3462 free(ctx); 3463 3464 spdk_bs_sequence_finish(seq, bserrno); 3465 } 3466 3467 static void 3468 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3469 { 3470 struct spdk_bs_init_ctx *ctx = cb_arg; 3471 3472 /* Write super block */ 3473 spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0), 3474 _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 3475 _spdk_bs_init_persist_super_cpl, ctx); 3476 } 3477 3478 void 3479 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 3480 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 3481 { 3482 struct spdk_bs_init_ctx *ctx; 3483 struct spdk_blob_store *bs; 3484 struct spdk_bs_cpl cpl; 3485 spdk_bs_sequence_t *seq; 3486 spdk_bs_batch_t *batch; 3487 uint64_t num_md_lba; 3488 uint64_t num_md_pages; 3489 uint64_t num_md_clusters; 3490 uint32_t i; 3491 struct spdk_bs_opts opts = {}; 3492 int rc; 3493 3494 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev); 3495 3496 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 3497 SPDK_ERRLOG("unsupported dev block length of %d\n", 3498 dev->blocklen); 3499 dev->destroy(dev); 3500 cb_fn(cb_arg, NULL, -EINVAL); 3501 return; 3502 } 3503 3504 if (o) { 3505 opts = *o; 3506 } else { 3507 spdk_bs_opts_init(&opts); 3508 } 3509 3510 if (_spdk_bs_opts_verify(&opts) != 0) { 3511 dev->destroy(dev); 3512 cb_fn(cb_arg, NULL, -EINVAL); 3513 return; 3514 } 3515 3516 bs = _spdk_bs_alloc(dev, &opts); 3517 if (!bs) { 3518 dev->destroy(dev); 3519 cb_fn(cb_arg, NULL, -ENOMEM); 3520 return; 3521 } 3522 3523 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 3524 /* By default, allocate 1 page per cluster. 3525 * Technically, this over-allocates metadata 3526 * because more metadata will reduce the number 3527 * of usable clusters. This can be addressed with 3528 * more complex math in the future. 
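		 * As a hedged example (hypothetical sizes, assuming 4 KiB pages): a 10 GiB device
		 * with 1 MiB clusters has 10240 clusters, so the default md_len is 10240 pages
		 * (a 40 MiB metadata region).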
3529 */ 3530 bs->md_len = bs->total_clusters; 3531 } else { 3532 bs->md_len = opts.num_md_pages; 3533 } 3534 3535 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 3536 if (rc < 0) { 3537 _spdk_bs_free(bs); 3538 cb_fn(cb_arg, NULL, -ENOMEM); 3539 return; 3540 } 3541 3542 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 3543 if (rc < 0) { 3544 _spdk_bs_free(bs); 3545 cb_fn(cb_arg, NULL, -ENOMEM); 3546 return; 3547 } 3548 3549 ctx = calloc(1, sizeof(*ctx)); 3550 if (!ctx) { 3551 _spdk_bs_free(bs); 3552 cb_fn(cb_arg, NULL, -ENOMEM); 3553 return; 3554 } 3555 3556 ctx->bs = bs; 3557 3558 /* Allocate memory for the super block */ 3559 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3560 if (!ctx->super) { 3561 free(ctx); 3562 _spdk_bs_free(bs); 3563 cb_fn(cb_arg, NULL, -ENOMEM); 3564 return; 3565 } 3566 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3567 sizeof(ctx->super->signature)); 3568 ctx->super->version = SPDK_BS_VERSION; 3569 ctx->super->length = sizeof(*ctx->super); 3570 ctx->super->super_blob = bs->super_blob; 3571 ctx->super->clean = 0; 3572 ctx->super->cluster_size = bs->cluster_sz; 3573 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 3574 3575 /* Calculate how many pages the metadata consumes at the front 3576 * of the disk. 3577 */ 3578 3579 /* The super block uses 1 page */ 3580 num_md_pages = 1; 3581 3582 /* The used_md_pages mask requires 1 bit per metadata page, rounded 3583 * up to the nearest page, plus a header. 3584 */ 3585 ctx->super->used_page_mask_start = num_md_pages; 3586 ctx->super->used_page_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) + 3587 divide_round_up(bs->md_len, 8), 3588 SPDK_BS_PAGE_SIZE); 3589 num_md_pages += ctx->super->used_page_mask_len; 3590 3591 /* The used_clusters mask requires 1 bit per cluster, rounded 3592 * up to the nearest page, plus a header. 3593 */ 3594 ctx->super->used_cluster_mask_start = num_md_pages; 3595 ctx->super->used_cluster_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) + 3596 divide_round_up(bs->total_clusters, 8), 3597 SPDK_BS_PAGE_SIZE); 3598 num_md_pages += ctx->super->used_cluster_mask_len; 3599 3600 /* The used_blobids mask requires 1 bit per metadata page, rounded 3601 * up to the nearest page, plus a header. 
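	 * Continuing the hypothetical 10 GiB / 1 MiB-cluster example (assuming 4 KiB pages and a
	 * small struct spdk_bs_md_mask header): page 0 holds the super block, the used_page,
	 * used_cluster and used_blobid masks each round up to 1 page, so md_start lands at page 4
	 * and the metadata region itself spans md_len = 10240 pages.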
3602 */ 3603 ctx->super->used_blobid_mask_start = num_md_pages; 3604 ctx->super->used_blobid_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) + 3605 divide_round_up(bs->md_len, 8), 3606 SPDK_BS_PAGE_SIZE); 3607 num_md_pages += ctx->super->used_blobid_mask_len; 3608 3609 /* The metadata region size was chosen above */ 3610 ctx->super->md_start = bs->md_start = num_md_pages; 3611 ctx->super->md_len = bs->md_len; 3612 num_md_pages += bs->md_len; 3613 3614 num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages); 3615 3616 ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super); 3617 3618 num_md_clusters = divide_round_up(num_md_pages, bs->pages_per_cluster); 3619 if (num_md_clusters > bs->total_clusters) { 3620 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 3621 "please decrease number of pages reserved for metadata " 3622 "or increase cluster size.\n"); 3623 spdk_dma_free(ctx->super); 3624 free(ctx); 3625 _spdk_bs_free(bs); 3626 cb_fn(cb_arg, NULL, -ENOMEM); 3627 return; 3628 } 3629 /* Claim all of the clusters used by the metadata */ 3630 for (i = 0; i < num_md_clusters; i++) { 3631 _spdk_bs_claim_cluster(bs, i); 3632 } 3633 3634 bs->total_data_clusters = bs->num_free_clusters; 3635 3636 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 3637 cpl.u.bs_handle.cb_fn = cb_fn; 3638 cpl.u.bs_handle.cb_arg = cb_arg; 3639 cpl.u.bs_handle.bs = bs; 3640 3641 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3642 if (!seq) { 3643 spdk_dma_free(ctx->super); 3644 free(ctx); 3645 _spdk_bs_free(bs); 3646 cb_fn(cb_arg, NULL, -ENOMEM); 3647 return; 3648 } 3649 3650 batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx); 3651 3652 /* Clear metadata space */ 3653 spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 3654 /* Trim data clusters */ 3655 spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba); 3656 3657 spdk_bs_batch_close(batch); 3658 } 3659 3660 /* END spdk_bs_init */ 3661 3662 /* START spdk_bs_destroy */ 3663 3664 static void 3665 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3666 { 3667 struct spdk_bs_init_ctx *ctx = cb_arg; 3668 struct spdk_blob_store *bs = ctx->bs; 3669 3670 /* 3671 * We need to defer calling spdk_bs_call_cpl() until after 3672 * dev destruction, so tuck these away for later use. 
3673 */ 3674 bs->unload_err = bserrno; 3675 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3676 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3677 3678 spdk_bs_sequence_finish(seq, bserrno); 3679 3680 _spdk_bs_free(bs); 3681 free(ctx); 3682 } 3683 3684 void 3685 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 3686 void *cb_arg) 3687 { 3688 struct spdk_bs_cpl cpl; 3689 spdk_bs_sequence_t *seq; 3690 struct spdk_bs_init_ctx *ctx; 3691 3692 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n"); 3693 3694 if (!TAILQ_EMPTY(&bs->blobs)) { 3695 SPDK_ERRLOG("Blobstore still has open blobs\n"); 3696 cb_fn(cb_arg, -EBUSY); 3697 return; 3698 } 3699 3700 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3701 cpl.u.bs_basic.cb_fn = cb_fn; 3702 cpl.u.bs_basic.cb_arg = cb_arg; 3703 3704 ctx = calloc(1, sizeof(*ctx)); 3705 if (!ctx) { 3706 cb_fn(cb_arg, -ENOMEM); 3707 return; 3708 } 3709 3710 ctx->bs = bs; 3711 3712 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3713 if (!seq) { 3714 free(ctx); 3715 cb_fn(cb_arg, -ENOMEM); 3716 return; 3717 } 3718 3719 /* Write zeroes to the super block */ 3720 spdk_bs_sequence_write_zeroes_dev(seq, 3721 _spdk_bs_page_to_lba(bs, 0), 3722 _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 3723 _spdk_bs_destroy_trim_cpl, ctx); 3724 } 3725 3726 /* END spdk_bs_destroy */ 3727 3728 /* START spdk_bs_unload */ 3729 3730 static void 3731 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3732 { 3733 struct spdk_bs_load_ctx *ctx = cb_arg; 3734 3735 spdk_dma_free(ctx->super); 3736 3737 /* 3738 * We need to defer calling spdk_bs_call_cpl() until after 3739 * dev destuction, so tuck these away for later use. 3740 */ 3741 ctx->bs->unload_err = bserrno; 3742 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3743 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3744 3745 spdk_bs_sequence_finish(seq, bserrno); 3746 3747 _spdk_bs_free(ctx->bs); 3748 free(ctx); 3749 } 3750 3751 static void 3752 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3753 { 3754 struct spdk_bs_load_ctx *ctx = cb_arg; 3755 3756 spdk_dma_free(ctx->mask); 3757 ctx->super->clean = 1; 3758 3759 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx); 3760 } 3761 3762 static void 3763 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3764 { 3765 struct spdk_bs_load_ctx *ctx = cb_arg; 3766 3767 spdk_dma_free(ctx->mask); 3768 ctx->mask = NULL; 3769 3770 _spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_unload_write_used_clusters_cpl); 3771 } 3772 3773 static void 3774 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3775 { 3776 struct spdk_bs_load_ctx *ctx = cb_arg; 3777 3778 spdk_dma_free(ctx->mask); 3779 ctx->mask = NULL; 3780 3781 _spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_unload_write_used_blobids_cpl); 3782 } 3783 3784 static void 3785 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3786 { 3787 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl); 3788 } 3789 3790 void 3791 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 3792 { 3793 struct spdk_bs_cpl cpl; 3794 spdk_bs_sequence_t *seq; 3795 struct spdk_bs_load_ctx *ctx; 3796 3797 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n"); 3798 3799 if (!TAILQ_EMPTY(&bs->blobs)) { 3800 SPDK_ERRLOG("Blobstore still has open blobs\n"); 3801 
cb_fn(cb_arg, -EBUSY); 3802 return; 3803 } 3804 3805 ctx = calloc(1, sizeof(*ctx)); 3806 if (!ctx) { 3807 cb_fn(cb_arg, -ENOMEM); 3808 return; 3809 } 3810 3811 ctx->bs = bs; 3812 ctx->is_load = false; 3813 3814 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3815 if (!ctx->super) { 3816 free(ctx); 3817 cb_fn(cb_arg, -ENOMEM); 3818 return; 3819 } 3820 3821 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3822 cpl.u.bs_basic.cb_fn = cb_fn; 3823 cpl.u.bs_basic.cb_arg = cb_arg; 3824 3825 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3826 if (!seq) { 3827 spdk_dma_free(ctx->super); 3828 free(ctx); 3829 cb_fn(cb_arg, -ENOMEM); 3830 return; 3831 } 3832 3833 /* Read super block */ 3834 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3835 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3836 _spdk_bs_unload_read_super_cpl, ctx); 3837 } 3838 3839 /* END spdk_bs_unload */ 3840 3841 /* START spdk_bs_set_super */ 3842 3843 struct spdk_bs_set_super_ctx { 3844 struct spdk_blob_store *bs; 3845 struct spdk_bs_super_block *super; 3846 }; 3847 3848 static void 3849 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3850 { 3851 struct spdk_bs_set_super_ctx *ctx = cb_arg; 3852 3853 if (bserrno != 0) { 3854 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 3855 } 3856 3857 spdk_dma_free(ctx->super); 3858 3859 spdk_bs_sequence_finish(seq, bserrno); 3860 3861 free(ctx); 3862 } 3863 3864 static void 3865 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3866 { 3867 struct spdk_bs_set_super_ctx *ctx = cb_arg; 3868 3869 if (bserrno != 0) { 3870 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 3871 spdk_dma_free(ctx->super); 3872 spdk_bs_sequence_finish(seq, bserrno); 3873 free(ctx); 3874 return; 3875 } 3876 3877 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx); 3878 } 3879 3880 void 3881 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 3882 spdk_bs_op_complete cb_fn, void *cb_arg) 3883 { 3884 struct spdk_bs_cpl cpl; 3885 spdk_bs_sequence_t *seq; 3886 struct spdk_bs_set_super_ctx *ctx; 3887 3888 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n"); 3889 3890 ctx = calloc(1, sizeof(*ctx)); 3891 if (!ctx) { 3892 cb_fn(cb_arg, -ENOMEM); 3893 return; 3894 } 3895 3896 ctx->bs = bs; 3897 3898 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3899 if (!ctx->super) { 3900 free(ctx); 3901 cb_fn(cb_arg, -ENOMEM); 3902 return; 3903 } 3904 3905 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3906 cpl.u.bs_basic.cb_fn = cb_fn; 3907 cpl.u.bs_basic.cb_arg = cb_arg; 3908 3909 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3910 if (!seq) { 3911 spdk_dma_free(ctx->super); 3912 free(ctx); 3913 cb_fn(cb_arg, -ENOMEM); 3914 return; 3915 } 3916 3917 bs->super_blob = blobid; 3918 3919 /* Read super block */ 3920 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3921 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3922 _spdk_bs_set_super_read_cpl, ctx); 3923 } 3924 3925 /* END spdk_bs_set_super */ 3926 3927 void 3928 spdk_bs_get_super(struct spdk_blob_store *bs, 3929 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 3930 { 3931 if (bs->super_blob == SPDK_BLOBID_INVALID) { 3932 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 3933 } else { 3934 cb_fn(cb_arg, bs->super_blob, 0); 3935 } 3936 } 3937 3938 uint64_t 3939 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 3940 { 3941 return bs->cluster_sz; 3942 } 3943 3944 uint64_t 3945 
spdk_bs_get_page_size(struct spdk_blob_store *bs) 3946 { 3947 return SPDK_BS_PAGE_SIZE; 3948 } 3949 3950 uint64_t 3951 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 3952 { 3953 return bs->num_free_clusters; 3954 } 3955 3956 uint64_t 3957 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 3958 { 3959 return bs->total_data_clusters; 3960 } 3961 3962 static int 3963 spdk_bs_register_md_thread(struct spdk_blob_store *bs) 3964 { 3965 bs->md_channel = spdk_get_io_channel(bs); 3966 if (!bs->md_channel) { 3967 SPDK_ERRLOG("Failed to get IO channel.\n"); 3968 return -1; 3969 } 3970 3971 return 0; 3972 } 3973 3974 static int 3975 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs) 3976 { 3977 spdk_put_io_channel(bs->md_channel); 3978 3979 return 0; 3980 } 3981 3982 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 3983 { 3984 assert(blob != NULL); 3985 3986 return blob->id; 3987 } 3988 3989 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 3990 { 3991 assert(blob != NULL); 3992 3993 return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters); 3994 } 3995 3996 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 3997 { 3998 assert(blob != NULL); 3999 4000 return blob->active.num_clusters; 4001 } 4002 4003 /* START spdk_bs_create_blob */ 4004 4005 static void 4006 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4007 { 4008 struct spdk_blob *blob = cb_arg; 4009 4010 _spdk_blob_free(blob); 4011 4012 spdk_bs_sequence_finish(seq, bserrno); 4013 } 4014 4015 static int 4016 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 4017 bool internal) 4018 { 4019 uint64_t i; 4020 size_t value_len = 0; 4021 int rc; 4022 const void *value = NULL; 4023 if (xattrs->count > 0 && xattrs->get_value == NULL) { 4024 return -EINVAL; 4025 } 4026 for (i = 0; i < xattrs->count; i++) { 4027 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 4028 if (value == NULL || value_len == 0) { 4029 return -EINVAL; 4030 } 4031 rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 4032 if (rc < 0) { 4033 return rc; 4034 } 4035 } 4036 return 0; 4037 } 4038 4039 static void 4040 _spdk_blob_set_thin_provision(struct spdk_blob *blob) 4041 { 4042 _spdk_blob_verify_md_op(blob); 4043 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4044 blob->state = SPDK_BLOB_STATE_DIRTY; 4045 } 4046 4047 static void 4048 _spdk_bs_create_blob(struct spdk_blob_store *bs, 4049 const struct spdk_blob_opts *opts, 4050 const struct spdk_blob_xattr_opts *internal_xattrs, 4051 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4052 { 4053 struct spdk_blob *blob; 4054 uint32_t page_idx; 4055 struct spdk_bs_cpl cpl; 4056 struct spdk_blob_opts opts_default; 4057 struct spdk_blob_xattr_opts internal_xattrs_default; 4058 spdk_bs_sequence_t *seq; 4059 spdk_blob_id id; 4060 int rc; 4061 4062 assert(spdk_get_thread() == bs->md_thread); 4063 4064 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 4065 if (page_idx >= spdk_bit_array_capacity(bs->used_md_pages)) { 4066 cb_fn(cb_arg, 0, -ENOMEM); 4067 return; 4068 } 4069 spdk_bit_array_set(bs->used_blobids, page_idx); 4070 spdk_bit_array_set(bs->used_md_pages, page_idx); 4071 4072 id = _spdk_bs_page_to_blobid(page_idx); 4073 4074 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx); 4075 4076 blob = _spdk_blob_alloc(bs, id); 4077 if (!blob) { 4078 cb_fn(cb_arg, 0, -ENOMEM); 4079 return; 4080 } 4081 4082 if (!opts) { 4083 
spdk_blob_opts_init(&opts_default); 4084 opts = &opts_default; 4085 } 4086 if (!internal_xattrs) { 4087 _spdk_blob_xattrs_init(&internal_xattrs_default); 4088 internal_xattrs = &internal_xattrs_default; 4089 } 4090 4091 rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false); 4092 if (rc < 0) { 4093 _spdk_blob_free(blob); 4094 cb_fn(cb_arg, 0, rc); 4095 return; 4096 } 4097 4098 rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true); 4099 if (rc < 0) { 4100 _spdk_blob_free(blob); 4101 cb_fn(cb_arg, 0, rc); 4102 return; 4103 } 4104 4105 if (opts->thin_provision) { 4106 _spdk_blob_set_thin_provision(blob); 4107 } 4108 4109 rc = _spdk_blob_resize(blob, opts->num_clusters); 4110 if (rc < 0) { 4111 _spdk_blob_free(blob); 4112 cb_fn(cb_arg, 0, rc); 4113 return; 4114 } 4115 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4116 cpl.u.blobid.cb_fn = cb_fn; 4117 cpl.u.blobid.cb_arg = cb_arg; 4118 cpl.u.blobid.blobid = blob->id; 4119 4120 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4121 if (!seq) { 4122 _spdk_blob_free(blob); 4123 cb_fn(cb_arg, 0, -ENOMEM); 4124 return; 4125 } 4126 4127 _spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob); 4128 } 4129 4130 void spdk_bs_create_blob(struct spdk_blob_store *bs, 4131 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4132 { 4133 _spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 4134 } 4135 4136 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts, 4137 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4138 { 4139 _spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 4140 } 4141 4142 /* END spdk_bs_create_blob */ 4143 4144 /* START blob_cleanup */ 4145 4146 struct spdk_clone_snapshot_ctx { 4147 struct spdk_bs_cpl cpl; 4148 int bserrno; 4149 bool frozen; 4150 4151 struct spdk_io_channel *channel; 4152 4153 /* Current cluster for inflate operation */ 4154 uint64_t cluster; 4155 4156 /* For inflation force allocation of all unallocated clusters and remove 4157 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */ 4158 bool allocate_all; 4159 4160 struct { 4161 spdk_blob_id id; 4162 struct spdk_blob *blob; 4163 } original; 4164 struct { 4165 spdk_blob_id id; 4166 struct spdk_blob *blob; 4167 } new; 4168 4169 /* xattrs specified for snapshot/clones only. They have no impact on 4170 * the original blobs xattrs. 
*/ 4171 const struct spdk_blob_xattr_opts *xattrs; 4172 }; 4173 4174 static void 4175 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 4176 { 4177 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 4178 struct spdk_bs_cpl *cpl = &ctx->cpl; 4179 4180 if (bserrno != 0) { 4181 if (ctx->bserrno != 0) { 4182 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4183 } else { 4184 ctx->bserrno = bserrno; 4185 } 4186 } 4187 4188 switch (cpl->type) { 4189 case SPDK_BS_CPL_TYPE_BLOBID: 4190 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 4191 break; 4192 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 4193 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 4194 break; 4195 default: 4196 SPDK_UNREACHABLE(); 4197 break; 4198 } 4199 4200 free(ctx); 4201 } 4202 4203 static void 4204 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 4205 { 4206 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4207 struct spdk_blob *origblob = ctx->original.blob; 4208 4209 if (bserrno != 0) { 4210 if (ctx->bserrno != 0) { 4211 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 4212 } else { 4213 ctx->bserrno = bserrno; 4214 } 4215 } 4216 4217 ctx->original.id = origblob->id; 4218 spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 4219 } 4220 4221 static void 4222 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 4223 { 4224 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4225 struct spdk_blob *origblob = ctx->original.blob; 4226 4227 if (bserrno != 0) { 4228 if (ctx->bserrno != 0) { 4229 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4230 } else { 4231 ctx->bserrno = bserrno; 4232 } 4233 } 4234 4235 if (ctx->frozen) { 4236 /* Unfreeze any outstanding I/O */ 4237 _spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx); 4238 } else { 4239 _spdk_bs_snapshot_unfreeze_cpl(ctx, 0); 4240 } 4241 4242 } 4243 4244 static void 4245 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno) 4246 { 4247 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4248 struct spdk_blob *newblob = ctx->new.blob; 4249 4250 if (bserrno != 0) { 4251 if (ctx->bserrno != 0) { 4252 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4253 } else { 4254 ctx->bserrno = bserrno; 4255 } 4256 } 4257 4258 ctx->new.id = newblob->id; 4259 spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4260 } 4261 4262 /* END blob_cleanup */ 4263 4264 /* START spdk_bs_create_snapshot */ 4265 4266 static void 4267 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 4268 { 4269 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4270 struct spdk_blob *newblob = ctx->new.blob; 4271 4272 if (bserrno != 0) { 4273 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4274 return; 4275 } 4276 4277 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 4278 bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 4279 if (bserrno != 0) { 4280 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4281 return; 4282 } 4283 4284 _spdk_bs_blob_list_add(ctx->original.blob); 4285 4286 spdk_blob_set_read_only(newblob); 4287 4288 /* sync snapshot metadata */ 4289 spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, cb_arg); 4290 } 4291 4292 static void 4293 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 4294 { 4295 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4296 struct 
spdk_blob *origblob = ctx->original.blob; 4297 struct spdk_blob *newblob = ctx->new.blob; 4298 4299 if (bserrno != 0) { 4300 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4301 return; 4302 } 4303 4304 /* Set internal xattr for snapshot id */ 4305 bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 4306 if (bserrno != 0) { 4307 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4308 return; 4309 } 4310 4311 _spdk_bs_blob_list_remove(origblob); 4312 origblob->parent_id = newblob->id; 4313 4314 /* Create new back_bs_dev for snapshot */ 4315 origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob); 4316 if (origblob->back_bs_dev == NULL) { 4317 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 4318 return; 4319 } 4320 4321 /* set clone blob as thin provisioned */ 4322 _spdk_blob_set_thin_provision(origblob); 4323 4324 _spdk_bs_blob_list_add(newblob); 4325 4326 /* Zero out origblob cluster map */ 4327 memset(origblob->active.clusters, 0, 4328 origblob->active.num_clusters * sizeof(*origblob->active.clusters)); 4329 4330 /* sync clone metadata */ 4331 spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx); 4332 } 4333 4334 static void 4335 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc) 4336 { 4337 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4338 struct spdk_blob *origblob = ctx->original.blob; 4339 struct spdk_blob *newblob = ctx->new.blob; 4340 int bserrno; 4341 4342 if (rc != 0) { 4343 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc); 4344 return; 4345 } 4346 4347 ctx->frozen = true; 4348 4349 /* set new back_bs_dev for snapshot */ 4350 newblob->back_bs_dev = origblob->back_bs_dev; 4351 /* Set invalid flags from origblob */ 4352 newblob->invalid_flags = origblob->invalid_flags; 4353 4354 /* inherit parent from original blob if set */ 4355 newblob->parent_id = origblob->parent_id; 4356 if (origblob->parent_id != SPDK_BLOBID_INVALID) { 4357 /* Set internal xattr for snapshot id */ 4358 bserrno = _spdk_blob_set_xattr(newblob, BLOB_SNAPSHOT, 4359 &origblob->parent_id, sizeof(spdk_blob_id), true); 4360 if (bserrno != 0) { 4361 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4362 return; 4363 } 4364 } 4365 4366 /* Copy cluster map to snapshot */ 4367 memcpy(newblob->active.clusters, origblob->active.clusters, 4368 origblob->active.num_clusters * sizeof(*origblob->active.clusters)); 4369 4370 /* sync snapshot metadata */ 4371 spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx); 4372 } 4373 4374 static void 4375 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4376 { 4377 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4378 struct spdk_blob *origblob = ctx->original.blob; 4379 struct spdk_blob *newblob = _blob; 4380 4381 if (bserrno != 0) { 4382 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4383 return; 4384 } 4385 4386 ctx->new.blob = newblob; 4387 4388 _spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx); 4389 } 4390 4391 static void 4392 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 4393 { 4394 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4395 struct spdk_blob *origblob = ctx->original.blob; 4396 4397 if (bserrno != 0) { 4398 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4399 return; 4400 } 4401 4402 ctx->new.id = blobid; 4403 ctx->cpl.u.blobid.blobid = blobid; 4404 4405
spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx); 4406 } 4407 4408 4409 static void 4410 _spdk_bs_xattr_snapshot(void *arg, const char *name, 4411 const void **value, size_t *value_len) 4412 { 4413 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 4414 4415 struct spdk_blob *blob = (struct spdk_blob *)arg; 4416 *value = &blob->id; 4417 *value_len = sizeof(blob->id); 4418 } 4419 4420 static void 4421 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4422 { 4423 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4424 struct spdk_blob_opts opts; 4425 struct spdk_blob_xattr_opts internal_xattrs; 4426 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 4427 4428 if (bserrno != 0) { 4429 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4430 return; 4431 } 4432 4433 ctx->original.blob = _blob; 4434 4435 if (_blob->data_ro || _blob->md_ro) { 4436 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read only blob with id %lu\n", 4437 _blob->id); 4438 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4439 return; 4440 } 4441 4442 spdk_blob_opts_init(&opts); 4443 _spdk_blob_xattrs_init(&internal_xattrs); 4444 4445 /* Change the size of new blob to the same as in original blob, 4446 * but do not allocate clusters */ 4447 opts.thin_provision = true; 4448 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 4449 4450 /* If there are any xattrs specified for snapshot, set them now */ 4451 if (ctx->xattrs) { 4452 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 4453 } 4454 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 4455 internal_xattrs.count = 1; 4456 internal_xattrs.ctx = _blob; 4457 internal_xattrs.names = xattrs_names; 4458 internal_xattrs.get_value = _spdk_bs_xattr_snapshot; 4459 4460 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 4461 _spdk_bs_snapshot_newblob_create_cpl, ctx); 4462 } 4463 4464 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 4465 const struct spdk_blob_xattr_opts *snapshot_xattrs, 4466 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4467 { 4468 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4469 4470 if (!ctx) { 4471 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 4472 return; 4473 } 4474 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4475 ctx->cpl.u.blobid.cb_fn = cb_fn; 4476 ctx->cpl.u.blobid.cb_arg = cb_arg; 4477 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 4478 ctx->bserrno = 0; 4479 ctx->frozen = false; 4480 ctx->original.id = blobid; 4481 ctx->xattrs = snapshot_xattrs; 4482 4483 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx); 4484 } 4485 /* END spdk_bs_create_snapshot */ 4486 4487 /* START spdk_bs_create_clone */ 4488 4489 static void 4490 _spdk_bs_xattr_clone(void *arg, const char *name, 4491 const void **value, size_t *value_len) 4492 { 4493 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 4494 4495 struct spdk_blob *blob = (struct spdk_blob *)arg; 4496 *value = &blob->id; 4497 *value_len = sizeof(blob->id); 4498 } 4499 4500 static void 4501 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4502 { 4503 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4504 struct spdk_blob *clone = _blob; 4505 4506 ctx->new.blob = clone; 4507 _spdk_bs_blob_list_add(clone); 4508 4509 spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4510 } 4511 4512 
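/*
 * Usage sketch for the snapshot and clone entry points implemented in this
 * section. Illustrative fragment only: snapshot_done, clone_done and the
 * bs/blobid variables are hypothetical application code, not part of this
 * file. Both calls are asynchronous, must be issued from the blobstore
 * metadata thread, and a clone can only be taken from a read-only blob,
 * which is why the snapshot is created first.
 *
 *   static void
 *   clone_done(void *cb_arg, spdk_blob_id clone_id, int bserrno)
 *   {
 *           if (bserrno == 0) {
 *                   SPDK_NOTICELOG("created clone %lu\n", clone_id);
 *           }
 *   }
 *
 *   static void
 *   snapshot_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
 *   {
 *           struct spdk_blob_store *bs = cb_arg;
 *
 *           if (bserrno != 0) {
 *                   return;
 *           }
 *           spdk_bs_create_clone(bs, snapshot_id, NULL, clone_done, bs);
 *   }
 *
 *   spdk_bs_create_snapshot(bs, blobid, NULL, snapshot_done, bs);
 */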
static void 4513 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 4514 { 4515 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4516 4517 ctx->cpl.u.blobid.blobid = blobid; 4518 spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx); 4519 } 4520 4521 static void 4522 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4523 { 4524 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4525 struct spdk_blob_opts opts; 4526 struct spdk_blob_xattr_opts internal_xattrs; 4527 char *xattr_names[] = { BLOB_SNAPSHOT }; 4528 4529 if (bserrno != 0) { 4530 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4531 return; 4532 } 4533 4534 ctx->original.blob = _blob; 4535 4536 if (!_blob->data_ro || !_blob->md_ro) { 4537 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Clone not from read-only blob\n"); 4538 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4539 return; 4540 } 4541 4542 spdk_blob_opts_init(&opts); 4543 _spdk_blob_xattrs_init(&internal_xattrs); 4544 4545 opts.thin_provision = true; 4546 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 4547 if (ctx->xattrs) { 4548 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 4549 } 4550 4551 /* Set internal xattr BLOB_SNAPSHOT */ 4552 internal_xattrs.count = 1; 4553 internal_xattrs.ctx = _blob; 4554 internal_xattrs.names = xattr_names; 4555 internal_xattrs.get_value = _spdk_bs_xattr_clone; 4556 4557 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 4558 _spdk_bs_clone_newblob_create_cpl, ctx); 4559 } 4560 4561 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 4562 const struct spdk_blob_xattr_opts *clone_xattrs, 4563 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4564 { 4565 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4566 4567 if (!ctx) { 4568 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 4569 return; 4570 } 4571 4572 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4573 ctx->cpl.u.blobid.cb_fn = cb_fn; 4574 ctx->cpl.u.blobid.cb_arg = cb_arg; 4575 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 4576 ctx->bserrno = 0; 4577 ctx->xattrs = clone_xattrs; 4578 ctx->original.id = blobid; 4579 4580 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx); 4581 } 4582 4583 /* END spdk_bs_create_clone */ 4584 4585 /* START spdk_bs_inflate_blob */ 4586 4587 static void 4588 _spdk_bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 4589 { 4590 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4591 struct spdk_blob *_blob = ctx->original.blob; 4592 4593 if (bserrno != 0) { 4594 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4595 return; 4596 } 4597 4598 assert(_parent != NULL); 4599 4600 _spdk_bs_blob_list_remove(_blob); 4601 _blob->parent_id = _parent->id; 4602 _spdk_blob_set_xattr(_blob, BLOB_SNAPSHOT, &_blob->parent_id, 4603 sizeof(spdk_blob_id), true); 4604 4605 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 4606 _blob->back_bs_dev = spdk_bs_create_blob_bs_dev(_parent); 4607 _spdk_bs_blob_list_add(_blob); 4608 4609 spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4610 } 4611 4612 static void 4613 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno) 4614 { 4615 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4616 struct spdk_blob *_blob = ctx->original.blob; 4617 struct spdk_blob *_parent; 4618 4619 if (bserrno 
!= 0) { 4620 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4621 return; 4622 } 4623 4624 if (ctx->allocate_all) { 4625 /* remove thin provisioning */ 4626 _spdk_bs_blob_list_remove(_blob); 4627 _spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 4628 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 4629 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 4630 _blob->back_bs_dev = NULL; 4631 _blob->parent_id = SPDK_BLOBID_INVALID; 4632 } else { 4633 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob; 4634 if (_parent->parent_id != SPDK_BLOBID_INVALID) { 4635 /* We must change the parent of the inflated blob */ 4636 spdk_bs_open_blob(_blob->bs, _parent->parent_id, 4637 _spdk_bs_inflate_blob_set_parent_cpl, ctx); 4638 return; 4639 } 4640 4641 _spdk_bs_blob_list_remove(_blob); 4642 _spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 4643 _blob->parent_id = SPDK_BLOBID_INVALID; 4644 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 4645 _blob->back_bs_dev = spdk_bs_create_zeroes_dev(); 4646 } 4647 4648 _blob->state = SPDK_BLOB_STATE_DIRTY; 4649 spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4650 } 4651 4652 /* Check if cluster needs allocation */ 4653 static inline bool 4654 _spdk_bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all) 4655 { 4656 struct spdk_blob_bs_dev *b; 4657 4658 assert(blob != NULL); 4659 4660 if (blob->active.clusters[cluster] != 0) { 4661 /* Cluster is already allocated */ 4662 return false; 4663 } 4664 4665 if (blob->parent_id == SPDK_BLOBID_INVALID) { 4666 /* Blob has no parent blob */ 4667 return allocate_all; 4668 } 4669 4670 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 4671 return (allocate_all || b->blob->active.clusters[cluster] != 0); 4672 } 4673 4674 static void 4675 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno) 4676 { 4677 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4678 struct spdk_blob *_blob = ctx->original.blob; 4679 uint64_t offset; 4680 4681 if (bserrno != 0) { 4682 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4683 return; 4684 } 4685 4686 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) { 4687 if (_spdk_bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) { 4688 break; 4689 } 4690 } 4691 4692 if (ctx->cluster < _blob->active.num_clusters) { 4693 offset = _spdk_bs_cluster_to_page(_blob->bs, ctx->cluster); 4694 4695 /* We may safely increment the cluster index before the write */ 4696 ctx->cluster++; 4697 4698 /* Use a zero length write to touch the cluster */ 4699 spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0, 4700 _spdk_bs_inflate_blob_touch_next, ctx); 4701 } else { 4702 _spdk_bs_inflate_blob_done(cb_arg, bserrno); 4703 } 4704 } 4705 4706 static void 4707 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4708 { 4709 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4710 uint64_t lfc; /* lowest free cluster */ 4711 uint64_t i; 4712 4713 if (bserrno != 0) { 4714 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4715 return; 4716 } 4717 ctx->original.blob = _blob; 4718 4719 if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) { 4720 /* This blob has no parent, so we cannot decouple it.
*/ 4721 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n"); 4722 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4723 return; 4724 } 4725 4726 if (spdk_blob_is_thin_provisioned(_blob) == false) { 4727 /* This is not thin provisioned blob. No need to inflate. */ 4728 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0); 4729 return; 4730 } 4731 4732 /* Do two passes - one to verify that we can obtain enough clusters 4733 * and another to actually claim them. 4734 */ 4735 lfc = 0; 4736 for (i = 0; i < _blob->active.num_clusters; i++) { 4737 if (_spdk_bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) { 4738 lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc); 4739 if (lfc >= _blob->bs->total_clusters) { 4740 /* No more free clusters. Cannot satisfy the request */ 4741 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 4742 return; 4743 } 4744 lfc++; 4745 } 4746 } 4747 4748 ctx->cluster = 0; 4749 _spdk_bs_inflate_blob_touch_next(ctx, 0); 4750 } 4751 4752 static void 4753 _spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 4754 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 4755 { 4756 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4757 4758 if (!ctx) { 4759 cb_fn(cb_arg, -ENOMEM); 4760 return; 4761 } 4762 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 4763 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 4764 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 4765 ctx->bserrno = 0; 4766 ctx->original.id = blobid; 4767 ctx->channel = channel; 4768 ctx->allocate_all = allocate_all; 4769 4770 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx); 4771 } 4772 4773 void 4774 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 4775 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 4776 { 4777 _spdk_bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 4778 } 4779 4780 void 4781 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 4782 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 4783 { 4784 _spdk_bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 4785 } 4786 /* END spdk_bs_inflate_blob */ 4787 4788 /* START spdk_blob_resize */ 4789 struct spdk_bs_resize_ctx { 4790 spdk_blob_op_complete cb_fn; 4791 void *cb_arg; 4792 struct spdk_blob *blob; 4793 uint64_t sz; 4794 int rc; 4795 }; 4796 4797 static void 4798 _spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc) 4799 { 4800 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 4801 4802 if (rc != 0) { 4803 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 4804 } 4805 4806 if (ctx->rc != 0) { 4807 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 4808 rc = ctx->rc; 4809 } 4810 4811 ctx->blob->resize_in_progress = false; 4812 4813 ctx->cb_fn(ctx->cb_arg, rc); 4814 free(ctx); 4815 } 4816 4817 static void 4818 _spdk_bs_resize_freeze_cpl(void *cb_arg, int rc) 4819 { 4820 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 4821 4822 if (rc != 0) { 4823 ctx->blob->resize_in_progress = false; 4824 ctx->cb_fn(ctx->cb_arg, rc); 4825 free(ctx); 4826 return; 4827 } 4828 4829 ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz); 4830 4831 _spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx); 4832 } 4833 4834 void 4835 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 4836 { 4837 struct spdk_bs_resize_ctx *ctx; 4838 4839 
_spdk_blob_verify_md_op(blob); 4840 4841 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz); 4842 4843 if (blob->md_ro) { 4844 cb_fn(cb_arg, -EPERM); 4845 return; 4846 } 4847 4848 if (sz == blob->active.num_clusters) { 4849 cb_fn(cb_arg, 0); 4850 return; 4851 } 4852 4853 if (blob->resize_in_progress) { 4854 cb_fn(cb_arg, -EBUSY); 4855 return; 4856 } 4857 4858 ctx = calloc(1, sizeof(*ctx)); 4859 if (!ctx) { 4860 cb_fn(cb_arg, -ENOMEM); 4861 return; 4862 } 4863 4864 blob->resize_in_progress = true; 4865 ctx->cb_fn = cb_fn; 4866 ctx->cb_arg = cb_arg; 4867 ctx->blob = blob; 4868 ctx->sz = sz; 4869 _spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx); 4870 } 4871 4872 /* END spdk_blob_resize */ 4873 4874 4875 /* START spdk_bs_delete_blob */ 4876 4877 static void 4878 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno) 4879 { 4880 spdk_bs_sequence_t *seq = cb_arg; 4881 4882 spdk_bs_sequence_finish(seq, bserrno); 4883 } 4884 4885 static void 4886 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4887 { 4888 struct spdk_blob *blob = cb_arg; 4889 4890 if (bserrno != 0) { 4891 /* 4892 * We already removed this blob from the blobstore tailq, so 4893 * we need to free it here since this is the last reference 4894 * to it. 4895 */ 4896 _spdk_blob_free(blob); 4897 _spdk_bs_delete_close_cpl(seq, bserrno); 4898 return; 4899 } 4900 4901 /* 4902 * This will immediately decrement the ref_count and call 4903 * the completion routine since the metadata state is clean. 4904 * By calling spdk_blob_close, we reduce the number of call 4905 * points into code that touches the blob->open_ref count 4906 * and the blobstore's blob list. 4907 */ 4908 spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq); 4909 } 4910 4911 static void 4912 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 4913 { 4914 spdk_bs_sequence_t *seq = cb_arg; 4915 uint32_t page_num; 4916 4917 if (bserrno != 0) { 4918 spdk_bs_sequence_finish(seq, bserrno); 4919 return; 4920 } 4921 4922 _spdk_blob_verify_md_op(blob); 4923 4924 if (blob->open_ref > 1) { 4925 /* 4926 * Someone has this blob open (besides this delete context). 4927 * Decrement the ref count directly and return -EBUSY. 4928 */ 4929 blob->open_ref--; 4930 spdk_bs_sequence_finish(seq, -EBUSY); 4931 return; 4932 } 4933 4934 bserrno = _spdk_bs_blob_list_remove(blob); 4935 if (bserrno != 0) { 4936 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Remove blob #%" PRIu64 " from a list\n", blob->id); 4937 spdk_bs_sequence_finish(seq, bserrno); 4938 return; 4939 } 4940 4941 /* 4942 * Remove the blob from the blob_store list now, to ensure it does not 4943 * get returned after this point by _spdk_blob_lookup(). 
4944 */ 4945 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 4946 page_num = _spdk_bs_blobid_to_page(blob->id); 4947 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 4948 blob->state = SPDK_BLOB_STATE_DIRTY; 4949 blob->active.num_pages = 0; 4950 _spdk_blob_resize(blob, 0); 4951 4952 _spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob); 4953 } 4954 4955 void 4956 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 4957 spdk_blob_op_complete cb_fn, void *cb_arg) 4958 { 4959 struct spdk_bs_cpl cpl; 4960 spdk_bs_sequence_t *seq; 4961 struct spdk_blob_list *snapshot_entry = NULL; 4962 4963 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid); 4964 4965 assert(spdk_get_thread() == bs->md_thread); 4966 4967 /* Check if this is a snapshot with clones */ 4968 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 4969 if (snapshot_entry->id == blobid) { 4970 break; 4971 } 4972 } 4973 if (snapshot_entry != NULL) { 4974 /* If the snapshot has clones, we cannot remove it */ 4975 if (!TAILQ_EMPTY(&snapshot_entry->clones)) { 4976 SPDK_ERRLOG("Cannot remove snapshot with clones\n"); 4977 cb_fn(cb_arg, -EBUSY); 4978 return; 4979 } 4980 } 4981 4982 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 4983 cpl.u.blob_basic.cb_fn = cb_fn; 4984 cpl.u.blob_basic.cb_arg = cb_arg; 4985 4986 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4987 if (!seq) { 4988 cb_fn(cb_arg, -ENOMEM); 4989 return; 4990 } 4991 4992 spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq); 4993 } 4994 4995 /* END spdk_bs_delete_blob */ 4996 4997 /* START spdk_bs_open_blob */ 4998 4999 static void 5000 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5001 { 5002 struct spdk_blob *blob = cb_arg; 5003 5004 /* If the blob has a CRC error, we just return NULL.
*/ 5005 if (blob == NULL) { 5006 seq->cpl.u.blob_handle.blob = NULL; 5007 spdk_bs_sequence_finish(seq, bserrno); 5008 return; 5009 } 5010 5011 blob->open_ref++; 5012 5013 TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link); 5014 5015 spdk_bs_sequence_finish(seq, bserrno); 5016 } 5017 5018 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 5019 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5020 { 5021 struct spdk_blob *blob; 5022 struct spdk_bs_cpl cpl; 5023 spdk_bs_sequence_t *seq; 5024 uint32_t page_num; 5025 5026 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid); 5027 assert(spdk_get_thread() == bs->md_thread); 5028 5029 page_num = _spdk_bs_blobid_to_page(blobid); 5030 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 5031 /* Invalid blobid */ 5032 cb_fn(cb_arg, NULL, -ENOENT); 5033 return; 5034 } 5035 5036 blob = _spdk_blob_lookup(bs, blobid); 5037 if (blob) { 5038 blob->open_ref++; 5039 cb_fn(cb_arg, blob, 0); 5040 return; 5041 } 5042 5043 blob = _spdk_blob_alloc(bs, blobid); 5044 if (!blob) { 5045 cb_fn(cb_arg, NULL, -ENOMEM); 5046 return; 5047 } 5048 5049 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 5050 cpl.u.blob_handle.cb_fn = cb_fn; 5051 cpl.u.blob_handle.cb_arg = cb_arg; 5052 cpl.u.blob_handle.blob = blob; 5053 5054 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 5055 if (!seq) { 5056 _spdk_blob_free(blob); 5057 cb_fn(cb_arg, NULL, -ENOMEM); 5058 return; 5059 } 5060 5061 _spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob); 5062 } 5063 /* END spdk_bs_open_blob */ 5064 5065 /* START spdk_blob_set_read_only */ 5066 int spdk_blob_set_read_only(struct spdk_blob *blob) 5067 { 5068 _spdk_blob_verify_md_op(blob); 5069 5070 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 5071 5072 blob->state = SPDK_BLOB_STATE_DIRTY; 5073 return 0; 5074 } 5075 /* END spdk_blob_set_read_only */ 5076 5077 /* START spdk_blob_sync_md */ 5078 5079 static void 5080 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5081 { 5082 struct spdk_blob *blob = cb_arg; 5083 5084 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 5085 blob->data_ro = true; 5086 blob->md_ro = true; 5087 } 5088 5089 spdk_bs_sequence_finish(seq, bserrno); 5090 } 5091 5092 static void 5093 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5094 { 5095 struct spdk_bs_cpl cpl; 5096 spdk_bs_sequence_t *seq; 5097 5098 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5099 cpl.u.blob_basic.cb_fn = cb_fn; 5100 cpl.u.blob_basic.cb_arg = cb_arg; 5101 5102 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 5103 if (!seq) { 5104 cb_fn(cb_arg, -ENOMEM); 5105 return; 5106 } 5107 5108 _spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob); 5109 } 5110 5111 void 5112 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5113 { 5114 _spdk_blob_verify_md_op(blob); 5115 5116 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id); 5117 5118 if (blob->md_ro) { 5119 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 5120 cb_fn(cb_arg, 0); 5121 return; 5122 } 5123 5124 _spdk_blob_sync_md(blob, cb_fn, cb_arg); 5125 } 5126 5127 /* END spdk_blob_sync_md */ 5128 5129 struct spdk_blob_insert_cluster_ctx { 5130 struct spdk_thread *thread; 5131 struct spdk_blob *blob; 5132 uint32_t cluster_num; /* cluster index in blob */ 5133 uint32_t cluster; /* cluster on disk */ 5134 int rc; 5135 spdk_blob_op_complete cb_fn; 5136 void *cb_arg; 5137 }; 5138 5139 static void 5140 _spdk_blob_insert_cluster_msg_cpl(void *arg) 5141 
{ 5142 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5143 5144 ctx->cb_fn(ctx->cb_arg, ctx->rc); 5145 free(ctx); 5146 } 5147 5148 static void 5149 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno) 5150 { 5151 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5152 5153 ctx->rc = bserrno; 5154 spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx); 5155 } 5156 5157 static void 5158 _spdk_blob_insert_cluster_msg(void *arg) 5159 { 5160 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5161 5162 ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 5163 if (ctx->rc != 0) { 5164 spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx); 5165 return; 5166 } 5167 5168 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 5169 _spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx); 5170 } 5171 5172 static void 5173 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 5174 uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg) 5175 { 5176 struct spdk_blob_insert_cluster_ctx *ctx; 5177 5178 ctx = calloc(1, sizeof(*ctx)); 5179 if (ctx == NULL) { 5180 cb_fn(cb_arg, -ENOMEM); 5181 return; 5182 } 5183 5184 ctx->thread = spdk_get_thread(); 5185 ctx->blob = blob; 5186 ctx->cluster_num = cluster_num; 5187 ctx->cluster = cluster; 5188 ctx->cb_fn = cb_fn; 5189 ctx->cb_arg = cb_arg; 5190 5191 spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx); 5192 } 5193 5194 /* START spdk_blob_close */ 5195 5196 static void 5197 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5198 { 5199 struct spdk_blob *blob = cb_arg; 5200 5201 if (bserrno == 0) { 5202 blob->open_ref--; 5203 if (blob->open_ref == 0) { 5204 /* 5205 * Blobs with active.num_pages == 0 are deleted blobs. 5206 * these blobs are removed from the blob_store list 5207 * when the deletion process starts - so don't try to 5208 * remove them again. 
5209 */ 5210 if (blob->active.num_pages > 0) { 5211 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 5212 } 5213 _spdk_blob_free(blob); 5214 } 5215 } 5216 5217 spdk_bs_sequence_finish(seq, bserrno); 5218 } 5219 5220 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5221 { 5222 struct spdk_bs_cpl cpl; 5223 spdk_bs_sequence_t *seq; 5224 5225 _spdk_blob_verify_md_op(blob); 5226 5227 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id); 5228 5229 if (blob->open_ref == 0) { 5230 cb_fn(cb_arg, -EBADF); 5231 return; 5232 } 5233 5234 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5235 cpl.u.blob_basic.cb_fn = cb_fn; 5236 cpl.u.blob_basic.cb_arg = cb_arg; 5237 5238 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 5239 if (!seq) { 5240 cb_fn(cb_arg, -ENOMEM); 5241 return; 5242 } 5243 5244 /* Sync metadata */ 5245 _spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob); 5246 } 5247 5248 /* END spdk_blob_close */ 5249 5250 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 5251 { 5252 return spdk_get_io_channel(bs); 5253 } 5254 5255 void spdk_bs_free_io_channel(struct spdk_io_channel *channel) 5256 { 5257 spdk_put_io_channel(channel); 5258 } 5259 5260 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 5261 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 5262 { 5263 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 5264 SPDK_BLOB_UNMAP); 5265 } 5266 5267 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 5268 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 5269 { 5270 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 5271 SPDK_BLOB_WRITE_ZEROES); 5272 } 5273 5274 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 5275 void *payload, uint64_t offset, uint64_t length, 5276 spdk_blob_op_complete cb_fn, void *cb_arg) 5277 { 5278 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 5279 SPDK_BLOB_WRITE); 5280 } 5281 5282 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 5283 void *payload, uint64_t offset, uint64_t length, 5284 spdk_blob_op_complete cb_fn, void *cb_arg) 5285 { 5286 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 5287 SPDK_BLOB_READ); 5288 } 5289 5290 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 5291 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 5292 spdk_blob_op_complete cb_fn, void *cb_arg) 5293 { 5294 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false); 5295 } 5296 5297 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 5298 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 5299 spdk_blob_op_complete cb_fn, void *cb_arg) 5300 { 5301 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true); 5302 } 5303 5304 struct spdk_bs_iter_ctx { 5305 int64_t page_num; 5306 struct spdk_blob_store *bs; 5307 5308 spdk_blob_op_with_handle_complete cb_fn; 5309 void *cb_arg; 5310 }; 5311 5312 static void 5313 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5314 { 5315 struct spdk_bs_iter_ctx *ctx = cb_arg; 5316 struct spdk_blob_store *bs = ctx->bs; 5317 spdk_blob_id id; 5318 5319 if (bserrno == 0) { 5320 
ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 5321 free(ctx); 5322 return; 5323 } 5324 5325 ctx->page_num++; 5326 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 5327 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 5328 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 5329 free(ctx); 5330 return; 5331 } 5332 5333 id = _spdk_bs_page_to_blobid(ctx->page_num); 5334 5335 spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx); 5336 } 5337 5338 void 5339 spdk_bs_iter_first(struct spdk_blob_store *bs, 5340 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5341 { 5342 struct spdk_bs_iter_ctx *ctx; 5343 5344 ctx = calloc(1, sizeof(*ctx)); 5345 if (!ctx) { 5346 cb_fn(cb_arg, NULL, -ENOMEM); 5347 return; 5348 } 5349 5350 ctx->page_num = -1; 5351 ctx->bs = bs; 5352 ctx->cb_fn = cb_fn; 5353 ctx->cb_arg = cb_arg; 5354 5355 _spdk_bs_iter_cpl(ctx, NULL, -1); 5356 } 5357 5358 static void 5359 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno) 5360 { 5361 struct spdk_bs_iter_ctx *ctx = cb_arg; 5362 5363 _spdk_bs_iter_cpl(ctx, NULL, -1); 5364 } 5365 5366 void 5367 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 5368 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5369 { 5370 struct spdk_bs_iter_ctx *ctx; 5371 5372 assert(blob != NULL); 5373 5374 ctx = calloc(1, sizeof(*ctx)); 5375 if (!ctx) { 5376 cb_fn(cb_arg, NULL, -ENOMEM); 5377 return; 5378 } 5379 5380 ctx->page_num = _spdk_bs_blobid_to_page(blob->id); 5381 ctx->bs = bs; 5382 ctx->cb_fn = cb_fn; 5383 ctx->cb_arg = cb_arg; 5384 5385 /* Close the existing blob */ 5386 spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx); 5387 } 5388 5389 static int 5390 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 5391 uint16_t value_len, bool internal) 5392 { 5393 struct spdk_xattr_tailq *xattrs; 5394 struct spdk_xattr *xattr; 5395 5396 _spdk_blob_verify_md_op(blob); 5397 5398 if (blob->md_ro) { 5399 return -EPERM; 5400 } 5401 5402 if (internal) { 5403 xattrs = &blob->xattrs_internal; 5404 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR; 5405 } else { 5406 xattrs = &blob->xattrs; 5407 } 5408 5409 TAILQ_FOREACH(xattr, xattrs, link) { 5410 if (!strcmp(name, xattr->name)) { 5411 free(xattr->value); 5412 xattr->value_len = value_len; 5413 xattr->value = malloc(value_len); 5414 memcpy(xattr->value, value, value_len); 5415 5416 blob->state = SPDK_BLOB_STATE_DIRTY; 5417 5418 return 0; 5419 } 5420 } 5421 5422 xattr = calloc(1, sizeof(*xattr)); 5423 if (!xattr) { 5424 return -ENOMEM; 5425 } 5426 xattr->name = strdup(name); 5427 xattr->value_len = value_len; 5428 xattr->value = malloc(value_len); 5429 memcpy(xattr->value, value, value_len); 5430 TAILQ_INSERT_TAIL(xattrs, xattr, link); 5431 5432 blob->state = SPDK_BLOB_STATE_DIRTY; 5433 5434 return 0; 5435 } 5436 5437 int 5438 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 5439 uint16_t value_len) 5440 { 5441 return _spdk_blob_set_xattr(blob, name, value, value_len, false); 5442 } 5443 5444 static int 5445 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal) 5446 { 5447 struct spdk_xattr_tailq *xattrs; 5448 struct spdk_xattr *xattr; 5449 5450 _spdk_blob_verify_md_op(blob); 5451 5452 if (blob->md_ro) { 5453 return -EPERM; 5454 } 5455 xattrs = internal ? 
&blob->xattrs_internal : &blob->xattrs; 5456 5457 TAILQ_FOREACH(xattr, xattrs, link) { 5458 if (!strcmp(name, xattr->name)) { 5459 TAILQ_REMOVE(xattrs, xattr, link); 5460 free(xattr->value); 5461 free(xattr->name); 5462 free(xattr); 5463 5464 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 5465 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 5466 } 5467 blob->state = SPDK_BLOB_STATE_DIRTY; 5468 5469 return 0; 5470 } 5471 } 5472 5473 return -ENOENT; 5474 } 5475 5476 int 5477 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 5478 { 5479 return _spdk_blob_remove_xattr(blob, name, false); 5480 } 5481 5482 static int 5483 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 5484 const void **value, size_t *value_len, bool internal) 5485 { 5486 struct spdk_xattr *xattr; 5487 struct spdk_xattr_tailq *xattrs; 5488 5489 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 5490 5491 TAILQ_FOREACH(xattr, xattrs, link) { 5492 if (!strcmp(name, xattr->name)) { 5493 *value = xattr->value; 5494 *value_len = xattr->value_len; 5495 return 0; 5496 } 5497 } 5498 return -ENOENT; 5499 } 5500 5501 int 5502 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 5503 const void **value, size_t *value_len) 5504 { 5505 _spdk_blob_verify_md_op(blob); 5506 5507 return _spdk_blob_get_xattr_value(blob, name, value, value_len, false); 5508 } 5509 5510 struct spdk_xattr_names { 5511 uint32_t count; 5512 const char *names[0]; 5513 }; 5514 5515 static int 5516 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 5517 { 5518 struct spdk_xattr *xattr; 5519 int count = 0; 5520 5521 TAILQ_FOREACH(xattr, xattrs, link) { 5522 count++; 5523 } 5524 5525 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 5526 if (*names == NULL) { 5527 return -ENOMEM; 5528 } 5529 5530 TAILQ_FOREACH(xattr, xattrs, link) { 5531 (*names)->names[(*names)->count++] = xattr->name; 5532 } 5533 5534 return 0; 5535 } 5536 5537 int 5538 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 5539 { 5540 _spdk_blob_verify_md_op(blob); 5541 5542 return _spdk_blob_get_xattr_names(&blob->xattrs, names); 5543 } 5544 5545 uint32_t 5546 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 5547 { 5548 assert(names != NULL); 5549 5550 return names->count; 5551 } 5552 5553 const char * 5554 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 5555 { 5556 if (index >= names->count) { 5557 return NULL; 5558 } 5559 5560 return names->names[index]; 5561 } 5562 5563 void 5564 spdk_xattr_names_free(struct spdk_xattr_names *names) 5565 { 5566 free(names); 5567 } 5568 5569 struct spdk_bs_type 5570 spdk_bs_get_bstype(struct spdk_blob_store *bs) 5571 { 5572 return bs->bstype; 5573 } 5574 5575 void 5576 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 5577 { 5578 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 5579 } 5580 5581 bool 5582 spdk_blob_is_read_only(struct spdk_blob *blob) 5583 { 5584 assert(blob != NULL); 5585 return (blob->data_ro || blob->md_ro); 5586 } 5587 5588 bool 5589 spdk_blob_is_snapshot(struct spdk_blob *blob) 5590 { 5591 struct spdk_blob_list *snapshot_entry; 5592 5593 assert(blob != NULL); 5594 5595 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 5596 if (snapshot_entry->id == blob->id) { 5597 break; 5598 } 5599 } 5600 5601 if (snapshot_entry == NULL) { 5602 return false; 5603 } 5604 5605 return true; 5606 } 5607 5608 bool 5609 
spdk_blob_is_clone(struct spdk_blob *blob) 5610 { 5611 assert(blob != NULL); 5612 5613 if (blob->parent_id != SPDK_BLOBID_INVALID) { 5614 assert(spdk_blob_is_thin_provisioned(blob)); 5615 return true; 5616 } 5617 5618 return false; 5619 } 5620 5621 bool 5622 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 5623 { 5624 assert(blob != NULL); 5625 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 5626 } 5627 5628 spdk_blob_id 5629 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 5630 { 5631 struct spdk_blob_list *snapshot_entry = NULL; 5632 struct spdk_blob_list *clone_entry = NULL; 5633 5634 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5635 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 5636 if (clone_entry->id == blob_id) { 5637 return snapshot_entry->id; 5638 } 5639 } 5640 } 5641 5642 return SPDK_BLOBID_INVALID; 5643 } 5644 5645 int 5646 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 5647 size_t *count) 5648 { 5649 struct spdk_blob_list *snapshot_entry, *clone_entry; 5650 size_t n; 5651 5652 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5653 if (snapshot_entry->id == blobid) { 5654 break; 5655 } 5656 } 5657 if (snapshot_entry == NULL) { 5658 *count = 0; 5659 return 0; 5660 } 5661 5662 if (ids == NULL || *count < snapshot_entry->clone_count) { 5663 *count = snapshot_entry->clone_count; 5664 return -ENOMEM; 5665 } 5666 *count = snapshot_entry->clone_count; 5667 5668 n = 0; 5669 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 5670 ids[n++] = clone_entry->id; 5671 } 5672 5673 return 0; 5674 } 5675 5676 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB) 5677
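/*
 * Usage sketch for the iterator and accessor APIs above. Illustrative only:
 * iter_cb and the bs variable are hypothetical application code. The walk is
 * started on the metadata thread; each callback receives an open blob, and
 * handing it back to spdk_bs_iter_next() closes it and opens the next one.
 * Iteration terminates when the callback is invoked with bserrno == -ENOENT.
 *
 *   static void
 *   iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *   {
 *           struct spdk_blob_store *bs = cb_arg;
 *
 *           if (bserrno != 0) {
 *                   return;
 *           }
 *
 *           SPDK_NOTICELOG("blob %lu: %lu clusters, %s\n",
 *                          spdk_blob_get_id(blob),
 *                          spdk_blob_get_num_clusters(blob),
 *                          spdk_blob_is_snapshot(blob) ? "snapshot" : "not a snapshot");
 *
 *           spdk_bs_iter_next(bs, blob, iter_cb, bs);
 *   }
 *
 *   spdk_bs_iter_first(bs, iter_cb, bs);
 */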