/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/likely.h"
#include "spdk/util.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg);

static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
				uint16_t value_len, bool internal);
static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				      const void **value, size_t *value_len, bool internal);
static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void
_spdk_blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static void
_spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
	assert(bs->num_free_clusters > 0);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);

	spdk_bit_array_set(bs->used_clusters, cluster_num);
	bs->num_free_clusters--;
}

static int
_spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	_spdk_blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

static int
_spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
			  uint64_t *lowest_free_cluster, bool update_map)
{
	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
			       *lowest_free_cluster);
	if (*lowest_free_cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request */
		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
		return -ENOSPC;
	}

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);

	if (update_map) {
		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
	}

	return 0;
}

static void
_spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);

	pthread_mutex_lock(&bs->used_clusters_mutex);
	spdk_bit_array_clear(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
	pthread_mutex_unlock(&bs->used_clusters_mutex);
}

static void
_spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	opts->num_clusters = 0;
	opts->thin_provision = false;
	_spdk_blob_xattrs_init(&opts->xattrs);
}

static struct spdk_blob *
_spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);

	return blob;
}

static void
_spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
_spdk_blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);

	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	_spdk_xattrs_free(&blob->xattrs);
	_spdk_xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}
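/*
 * I/O freeze support: while blob->frozen_refcnt is nonzero, newly submitted
 * I/O for the blob is parked on each channel's queued_io list instead of
 * being executed. The helpers below use spdk_for_each_channel() either to
 * sync with every channel when the first freeze reference is taken, or to
 * re-execute the parked operations once the last reference is dropped.
 */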
struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
_spdk_blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			spdk_bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
_spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static void
_spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}
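/*
 * Snapshot the active cluster and page arrays into the blob's clean state.
 * The clean arrays describe what the on-disk metadata currently contains,
 * which a later persist uses to decide which pages to zero and which
 * clusters to unmap.
 */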
static int
_spdk_blob_mark_clean(struct spdk_blob *blob)
{
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*pages));
	}

	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
_spdk_blob_deserialize_xattr(struct spdk_blob *blob,
			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}
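/*
 * Walk the descriptor region of a single metadata page. Descriptors are
 * packed back to back, and a zero-length PADDING descriptor terminates the
 * page early.
 */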
static int
_spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
			struct spdk_blob_md_descriptor_extent *desc_extent;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;

			if (desc_extent->length == 0 ||
			    (desc_extent->length % sizeof(desc_extent->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
				for (j = 0; j < desc_extent->extents[i].length; j++) {
					if (desc_extent->extents[i].cluster_idx != 0) {
						if (!spdk_bit_array_get(blob->bs->used_clusters,
									desc_extent->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(uint64_t));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
				for (j = 0; j < desc_extent->extents[i].length; j++) {
					if (desc_extent->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
								desc_extent->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}
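/*
 * Parse a fully loaded metadata page chain. Page 0 must carry the blob's own
 * id and sequence number 0; each subsequent page is checked the same way
 * before its descriptors are applied.
 */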
static int
_spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
		 struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = _spdk_blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
_spdk_blob_serialize_add_page(const struct spdk_blob *blob,
			      struct spdk_blob_md_page **pages,
			      uint32_t *page_count,
			      struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page;

	assert(pages != NULL);
	assert(page_count != NULL);

	if (*page_count == 0) {
		assert(*pages == NULL);
		*page_count = 1;
		*pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE,
					 SPDK_BS_PAGE_SIZE,
					 NULL);
	} else {
		assert(*pages != NULL);
		(*page_count)++;
		*pages = spdk_dma_realloc(*pages,
					  SPDK_BS_PAGE_SIZE * (*page_count),
					  SPDK_BS_PAGE_SIZE,
					  NULL);
	}

	if (*pages == NULL) {
		*page_count = 0;
		*last_page = NULL;
		return -ENOMEM;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
_spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
			   uint8_t *buf, size_t buf_sz,
			   size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}
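/*
 * Extents are run-length encoded: a run of clusters that are contiguous on
 * disk (or a run of unallocated clusters in a thin-provisioned blob) is
 * collapsed into a single (cluster_idx, length) pair. If the descriptor
 * fills up, *next_cluster tells the caller where to resume on a fresh page.
 */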
static void
_spdk_blob_serialize_extent(const struct spdk_blob *blob,
			    uint64_t start_cluster, uint64_t *next_cluster,
			    uint8_t *buf, size_t buf_sz)
{
	struct spdk_blob_md_descriptor_extent *desc;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]);
	if (buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT;

	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i]) {
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			lba_count += lba_per_cluster;
			continue;
		}
		desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc->extents[extent_idx]);

		if (buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			desc->length = sizeof(desc->extents[0]) * extent_idx;
			*next_cluster = i;
			return;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
	desc->extents[extent_idx].length = lba_count / lba_per_cluster;
	extent_idx++;

	desc->length = sizeof(desc->extents[0]) * extent_idx;
	*next_cluster = blob->active.num_clusters;

	return;
}

static void
_spdk_blob_serialize_flags(const struct spdk_blob *blob,
			   uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}
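/*
 * Serialize each xattr in the list, allocating a fresh metadata page and
 * retrying once whenever the current page runs out of room. A failure on
 * the fresh page means the xattr can never fit, so the whole serialization
 * is abandoned.
 */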
static int
_spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
			    const struct spdk_xattr_tailq *xattrs, bool internal,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = _spdk_blob_serialize_xattr(xattr,
						*buf, *remaining_sz,
						&required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
							   &cur_page);
			if (rc < 0) {
				spdk_dma_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = _spdk_blob_serialize_xattr(xattr,
							*buf, *remaining_sz,
							&required_sz, internal);

			if (rc < 0) {
				spdk_dma_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
_spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
		     uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;
	uint64_t last_cluster;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize extents */
	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		_spdk_blob_serialize_extent(blob, last_cluster, &last_cluster,
					    buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
						   &cur_page);
		if (rc < 0) {
			return rc;
		}

		buf = (uint8_t *)cur_page->descriptors;
		remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};
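/*
 * The CRC covers the entire 4KiB metadata page except its last 4 bytes,
 * which hold the crc field itself.
 */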
static uint32_t
_spdk_blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
_spdk_blob_load_final(void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	_spdk_blob_mark_clean(blob);

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_dma_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno != 0) {
		goto error;
	}

	blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot);

	if (blob->back_bs_dev == NULL) {
		bserrno = -ENOMEM;
		goto error;
	}

	_spdk_blob_load_final(ctx, bserrno);
	return;

error:
	SPDK_ERRLOG("Snapshot open failed\n");
	_spdk_blob_free(blob);
	ctx->cb_fn(ctx->seq, NULL, bserrno);
	spdk_dma_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	const void *value;
	size_t len;
	int rc;
	uint32_t crc;

	page = &ctx->pages[ctx->num_pages - 1];
	crc = _spdk_blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
		_spdk_blob_free(blob);
		ctx->cb_fn(seq, NULL, -EINVAL);
		spdk_dma_free(ctx->pages);
		free(ctx);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		uint32_t next_page = page->next;
		uint64_t next_lba = _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + next_page);
		struct spdk_blob_md_page *tmp_pages;

		assert(next_lba < (blob->bs->md_start + blob->bs->md_len));

		/* Read the next page */
		ctx->num_pages++;
		/* Realloc through a temporary so the original pages buffer is not
		 * leaked on failure, and clean up like the other error paths do. */
		tmp_pages = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
					     sizeof(*page), NULL);
		if (tmp_pages == NULL) {
			_spdk_blob_free(blob);
			ctx->cb_fn(seq, NULL, -ENOMEM);
			spdk_dma_free(ctx->pages);
			free(ctx);
			return;
		}
		ctx->pages = tmp_pages;

		spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
					  next_lba,
					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
					  _spdk_blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		_spdk_blob_free(blob);
		ctx->cb_fn(seq, NULL, rc);
		spdk_dma_free(ctx->pages);
		free(ctx);
		return;
	}
	ctx->seq = seq;

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				_spdk_blob_free(blob);
				ctx->cb_fn(seq, NULL, -EINVAL);
				spdk_dma_free(ctx->pages);
				free(ctx);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  _spdk_blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = spdk_bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	_spdk_blob_load_final(ctx, bserrno);
}
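/*
 * The first metadata page of a blob lives at a fixed location derived from
 * the blobid; _spdk_blob_load_cpl() then follows each page's 'next' index,
 * reading one additional page per completion, until the chain ends.
 */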
/* Load a blob from disk given a blobid */
static void
_spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	_spdk_blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_dma_realloc(ctx->pages, SPDK_BS_PAGE_SIZE,
				      SPDK_BS_PAGE_SIZE, NULL);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	page_num = _spdk_bs_blobid_to_page(blob->id);
	lba = _spdk_bs_page_to_lba(blob->bs, bs->md_start + page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
				  _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
				  _spdk_blob_load_cpl, ctx);
}
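/*
 * Persist pipeline: serialize the metadata, write every page of the new
 * chain except the root, write the root page last (so an incomplete chain
 * is never reachable from the fixed blobid location), zero pages the blob
 * no longer uses, and finally unmap and release truncated clusters.
 */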
struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_bs_super_block	*super;

	struct spdk_blob_md_page	*pages;

	uint64_t			idx;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static void
_spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		_spdk_blob_mark_clean(blob);
	}

	/* Call user callback */
	ctx->cb_fn(seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_dma_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	void *tmp;
	size_t i;

	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			_spdk_bs_release_cluster(bs, cluster_num);
		}
	}

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else {
		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	_spdk_blob_persist_complete(seq, ctx, bserrno);
}

static void
_spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	spdk_bs_batch_t *batch;
	size_t i;
	uint64_t lba;
	uint32_t lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_clusters_cpl, ctx);

	/* Unmap all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, send it as an unmap. */
		if (lba_count > 0) {
			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, send the unmap now */
	if (lba_count > 0) {
		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}
static void
_spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		spdk_bit_array_clear(bs->used_md_pages, blob->clean.pages[i]);
	}

	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		page_num = _spdk_bs_blobid_to_page(blob->id);
		spdk_bit_array_clear(bs->used_md_pages, page_num);
	}

	/* Move on to unmapping clusters */
	_spdk_blob_persist_unmap_clusters(seq, ctx, 0);
}

static void
_spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	spdk_bs_batch_t *batch;
	size_t i;

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);

	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->clean.pages[i]);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	/* The first page will only be zeroed if this is a delete. */
	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		/* The first page in the metadata goes where the blobid indicates */
		page_num = _spdk_bs_blobid_to_page(blob->id);
		lba = _spdk_bs_page_to_lba(bs, bs->md_start + page_num);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static void
_spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_blob_md_page *page;

	if (blob->active.num_pages == 0) {
		/* Move on to the next step */
		_spdk_blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));

	page = &ctx->pages[0];
	/* The first page in the metadata goes where the blobid indicates */
	lba = _spdk_bs_page_to_lba(bs, bs->md_start + _spdk_bs_blobid_to_page(blob->id));

	spdk_bs_sequence_write_dev(seq, page, lba, lba_count,
				   _spdk_blob_persist_zero_pages, ctx);
}

static void
_spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_blob_md_page *page;
	spdk_bs_batch_t *batch;
	size_t i;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);

	/* This starts at 1. The root page is not written until
	 * all of the others are finished
	 */
	for (i = 1; i < blob->active.num_pages; i++) {
		page = &ctx->pages[i];
		assert(page->sequence_num == i);

		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->active.pages[i]);

		spdk_bs_batch_write_dev(batch, page, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}
static int
_spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
{
	uint64_t	i;
	uint64_t	*tmp;
	uint64_t	lfc; /* lowest free cluster */
	uint64_t	num_clusters;
	struct spdk_blob_store *bs;

	bs = blob->bs;

	_spdk_blob_verify_md_op(blob);

	if (blob->active.num_clusters == sz) {
		return 0;
	}

	if (blob->active.num_clusters < blob->active.cluster_array_size) {
		/* If this blob was resized to be larger, then smaller, then
		 * larger without syncing, then the cluster array already
		 * contains spare assigned clusters we can use.
		 */
		num_clusters = spdk_min(blob->active.cluster_array_size,
					sz);
	} else {
		num_clusters = blob->active.num_clusters;
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and another to actually claim them.
	 */

	if (spdk_blob_is_thin_provisioned(blob) == false) {
		lfc = 0;
		for (i = num_clusters; i < sz; i++) {
			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
			if (lfc == UINT32_MAX) {
				/* No more free clusters. Cannot satisfy the request */
				return -ENOSPC;
			}
			lfc++;
		}
	}

	if (sz > num_clusters) {
		/* Expand the cluster array if necessary.
		 * We only shrink the array when persisting.
		 */
		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * sz);
		if (sz > 0 && tmp == NULL) {
			return -ENOMEM;
		}
		memset(tmp + blob->active.cluster_array_size, 0,
		       sizeof(uint64_t) * (sz - blob->active.cluster_array_size));
		blob->active.clusters = tmp;
		blob->active.cluster_array_size = sz;
	}

	blob->state = SPDK_BLOB_STATE_DIRTY;

	if (spdk_blob_is_thin_provisioned(blob) == false) {
		lfc = 0;
		for (i = num_clusters; i < sz; i++) {
			_spdk_bs_allocate_cluster(blob, i, &lfc, true);
			lfc++;
		}
	}

	blob->active.num_clusters = sz;

	return 0;
}
static void
_spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx)
{
	spdk_bs_sequence_t *seq = ctx->seq;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t i;
	uint32_t page_num;
	void *tmp;
	int rc;

	if (blob->active.num_pages == 0) {
		/* This is the signal that the blob should be deleted.
		 * Immediately jump to the clean up routine. */
		assert(blob->clean.num_pages > 0);
		ctx->idx = blob->clean.num_pages - 1;
		blob->state = SPDK_BLOB_STATE_CLEAN;
		_spdk_blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	/* Generate the new metadata */
	rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
	if (rc < 0) {
		_spdk_blob_persist_complete(seq, ctx, rc);
		return;
	}

	assert(blob->active.num_pages >= 1);

	/* Resize the cache of page indices */
	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	if (!tmp) {
		_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
		return;
	}
	blob->active.pages = tmp;

	/* Assign this metadata to pages. This requires two passes -
	 * one to verify that there are enough pages and a second
	 * to actually claim them. */
	page_num = 0;
	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
	for (i = 1; i < blob->active.num_pages; i++) {
		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
		if (page_num == UINT32_MAX) {
			_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
			return;
		}
		page_num++;
	}

	page_num = 0;
	blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id);
	for (i = 1; i < blob->active.num_pages; i++) {
		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
		ctx->pages[i - 1].next = page_num;
		/* Now that previous metadata page is complete, calculate the crc for it. */
		ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
		blob->active.pages[i] = page_num;
		spdk_bit_array_set(bs->used_md_pages, page_num);
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id);
		page_num++;
	}
	ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
	/* Start writing the metadata from last page to first */
	ctx->idx = blob->active.num_pages - 1;
	blob->state = SPDK_BLOB_STATE_CLEAN;
	_spdk_blob_persist_write_page_chain(seq, ctx, 0);
}

static void
_spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;

	ctx->blob->bs->clean = 0;

	spdk_dma_free(ctx->super);

	_spdk_blob_persist_start(ctx);
}

static void
_spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
_spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;

	ctx->super->clean = 0;
	if (ctx->super->size == 0) {
		ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen;
	}

	_spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx);
}

/* Write a blob to disk */
static void
_spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
		   spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_persist_ctx *ctx;

	_spdk_blob_verify_md_op(blob);

	if (blob->state == SPDK_BLOB_STATE_CLEAN) {
		cb_fn(seq, cb_arg, 0);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->blob = blob;
	ctx->seq = seq;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	if (blob->bs->clean) {
		ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
		if (!ctx->super) {
			cb_fn(seq, cb_arg, -ENOMEM);
			free(ctx);
			return;
		}

		spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(blob->bs, 0),
					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*ctx->super)),
					  _spdk_blob_persist_dirty, ctx);
	} else {
		_spdk_blob_persist_start(ctx);
	}
}

struct spdk_blob_copy_cluster_ctx {
	struct spdk_blob *blob;
	uint8_t *buf;
	uint64_t page;
	uint64_t new_cluster;
	spdk_bs_sequence_t *seq;
};

static void
_spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
	TAILQ_HEAD(, spdk_bs_request_set) requests;
	spdk_bs_user_op_t *op;

	TAILQ_INIT(&requests);
	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);

	while (!TAILQ_EMPTY(&requests)) {
		op = TAILQ_FIRST(&requests);
		TAILQ_REMOVE(&requests, op, link);
		if (bserrno == 0) {
			spdk_bs_user_op_execute(op);
		} else {
			spdk_bs_user_op_abort(op);
		}
	}

	spdk_dma_free(ctx->buf);
	free(ctx);
}
static void
_spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;

	if (bserrno) {
		uint32_t cluster_number;

		if (bserrno == -EEXIST) {
			/* The metadata insert failed because another thread
			 * allocated the cluster first. Free our cluster
			 * but continue without error. */
			bserrno = 0;
		}

		cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);
		_spdk_bs_release_cluster(ctx->blob->bs, cluster_number);
	}

	spdk_bs_sequence_finish(ctx->seq, bserrno);
}

static void
_spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
	uint32_t cluster_number;

	if (bserrno) {
		/* The write failed, so jump to the final completion handler */
		spdk_bs_sequence_finish(seq, bserrno);
		return;
	}

	cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);

	_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
					       _spdk_blob_insert_cluster_cpl, ctx);
}

static void
_spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		/* The read failed, so jump to the final completion handler */
		spdk_bs_sequence_finish(seq, bserrno);
		return;
	}

	/* Write whole cluster */
	spdk_bs_sequence_write_dev(seq, ctx->buf,
				   _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
				   _spdk_bs_cluster_to_lba(ctx->blob->bs, 1),
				   _spdk_blob_write_copy_cpl, ctx);
}
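/*
 * Copy-on-write path for writes to unallocated clusters: claim a free
 * cluster and, if the blob has a parent, copy the whole backing cluster
 * into it before the cluster map is updated on the metadata thread. The
 * triggering user op (and any op that arrives meanwhile) waits on the
 * channel's need_cluster_alloc list until the allocation completes.
 */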
static void
_spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
				   struct spdk_io_channel *_ch,
				   uint64_t io_unit, spdk_bs_user_op_t *op)
{
	struct spdk_bs_cpl cpl;
	struct spdk_bs_channel *ch;
	struct spdk_blob_copy_cluster_ctx *ctx;
	uint32_t cluster_start_page;
	uint32_t cluster_number;
	int rc;

	ch = spdk_io_channel_get_ctx(_ch);

	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
		/* There are already operations pending. Queue this user op
		 * and return because it will be re-executed when the outstanding
		 * cluster allocation completes. */
		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
		return;
	}

	/* Round the io_unit offset down to the first page in the cluster */
	cluster_start_page = _spdk_bs_io_unit_to_cluster_start(blob, io_unit);

	/* Calculate which index in the metadata cluster array the corresponding
	 * cluster is supposed to be at. */
	cluster_number = _spdk_bs_io_unit_to_cluster_number(blob, io_unit);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_bs_user_op_abort(op);
		return;
	}

	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);

	ctx->blob = blob;
	ctx->page = cluster_start_page;

	if (blob->parent_id != SPDK_BLOBID_INVALID) {
		ctx->buf = spdk_dma_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, NULL);
		if (!ctx->buf) {
			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
				    blob->bs->cluster_sz);
			free(ctx);
			spdk_bs_user_op_abort(op);
			return;
		}
	}

	rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, false);
	if (rc != 0) {
		spdk_dma_free(ctx->buf);
		free(ctx);
		spdk_bs_user_op_abort(op);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl;
	cpl.u.blob_basic.cb_arg = ctx;

	ctx->seq = spdk_bs_sequence_start(_ch, &cpl);
	if (!ctx->seq) {
		_spdk_bs_release_cluster(blob->bs, ctx->new_cluster);
		spdk_dma_free(ctx->buf);
		free(ctx);
		spdk_bs_user_op_abort(op);
		return;
	}

	/* Queue the user op to block other incoming operations */
	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);

	if (blob->parent_id != SPDK_BLOBID_INVALID) {
		/* Read cluster from backing device */
		spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
					     _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
					     _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
					     _spdk_blob_write_copy, ctx);
	} else {
		_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
						       _spdk_blob_insert_cluster_cpl, ctx);
	}
}

static void
_spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
				       uint64_t *lba, uint32_t *lba_count)
{
	*lba_count = length;

	if (!_spdk_bs_io_unit_is_allocated(blob, io_unit)) {
		assert(blob->back_bs_dev != NULL);
		*lba = _spdk_bs_io_unit_to_back_dev_lba(blob, io_unit);
		*lba_count = _spdk_bs_io_unit_to_back_dev_lba(blob, *lba_count);
	} else {
		*lba = _spdk_bs_blob_io_unit_to_lba(blob, io_unit);
	}
}
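/*
 * An I/O that spans a cluster boundary cannot be issued as a single request,
 * because consecutive clusters of a blob are generally not contiguous on
 * disk. The split machinery below issues one sub-I/O per cluster, in
 * sequence, re-arming itself from its own completion callback.
 */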
struct op_split_ctx {
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint64_t io_unit_offset;
	uint64_t io_units_remaining;
	void *curr_payload;
	enum spdk_blob_op_type op_type;
	spdk_bs_sequence_t *seq;
};

static void
_spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno)
{
	struct op_split_ctx	*ctx = cb_arg;
	struct spdk_blob	*blob = ctx->blob;
	struct spdk_io_channel	*ch = ctx->channel;
	enum spdk_blob_op_type	op_type = ctx->op_type;
	uint8_t			*buf = ctx->curr_payload;
	uint64_t		offset = ctx->io_unit_offset;
	uint64_t		length = ctx->io_units_remaining;
	uint64_t		op_length;

	if (bserrno != 0 || ctx->io_units_remaining == 0) {
		spdk_bs_sequence_finish(ctx->seq, bserrno);
		free(ctx);
		return;
	}

	op_length = spdk_min(length, _spdk_bs_num_io_units_to_cluster_boundary(blob,
			     offset));

	/* Update length and payload for next operation */
	ctx->io_units_remaining -= op_length;
	ctx->io_unit_offset += op_length;
	if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
		ctx->curr_payload += op_length * blob->bs->io_unit_size;
	}

	switch (op_type) {
	case SPDK_BLOB_READ:
		spdk_blob_io_read(blob, ch, buf, offset, op_length,
				  _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_WRITE:
		spdk_blob_io_write(blob, ch, buf, offset, op_length,
				   _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_UNMAP:
		spdk_blob_io_unmap(blob, ch, offset, op_length,
				   _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_WRITE_ZEROES:
		spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
					  _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_READV:
	case SPDK_BLOB_WRITEV:
		SPDK_ERRLOG("readv/writev not valid for %s\n", __func__);
		spdk_bs_sequence_finish(ctx->seq, -EINVAL);
		free(ctx);
		break;
	}
}

static void
_spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
				   void *payload, uint64_t offset, uint64_t length,
				   spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
	struct op_split_ctx *ctx;
	spdk_bs_sequence_t *seq;
	struct spdk_bs_cpl cpl;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(struct op_split_ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = spdk_bs_sequence_start(ch, &cpl);
	if (!seq) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->channel = ch;
	ctx->curr_payload = payload;
	ctx->io_unit_offset = offset;
	ctx->io_units_remaining = length;
	ctx->op_type = op_type;
	ctx->seq = seq;

	_spdk_blob_request_submit_op_split_next(ctx, 0);
}
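/*
 * Fast path for an operation contained within a single cluster. Frozen
 * blobs queue the op on the channel instead; reads from unallocated regions
 * are serviced by the backing device, and writes to unallocated regions
 * trigger cluster allocation (with copy-on-write when a parent exists).
 */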
static void
_spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
				    void *payload, uint64_t offset, uint64_t length,
				    spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
	struct spdk_bs_cpl cpl;
	uint64_t lba;
	uint32_t lba_count;

	assert(blob != NULL);

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);

	if (blob->frozen_refcnt) {
		/* This blob I/O is frozen */
		spdk_bs_user_op_t *op;
		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);

		op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
		if (!op) {
			cb_fn(cb_arg, -ENOMEM);
			return;
		}

		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);

		return;
	}

	switch (op_type) {
	case SPDK_BLOB_READ: {
		spdk_bs_batch_t *batch;

		batch = spdk_bs_batch_open(_ch, &cpl);
		if (!batch) {
			cb_fn(cb_arg, -ENOMEM);
			return;
		}

		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
			/* Read from the blob */
			spdk_bs_batch_read_dev(batch, payload, lba, lba_count);
		} else {
			/* Read from the backing block device */
			spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
		}

		spdk_bs_batch_close(batch);
		break;
	}
	case SPDK_BLOB_WRITE:
	case SPDK_BLOB_WRITE_ZEROES: {
		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
			/* Write to the blob */
			spdk_bs_batch_t *batch;

			if (lba_count == 0) {
				cb_fn(cb_arg, 0);
				return;
			}

			batch = spdk_bs_batch_open(_ch, &cpl);
			if (!batch) {
				cb_fn(cb_arg, -ENOMEM);
				return;
			}

			if (op_type == SPDK_BLOB_WRITE) {
				spdk_bs_batch_write_dev(batch, payload, lba, lba_count);
			} else {
				spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
			}

			spdk_bs_batch_close(batch);
		} else {
			/* Queue this operation and allocate the cluster */
			spdk_bs_user_op_t *op;

			op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
			if (!op) {
				cb_fn(cb_arg, -ENOMEM);
				return;
			}

			_spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op);
		}
		break;
	}
	case SPDK_BLOB_UNMAP: {
		spdk_bs_batch_t *batch;

		batch = spdk_bs_batch_open(_ch, &cpl);
		if (!batch) {
			cb_fn(cb_arg, -ENOMEM);
			return;
		}

		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
		}

		spdk_bs_batch_close(batch);
		break;
	}
	case SPDK_BLOB_READV:
	case SPDK_BLOB_WRITEV:
		SPDK_ERRLOG("readv/writev not valid\n");
		cb_fn(cb_arg, -EINVAL);
		break;
	}
}

static void
_spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
			     void *payload, uint64_t offset, uint64_t length,
			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
	assert(blob != NULL);

	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
		cb_fn(cb_arg, -EPERM);
		return;
	}

	if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
		cb_fn(cb_arg, -EINVAL);
		return;
	}
	if (length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset)) {
		_spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length,
						    cb_fn, cb_arg, op_type);
	} else {
		_spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length,
						   cb_fn, cb_arg, op_type);
	}
}
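/*
 * Context for splitting readv/writev requests. The trailing flexible array
 * holds the iov for the current sub-I/O; it is sized to the caller's iovcnt
 * at allocation time, which is the most any single sub-I/O can need.
 */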
and offset into the original iov array for our current position in the I/O sequence. 1996 * byte_count will keep track of how many bytes remain until orig_iov and orig_iovoff 1997 * point to the current position in the I/O sequence. 1998 */ 1999 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 2000 orig_iov = &ctx->orig_iov[0]; 2001 orig_iovoff = 0; 2002 while (byte_count > 0) { 2003 if (byte_count >= orig_iov->iov_len) { 2004 byte_count -= orig_iov->iov_len; 2005 orig_iov++; 2006 } else { 2007 orig_iovoff = byte_count; 2008 byte_count = 0; 2009 } 2010 } 2011 2012 /* 2013 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 2014 * bytes of this next I/O remain to be accounted for in the new iov array. 2015 */ 2016 byte_count = io_units_count * blob->bs->io_unit_size; 2017 iov = &ctx->iov[0]; 2018 iovcnt = 0; 2019 while (byte_count > 0) { 2020 assert(iovcnt < ctx->iovcnt); 2021 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 2022 iov->iov_base = orig_iov->iov_base + orig_iovoff; 2023 byte_count -= iov->iov_len; 2024 orig_iovoff = 0; 2025 orig_iov++; 2026 iov++; 2027 iovcnt++; 2028 } 2029 2030 ctx->io_unit_offset += io_units_count; 2031 ctx->io_units_remaining -= io_units_count; 2032 ctx->io_units_done += io_units_count; 2033 iov = &ctx->iov[0]; 2034 2035 if (ctx->read) { 2036 spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2037 io_units_count, _spdk_rw_iov_split_next, ctx); 2038 } else { 2039 spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2040 io_units_count, _spdk_rw_iov_split_next, ctx); 2041 } 2042 } 2043 2044 static void 2045 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2046 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 2047 spdk_blob_op_complete cb_fn, void *cb_arg, bool read) 2048 { 2049 struct spdk_bs_cpl cpl; 2050 2051 assert(blob != NULL); 2052 2053 if (!read && blob->data_ro) { 2054 cb_fn(cb_arg, -EPERM); 2055 return; 2056 } 2057 2058 if (length == 0) { 2059 cb_fn(cb_arg, 0); 2060 return; 2061 } 2062 2063 if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2064 cb_fn(cb_arg, -EINVAL); 2065 return; 2066 } 2067 2068 /* 2069 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 2070 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 2071 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 2072 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 2073 * to allocate a separate iov array and split the I/O such that none of the resulting 2074 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 2075 * but since this case happens very infrequently, any performance impact will be negligible. 2076 * 2077 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 2078 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 2079 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 2080 * when the batch was completed, to allow for freeing the memory for the iov arrays.
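*
* Worked example (annotation with hypothetical numbers, not part of the upstream comment): with
* a 1 MiB cluster and a 4 KiB io_unit there are 256 io_units per cluster, so a writev of 20
* io_units starting at io_unit offset 250 crosses a cluster boundary after 6 io_units.
* _spdk_rw_iov_split_next() then issues a 6-io_unit child I/O followed by a 14-io_unit child
* I/O, each with an iov array rebuilt from the caller's original iovecs.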
2081 */ 2082 if (spdk_likely(length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset))) { 2083 uint32_t lba_count; 2084 uint64_t lba; 2085 2086 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2087 2088 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2089 cpl.u.blob_basic.cb_fn = cb_fn; 2090 cpl.u.blob_basic.cb_arg = cb_arg; 2091 if (blob->frozen_refcnt) { 2092 /* This blob I/O is frozen */ 2093 spdk_bs_user_op_t *op; 2094 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 2095 2096 op = spdk_bs_user_op_alloc(_channel, &cpl, read, blob, iov, iovcnt, offset, length); 2097 if (!op) { 2098 cb_fn(cb_arg, -ENOMEM); 2099 return; 2100 } 2101 2102 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2103 2104 return; 2105 } 2106 2107 if (read) { 2108 spdk_bs_sequence_t *seq; 2109 2110 seq = spdk_bs_sequence_start(_channel, &cpl); 2111 if (!seq) { 2112 cb_fn(cb_arg, -ENOMEM); 2113 return; 2114 } 2115 2116 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2117 spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2118 } else { 2119 spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 2120 _spdk_rw_iov_done, NULL); 2121 } 2122 } else { 2123 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2124 spdk_bs_sequence_t *seq; 2125 2126 seq = spdk_bs_sequence_start(_channel, &cpl); 2127 if (!seq) { 2128 cb_fn(cb_arg, -ENOMEM); 2129 return; 2130 } 2131 2132 spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2133 } else { 2134 /* Queue this operation and allocate the cluster */ 2135 spdk_bs_user_op_t *op; 2136 2137 op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 2138 length); 2139 if (!op) { 2140 cb_fn(cb_arg, -ENOMEM); 2141 return; 2142 } 2143 2144 _spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op); 2145 } 2146 } 2147 } else { 2148 struct rw_iov_ctx *ctx; 2149 2150 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 2151 if (ctx == NULL) { 2152 cb_fn(cb_arg, -ENOMEM); 2153 return; 2154 } 2155 2156 ctx->blob = blob; 2157 ctx->channel = _channel; 2158 ctx->cb_fn = cb_fn; 2159 ctx->cb_arg = cb_arg; 2160 ctx->read = read; 2161 ctx->orig_iov = iov; 2162 ctx->iovcnt = iovcnt; 2163 ctx->io_unit_offset = offset; 2164 ctx->io_units_remaining = length; 2165 ctx->io_units_done = 0; 2166 2167 _spdk_rw_iov_split_next(ctx, 0); 2168 } 2169 } 2170 2171 static struct spdk_blob * 2172 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 2173 { 2174 struct spdk_blob *blob; 2175 2176 TAILQ_FOREACH(blob, &bs->blobs, link) { 2177 if (blob->id == blobid) { 2178 return blob; 2179 } 2180 } 2181 2182 return NULL; 2183 } 2184 2185 static int 2186 _spdk_bs_channel_create(void *io_device, void *ctx_buf) 2187 { 2188 struct spdk_blob_store *bs = io_device; 2189 struct spdk_bs_channel *channel = ctx_buf; 2190 struct spdk_bs_dev *dev; 2191 uint32_t max_ops = bs->max_channel_ops; 2192 uint32_t i; 2193 2194 dev = bs->dev; 2195 2196 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 2197 if (!channel->req_mem) { 2198 return -1; 2199 } 2200 2201 TAILQ_INIT(&channel->reqs); 2202 2203 for (i = 0; i < max_ops; i++) { 2204 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 2205 } 2206 2207 channel->bs = bs; 2208 channel->dev = dev; 2209 channel->dev_channel = dev->create_channel(dev); 2210 2211 if (!channel->dev_channel) { 2212 SPDK_ERRLOG("Failed to create device 
channel.\n"); 2213 free(channel->req_mem); 2214 return -1; 2215 } 2216 2217 TAILQ_INIT(&channel->need_cluster_alloc); 2218 TAILQ_INIT(&channel->queued_io); 2219 2220 return 0; 2221 } 2222 2223 static void 2224 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf) 2225 { 2226 struct spdk_bs_channel *channel = ctx_buf; 2227 spdk_bs_user_op_t *op; 2228 2229 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 2230 op = TAILQ_FIRST(&channel->need_cluster_alloc); 2231 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 2232 spdk_bs_user_op_abort(op); 2233 } 2234 2235 while (!TAILQ_EMPTY(&channel->queued_io)) { 2236 op = TAILQ_FIRST(&channel->queued_io); 2237 TAILQ_REMOVE(&channel->queued_io, op, link); 2238 spdk_bs_user_op_abort(op); 2239 } 2240 2241 free(channel->req_mem); 2242 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 2243 } 2244 2245 static void 2246 _spdk_bs_dev_destroy(void *io_device) 2247 { 2248 struct spdk_blob_store *bs = io_device; 2249 struct spdk_blob *blob, *blob_tmp; 2250 2251 bs->dev->destroy(bs->dev); 2252 2253 TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) { 2254 TAILQ_REMOVE(&bs->blobs, blob, link); 2255 _spdk_blob_free(blob); 2256 } 2257 2258 pthread_mutex_destroy(&bs->used_clusters_mutex); 2259 2260 spdk_bit_array_free(&bs->used_blobids); 2261 spdk_bit_array_free(&bs->used_md_pages); 2262 spdk_bit_array_free(&bs->used_clusters); 2263 /* 2264 * If this function is called for any reason except a successful unload, 2265 * the unload_cpl type will be NONE and this will be a nop. 2266 */ 2267 spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err); 2268 2269 free(bs); 2270 } 2271 2272 static int 2273 _spdk_bs_blob_list_add(struct spdk_blob *blob) 2274 { 2275 spdk_blob_id snapshot_id; 2276 struct spdk_blob_list *snapshot_entry = NULL; 2277 struct spdk_blob_list *clone_entry = NULL; 2278 2279 assert(blob != NULL); 2280 2281 snapshot_id = blob->parent_id; 2282 if (snapshot_id == SPDK_BLOBID_INVALID) { 2283 return 0; 2284 } 2285 2286 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 2287 if (snapshot_entry->id == snapshot_id) { 2288 break; 2289 } 2290 } 2291 2292 if (snapshot_entry == NULL) { 2293 /* Snapshot not found */ 2294 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 2295 if (snapshot_entry == NULL) { 2296 return -ENOMEM; 2297 } 2298 snapshot_entry->id = snapshot_id; 2299 TAILQ_INIT(&snapshot_entry->clones); 2300 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 2301 } else { 2302 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2303 if (clone_entry->id == blob->id) { 2304 break; 2305 } 2306 } 2307 } 2308 2309 if (clone_entry == NULL) { 2310 /* Clone not found */ 2311 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 2312 if (clone_entry == NULL) { 2313 return -ENOMEM; 2314 } 2315 clone_entry->id = blob->id; 2316 TAILQ_INIT(&clone_entry->clones); 2317 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 2318 snapshot_entry->clone_count++; 2319 } 2320 2321 return 0; 2322 } 2323 2324 static int 2325 _spdk_bs_blob_list_remove(struct spdk_blob *blob) 2326 { 2327 struct spdk_blob_list *snapshot_entry = NULL; 2328 struct spdk_blob_list *clone_entry = NULL; 2329 spdk_blob_id snapshot_id; 2330 2331 assert(blob != NULL); 2332 2333 snapshot_id = blob->parent_id; 2334 if (snapshot_id == SPDK_BLOBID_INVALID) { 2335 return 0; 2336 } 2337 2338 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 2339 if (snapshot_entry->id == snapshot_id) { 2340 break; 2341 } 2342 } 2343 2344 
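/* Annotation: a matching snapshot entry is expected here because _spdk_bs_blob_list_add() registers one for every blob whose parent_id is valid; the asserts below only guard that invariant. */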
assert(snapshot_entry != NULL); 2345 2346 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2347 if (clone_entry->id == blob->id) { 2348 break; 2349 } 2350 } 2351 2352 assert(clone_entry != NULL); 2353 2354 blob->parent_id = SPDK_BLOBID_INVALID; 2355 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2356 free(clone_entry); 2357 2358 snapshot_entry->clone_count--; 2359 2360 return 0; 2361 } 2362 2363 static int 2364 _spdk_bs_blob_list_free(struct spdk_blob_store *bs) 2365 { 2366 struct spdk_blob_list *snapshot_entry; 2367 struct spdk_blob_list *snapshot_entry_tmp; 2368 struct spdk_blob_list *clone_entry; 2369 struct spdk_blob_list *clone_entry_tmp; 2370 2371 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 2372 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 2373 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2374 free(clone_entry); 2375 } 2376 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 2377 free(snapshot_entry); 2378 } 2379 2380 return 0; 2381 } 2382 2383 static void 2384 _spdk_bs_free(struct spdk_blob_store *bs) 2385 { 2386 _spdk_bs_blob_list_free(bs); 2387 2388 spdk_bs_unregister_md_thread(bs); 2389 spdk_io_device_unregister(bs, _spdk_bs_dev_destroy); 2390 } 2391 2392 void 2393 spdk_bs_opts_init(struct spdk_bs_opts *opts) 2394 { 2395 opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ; 2396 opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES; 2397 opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS; 2398 opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS; 2399 memset(&opts->bstype, 0, sizeof(opts->bstype)); 2400 opts->iter_cb_fn = NULL; 2401 opts->iter_cb_arg = NULL; 2402 } 2403 2404 static int 2405 _spdk_bs_opts_verify(struct spdk_bs_opts *opts) 2406 { 2407 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 2408 opts->max_channel_ops == 0) { 2409 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 2410 return -1; 2411 } 2412 2413 return 0; 2414 } 2415 2416 static int 2417 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs) 2418 { 2419 struct spdk_blob_store *bs; 2420 uint64_t dev_size; 2421 int rc; 2422 2423 dev_size = dev->blocklen * dev->blockcnt; 2424 if (dev_size < opts->cluster_sz) { 2425 /* Device size cannot be smaller than cluster size of blobstore */ 2426 SPDK_INFOLOG(SPDK_LOG_BLOB, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 2427 dev_size, opts->cluster_sz); 2428 return -ENOSPC; 2429 } 2430 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 2431 /* Cluster size cannot be smaller than page size */ 2432 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 2433 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 2434 return -EINVAL; 2435 } 2436 bs = calloc(1, sizeof(struct spdk_blob_store)); 2437 if (!bs) { 2438 return -ENOMEM; 2439 } 2440 2441 TAILQ_INIT(&bs->blobs); 2442 TAILQ_INIT(&bs->snapshots); 2443 bs->dev = dev; 2444 bs->md_thread = spdk_get_thread(); 2445 assert(bs->md_thread != NULL); 2446 2447 /* 2448 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an 2449 * even multiple of the cluster size. 
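* (Worked example, annotation with illustrative numbers: with a 512-byte blocklen and a 1 MiB cluster_sz
* there are 2048 blocks per cluster, so a dev with blockcnt 2099199 yields total_clusters = 1024 and the
* trailing 2047 blocks are simply left unused.)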
2450 */ 2451 bs->cluster_sz = opts->cluster_sz; 2452 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 2453 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 2454 bs->num_free_clusters = bs->total_clusters; 2455 bs->used_clusters = spdk_bit_array_create(bs->total_clusters); 2456 bs->io_unit_size = dev->blocklen; 2457 if (bs->used_clusters == NULL) { 2458 free(bs); 2459 return -ENOMEM; 2460 } 2461 2462 bs->max_channel_ops = opts->max_channel_ops; 2463 bs->super_blob = SPDK_BLOBID_INVALID; 2464 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 2465 2466 /* The metadata is assumed to be at least 1 page */ 2467 bs->used_md_pages = spdk_bit_array_create(1); 2468 bs->used_blobids = spdk_bit_array_create(0); 2469 2470 pthread_mutex_init(&bs->used_clusters_mutex, NULL); 2471 2472 spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy, 2473 sizeof(struct spdk_bs_channel), "blobstore"); 2474 rc = spdk_bs_register_md_thread(bs); 2475 if (rc == -1) { 2476 spdk_io_device_unregister(bs, NULL); 2477 pthread_mutex_destroy(&bs->used_clusters_mutex); 2478 spdk_bit_array_free(&bs->used_blobids); 2479 spdk_bit_array_free(&bs->used_md_pages); 2480 spdk_bit_array_free(&bs->used_clusters); 2481 free(bs); 2482 /* FIXME: this is a lie, but we don't know how to get a proper error code here */ 2483 return -ENOMEM; 2484 } 2485 2486 *_bs = bs; 2487 return 0; 2488 } 2489 2490 /* START spdk_bs_load, spdk_bs_load_ctx will be used for both load and unload. */ 2491 2492 struct spdk_bs_load_ctx { 2493 struct spdk_blob_store *bs; 2494 struct spdk_bs_super_block *super; 2495 2496 struct spdk_bs_md_mask *mask; 2497 bool in_page_chain; 2498 uint32_t page_index; 2499 uint32_t cur_page; 2500 struct spdk_blob_md_page *page; 2501 bool is_load; 2502 2503 spdk_bs_sequence_t *seq; 2504 spdk_blob_op_with_handle_complete iter_cb_fn; 2505 void *iter_cb_arg; 2506 }; 2507 2508 static void 2509 _spdk_bs_load_ctx_fail(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 2510 { 2511 assert(bserrno != 0); 2512 2513 spdk_dma_free(ctx->super); 2514 spdk_bs_sequence_finish(seq, bserrno); 2515 /* 2516 * Only free the blobstore when a load fails. If an unload fails (for some reason) 2517 * we want to keep the blobstore in case the caller wants to try again.
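* (Annotation: ctx->is_load distinguishes the two callers; spdk_bs_load() sets it to true and spdk_bs_unload() sets it to false.)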
2518 */ 2519 if (ctx->is_load) { 2520 _spdk_bs_free(ctx->bs); 2521 } 2522 free(ctx); 2523 } 2524 2525 static void 2526 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask) 2527 { 2528 uint32_t i = 0; 2529 2530 while (true) { 2531 i = spdk_bit_array_find_first_set(array, i); 2532 if (i >= mask->length) { 2533 break; 2534 } 2535 mask->mask[i / 8] |= 1U << (i % 8); 2536 i++; 2537 } 2538 } 2539 2540 static int 2541 _spdk_bs_load_mask(struct spdk_bit_array **array_ptr, struct spdk_bs_md_mask *mask) 2542 { 2543 struct spdk_bit_array *array; 2544 uint32_t i; 2545 2546 if (spdk_bit_array_resize(array_ptr, mask->length) < 0) { 2547 return -ENOMEM; 2548 } 2549 2550 array = *array_ptr; 2551 for (i = 0; i < mask->length; i++) { 2552 if (mask->mask[i / 8] & (1U << (i % 8))) { 2553 spdk_bit_array_set(array, i); 2554 } 2555 } 2556 2557 return 0; 2558 } 2559 2560 static void 2561 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2562 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2563 { 2564 /* Update the values in the super block */ 2565 super->super_blob = bs->super_blob; 2566 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 2567 super->crc = _spdk_blob_md_page_calc_crc(super); 2568 spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0), 2569 _spdk_bs_byte_to_lba(bs, sizeof(*super)), 2570 cb_fn, cb_arg); 2571 } 2572 2573 static void 2574 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2575 { 2576 struct spdk_bs_load_ctx *ctx = arg; 2577 uint64_t mask_size, lba, lba_count; 2578 2579 /* Write out the used clusters mask */ 2580 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 2581 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2582 if (!ctx->mask) { 2583 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2584 return; 2585 } 2586 2587 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 2588 ctx->mask->length = ctx->bs->total_clusters; 2589 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters)); 2590 2591 _spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask); 2592 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 2593 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 2594 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2595 } 2596 2597 static void 2598 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2599 { 2600 struct spdk_bs_load_ctx *ctx = arg; 2601 uint64_t mask_size, lba, lba_count; 2602 2603 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 2604 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2605 if (!ctx->mask) { 2606 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2607 return; 2608 } 2609 2610 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 2611 ctx->mask->length = ctx->super->md_len; 2612 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 2613 2614 _spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask); 2615 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 2616 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 2617 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2618 } 2619 2620 static void 2621 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2622 { 2623 struct spdk_bs_load_ctx *ctx = arg; 2624 uint64_t mask_size, lba, lba_count; 2625 2626 if 
(ctx->super->used_blobid_mask_len == 0) { 2627 /* 2628 * This is a pre-v3 on-disk format where the blobid mask does not get 2629 * written to disk. 2630 */ 2631 cb_fn(seq, arg, 0); 2632 return; 2633 } 2634 2635 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 2636 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2637 if (!ctx->mask) { 2638 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2639 return; 2640 } 2641 2642 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 2643 ctx->mask->length = ctx->super->md_len; 2644 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 2645 2646 _spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask); 2647 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 2648 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 2649 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2650 } 2651 2652 static void 2653 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 2654 { 2655 struct spdk_bs_load_ctx *ctx = arg; 2656 2657 if (bserrno == 0) { 2658 if (ctx->iter_cb_fn) { 2659 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 2660 } 2661 _spdk_bs_blob_list_add(blob); 2662 spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx); 2663 return; 2664 } 2665 2666 if (bserrno == -ENOENT) { 2667 bserrno = 0; 2668 } else { 2669 /* 2670 * This case needs to be looked at further. Same problem 2671 * exists with applications that rely on explicit blob 2672 * iteration. We should just skip the blob that failed 2673 * to load and continue on to the next one. 2674 */ 2675 SPDK_ERRLOG("Error in iterating blobs\n"); 2676 } 2677 2678 ctx->iter_cb_fn = NULL; 2679 2680 spdk_dma_free(ctx->super); 2681 spdk_dma_free(ctx->mask); 2682 spdk_bs_sequence_finish(ctx->seq, bserrno); 2683 free(ctx); 2684 } 2685 2686 static void 2687 _spdk_bs_load_complete(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 2688 { 2689 ctx->seq = seq; 2690 spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx); 2691 } 2692 2693 static void 2694 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2695 { 2696 struct spdk_bs_load_ctx *ctx = cb_arg; 2697 int rc; 2698 2699 /* The type must be correct */ 2700 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 2701 2702 /* The length of the mask (in bits) must not be greater than 2703 * the length of the buffer (converted to bits) */ 2704 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 2705 2706 /* The length of the mask must be exactly equal to the size 2707 * (in pages) of the metadata region */ 2708 assert(ctx->mask->length == ctx->super->md_len); 2709 2710 rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask); 2711 if (rc < 0) { 2712 spdk_dma_free(ctx->mask); 2713 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2714 return; 2715 } 2716 2717 _spdk_bs_load_complete(seq, ctx, bserrno); 2718 } 2719 2720 static void 2721 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2722 { 2723 struct spdk_bs_load_ctx *ctx = cb_arg; 2724 uint64_t lba, lba_count, mask_size; 2725 int rc; 2726 2727 /* The type must be correct */ 2728 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 2729 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 2730 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 2731 struct spdk_blob_md_page) * 8)); 2732 /* The length of the mask must be 
exactly equal to the total number of clusters */ 2733 assert(ctx->mask->length == ctx->bs->total_clusters); 2734 2735 rc = _spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask); 2736 if (rc < 0) { 2737 spdk_dma_free(ctx->mask); 2738 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2739 return; 2740 } 2741 2742 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters); 2743 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 2744 2745 spdk_dma_free(ctx->mask); 2746 2747 /* Read the used blobids mask */ 2748 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 2749 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2750 if (!ctx->mask) { 2751 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2752 return; 2753 } 2754 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 2755 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 2756 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2757 _spdk_bs_load_used_blobids_cpl, ctx); 2758 } 2759 2760 static void 2761 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2762 { 2763 struct spdk_bs_load_ctx *ctx = cb_arg; 2764 uint64_t lba, lba_count, mask_size; 2765 int rc; 2766 2767 /* The type must be correct */ 2768 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 2769 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 2770 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 2771 8)); 2772 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 2773 assert(ctx->mask->length == ctx->super->md_len); 2774 2775 rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask); 2776 if (rc < 0) { 2777 spdk_dma_free(ctx->mask); 2778 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2779 return; 2780 } 2781 2782 spdk_dma_free(ctx->mask); 2783 2784 /* Read the used clusters mask */ 2785 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 2786 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2787 if (!ctx->mask) { 2788 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2789 return; 2790 } 2791 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 2792 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 2793 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2794 _spdk_bs_load_used_clusters_cpl, ctx); 2795 } 2796 2797 static void 2798 _spdk_bs_load_read_used_pages(spdk_bs_sequence_t *seq, void *cb_arg) 2799 { 2800 struct spdk_bs_load_ctx *ctx = cb_arg; 2801 uint64_t lba, lba_count, mask_size; 2802 2803 /* Read the used pages mask */ 2804 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 2805 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2806 if (!ctx->mask) { 2807 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2808 return; 2809 } 2810 2811 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 2812 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 2813 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2814 _spdk_bs_load_used_pages_cpl, ctx); 2815 } 2816 2817 static int 2818 _spdk_bs_load_replay_md_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_store *bs) 2819 { 2820 struct spdk_blob_md_descriptor *desc; 2821 size_t cur_desc = 0; 2822 2823 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 2824 while (cur_desc < sizeof(page->descriptors)) { 2825 if (desc->type == 
SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 2826 if (desc->length == 0) { 2827 /* If padding and length are 0, this terminates the page */ 2828 break; 2829 } 2830 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 2831 struct spdk_blob_md_descriptor_extent *desc_extent; 2832 unsigned int i, j; 2833 unsigned int cluster_count = 0; 2834 uint32_t cluster_idx; 2835 2836 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 2837 2838 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 2839 for (j = 0; j < desc_extent->extents[i].length; j++) { 2840 cluster_idx = desc_extent->extents[i].cluster_idx; 2841 /* 2842 * cluster_idx = 0 means an unallocated cluster - don't mark that 2843 * in the used cluster map. 2844 */ 2845 if (cluster_idx != 0) { 2846 spdk_bit_array_set(bs->used_clusters, cluster_idx + j); 2847 if (bs->num_free_clusters == 0) { 2848 return -ENOSPC; 2849 } 2850 bs->num_free_clusters--; 2851 } 2852 cluster_count++; 2853 } 2854 } 2855 if (cluster_count == 0) { 2856 return -EINVAL; 2857 } 2858 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 2859 /* Skip this item */ 2860 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 2861 /* Skip this item */ 2862 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 2863 /* Skip this item */ 2864 } else { 2865 /* Error */ 2866 return -EINVAL; 2867 } 2868 /* Advance to the next descriptor */ 2869 cur_desc += sizeof(*desc) + desc->length; 2870 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 2871 break; 2872 } 2873 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 2874 } 2875 return 0; 2876 } 2877 2878 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 2879 { 2880 uint32_t crc; 2881 2882 crc = _spdk_blob_md_page_calc_crc(ctx->page); 2883 if (crc != ctx->page->crc) { 2884 return false; 2885 } 2886 2887 if (_spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) { 2888 return false; 2889 } 2890 return true; 2891 } 2892 2893 static void 2894 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 2895 2896 static void 2897 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2898 { 2899 struct spdk_bs_load_ctx *ctx = cb_arg; 2900 2901 _spdk_bs_load_complete(seq, ctx, bserrno); 2902 } 2903 2904 static void 2905 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2906 { 2907 struct spdk_bs_load_ctx *ctx = cb_arg; 2908 2909 spdk_dma_free(ctx->mask); 2910 ctx->mask = NULL; 2911 2912 _spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_load_write_used_clusters_cpl); 2913 } 2914 2915 static void 2916 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2917 { 2918 struct spdk_bs_load_ctx *ctx = cb_arg; 2919 2920 spdk_dma_free(ctx->mask); 2921 ctx->mask = NULL; 2922 2923 _spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_load_write_used_blobids_cpl); 2924 } 2925 2926 static void 2927 _spdk_bs_load_write_used_md(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2928 { 2929 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_load_write_used_pages_cpl); 2930 } 2931 2932 static void 2933 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2934 { 2935 struct spdk_bs_load_ctx *ctx = cb_arg; 2936 uint64_t num_md_clusters; 2937 uint64_t i; 2938 uint32_t page_num; 2939 2940 if (bserrno != 0) { 2941 _spdk_bs_load_ctx_fail(seq, ctx, bserrno); 2942 return; 2943 } 2944 2945 page_num = ctx->cur_page; 
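/* Annotation: a metadata page is replayed only if it passes the CRC and blob-id checks in _spdk_bs_load_cur_md_page_valid(). A sequence_num of 0 marks the first page of a blob's chain, so that page also marks the blobid as used; later pages in the chain are reached through page->next. */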
2946 if (_spdk_bs_load_cur_md_page_valid(ctx) == true) { 2947 if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) { 2948 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 2949 if (ctx->page->sequence_num == 0) { 2950 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 2951 } 2952 if (_spdk_bs_load_replay_md_parse_page(ctx->page, ctx->bs)) { 2953 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 2954 return; 2955 } 2956 if (ctx->page->next != SPDK_INVALID_MD_PAGE) { 2957 ctx->in_page_chain = true; 2958 ctx->cur_page = ctx->page->next; 2959 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 2960 return; 2961 } 2962 } 2963 } 2964 2965 ctx->in_page_chain = false; 2966 2967 do { 2968 ctx->page_index++; 2969 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 2970 2971 if (ctx->page_index < ctx->super->md_len) { 2972 ctx->cur_page = ctx->page_index; 2973 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 2974 } else { 2975 /* Claim all of the clusters used by the metadata */ 2976 num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster); 2977 for (i = 0; i < num_md_clusters; i++) { 2978 _spdk_bs_claim_cluster(ctx->bs, i); 2979 } 2980 spdk_dma_free(ctx->page); 2981 _spdk_bs_load_write_used_md(seq, ctx, bserrno); 2982 } 2983 } 2984 2985 static void 2986 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 2987 { 2988 struct spdk_bs_load_ctx *ctx = cb_arg; 2989 uint64_t lba; 2990 2991 assert(ctx->cur_page < ctx->super->md_len); 2992 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 2993 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 2994 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 2995 _spdk_bs_load_replay_md_cpl, ctx); 2996 } 2997 2998 static void 2999 _spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg) 3000 { 3001 struct spdk_bs_load_ctx *ctx = cb_arg; 3002 3003 ctx->page_index = 0; 3004 ctx->cur_page = 0; 3005 ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE, 3006 SPDK_BS_PAGE_SIZE, 3007 NULL); 3008 if (!ctx->page) { 3009 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3010 return; 3011 } 3012 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 3013 } 3014 3015 static void 3016 _spdk_bs_recover(spdk_bs_sequence_t *seq, void *cb_arg) 3017 { 3018 struct spdk_bs_load_ctx *ctx = cb_arg; 3019 int rc; 3020 3021 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 3022 if (rc < 0) { 3023 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3024 return; 3025 } 3026 3027 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 3028 if (rc < 0) { 3029 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3030 return; 3031 } 3032 3033 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 3034 if (rc < 0) { 3035 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3036 return; 3037 } 3038 3039 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 3040 _spdk_bs_load_replay_md(seq, cb_arg); 3041 } 3042 3043 static void 3044 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3045 { 3046 struct spdk_bs_load_ctx *ctx = cb_arg; 3047 uint32_t crc; 3048 int rc; 3049 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH]; 3050 3051 if (ctx->super->version > SPDK_BS_VERSION || 3052 ctx->super->version < SPDK_BS_INITIAL_VERSION) { 3053 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3054 return; 3055 } 3056 3057 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3058 sizeof(ctx->super->signature)) != 0) { 3059 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3060 
return; 3061 } 3062 3063 crc = _spdk_blob_md_page_calc_crc(ctx->super); 3064 if (crc != ctx->super->crc) { 3065 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3066 return; 3067 } 3068 3069 if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 3070 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n"); 3071 } else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 3072 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n"); 3073 } else { 3074 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n"); 3075 SPDK_LOGDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 3076 SPDK_LOGDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 3077 _spdk_bs_load_ctx_fail(seq, ctx, -ENXIO); 3078 return; 3079 } 3080 3081 if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) { 3082 SPDK_NOTICELOG("Size mismatch, dev size: %lu, blobstore size: %lu\n", 3083 ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size); 3084 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3085 return; 3086 } 3087 3088 if (ctx->super->size == 0) { 3089 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 3090 } 3091 3092 if (ctx->super->io_unit_size == 0) { 3093 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 3094 } 3095 3096 /* Parse the super block */ 3097 ctx->bs->clean = 1; 3098 ctx->bs->cluster_sz = ctx->super->cluster_size; 3099 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 3100 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3101 ctx->bs->io_unit_size = ctx->super->io_unit_size; 3102 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 3103 if (rc < 0) { 3104 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3105 return; 3106 } 3107 ctx->bs->md_start = ctx->super->md_start; 3108 ctx->bs->md_len = ctx->super->md_len; 3109 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 3110 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 3111 ctx->bs->super_blob = ctx->super->super_blob; 3112 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 3113 3114 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) { 3115 _spdk_bs_recover(seq, ctx); 3116 } else { 3117 _spdk_bs_load_read_used_pages(seq, ctx); 3118 } 3119 } 3120 3121 void 3122 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 3123 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 3124 { 3125 struct spdk_blob_store *bs; 3126 struct spdk_bs_cpl cpl; 3127 spdk_bs_sequence_t *seq; 3128 struct spdk_bs_load_ctx *ctx; 3129 struct spdk_bs_opts opts = {}; 3130 int err; 3131 3132 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev); 3133 3134 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 3135 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen); 3136 dev->destroy(dev); 3137 cb_fn(cb_arg, NULL, -EINVAL); 3138 return; 3139 } 3140 3141 if (o) { 3142 opts = *o; 3143 } else { 3144 spdk_bs_opts_init(&opts); 3145 } 3146 3147 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 3148 dev->destroy(dev); 3149 cb_fn(cb_arg, NULL, -EINVAL); 3150 return; 3151 } 3152 3153 err = _spdk_bs_alloc(dev, &opts, &bs); 3154 if (err) { 3155 dev->destroy(dev); 3156 cb_fn(cb_arg, NULL, err); 3157 return; 3158 } 3159 3160 ctx = calloc(1, sizeof(*ctx)); 3161 if (!ctx) { 3162 _spdk_bs_free(bs); 3163 cb_fn(cb_arg,
NULL, -ENOMEM); 3164 return; 3165 } 3166 3167 ctx->bs = bs; 3168 ctx->is_load = true; 3169 ctx->iter_cb_fn = opts.iter_cb_fn; 3170 ctx->iter_cb_arg = opts.iter_cb_arg; 3171 3172 /* Allocate memory for the super block */ 3173 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3174 if (!ctx->super) { 3175 free(ctx); 3176 _spdk_bs_free(bs); 3177 cb_fn(cb_arg, NULL, -ENOMEM); 3178 return; 3179 } 3180 3181 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 3182 cpl.u.bs_handle.cb_fn = cb_fn; 3183 cpl.u.bs_handle.cb_arg = cb_arg; 3184 cpl.u.bs_handle.bs = bs; 3185 3186 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3187 if (!seq) { 3188 spdk_dma_free(ctx->super); 3189 free(ctx); 3190 _spdk_bs_free(bs); 3191 cb_fn(cb_arg, NULL, -ENOMEM); 3192 return; 3193 } 3194 3195 /* Read the super block */ 3196 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3197 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3198 _spdk_bs_load_super_cpl, ctx); 3199 } 3200 3201 /* END spdk_bs_load */ 3202 3203 /* START spdk_bs_dump */ 3204 3205 struct spdk_bs_dump_ctx { 3206 struct spdk_blob_store *bs; 3207 struct spdk_bs_super_block *super; 3208 uint32_t cur_page; 3209 struct spdk_blob_md_page *page; 3210 spdk_bs_sequence_t *seq; 3211 FILE *fp; 3212 spdk_bs_dump_print_xattr print_xattr_fn; 3213 char xattr_name[4096]; 3214 }; 3215 3216 static void 3217 _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno) 3218 { 3219 spdk_dma_free(ctx->super); 3220 3221 /* 3222 * We need to defer calling spdk_bs_call_cpl() until after 3223 * dev destruction, so tuck these away for later use. 3224 */ 3225 ctx->bs->unload_err = bserrno; 3226 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3227 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3228 3229 spdk_bs_sequence_finish(seq, 0); 3230 _spdk_bs_free(ctx->bs); 3231 free(ctx); 3232 } 3233 3234 static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 3235 3236 static void 3237 _spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx) 3238 { 3239 uint32_t page_idx = ctx->cur_page; 3240 struct spdk_blob_md_page *page = ctx->page; 3241 struct spdk_blob_md_descriptor *desc; 3242 size_t cur_desc = 0; 3243 uint32_t crc; 3244 3245 fprintf(ctx->fp, "=========\n"); 3246 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 3247 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 3248 3249 crc = _spdk_blob_md_page_calc_crc(page); 3250 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 3251 3252 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3253 while (cur_desc < sizeof(page->descriptors)) { 3254 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 3255 if (desc->length == 0) { 3256 /* If padding and length are 0, this terminates the page */ 3257 break; 3258 } 3259 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 3260 struct spdk_blob_md_descriptor_extent *desc_extent; 3261 unsigned int i; 3262 3263 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 3264 3265 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 3266 if (desc_extent->extents[i].cluster_idx != 0) { 3267 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 3268 desc_extent->extents[i].cluster_idx); 3269 } else { 3270 fprintf(ctx->fp, "Unallocated Extent - "); 3271 } 3272 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent->extents[i].length); 3273 fprintf(ctx->fp, "\n"); 3274 } 3275 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 3276 struct spdk_blob_md_descriptor_xattr *desc_xattr; 3277 uint32_t i; 3278 3279 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 3280 3281 if (desc_xattr->length != 3282 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 3283 desc_xattr->name_length + desc_xattr->value_length) { 3284 } 3285 3286 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 3287 ctx->xattr_name[desc_xattr->name_length] = '\0'; 3288 fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name); 3289 fprintf(ctx->fp, " value = \""); 3290 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 3291 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 3292 desc_xattr->value_length); 3293 fprintf(ctx->fp, "\"\n"); 3294 for (i = 0; i < desc_xattr->value_length; i++) { 3295 if (i % 16 == 0) { 3296 fprintf(ctx->fp, " "); 3297 } 3298 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 3299 if ((i + 1) % 16 == 0) { 3300 fprintf(ctx->fp, "\n"); 3301 } 3302 } 3303 if (i % 16 != 0) { 3304 fprintf(ctx->fp, "\n"); 3305 } 3306 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 3307 /* TODO */ 3308 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 3309 /* TODO */ 3310 } else { 3311 /* Error */ 3312 } 3313 /* Advance to the next descriptor */ 3314 cur_desc += sizeof(*desc) + desc->length; 3315 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 3316 break; 3317 } 3318 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 3319 } 3320 } 3321 3322 static void 3323 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3324 { 3325 struct spdk_bs_dump_ctx *ctx = cb_arg; 3326 3327 if (bserrno != 0) { 3328 _spdk_bs_dump_finish(seq, ctx, bserrno); 3329 return; 3330 } 3331 3332 if (ctx->page->id != 0) { 3333 _spdk_bs_dump_print_md_page(ctx); 3334 } 3335 3336 ctx->cur_page++; 3337 3338 if (ctx->cur_page < ctx->super->md_len) { 3339 _spdk_bs_dump_read_md_page(seq, cb_arg); 3340 } else { 3341 spdk_dma_free(ctx->page); 3342 _spdk_bs_dump_finish(seq, ctx, 0); 3343 } 3344 } 3345 3346 static void 3347 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 3348 { 3349 struct spdk_bs_dump_ctx *ctx = cb_arg; 3350 uint64_t lba; 3351 3352 assert(ctx->cur_page < ctx->super->md_len); 3353 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 3354 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 3355 _spdk_bs_byte_to_lba(ctx->bs, 
SPDK_BS_PAGE_SIZE), 3356 _spdk_bs_dump_read_md_page_cpl, ctx); 3357 } 3358 3359 static void 3360 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3361 { 3362 struct spdk_bs_dump_ctx *ctx = cb_arg; 3363 3364 fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature); 3365 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3366 sizeof(ctx->super->signature)) != 0) { 3367 fprintf(ctx->fp, "(Mismatch)\n"); 3368 _spdk_bs_dump_finish(seq, ctx, bserrno); 3369 return; 3370 } else { 3371 fprintf(ctx->fp, "(OK)\n"); 3372 } 3373 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 3374 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 3375 (ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 3376 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 3377 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 3378 fprintf(ctx->fp, "Super Blob ID: "); 3379 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 3380 fprintf(ctx->fp, "(None)\n"); 3381 } else { 3382 fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob); 3383 } 3384 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 3385 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 3386 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 3387 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 3388 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 3389 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 3390 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 3391 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 3392 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 3393 3394 ctx->cur_page = 0; 3395 ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE, 3396 SPDK_BS_PAGE_SIZE, 3397 NULL); 3398 if (!ctx->page) { 3399 _spdk_bs_dump_finish(seq, ctx, -ENOMEM); 3400 return; 3401 } 3402 _spdk_bs_dump_read_md_page(seq, cb_arg); 3403 } 3404 3405 void 3406 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 3407 spdk_bs_op_complete cb_fn, void *cb_arg) 3408 { 3409 struct spdk_blob_store *bs; 3410 struct spdk_bs_cpl cpl; 3411 spdk_bs_sequence_t *seq; 3412 struct spdk_bs_dump_ctx *ctx; 3413 struct spdk_bs_opts opts = {}; 3414 int err; 3415 3416 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev); 3417 3418 spdk_bs_opts_init(&opts); 3419 3420 err = _spdk_bs_alloc(dev, &opts, &bs); 3421 if (err) { 3422 dev->destroy(dev); 3423 cb_fn(cb_arg, err); 3424 return; 3425 } 3426 3427 ctx = calloc(1, sizeof(*ctx)); 3428 if (!ctx) { 3429 _spdk_bs_free(bs); 3430 cb_fn(cb_arg, -ENOMEM); 3431 return; 3432 } 3433 3434 ctx->bs = bs; 3435 ctx->fp = fp; 3436 ctx->print_xattr_fn = print_xattr_fn; 3437 3438 /* Allocate memory for the super block */ 3439 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3440 if (!ctx->super) { 3441 free(ctx); 3442 _spdk_bs_free(bs); 3443 cb_fn(cb_arg, -ENOMEM); 3444 return; 3445 } 3446 3447 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3448 cpl.u.bs_basic.cb_fn = cb_fn; 3449 cpl.u.bs_basic.cb_arg = cb_arg; 3450 3451 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3452 if (!seq) { 3453 spdk_dma_free(ctx->super); 3454 
free(ctx); 3455 _spdk_bs_free(bs); 3456 cb_fn(cb_arg, -ENOMEM); 3457 return; 3458 } 3459 3460 /* Read the super block */ 3461 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3462 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3463 _spdk_bs_dump_super_cpl, ctx); 3464 } 3465 3466 /* END spdk_bs_dump */ 3467 3468 /* START spdk_bs_init */ 3469 3470 struct spdk_bs_init_ctx { 3471 struct spdk_blob_store *bs; 3472 struct spdk_bs_super_block *super; 3473 }; 3474 3475 static void 3476 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3477 { 3478 struct spdk_bs_init_ctx *ctx = cb_arg; 3479 3480 spdk_dma_free(ctx->super); 3481 free(ctx); 3482 3483 spdk_bs_sequence_finish(seq, bserrno); 3484 } 3485 3486 static void 3487 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3488 { 3489 struct spdk_bs_init_ctx *ctx = cb_arg; 3490 3491 /* Write super block */ 3492 spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0), 3493 _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 3494 _spdk_bs_init_persist_super_cpl, ctx); 3495 } 3496 3497 void 3498 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 3499 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 3500 { 3501 struct spdk_bs_init_ctx *ctx; 3502 struct spdk_blob_store *bs; 3503 struct spdk_bs_cpl cpl; 3504 spdk_bs_sequence_t *seq; 3505 spdk_bs_batch_t *batch; 3506 uint64_t num_md_lba; 3507 uint64_t num_md_pages; 3508 uint64_t num_md_clusters; 3509 uint32_t i; 3510 struct spdk_bs_opts opts = {}; 3511 int rc; 3512 3513 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev); 3514 3515 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 3516 SPDK_ERRLOG("unsupported dev block length of %d\n", 3517 dev->blocklen); 3518 dev->destroy(dev); 3519 cb_fn(cb_arg, NULL, -EINVAL); 3520 return; 3521 } 3522 3523 if (o) { 3524 opts = *o; 3525 } else { 3526 spdk_bs_opts_init(&opts); 3527 } 3528 3529 if (_spdk_bs_opts_verify(&opts) != 0) { 3530 dev->destroy(dev); 3531 cb_fn(cb_arg, NULL, -EINVAL); 3532 return; 3533 } 3534 3535 rc = _spdk_bs_alloc(dev, &opts, &bs); 3536 if (rc) { 3537 dev->destroy(dev); 3538 cb_fn(cb_arg, NULL, rc); 3539 return; 3540 } 3541 3542 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 3543 /* By default, allocate 1 page per cluster. 3544 * Technically, this over-allocates metadata 3545 * because more metadata will reduce the number 3546 * of usable clusters. This can be addressed with 3547 * more complex math in the future. 
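* (Annotation with illustrative numbers: with 1 MiB clusters and the 4 KiB SPDK_BS_PAGE_SIZE, a
* 10 GiB device has 10240 clusters, so this default reserves 10240 metadata pages, roughly
* 40 MiB or 40 of those clusters.)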
3548 */ 3549 bs->md_len = bs->total_clusters; 3550 } else { 3551 bs->md_len = opts.num_md_pages; 3552 } 3553 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 3554 if (rc < 0) { 3555 _spdk_bs_free(bs); 3556 cb_fn(cb_arg, NULL, -ENOMEM); 3557 return; 3558 } 3559 3560 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 3561 if (rc < 0) { 3562 _spdk_bs_free(bs); 3563 cb_fn(cb_arg, NULL, -ENOMEM); 3564 return; 3565 } 3566 3567 ctx = calloc(1, sizeof(*ctx)); 3568 if (!ctx) { 3569 _spdk_bs_free(bs); 3570 cb_fn(cb_arg, NULL, -ENOMEM); 3571 return; 3572 } 3573 3574 ctx->bs = bs; 3575 3576 /* Allocate memory for the super block */ 3577 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3578 if (!ctx->super) { 3579 free(ctx); 3580 _spdk_bs_free(bs); 3581 cb_fn(cb_arg, NULL, -ENOMEM); 3582 return; 3583 } 3584 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3585 sizeof(ctx->super->signature)); 3586 ctx->super->version = SPDK_BS_VERSION; 3587 ctx->super->length = sizeof(*ctx->super); 3588 ctx->super->super_blob = bs->super_blob; 3589 ctx->super->clean = 0; 3590 ctx->super->cluster_size = bs->cluster_sz; 3591 ctx->super->io_unit_size = bs->io_unit_size; 3592 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 3593 3594 /* Calculate how many pages the metadata consumes at the front 3595 * of the disk. 3596 */ 3597 3598 /* The super block uses 1 page */ 3599 num_md_pages = 1; 3600 3601 /* The used_md_pages mask requires 1 bit per metadata page, rounded 3602 * up to the nearest page, plus a header. 3603 */ 3604 ctx->super->used_page_mask_start = num_md_pages; 3605 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 3606 spdk_divide_round_up(bs->md_len, 8), 3607 SPDK_BS_PAGE_SIZE); 3608 num_md_pages += ctx->super->used_page_mask_len; 3609 3610 /* The used_clusters mask requires 1 bit per cluster, rounded 3611 * up to the nearest page, plus a header. 3612 */ 3613 ctx->super->used_cluster_mask_start = num_md_pages; 3614 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 3615 spdk_divide_round_up(bs->total_clusters, 8), 3616 SPDK_BS_PAGE_SIZE); 3617 num_md_pages += ctx->super->used_cluster_mask_len; 3618 3619 /* The used_blobids mask requires 1 bit per metadata page, rounded 3620 * up to the nearest page, plus a header. 
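* (Annotation with illustrative numbers: an md_len of 10240 pages needs 10240 / 8 = 1280 bytes of
* bitmap, which together with the small spdk_bs_md_mask header still rounds up to a single 4 KiB page.)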
3621 */ 3622 ctx->super->used_blobid_mask_start = num_md_pages; 3623 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 3624 spdk_divide_round_up(bs->md_len, 8), 3625 SPDK_BS_PAGE_SIZE); 3626 num_md_pages += ctx->super->used_blobid_mask_len; 3627 3628 /* The metadata region size was chosen above */ 3629 ctx->super->md_start = bs->md_start = num_md_pages; 3630 ctx->super->md_len = bs->md_len; 3631 num_md_pages += bs->md_len; 3632 3633 num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages); 3634 3635 ctx->super->size = dev->blockcnt * dev->blocklen; 3636 3637 ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super); 3638 3639 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 3640 if (num_md_clusters > bs->total_clusters) { 3641 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 3642 "please decrease number of pages reserved for metadata " 3643 "or increase cluster size.\n"); 3644 spdk_dma_free(ctx->super); 3645 free(ctx); 3646 _spdk_bs_free(bs); 3647 cb_fn(cb_arg, NULL, -ENOMEM); 3648 return; 3649 } 3650 /* Claim all of the clusters used by the metadata */ 3651 for (i = 0; i < num_md_clusters; i++) { 3652 _spdk_bs_claim_cluster(bs, i); 3653 } 3654 3655 bs->total_data_clusters = bs->num_free_clusters; 3656 3657 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 3658 cpl.u.bs_handle.cb_fn = cb_fn; 3659 cpl.u.bs_handle.cb_arg = cb_arg; 3660 cpl.u.bs_handle.bs = bs; 3661 3662 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3663 if (!seq) { 3664 spdk_dma_free(ctx->super); 3665 free(ctx); 3666 _spdk_bs_free(bs); 3667 cb_fn(cb_arg, NULL, -ENOMEM); 3668 return; 3669 } 3670 3671 batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx); 3672 3673 /* Clear metadata space */ 3674 spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 3675 /* Trim data clusters */ 3676 spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba); 3677 3678 spdk_bs_batch_close(batch); 3679 } 3680 3681 /* END spdk_bs_init */ 3682 3683 /* START spdk_bs_destroy */ 3684 3685 static void 3686 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3687 { 3688 struct spdk_bs_init_ctx *ctx = cb_arg; 3689 struct spdk_blob_store *bs = ctx->bs; 3690 3691 /* 3692 * We need to defer calling spdk_bs_call_cpl() until after 3693 * dev destruction, so tuck these away for later use. 
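* Stashing the completion in bs->unload_cpl and setting seq->cpl.type to SPDK_BS_CPL_TYPE_NONE lets
* the spdk_bs_sequence_finish() below run without firing the user callback; _spdk_bs_dev_destroy()
* then calls spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err) once the dev has actually been destroyed.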
3694 */ 3695 bs->unload_err = bserrno; 3696 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3697 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3698 3699 spdk_bs_sequence_finish(seq, bserrno); 3700 3701 _spdk_bs_free(bs); 3702 free(ctx); 3703 } 3704 3705 void 3706 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 3707 void *cb_arg) 3708 { 3709 struct spdk_bs_cpl cpl; 3710 spdk_bs_sequence_t *seq; 3711 struct spdk_bs_init_ctx *ctx; 3712 3713 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n"); 3714 3715 if (!TAILQ_EMPTY(&bs->blobs)) { 3716 SPDK_ERRLOG("Blobstore still has open blobs\n"); 3717 cb_fn(cb_arg, -EBUSY); 3718 return; 3719 } 3720 3721 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3722 cpl.u.bs_basic.cb_fn = cb_fn; 3723 cpl.u.bs_basic.cb_arg = cb_arg; 3724 3725 ctx = calloc(1, sizeof(*ctx)); 3726 if (!ctx) { 3727 cb_fn(cb_arg, -ENOMEM); 3728 return; 3729 } 3730 3731 ctx->bs = bs; 3732 3733 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3734 if (!seq) { 3735 free(ctx); 3736 cb_fn(cb_arg, -ENOMEM); 3737 return; 3738 } 3739 3740 /* Write zeroes to the super block */ 3741 spdk_bs_sequence_write_zeroes_dev(seq, 3742 _spdk_bs_page_to_lba(bs, 0), 3743 _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 3744 _spdk_bs_destroy_trim_cpl, ctx); 3745 } 3746 3747 /* END spdk_bs_destroy */ 3748 3749 /* START spdk_bs_unload */ 3750 3751 static void 3752 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3753 { 3754 struct spdk_bs_load_ctx *ctx = cb_arg; 3755 3756 spdk_dma_free(ctx->super); 3757 3758 /* 3759 * We need to defer calling spdk_bs_call_cpl() until after 3760 * dev destruction, so tuck these away for later use. 3761 */ 3762 ctx->bs->unload_err = bserrno; 3763 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3764 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3765 3766 spdk_bs_sequence_finish(seq, bserrno); 3767 3768 _spdk_bs_free(ctx->bs); 3769 free(ctx); 3770 } 3771 3772 static void 3773 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3774 { 3775 struct spdk_bs_load_ctx *ctx = cb_arg; 3776 3777 spdk_dma_free(ctx->mask); 3778 ctx->super->clean = 1; 3779 3780 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx); 3781 } 3782 3783 static void 3784 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3785 { 3786 struct spdk_bs_load_ctx *ctx = cb_arg; 3787 3788 spdk_dma_free(ctx->mask); 3789 ctx->mask = NULL; 3790 3791 _spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_unload_write_used_clusters_cpl); 3792 } 3793 3794 static void 3795 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3796 { 3797 struct spdk_bs_load_ctx *ctx = cb_arg; 3798 3799 spdk_dma_free(ctx->mask); 3800 ctx->mask = NULL; 3801 3802 _spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_unload_write_used_blobids_cpl); 3803 } 3804 3805 static void 3806 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3807 { 3808 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl); 3809 } 3810 3811 void 3812 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 3813 { 3814 struct spdk_bs_cpl cpl; 3815 spdk_bs_sequence_t *seq; 3816 struct spdk_bs_load_ctx *ctx; 3817 3818 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n"); 3819 3820 if (!TAILQ_EMPTY(&bs->blobs)) { 3821 SPDK_ERRLOG("Blobstore still has open blobs\n"); 3822 
cb_fn(cb_arg, -EBUSY); 3823 return; 3824 } 3825 3826 ctx = calloc(1, sizeof(*ctx)); 3827 if (!ctx) { 3828 cb_fn(cb_arg, -ENOMEM); 3829 return; 3830 } 3831 3832 ctx->bs = bs; 3833 ctx->is_load = false; 3834 3835 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3836 if (!ctx->super) { 3837 free(ctx); 3838 cb_fn(cb_arg, -ENOMEM); 3839 return; 3840 } 3841 3842 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3843 cpl.u.bs_basic.cb_fn = cb_fn; 3844 cpl.u.bs_basic.cb_arg = cb_arg; 3845 3846 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3847 if (!seq) { 3848 spdk_dma_free(ctx->super); 3849 free(ctx); 3850 cb_fn(cb_arg, -ENOMEM); 3851 return; 3852 } 3853 3854 /* Read super block */ 3855 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3856 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3857 _spdk_bs_unload_read_super_cpl, ctx); 3858 } 3859 3860 /* END spdk_bs_unload */ 3861 3862 /* START spdk_bs_set_super */ 3863 3864 struct spdk_bs_set_super_ctx { 3865 struct spdk_blob_store *bs; 3866 struct spdk_bs_super_block *super; 3867 }; 3868 3869 static void 3870 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3871 { 3872 struct spdk_bs_set_super_ctx *ctx = cb_arg; 3873 3874 if (bserrno != 0) { 3875 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 3876 } 3877 3878 spdk_dma_free(ctx->super); 3879 3880 spdk_bs_sequence_finish(seq, bserrno); 3881 3882 free(ctx); 3883 } 3884 3885 static void 3886 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3887 { 3888 struct spdk_bs_set_super_ctx *ctx = cb_arg; 3889 3890 if (bserrno != 0) { 3891 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 3892 spdk_dma_free(ctx->super); 3893 spdk_bs_sequence_finish(seq, bserrno); 3894 free(ctx); 3895 return; 3896 } 3897 3898 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx); 3899 } 3900 3901 void 3902 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 3903 spdk_bs_op_complete cb_fn, void *cb_arg) 3904 { 3905 struct spdk_bs_cpl cpl; 3906 spdk_bs_sequence_t *seq; 3907 struct spdk_bs_set_super_ctx *ctx; 3908 3909 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n"); 3910 3911 ctx = calloc(1, sizeof(*ctx)); 3912 if (!ctx) { 3913 cb_fn(cb_arg, -ENOMEM); 3914 return; 3915 } 3916 3917 ctx->bs = bs; 3918 3919 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3920 if (!ctx->super) { 3921 free(ctx); 3922 cb_fn(cb_arg, -ENOMEM); 3923 return; 3924 } 3925 3926 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3927 cpl.u.bs_basic.cb_fn = cb_fn; 3928 cpl.u.bs_basic.cb_arg = cb_arg; 3929 3930 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3931 if (!seq) { 3932 spdk_dma_free(ctx->super); 3933 free(ctx); 3934 cb_fn(cb_arg, -ENOMEM); 3935 return; 3936 } 3937 3938 bs->super_blob = blobid; 3939 3940 /* Read super block */ 3941 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3942 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3943 _spdk_bs_set_super_read_cpl, ctx); 3944 } 3945 3946 /* END spdk_bs_set_super */ 3947 3948 void 3949 spdk_bs_get_super(struct spdk_blob_store *bs, 3950 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 3951 { 3952 if (bs->super_blob == SPDK_BLOBID_INVALID) { 3953 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 3954 } else { 3955 cb_fn(cb_arg, bs->super_blob, 0); 3956 } 3957 } 3958 3959 uint64_t 3960 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 3961 { 3962 return bs->cluster_sz; 3963 } 3964 3965 uint64_t 3966 
spdk_bs_get_page_size(struct spdk_blob_store *bs) 3967 { 3968 return SPDK_BS_PAGE_SIZE; 3969 } 3970 3971 uint64_t 3972 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 3973 { 3974 return bs->io_unit_size; 3975 } 3976 3977 uint64_t 3978 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 3979 { 3980 return bs->num_free_clusters; 3981 } 3982 3983 uint64_t 3984 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 3985 { 3986 return bs->total_data_clusters; 3987 } 3988 3989 static int 3990 spdk_bs_register_md_thread(struct spdk_blob_store *bs) 3991 { 3992 bs->md_channel = spdk_get_io_channel(bs); 3993 if (!bs->md_channel) { 3994 SPDK_ERRLOG("Failed to get IO channel.\n"); 3995 return -1; 3996 } 3997 3998 return 0; 3999 } 4000 4001 static int 4002 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs) 4003 { 4004 spdk_put_io_channel(bs->md_channel); 4005 4006 return 0; 4007 } 4008 4009 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 4010 { 4011 assert(blob != NULL); 4012 4013 return blob->id; 4014 } 4015 4016 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 4017 { 4018 assert(blob != NULL); 4019 4020 return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters); 4021 } 4022 4023 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob) 4024 { 4025 assert(blob != NULL); 4026 4027 return spdk_blob_get_num_pages(blob) * _spdk_bs_io_unit_per_page(blob->bs); 4028 } 4029 4030 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 4031 { 4032 assert(blob != NULL); 4033 4034 return blob->active.num_clusters; 4035 } 4036 4037 /* START spdk_bs_create_blob */ 4038 4039 static void 4040 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4041 { 4042 struct spdk_blob *blob = cb_arg; 4043 4044 _spdk_blob_free(blob); 4045 4046 spdk_bs_sequence_finish(seq, bserrno); 4047 } 4048 4049 static int 4050 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 4051 bool internal) 4052 { 4053 uint64_t i; 4054 size_t value_len = 0; 4055 int rc; 4056 const void *value = NULL; 4057 if (xattrs->count > 0 && xattrs->get_value == NULL) { 4058 return -EINVAL; 4059 } 4060 for (i = 0; i < xattrs->count; i++) { 4061 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 4062 if (value == NULL || value_len == 0) { 4063 return -EINVAL; 4064 } 4065 rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 4066 if (rc < 0) { 4067 return rc; 4068 } 4069 } 4070 return 0; 4071 } 4072 4073 static void 4074 _spdk_blob_set_thin_provision(struct spdk_blob *blob) 4075 { 4076 _spdk_blob_verify_md_op(blob); 4077 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4078 blob->state = SPDK_BLOB_STATE_DIRTY; 4079 } 4080 4081 static void 4082 _spdk_bs_create_blob(struct spdk_blob_store *bs, 4083 const struct spdk_blob_opts *opts, 4084 const struct spdk_blob_xattr_opts *internal_xattrs, 4085 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4086 { 4087 struct spdk_blob *blob; 4088 uint32_t page_idx; 4089 struct spdk_bs_cpl cpl; 4090 struct spdk_blob_opts opts_default; 4091 struct spdk_blob_xattr_opts internal_xattrs_default; 4092 spdk_bs_sequence_t *seq; 4093 spdk_blob_id id; 4094 int rc; 4095 4096 assert(spdk_get_thread() == bs->md_thread); 4097 4098 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 4099 if (page_idx == UINT32_MAX) { 4100 cb_fn(cb_arg, 0, -ENOMEM); 4101 return; 4102 } 4103 spdk_bit_array_set(bs->used_blobids, page_idx); 4104 spdk_bit_array_set(bs->used_md_pages, page_idx); 4105 4106 
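	/* A blob id is derived directly from the index of the blob's first
	 * metadata page, so claiming the page above also reserves the id. */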
id = _spdk_bs_page_to_blobid(page_idx);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx);

	blob = _spdk_blob_alloc(bs, id);
	if (!blob) {
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		spdk_bit_array_clear(bs->used_md_pages, page_idx);
		cb_fn(cb_arg, 0, -ENOMEM);
		return;
	}

	if (!opts) {
		spdk_blob_opts_init(&opts_default);
		opts = &opts_default;
	}
	if (!internal_xattrs) {
		_spdk_blob_xattrs_init(&internal_xattrs_default);
		internal_xattrs = &internal_xattrs_default;
	}

	rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false);
	if (rc < 0) {
		_spdk_blob_free(blob);
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		spdk_bit_array_clear(bs->used_md_pages, page_idx);
		cb_fn(cb_arg, 0, rc);
		return;
	}

	rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true);
	if (rc < 0) {
		_spdk_blob_free(blob);
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		spdk_bit_array_clear(bs->used_md_pages, page_idx);
		cb_fn(cb_arg, 0, rc);
		return;
	}

	if (opts->thin_provision) {
		_spdk_blob_set_thin_provision(blob);
	}

	rc = _spdk_blob_resize(blob, opts->num_clusters);
	if (rc < 0) {
		_spdk_blob_free(blob);
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		spdk_bit_array_clear(bs->used_md_pages, page_idx);
		cb_fn(cb_arg, 0, rc);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	cpl.u.blobid.cb_fn = cb_fn;
	cpl.u.blobid.cb_arg = cb_arg;
	cpl.u.blobid.blobid = blob->id;

	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		_spdk_blob_free(blob);
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		spdk_bit_array_clear(bs->used_md_pages, page_idx);
		cb_fn(cb_arg, 0, -ENOMEM);
		return;
	}

	_spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob);
}

void spdk_bs_create_blob(struct spdk_blob_store *bs,
			 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	_spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
}

void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	_spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
}

/* END spdk_bs_create_blob */

/* START blob_cleanup */

struct spdk_clone_snapshot_ctx {
	struct spdk_bs_cpl cpl;
	int bserrno;
	bool frozen;

	struct spdk_io_channel *channel;

	/* Current cluster for inflate operation */
	uint64_t cluster;

	/* For inflation, force allocation of all unallocated clusters and
	 * remove thin-provisioning. Otherwise, only decouple the parent and
	 * keep the clone thin. */
	bool allocate_all;

	struct {
		spdk_blob_id id;
		struct spdk_blob *blob;
	} original;
	struct {
		spdk_blob_id id;
		struct spdk_blob *blob;
	} new;

	/* xattrs specified for snapshot/clones only. They have no impact on
	 * the original blob's xattrs.
*/ 4205 const struct spdk_blob_xattr_opts *xattrs; 4206 }; 4207 4208 static void 4209 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 4210 { 4211 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 4212 struct spdk_bs_cpl *cpl = &ctx->cpl; 4213 4214 if (bserrno != 0) { 4215 if (ctx->bserrno != 0) { 4216 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4217 } else { 4218 ctx->bserrno = bserrno; 4219 } 4220 } 4221 4222 switch (cpl->type) { 4223 case SPDK_BS_CPL_TYPE_BLOBID: 4224 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 4225 break; 4226 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 4227 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 4228 break; 4229 default: 4230 SPDK_UNREACHABLE(); 4231 break; 4232 } 4233 4234 free(ctx); 4235 } 4236 4237 static void 4238 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 4239 { 4240 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4241 struct spdk_blob *origblob = ctx->original.blob; 4242 4243 if (bserrno != 0) { 4244 if (ctx->bserrno != 0) { 4245 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 4246 } else { 4247 ctx->bserrno = bserrno; 4248 } 4249 } 4250 4251 ctx->original.id = origblob->id; 4252 spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 4253 } 4254 4255 static void 4256 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 4257 { 4258 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4259 struct spdk_blob *origblob = ctx->original.blob; 4260 4261 if (bserrno != 0) { 4262 if (ctx->bserrno != 0) { 4263 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4264 } else { 4265 ctx->bserrno = bserrno; 4266 } 4267 } 4268 4269 if (ctx->frozen) { 4270 /* Unfreeze any outstanding I/O */ 4271 _spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx); 4272 } else { 4273 _spdk_bs_snapshot_unfreeze_cpl(ctx, 0); 4274 } 4275 4276 } 4277 4278 static void 4279 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno) 4280 { 4281 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4282 struct spdk_blob *newblob = ctx->new.blob; 4283 4284 if (bserrno != 0) { 4285 if (ctx->bserrno != 0) { 4286 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4287 } else { 4288 ctx->bserrno = bserrno; 4289 } 4290 } 4291 4292 ctx->new.id = newblob->id; 4293 spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4294 } 4295 4296 /* END blob_cleanup */ 4297 4298 /* START spdk_bs_create_snapshot */ 4299 4300 static void 4301 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 4302 { 4303 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4304 struct spdk_blob *newblob = ctx->new.blob; 4305 4306 if (bserrno != 0) { 4307 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4308 return; 4309 } 4310 4311 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 4312 bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 4313 if (bserrno != 0) { 4314 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4315 return; 4316 } 4317 4318 _spdk_bs_blob_list_add(ctx->original.blob); 4319 4320 spdk_blob_set_read_only(newblob); 4321 4322 /* sync snapshot metadata */ 4323 spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, cb_arg); 4324 } 4325 4326 static void 4327 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 4328 { 4329 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4330 struct 
spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
		return;
	}

	/* Set internal xattr for snapshot id */
	bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
		return;
	}

	_spdk_bs_blob_list_remove(origblob);
	origblob->parent_id = newblob->id;

	/* Create new back_bs_dev for snapshot */
	origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob);
	if (origblob->back_bs_dev == NULL) {
		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
		return;
	}

	/* set clone blob as thin provisioned */
	_spdk_blob_set_thin_provision(origblob);

	_spdk_bs_blob_list_add(newblob);

	/* Zero out origblob cluster map */
	memset(origblob->active.clusters, 0,
	       origblob->active.num_clusters * sizeof(*origblob->active.clusters));

	/* sync clone metadata */
	spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx);
}

static void
_spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = ctx->new.blob;
	int bserrno;

	if (rc != 0) {
		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc);
		return;
	}

	ctx->frozen = true;

	/* set new back_bs_dev for snapshot */
	newblob->back_bs_dev = origblob->back_bs_dev;
	/* Set invalid flags from origblob */
	newblob->invalid_flags = origblob->invalid_flags;

	/* inherit parent from original blob if set */
	newblob->parent_id = origblob->parent_id;
	if (origblob->parent_id != SPDK_BLOBID_INVALID) {
		/* Set internal xattr for snapshot id */
		bserrno = _spdk_blob_set_xattr(newblob, BLOB_SNAPSHOT,
					       &origblob->parent_id, sizeof(spdk_blob_id), true);
		if (bserrno != 0) {
			_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
			return;
		}
	}

	/* Copy cluster map to snapshot */
	memcpy(newblob->active.clusters, origblob->active.clusters,
	       origblob->active.num_clusters * sizeof(*origblob->active.clusters));

	/* sync snapshot metadata */
	spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx);
}

static void
_spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;
	struct spdk_blob *newblob = _blob;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.blob = newblob;

	_spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx);
}

static void
_spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *origblob = ctx->original.blob;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.id = blobid;
	ctx->cpl.u.blobid.blobid = blobid;
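	/* Open the newly created snapshot blob so that its cluster map and
	 * back_bs_dev can be populated while I/O to the original is frozen. */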
	spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx);
}

static void
_spdk_bs_xattr_snapshot(void *arg, const char *name,
			const void **value, size_t *value_len)
{
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
}

static void
_spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;

	if (_blob->data_ro || _blob->md_ro) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read-only blob with id %lu\n",
			      _blob->id);
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
		return;
	}

	spdk_blob_opts_init(&opts);
	_spdk_blob_xattrs_init(&internal_xattrs);

	/* Change the size of the new blob to the same as in the original blob,
	 * but do not allocate clusters */
	opts.thin_provision = true;
	opts.num_clusters = spdk_blob_get_num_clusters(_blob);

	/* If there are any xattrs specified for the snapshot, set them now */
	if (ctx->xattrs) {
		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
	}
	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
	internal_xattrs.count = 1;
	internal_xattrs.ctx = _blob;
	internal_xattrs.names = xattrs_names;
	internal_xattrs.get_value = _spdk_bs_xattr_snapshot;

	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
			     _spdk_bs_snapshot_newblob_create_cpl, ctx);
}

void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
			     const struct spdk_blob_xattr_opts *snapshot_xattrs,
			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
		return;
	}
	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	ctx->cpl.u.blobid.cb_fn = cb_fn;
	ctx->cpl.u.blobid.cb_arg = cb_arg;
	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
	ctx->bserrno = 0;
	ctx->frozen = false;
	ctx->original.id = blobid;
	ctx->xattrs = snapshot_xattrs;

	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx);
}
/* END spdk_bs_create_snapshot */

/* START spdk_bs_create_clone */

static void
_spdk_bs_xattr_clone(void *arg, const char *name,
		     const void **value, size_t *value_len)
{
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
}

static void
_spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *clone = _blob;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.blob = clone;
	_spdk_bs_blob_list_add(clone);

	spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
}
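/*
 * Illustrative usage sketch (kept out of the build with #if 0): creating a
 * snapshot of an existing blob and then a clone of that snapshot. The handles
 * g_bs and g_blobid and the callbacks below are hypothetical application
 * code, not part of this file.
 */
#if 0
static struct spdk_blob_store *g_bs;
static spdk_blob_id g_blobid;

static void
example_clone_done(void *cb_arg, spdk_blob_id clone_id, int bserrno)
{
	if (bserrno == 0) {
		/* clone_id now refers to a thin-provisioned clone whose
		 * unwritten clusters are backed by the snapshot. */
		SPDK_NOTICELOG("created clone %" PRIu64 "\n", clone_id);
	}
}

static void
example_snapshot_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
{
	if (bserrno != 0) {
		return;
	}
	/* The snapshot is read-only, so it is a valid clone source. */
	spdk_bs_create_clone(g_bs, snapshot_id, NULL, example_clone_done, NULL);
}

static void
example_take_snapshot(void)
{
	spdk_bs_create_snapshot(g_bs, g_blobid, NULL, example_snapshot_done, NULL);
}
#endif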
static void
_spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->cpl.u.blobid.blobid = blobid;
	spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx);
}

static void
_spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	char *xattr_names[] = { BLOB_SNAPSHOT };

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;

	if (!_blob->data_ro || !_blob->md_ro) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create a clone from a blob that is not read-only\n");
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
		return;
	}

	spdk_blob_opts_init(&opts);
	_spdk_blob_xattrs_init(&internal_xattrs);

	opts.thin_provision = true;
	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
	if (ctx->xattrs) {
		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
	}

	/* Set internal xattr BLOB_SNAPSHOT */
	internal_xattrs.count = 1;
	internal_xattrs.ctx = _blob;
	internal_xattrs.names = xattr_names;
	internal_xattrs.get_value = _spdk_bs_xattr_clone;

	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
			     _spdk_bs_clone_newblob_create_cpl, ctx);
}

void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
			  const struct spdk_blob_xattr_opts *clone_xattrs,
			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	ctx->cpl.u.blobid.cb_fn = cb_fn;
	ctx->cpl.u.blobid.cb_arg = cb_arg;
	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
	ctx->bserrno = 0;
	ctx->xattrs = clone_xattrs;
	ctx->original.id = blobid;

	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx);
}

/* END spdk_bs_create_clone */

/* START spdk_bs_inflate_blob */

static void
_spdk_bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	assert(_parent != NULL);

	_spdk_bs_blob_list_remove(_blob);
	_blob->parent_id = _parent->id;
	_spdk_blob_set_xattr(_blob, BLOB_SNAPSHOT, &_blob->parent_id,
			     sizeof(spdk_blob_id), true);

	_blob->back_bs_dev->destroy(_blob->back_bs_dev);
	_blob->back_bs_dev = spdk_bs_create_blob_bs_dev(_parent);
	_spdk_bs_blob_list_add(_blob);

	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
_spdk_bs_inflate_blob_done(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;
	struct spdk_blob *_parent;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	if (ctx->allocate_all) {
		/* Remove thin provisioning */
		_spdk_bs_blob_list_remove(_blob);
		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
		_blob->back_bs_dev = NULL;
		_blob->parent_id = SPDK_BLOBID_INVALID;
	} else {
		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
			/* We must change the parent of the inflated blob */
			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
					  _spdk_bs_inflate_blob_set_parent_cpl, ctx);
			return;
		}

		_spdk_bs_blob_list_remove(_blob);
		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
		_blob->parent_id = SPDK_BLOBID_INVALID;
		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
		_blob->back_bs_dev = spdk_bs_create_zeroes_dev();
	}

	_blob->state = SPDK_BLOB_STATE_DIRTY;
	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
}

/* Check if a cluster needs allocation */
static inline bool
_spdk_bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
{
	struct spdk_blob_bs_dev *b;

	assert(blob != NULL);

	if (blob->active.clusters[cluster] != 0) {
		/* Cluster is already allocated */
		return false;
	}

	if (blob->parent_id == SPDK_BLOBID_INVALID) {
		/* Blob has no parent blob */
		return allocate_all;
	}

	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	return (allocate_all || b->blob->active.clusters[cluster] != 0);
}

static void
_spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;
	uint64_t offset;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
		if (_spdk_bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
			break;
		}
	}

	if (ctx->cluster < _blob->active.num_clusters) {
		offset = _spdk_bs_cluster_to_lba(_blob->bs, ctx->cluster);

		/* It is safe to increment the cluster index before the write */
		ctx->cluster++;

		/* Use a zero-length write to touch the cluster */
		spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0,
				   _spdk_bs_inflate_blob_touch_next, ctx);
	} else {
		_spdk_bs_inflate_blob_done(cb_arg, bserrno);
	}
}

static void
_spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	uint64_t lfc; /* lowest free cluster */
	uint64_t i;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}
	ctx->original.blob = _blob;

	if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) {
		/* This blob has no parent, so we cannot decouple it.
		 */
		SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
		return;
	}

	if (spdk_blob_is_thin_provisioned(_blob) == false) {
		/* This blob is not thin provisioned, so there is nothing to inflate. */
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0);
		return;
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and another to actually claim them.
	 */
	lfc = 0;
	for (i = 0; i < _blob->active.num_clusters; i++) {
		if (_spdk_bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
			lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc);
			if (lfc == UINT32_MAX) {
				/* No more free clusters. Cannot satisfy the request */
				_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
				return;
			}
			lfc++;
		}
	}

	ctx->cluster = 0;
	_spdk_bs_inflate_blob_touch_next(ctx, 0);
}

static void
_spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
		      spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
	ctx->bserrno = 0;
	ctx->original.id = blobid;
	ctx->channel = channel;
	ctx->allocate_all = allocate_all;

	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx);
}

void
spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
}

void
spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
}
/* END spdk_bs_inflate_blob */

/* START spdk_blob_resize */
struct spdk_bs_resize_ctx {
	spdk_blob_op_complete cb_fn;
	void *cb_arg;
	struct spdk_blob *blob;
	uint64_t sz;
	int rc;
};

static void
_spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc)
{
	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;

	if (rc != 0) {
		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
	}

	if (ctx->rc != 0) {
		SPDK_ERRLOG("Blob resize failed, rc=%d\n", ctx->rc);
		rc = ctx->rc;
	}

	ctx->blob->resize_in_progress = false;

	ctx->cb_fn(ctx->cb_arg, rc);
	free(ctx);
}

static void
_spdk_bs_resize_freeze_cpl(void *cb_arg, int rc)
{
	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;

	if (rc != 0) {
		ctx->blob->resize_in_progress = false;
		ctx->cb_fn(ctx->cb_arg, rc);
		free(ctx);
		return;
	}

	ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz);

	_spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx);
}

void
spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_resize_ctx *ctx;

	_spdk_blob_verify_md_op(blob);

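	/* Resize is serialized against user I/O: outstanding I/O is frozen
	 * first, the cluster map is grown or shrunk in
	 * _spdk_bs_resize_freeze_cpl(), and I/O is resumed in
	 * _spdk_bs_resize_unfreeze_cpl(). */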
SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz); 4876 4877 if (blob->md_ro) { 4878 cb_fn(cb_arg, -EPERM); 4879 return; 4880 } 4881 4882 if (sz == blob->active.num_clusters) { 4883 cb_fn(cb_arg, 0); 4884 return; 4885 } 4886 4887 if (blob->resize_in_progress) { 4888 cb_fn(cb_arg, -EBUSY); 4889 return; 4890 } 4891 4892 ctx = calloc(1, sizeof(*ctx)); 4893 if (!ctx) { 4894 cb_fn(cb_arg, -ENOMEM); 4895 return; 4896 } 4897 4898 blob->resize_in_progress = true; 4899 ctx->cb_fn = cb_fn; 4900 ctx->cb_arg = cb_arg; 4901 ctx->blob = blob; 4902 ctx->sz = sz; 4903 _spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx); 4904 } 4905 4906 /* END spdk_blob_resize */ 4907 4908 4909 /* START spdk_bs_delete_blob */ 4910 4911 static void 4912 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno) 4913 { 4914 spdk_bs_sequence_t *seq = cb_arg; 4915 4916 spdk_bs_sequence_finish(seq, bserrno); 4917 } 4918 4919 static void 4920 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4921 { 4922 struct spdk_blob *blob = cb_arg; 4923 4924 if (bserrno != 0) { 4925 /* 4926 * We already removed this blob from the blobstore tailq, so 4927 * we need to free it here since this is the last reference 4928 * to it. 4929 */ 4930 _spdk_blob_free(blob); 4931 _spdk_bs_delete_close_cpl(seq, bserrno); 4932 return; 4933 } 4934 4935 /* 4936 * This will immediately decrement the ref_count and call 4937 * the completion routine since the metadata state is clean. 4938 * By calling spdk_blob_close, we reduce the number of call 4939 * points into code that touches the blob->open_ref count 4940 * and the blobstore's blob list. 4941 */ 4942 spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq); 4943 } 4944 4945 static void 4946 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 4947 { 4948 spdk_bs_sequence_t *seq = cb_arg; 4949 struct spdk_blob_list *snapshot = NULL; 4950 uint32_t page_num; 4951 4952 if (bserrno != 0) { 4953 spdk_bs_sequence_finish(seq, bserrno); 4954 return; 4955 } 4956 4957 _spdk_blob_verify_md_op(blob); 4958 4959 if (blob->open_ref > 1) { 4960 /* 4961 * Someone has this blob open (besides this delete context). 4962 * Decrement the ref count directly and return -EBUSY. 4963 */ 4964 blob->open_ref--; 4965 spdk_bs_sequence_finish(seq, -EBUSY); 4966 return; 4967 } 4968 4969 bserrno = _spdk_bs_blob_list_remove(blob); 4970 if (bserrno != 0) { 4971 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Remove blob #%" PRIu64 " from a list\n", blob->id); 4972 spdk_bs_sequence_finish(seq, bserrno); 4973 return; 4974 } 4975 4976 /* 4977 * Remove the blob from the blob_store list now, to ensure it does not 4978 * get returned after this point by _spdk_blob_lookup(). 
4979 */ 4980 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 4981 4982 /* If blob is a snapshot then remove it from the list */ 4983 TAILQ_FOREACH(snapshot, &blob->bs->snapshots, link) { 4984 if (snapshot->id == blob->id) { 4985 TAILQ_REMOVE(&blob->bs->snapshots, snapshot, link); 4986 free(snapshot); 4987 break; 4988 } 4989 } 4990 4991 page_num = _spdk_bs_blobid_to_page(blob->id); 4992 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 4993 blob->state = SPDK_BLOB_STATE_DIRTY; 4994 blob->active.num_pages = 0; 4995 _spdk_blob_resize(blob, 0); 4996 4997 _spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob); 4998 } 4999 5000 void 5001 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 5002 spdk_blob_op_complete cb_fn, void *cb_arg) 5003 { 5004 struct spdk_bs_cpl cpl; 5005 spdk_bs_sequence_t *seq; 5006 struct spdk_blob_list *snapshot_entry = NULL; 5007 5008 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid); 5009 5010 assert(spdk_get_thread() == bs->md_thread); 5011 5012 /* Check if this is a snapshot with clones */ 5013 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5014 if (snapshot_entry->id == blobid) { 5015 break; 5016 } 5017 } 5018 if (snapshot_entry != NULL) { 5019 /* If snapshot have clones, we cannot remove it */ 5020 if (!TAILQ_EMPTY(&snapshot_entry->clones)) { 5021 SPDK_ERRLOG("Cannot remove snapshot with clones\n"); 5022 cb_fn(cb_arg, -EBUSY); 5023 return; 5024 } 5025 } 5026 5027 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5028 cpl.u.blob_basic.cb_fn = cb_fn; 5029 cpl.u.blob_basic.cb_arg = cb_arg; 5030 5031 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 5032 if (!seq) { 5033 cb_fn(cb_arg, -ENOMEM); 5034 return; 5035 } 5036 5037 spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq); 5038 } 5039 5040 /* END spdk_bs_delete_blob */ 5041 5042 /* START spdk_bs_open_blob */ 5043 5044 static void 5045 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5046 { 5047 struct spdk_blob *blob = cb_arg; 5048 5049 /* If the blob have crc error, we just return NULL. 
*/ 5050 if (blob == NULL) { 5051 seq->cpl.u.blob_handle.blob = NULL; 5052 spdk_bs_sequence_finish(seq, bserrno); 5053 return; 5054 } 5055 5056 blob->open_ref++; 5057 5058 TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link); 5059 5060 spdk_bs_sequence_finish(seq, bserrno); 5061 } 5062 5063 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 5064 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5065 { 5066 struct spdk_blob *blob; 5067 struct spdk_bs_cpl cpl; 5068 spdk_bs_sequence_t *seq; 5069 uint32_t page_num; 5070 5071 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid); 5072 assert(spdk_get_thread() == bs->md_thread); 5073 5074 page_num = _spdk_bs_blobid_to_page(blobid); 5075 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 5076 /* Invalid blobid */ 5077 cb_fn(cb_arg, NULL, -ENOENT); 5078 return; 5079 } 5080 5081 blob = _spdk_blob_lookup(bs, blobid); 5082 if (blob) { 5083 blob->open_ref++; 5084 cb_fn(cb_arg, blob, 0); 5085 return; 5086 } 5087 5088 blob = _spdk_blob_alloc(bs, blobid); 5089 if (!blob) { 5090 cb_fn(cb_arg, NULL, -ENOMEM); 5091 return; 5092 } 5093 5094 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 5095 cpl.u.blob_handle.cb_fn = cb_fn; 5096 cpl.u.blob_handle.cb_arg = cb_arg; 5097 cpl.u.blob_handle.blob = blob; 5098 5099 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 5100 if (!seq) { 5101 _spdk_blob_free(blob); 5102 cb_fn(cb_arg, NULL, -ENOMEM); 5103 return; 5104 } 5105 5106 _spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob); 5107 } 5108 /* END spdk_bs_open_blob */ 5109 5110 /* START spdk_blob_set_read_only */ 5111 int spdk_blob_set_read_only(struct spdk_blob *blob) 5112 { 5113 _spdk_blob_verify_md_op(blob); 5114 5115 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 5116 5117 blob->state = SPDK_BLOB_STATE_DIRTY; 5118 return 0; 5119 } 5120 /* END spdk_blob_set_read_only */ 5121 5122 /* START spdk_blob_sync_md */ 5123 5124 static void 5125 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5126 { 5127 struct spdk_blob *blob = cb_arg; 5128 5129 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 5130 blob->data_ro = true; 5131 blob->md_ro = true; 5132 } 5133 5134 spdk_bs_sequence_finish(seq, bserrno); 5135 } 5136 5137 static void 5138 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5139 { 5140 struct spdk_bs_cpl cpl; 5141 spdk_bs_sequence_t *seq; 5142 5143 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5144 cpl.u.blob_basic.cb_fn = cb_fn; 5145 cpl.u.blob_basic.cb_arg = cb_arg; 5146 5147 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 5148 if (!seq) { 5149 cb_fn(cb_arg, -ENOMEM); 5150 return; 5151 } 5152 5153 _spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob); 5154 } 5155 5156 void 5157 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5158 { 5159 _spdk_blob_verify_md_op(blob); 5160 5161 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id); 5162 5163 if (blob->md_ro) { 5164 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 5165 cb_fn(cb_arg, 0); 5166 return; 5167 } 5168 5169 _spdk_blob_sync_md(blob, cb_fn, cb_arg); 5170 } 5171 5172 /* END spdk_blob_sync_md */ 5173 5174 struct spdk_blob_insert_cluster_ctx { 5175 struct spdk_thread *thread; 5176 struct spdk_blob *blob; 5177 uint32_t cluster_num; /* cluster index in blob */ 5178 uint32_t cluster; /* cluster on disk */ 5179 int rc; 5180 spdk_blob_op_complete cb_fn; 5181 void *cb_arg; 5182 }; 5183 5184 static void 5185 _spdk_blob_insert_cluster_msg_cpl(void *arg) 5186 
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;

	ctx->cb_fn(ctx->cb_arg, ctx->rc);
	free(ctx);
}

static void
_spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;

	ctx->rc = bserrno;
	spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
}

static void
_spdk_blob_insert_cluster_msg(void *arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;

	ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
	if (ctx->rc != 0) {
		spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
		return;
	}

	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
	_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
}

static void
_spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
				       uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->thread = spdk_get_thread();
	ctx->blob = blob;
	ctx->cluster_num = cluster_num;
	ctx->cluster = cluster;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx);
}

/* START spdk_blob_close */

static void
_spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno == 0) {
		blob->open_ref--;
		if (blob->open_ref == 0) {
			/*
			 * Blobs with active.num_pages == 0 are deleted blobs.
			 * These blobs are removed from the blob_store list
			 * when the deletion process starts, so don't try to
			 * remove them again.
5254 */ 5255 if (blob->active.num_pages > 0) { 5256 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 5257 } 5258 _spdk_blob_free(blob); 5259 } 5260 } 5261 5262 spdk_bs_sequence_finish(seq, bserrno); 5263 } 5264 5265 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5266 { 5267 struct spdk_bs_cpl cpl; 5268 spdk_bs_sequence_t *seq; 5269 5270 _spdk_blob_verify_md_op(blob); 5271 5272 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id); 5273 5274 if (blob->open_ref == 0) { 5275 cb_fn(cb_arg, -EBADF); 5276 return; 5277 } 5278 5279 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5280 cpl.u.blob_basic.cb_fn = cb_fn; 5281 cpl.u.blob_basic.cb_arg = cb_arg; 5282 5283 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 5284 if (!seq) { 5285 cb_fn(cb_arg, -ENOMEM); 5286 return; 5287 } 5288 5289 /* Sync metadata */ 5290 _spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob); 5291 } 5292 5293 /* END spdk_blob_close */ 5294 5295 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 5296 { 5297 return spdk_get_io_channel(bs); 5298 } 5299 5300 void spdk_bs_free_io_channel(struct spdk_io_channel *channel) 5301 { 5302 spdk_put_io_channel(channel); 5303 } 5304 5305 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 5306 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 5307 { 5308 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 5309 SPDK_BLOB_UNMAP); 5310 } 5311 5312 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 5313 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 5314 { 5315 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 5316 SPDK_BLOB_WRITE_ZEROES); 5317 } 5318 5319 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 5320 void *payload, uint64_t offset, uint64_t length, 5321 spdk_blob_op_complete cb_fn, void *cb_arg) 5322 { 5323 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 5324 SPDK_BLOB_WRITE); 5325 } 5326 5327 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 5328 void *payload, uint64_t offset, uint64_t length, 5329 spdk_blob_op_complete cb_fn, void *cb_arg) 5330 { 5331 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 5332 SPDK_BLOB_READ); 5333 } 5334 5335 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 5336 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 5337 spdk_blob_op_complete cb_fn, void *cb_arg) 5338 { 5339 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false); 5340 } 5341 5342 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 5343 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 5344 spdk_blob_op_complete cb_fn, void *cb_arg) 5345 { 5346 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true); 5347 } 5348 5349 struct spdk_bs_iter_ctx { 5350 int64_t page_num; 5351 struct spdk_blob_store *bs; 5352 5353 spdk_blob_op_with_handle_complete cb_fn; 5354 void *cb_arg; 5355 }; 5356 5357 static void 5358 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5359 { 5360 struct spdk_bs_iter_ctx *ctx = cb_arg; 5361 struct spdk_blob_store *bs = ctx->bs; 5362 spdk_blob_id id; 5363 5364 if (bserrno == 0) { 5365 
		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
		free(ctx);
		return;
	}

	ctx->page_num++;
	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
		free(ctx);
		return;
	}

	id = _spdk_bs_page_to_blobid(ctx->page_num);

	spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
}

void
spdk_bs_iter_first(struct spdk_blob_store *bs,
		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = -1;
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	_spdk_bs_iter_cpl(ctx, NULL, -1);
}

static void
_spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;

	_spdk_bs_iter_cpl(ctx, NULL, -1);
}

void
spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Close the existing blob */
	spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx);
}

static int
_spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		     uint16_t value_len, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;
	void *tmp;

	_spdk_blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}

	if (internal) {
		xattrs = &blob->xattrs_internal;
		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
	} else {
		xattrs = &blob->xattrs;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			/* Allocate the new value before freeing the old one,
			 * so the xattr is left intact if allocation fails. */
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}
			memcpy(tmp, value, value_len);
			free(xattr->value);
			xattr->value = tmp;
			xattr->value_len = value_len;

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}
	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(xattrs, xattr, link);

	blob->state = SPDK_BLOB_STATE_DIRTY;

	return 0;
}

int
spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		    uint16_t value_len)
{
	return _spdk_blob_set_xattr(blob, name, value, value_len, false);
}

static int
_spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;

	_spdk_blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}
	xattrs = internal ?
&blob->xattrs_internal : &blob->xattrs; 5501 5502 TAILQ_FOREACH(xattr, xattrs, link) { 5503 if (!strcmp(name, xattr->name)) { 5504 TAILQ_REMOVE(xattrs, xattr, link); 5505 free(xattr->value); 5506 free(xattr->name); 5507 free(xattr); 5508 5509 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 5510 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 5511 } 5512 blob->state = SPDK_BLOB_STATE_DIRTY; 5513 5514 return 0; 5515 } 5516 } 5517 5518 return -ENOENT; 5519 } 5520 5521 int 5522 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 5523 { 5524 return _spdk_blob_remove_xattr(blob, name, false); 5525 } 5526 5527 static int 5528 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 5529 const void **value, size_t *value_len, bool internal) 5530 { 5531 struct spdk_xattr *xattr; 5532 struct spdk_xattr_tailq *xattrs; 5533 5534 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 5535 5536 TAILQ_FOREACH(xattr, xattrs, link) { 5537 if (!strcmp(name, xattr->name)) { 5538 *value = xattr->value; 5539 *value_len = xattr->value_len; 5540 return 0; 5541 } 5542 } 5543 return -ENOENT; 5544 } 5545 5546 int 5547 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 5548 const void **value, size_t *value_len) 5549 { 5550 _spdk_blob_verify_md_op(blob); 5551 5552 return _spdk_blob_get_xattr_value(blob, name, value, value_len, false); 5553 } 5554 5555 struct spdk_xattr_names { 5556 uint32_t count; 5557 const char *names[0]; 5558 }; 5559 5560 static int 5561 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 5562 { 5563 struct spdk_xattr *xattr; 5564 int count = 0; 5565 5566 TAILQ_FOREACH(xattr, xattrs, link) { 5567 count++; 5568 } 5569 5570 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 5571 if (*names == NULL) { 5572 return -ENOMEM; 5573 } 5574 5575 TAILQ_FOREACH(xattr, xattrs, link) { 5576 (*names)->names[(*names)->count++] = xattr->name; 5577 } 5578 5579 return 0; 5580 } 5581 5582 int 5583 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 5584 { 5585 _spdk_blob_verify_md_op(blob); 5586 5587 return _spdk_blob_get_xattr_names(&blob->xattrs, names); 5588 } 5589 5590 uint32_t 5591 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 5592 { 5593 assert(names != NULL); 5594 5595 return names->count; 5596 } 5597 5598 const char * 5599 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 5600 { 5601 if (index >= names->count) { 5602 return NULL; 5603 } 5604 5605 return names->names[index]; 5606 } 5607 5608 void 5609 spdk_xattr_names_free(struct spdk_xattr_names *names) 5610 { 5611 free(names); 5612 } 5613 5614 struct spdk_bs_type 5615 spdk_bs_get_bstype(struct spdk_blob_store *bs) 5616 { 5617 return bs->bstype; 5618 } 5619 5620 void 5621 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 5622 { 5623 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 5624 } 5625 5626 bool 5627 spdk_blob_is_read_only(struct spdk_blob *blob) 5628 { 5629 assert(blob != NULL); 5630 return (blob->data_ro || blob->md_ro); 5631 } 5632 5633 bool 5634 spdk_blob_is_snapshot(struct spdk_blob *blob) 5635 { 5636 struct spdk_blob_list *snapshot_entry; 5637 5638 assert(blob != NULL); 5639 5640 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 5641 if (snapshot_entry->id == blob->id) { 5642 break; 5643 } 5644 } 5645 5646 if (snapshot_entry == NULL) { 5647 return false; 5648 } 5649 5650 return true; 5651 } 5652 5653 bool 5654 
spdk_blob_is_clone(struct spdk_blob *blob) 5655 { 5656 assert(blob != NULL); 5657 5658 if (blob->parent_id != SPDK_BLOBID_INVALID) { 5659 assert(spdk_blob_is_thin_provisioned(blob)); 5660 return true; 5661 } 5662 5663 return false; 5664 } 5665 5666 bool 5667 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 5668 { 5669 assert(blob != NULL); 5670 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 5671 } 5672 5673 spdk_blob_id 5674 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 5675 { 5676 struct spdk_blob_list *snapshot_entry = NULL; 5677 struct spdk_blob_list *clone_entry = NULL; 5678 5679 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5680 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 5681 if (clone_entry->id == blob_id) { 5682 return snapshot_entry->id; 5683 } 5684 } 5685 } 5686 5687 return SPDK_BLOBID_INVALID; 5688 } 5689 5690 int 5691 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 5692 size_t *count) 5693 { 5694 struct spdk_blob_list *snapshot_entry, *clone_entry; 5695 size_t n; 5696 5697 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5698 if (snapshot_entry->id == blobid) { 5699 break; 5700 } 5701 } 5702 if (snapshot_entry == NULL) { 5703 *count = 0; 5704 return 0; 5705 } 5706 5707 if (ids == NULL || *count < snapshot_entry->clone_count) { 5708 *count = snapshot_entry->clone_count; 5709 return -ENOMEM; 5710 } 5711 *count = snapshot_entry->clone_count; 5712 5713 n = 0; 5714 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 5715 ids[n++] = clone_entry->id; 5716 } 5717 5718 return 0; 5719 } 5720 5721 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB) 5722
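/*
 * Illustrative usage sketch (kept out of the build with #if 0): walking every
 * blob in a blobstore with the iterator API defined above. The handle g_bs
 * and the callback below are hypothetical application code, not part of this
 * file.
 */
#if 0
static struct spdk_blob_store *g_bs;

static void
example_iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	if (bserrno == -ENOENT) {
		/* No more blobs */
		return;
	} else if (bserrno != 0) {
		return;
	}

	SPDK_NOTICELOG("blob id %" PRIu64 ": %" PRIu64 " clusters\n",
		       spdk_blob_get_id(blob), spdk_blob_get_num_clusters(blob));

	/* spdk_bs_iter_next() closes this blob before opening the next one. */
	spdk_bs_iter_next(g_bs, blob, example_iter_cb, NULL);
}

static void
example_iterate_blobs(void)
{
	spdk_bs_iter_first(g_bs, example_iter_cb, NULL);
}
#endif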