/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/likely.h"
#include "spdk/util.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg);

static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
				uint16_t value_len, bool internal);
static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				      const void **value, size_t *value_len, bool internal);
static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void
_spdk_blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static void
_spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
	assert(bs->num_free_clusters > 0);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);

	spdk_bit_array_set(bs->used_clusters, cluster_num);
	bs->num_free_clusters--;
}

static int
_spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t
cluster_num, uint64_t cluster) 87 { 88 uint64_t *cluster_lba = &blob->active.clusters[cluster_num]; 89 90 _spdk_blob_verify_md_op(blob); 91 92 if (*cluster_lba != 0) { 93 return -EEXIST; 94 } 95 96 *cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster); 97 return 0; 98 } 99 100 static int 101 _spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num, 102 uint64_t *lowest_free_cluster, bool update_map) 103 { 104 pthread_mutex_lock(&blob->bs->used_clusters_mutex); 105 *lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters, 106 *lowest_free_cluster); 107 if (*lowest_free_cluster == UINT32_MAX) { 108 /* No more free clusters. Cannot satisfy the request */ 109 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 110 return -ENOSPC; 111 } 112 113 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id); 114 _spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster); 115 pthread_mutex_unlock(&blob->bs->used_clusters_mutex); 116 117 if (update_map) { 118 _spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster); 119 } 120 121 return 0; 122 } 123 124 static void 125 _spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num) 126 { 127 assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters)); 128 assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true); 129 assert(bs->num_free_clusters < bs->total_clusters); 130 131 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num); 132 133 pthread_mutex_lock(&bs->used_clusters_mutex); 134 spdk_bit_array_clear(bs->used_clusters, cluster_num); 135 bs->num_free_clusters++; 136 pthread_mutex_unlock(&bs->used_clusters_mutex); 137 } 138 139 static void 140 _spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs) 141 { 142 xattrs->count = 0; 143 xattrs->names = NULL; 144 xattrs->ctx = NULL; 145 xattrs->get_value = NULL; 146 } 147 148 void 149 spdk_blob_opts_init(struct spdk_blob_opts *opts) 150 { 151 opts->num_clusters = 0; 152 opts->thin_provision = false; 153 _spdk_blob_xattrs_init(&opts->xattrs); 154 } 155 156 static struct spdk_blob * 157 _spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id) 158 { 159 struct spdk_blob *blob; 160 161 blob = calloc(1, sizeof(*blob)); 162 if (!blob) { 163 return NULL; 164 } 165 166 blob->id = id; 167 blob->bs = bs; 168 169 blob->parent_id = SPDK_BLOBID_INVALID; 170 171 blob->state = SPDK_BLOB_STATE_DIRTY; 172 blob->active.num_pages = 1; 173 blob->active.pages = calloc(1, sizeof(*blob->active.pages)); 174 if (!blob->active.pages) { 175 free(blob); 176 return NULL; 177 } 178 179 blob->active.pages[0] = _spdk_bs_blobid_to_page(id); 180 181 TAILQ_INIT(&blob->xattrs); 182 TAILQ_INIT(&blob->xattrs_internal); 183 184 return blob; 185 } 186 187 static void 188 _spdk_xattrs_free(struct spdk_xattr_tailq *xattrs) 189 { 190 struct spdk_xattr *xattr, *xattr_tmp; 191 192 TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) { 193 TAILQ_REMOVE(xattrs, xattr, link); 194 free(xattr->name); 195 free(xattr->value); 196 free(xattr); 197 } 198 } 199 200 static void 201 _spdk_blob_free(struct spdk_blob *blob) 202 { 203 assert(blob != NULL); 204 205 free(blob->active.clusters); 206 free(blob->clean.clusters); 207 free(blob->active.pages); 208 free(blob->clean.pages); 209 210 _spdk_xattrs_free(&blob->xattrs); 211 _spdk_xattrs_free(&blob->xattrs_internal); 212 213 if (blob->back_bs_dev) { 214 blob->back_bs_dev->destroy(blob->back_bs_dev); 215 } 216 217 free(blob); 218 } 219 220 struct freeze_io_ctx { 221 struct 
spdk_bs_cpl cpl; 222 struct spdk_blob *blob; 223 }; 224 225 static void 226 _spdk_blob_io_sync(struct spdk_io_channel_iter *i) 227 { 228 spdk_for_each_channel_continue(i, 0); 229 } 230 231 static void 232 _spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i) 233 { 234 struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i); 235 struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch); 236 struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 237 struct spdk_bs_request_set *set; 238 struct spdk_bs_user_op_args *args; 239 spdk_bs_user_op_t *op, *tmp; 240 241 TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) { 242 set = (struct spdk_bs_request_set *)op; 243 args = &set->u.user_op; 244 245 if (args->blob == ctx->blob) { 246 TAILQ_REMOVE(&ch->queued_io, op, link); 247 spdk_bs_user_op_execute(op); 248 } 249 } 250 251 spdk_for_each_channel_continue(i, 0); 252 } 253 254 static void 255 _spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status) 256 { 257 struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 258 259 ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0); 260 261 free(ctx); 262 } 263 264 static void 265 _spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 266 { 267 struct freeze_io_ctx *ctx; 268 269 ctx = calloc(1, sizeof(*ctx)); 270 if (!ctx) { 271 cb_fn(cb_arg, -ENOMEM); 272 return; 273 } 274 275 ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 276 ctx->cpl.u.blob_basic.cb_fn = cb_fn; 277 ctx->cpl.u.blob_basic.cb_arg = cb_arg; 278 ctx->blob = blob; 279 280 /* Freeze I/O on blob */ 281 blob->frozen_refcnt++; 282 283 if (blob->frozen_refcnt == 1) { 284 spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl); 285 } else { 286 cb_fn(cb_arg, 0); 287 free(ctx); 288 } 289 } 290 291 static void 292 _spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 293 { 294 struct freeze_io_ctx *ctx; 295 296 ctx = calloc(1, sizeof(*ctx)); 297 if (!ctx) { 298 cb_fn(cb_arg, -ENOMEM); 299 return; 300 } 301 302 ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 303 ctx->cpl.u.blob_basic.cb_fn = cb_fn; 304 ctx->cpl.u.blob_basic.cb_arg = cb_arg; 305 ctx->blob = blob; 306 307 assert(blob->frozen_refcnt > 0); 308 309 blob->frozen_refcnt--; 310 311 if (blob->frozen_refcnt == 0) { 312 spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl); 313 } else { 314 cb_fn(cb_arg, 0); 315 free(ctx); 316 } 317 } 318 319 static int 320 _spdk_blob_mark_clean(struct spdk_blob *blob) 321 { 322 uint64_t *clusters = NULL; 323 uint32_t *pages = NULL; 324 325 assert(blob != NULL); 326 327 if (blob->active.num_clusters) { 328 assert(blob->active.clusters); 329 clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters)); 330 if (!clusters) { 331 return -ENOMEM; 332 } 333 memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*clusters)); 334 } 335 336 if (blob->active.num_pages) { 337 assert(blob->active.pages); 338 pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages)); 339 if (!pages) { 340 free(clusters); 341 return -ENOMEM; 342 } 343 memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*pages)); 344 } 345 346 free(blob->clean.clusters); 347 free(blob->clean.pages); 348 349 blob->clean.num_clusters = blob->active.num_clusters; 350 blob->clean.clusters = blob->active.clusters; 351 blob->clean.num_pages = blob->active.num_pages; 352 blob->clean.pages = blob->active.pages; 353 354 blob->active.clusters = clusters; 355 
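	/* 'clean' now holds the arrays describing the on-disk state, while 'active'
	 * is given fresh copies that later modifications can safely overwrite. */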
blob->active.pages = pages; 356 357 /* If the metadata was dirtied again while the metadata was being written to disk, 358 * we do not want to revert the DIRTY state back to CLEAN here. 359 */ 360 if (blob->state == SPDK_BLOB_STATE_LOADING) { 361 blob->state = SPDK_BLOB_STATE_CLEAN; 362 } 363 364 return 0; 365 } 366 367 static int 368 _spdk_blob_deserialize_xattr(struct spdk_blob *blob, 369 struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal) 370 { 371 struct spdk_xattr *xattr; 372 373 if (desc_xattr->length != sizeof(desc_xattr->name_length) + 374 sizeof(desc_xattr->value_length) + 375 desc_xattr->name_length + desc_xattr->value_length) { 376 return -EINVAL; 377 } 378 379 xattr = calloc(1, sizeof(*xattr)); 380 if (xattr == NULL) { 381 return -ENOMEM; 382 } 383 384 xattr->name = malloc(desc_xattr->name_length + 1); 385 if (xattr->name == NULL) { 386 free(xattr); 387 return -ENOMEM; 388 } 389 memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length); 390 xattr->name[desc_xattr->name_length] = '\0'; 391 392 xattr->value = malloc(desc_xattr->value_length); 393 if (xattr->value == NULL) { 394 free(xattr->name); 395 free(xattr); 396 return -ENOMEM; 397 } 398 xattr->value_len = desc_xattr->value_length; 399 memcpy(xattr->value, 400 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 401 desc_xattr->value_length); 402 403 TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link); 404 405 return 0; 406 } 407 408 409 static int 410 _spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob) 411 { 412 struct spdk_blob_md_descriptor *desc; 413 size_t cur_desc = 0; 414 void *tmp; 415 416 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 417 while (cur_desc < sizeof(page->descriptors)) { 418 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 419 if (desc->length == 0) { 420 /* If padding and length are 0, this terminates the page */ 421 break; 422 } 423 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 424 struct spdk_blob_md_descriptor_flags *desc_flags; 425 426 desc_flags = (struct spdk_blob_md_descriptor_flags *)desc; 427 428 if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) { 429 return -EINVAL; 430 } 431 432 if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) != 433 SPDK_BLOB_INVALID_FLAGS_MASK) { 434 return -EINVAL; 435 } 436 437 if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) != 438 SPDK_BLOB_DATA_RO_FLAGS_MASK) { 439 blob->data_ro = true; 440 blob->md_ro = true; 441 } 442 443 if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) != 444 SPDK_BLOB_MD_RO_FLAGS_MASK) { 445 blob->md_ro = true; 446 } 447 448 if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 449 blob->data_ro = true; 450 blob->md_ro = true; 451 } 452 453 blob->invalid_flags = desc_flags->invalid_flags; 454 blob->data_ro_flags = desc_flags->data_ro_flags; 455 blob->md_ro_flags = desc_flags->md_ro_flags; 456 457 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 458 struct spdk_blob_md_descriptor_extent *desc_extent; 459 unsigned int i, j; 460 unsigned int cluster_count = blob->active.num_clusters; 461 462 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 463 464 if (desc_extent->length == 0 || 465 (desc_extent->length % sizeof(desc_extent->extents[0]) != 0)) { 466 return -EINVAL; 467 } 468 469 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 470 for (j = 0; j < desc_extent->extents[i].length; j++) { 471 if (desc_extent->extents[i].cluster_idx 
!= 0) { 472 if (!spdk_bit_array_get(blob->bs->used_clusters, 473 desc_extent->extents[i].cluster_idx + j)) { 474 return -EINVAL; 475 } 476 } 477 cluster_count++; 478 } 479 } 480 481 if (cluster_count == 0) { 482 return -EINVAL; 483 } 484 tmp = realloc(blob->active.clusters, cluster_count * sizeof(uint64_t)); 485 if (tmp == NULL) { 486 return -ENOMEM; 487 } 488 blob->active.clusters = tmp; 489 blob->active.cluster_array_size = cluster_count; 490 491 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 492 for (j = 0; j < desc_extent->extents[i].length; j++) { 493 if (desc_extent->extents[i].cluster_idx != 0) { 494 blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs, 495 desc_extent->extents[i].cluster_idx + j); 496 } else if (spdk_blob_is_thin_provisioned(blob)) { 497 blob->active.clusters[blob->active.num_clusters++] = 0; 498 } else { 499 return -EINVAL; 500 } 501 } 502 } 503 504 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 505 int rc; 506 507 rc = _spdk_blob_deserialize_xattr(blob, 508 (struct spdk_blob_md_descriptor_xattr *) desc, false); 509 if (rc != 0) { 510 return rc; 511 } 512 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 513 int rc; 514 515 rc = _spdk_blob_deserialize_xattr(blob, 516 (struct spdk_blob_md_descriptor_xattr *) desc, true); 517 if (rc != 0) { 518 return rc; 519 } 520 } else { 521 /* Unrecognized descriptor type. Do not fail - just continue to the 522 * next descriptor. If this descriptor is associated with some feature 523 * defined in a newer version of blobstore, that version of blobstore 524 * should create and set an associated feature flag to specify if this 525 * blob can be loaded or not. 526 */ 527 } 528 529 /* Advance to the next descriptor */ 530 cur_desc += sizeof(*desc) + desc->length; 531 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 532 break; 533 } 534 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 535 } 536 537 return 0; 538 } 539 540 static int 541 _spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count, 542 struct spdk_blob *blob) 543 { 544 const struct spdk_blob_md_page *page; 545 uint32_t i; 546 int rc; 547 548 assert(page_count > 0); 549 assert(pages[0].sequence_num == 0); 550 assert(blob != NULL); 551 assert(blob->state == SPDK_BLOB_STATE_LOADING); 552 assert(blob->active.clusters == NULL); 553 554 /* The blobid provided doesn't match what's in the MD, this can 555 * happen for example if a bogus blobid is passed in through open. 
556 */ 557 if (blob->id != pages[0].id) { 558 SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n", 559 blob->id, pages[0].id); 560 return -ENOENT; 561 } 562 563 for (i = 0; i < page_count; i++) { 564 page = &pages[i]; 565 566 assert(page->id == blob->id); 567 assert(page->sequence_num == i); 568 569 rc = _spdk_blob_parse_page(page, blob); 570 if (rc != 0) { 571 return rc; 572 } 573 } 574 575 return 0; 576 } 577 578 static int 579 _spdk_blob_serialize_add_page(const struct spdk_blob *blob, 580 struct spdk_blob_md_page **pages, 581 uint32_t *page_count, 582 struct spdk_blob_md_page **last_page) 583 { 584 struct spdk_blob_md_page *page; 585 586 assert(pages != NULL); 587 assert(page_count != NULL); 588 589 if (*page_count == 0) { 590 assert(*pages == NULL); 591 *page_count = 1; 592 *pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE, 593 SPDK_BS_PAGE_SIZE, 594 NULL); 595 } else { 596 assert(*pages != NULL); 597 (*page_count)++; 598 *pages = spdk_dma_realloc(*pages, 599 SPDK_BS_PAGE_SIZE * (*page_count), 600 SPDK_BS_PAGE_SIZE, 601 NULL); 602 } 603 604 if (*pages == NULL) { 605 *page_count = 0; 606 *last_page = NULL; 607 return -ENOMEM; 608 } 609 610 page = &(*pages)[*page_count - 1]; 611 memset(page, 0, sizeof(*page)); 612 page->id = blob->id; 613 page->sequence_num = *page_count - 1; 614 page->next = SPDK_INVALID_MD_PAGE; 615 *last_page = page; 616 617 return 0; 618 } 619 620 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor. 621 * Update required_sz on both success and failure. 622 * 623 */ 624 static int 625 _spdk_blob_serialize_xattr(const struct spdk_xattr *xattr, 626 uint8_t *buf, size_t buf_sz, 627 size_t *required_sz, bool internal) 628 { 629 struct spdk_blob_md_descriptor_xattr *desc; 630 631 *required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) + 632 strlen(xattr->name) + 633 xattr->value_len; 634 635 if (buf_sz < *required_sz) { 636 return -1; 637 } 638 639 desc = (struct spdk_blob_md_descriptor_xattr *)buf; 640 641 desc->type = internal ? 
SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR; 642 desc->length = sizeof(desc->name_length) + 643 sizeof(desc->value_length) + 644 strlen(xattr->name) + 645 xattr->value_len; 646 desc->name_length = strlen(xattr->name); 647 desc->value_length = xattr->value_len; 648 649 memcpy(desc->name, xattr->name, desc->name_length); 650 memcpy((void *)((uintptr_t)desc->name + desc->name_length), 651 xattr->value, 652 desc->value_length); 653 654 return 0; 655 } 656 657 static void 658 _spdk_blob_serialize_extent(const struct spdk_blob *blob, 659 uint64_t start_cluster, uint64_t *next_cluster, 660 uint8_t *buf, size_t buf_sz) 661 { 662 struct spdk_blob_md_descriptor_extent *desc; 663 size_t cur_sz; 664 uint64_t i, extent_idx; 665 uint64_t lba, lba_per_cluster, lba_count; 666 667 /* The buffer must have room for at least one extent */ 668 cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]); 669 if (buf_sz < cur_sz) { 670 *next_cluster = start_cluster; 671 return; 672 } 673 674 desc = (struct spdk_blob_md_descriptor_extent *)buf; 675 desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT; 676 677 lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1); 678 679 lba = blob->active.clusters[start_cluster]; 680 lba_count = lba_per_cluster; 681 extent_idx = 0; 682 for (i = start_cluster + 1; i < blob->active.num_clusters; i++) { 683 if ((lba + lba_count) == blob->active.clusters[i]) { 684 lba_count += lba_per_cluster; 685 continue; 686 } else if (lba == 0 && blob->active.clusters[i] == 0) { 687 lba_count += lba_per_cluster; 688 continue; 689 } 690 desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 691 desc->extents[extent_idx].length = lba_count / lba_per_cluster; 692 extent_idx++; 693 694 cur_sz += sizeof(desc->extents[extent_idx]); 695 696 if (buf_sz < cur_sz) { 697 /* If we ran out of buffer space, return */ 698 desc->length = sizeof(desc->extents[0]) * extent_idx; 699 *next_cluster = i; 700 return; 701 } 702 703 lba = blob->active.clusters[i]; 704 lba_count = lba_per_cluster; 705 } 706 707 desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 708 desc->extents[extent_idx].length = lba_count / lba_per_cluster; 709 extent_idx++; 710 711 desc->length = sizeof(desc->extents[0]) * extent_idx; 712 *next_cluster = blob->active.num_clusters; 713 714 return; 715 } 716 717 static void 718 _spdk_blob_serialize_flags(const struct spdk_blob *blob, 719 uint8_t *buf, size_t *buf_sz) 720 { 721 struct spdk_blob_md_descriptor_flags *desc; 722 723 /* 724 * Flags get serialized first, so we should always have room for the flags 725 * descriptor. 
726 */ 727 assert(*buf_sz >= sizeof(*desc)); 728 729 desc = (struct spdk_blob_md_descriptor_flags *)buf; 730 desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS; 731 desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor); 732 desc->invalid_flags = blob->invalid_flags; 733 desc->data_ro_flags = blob->data_ro_flags; 734 desc->md_ro_flags = blob->md_ro_flags; 735 736 *buf_sz -= sizeof(*desc); 737 } 738 739 static int 740 _spdk_blob_serialize_xattrs(const struct spdk_blob *blob, 741 const struct spdk_xattr_tailq *xattrs, bool internal, 742 struct spdk_blob_md_page **pages, 743 struct spdk_blob_md_page *cur_page, 744 uint32_t *page_count, uint8_t **buf, 745 size_t *remaining_sz) 746 { 747 const struct spdk_xattr *xattr; 748 int rc; 749 750 TAILQ_FOREACH(xattr, xattrs, link) { 751 size_t required_sz = 0; 752 753 rc = _spdk_blob_serialize_xattr(xattr, 754 *buf, *remaining_sz, 755 &required_sz, internal); 756 if (rc < 0) { 757 /* Need to add a new page to the chain */ 758 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, 759 &cur_page); 760 if (rc < 0) { 761 spdk_dma_free(*pages); 762 *pages = NULL; 763 *page_count = 0; 764 return rc; 765 } 766 767 *buf = (uint8_t *)cur_page->descriptors; 768 *remaining_sz = sizeof(cur_page->descriptors); 769 770 /* Try again */ 771 required_sz = 0; 772 rc = _spdk_blob_serialize_xattr(xattr, 773 *buf, *remaining_sz, 774 &required_sz, internal); 775 776 if (rc < 0) { 777 spdk_dma_free(*pages); 778 *pages = NULL; 779 *page_count = 0; 780 return rc; 781 } 782 } 783 784 *remaining_sz -= required_sz; 785 *buf += required_sz; 786 } 787 788 return 0; 789 } 790 791 static int 792 _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages, 793 uint32_t *page_count) 794 { 795 struct spdk_blob_md_page *cur_page; 796 int rc; 797 uint8_t *buf; 798 size_t remaining_sz; 799 uint64_t last_cluster; 800 801 assert(pages != NULL); 802 assert(page_count != NULL); 803 assert(blob != NULL); 804 assert(blob->state == SPDK_BLOB_STATE_DIRTY); 805 806 *pages = NULL; 807 *page_count = 0; 808 809 /* A blob always has at least 1 page, even if it has no descriptors */ 810 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page); 811 if (rc < 0) { 812 return rc; 813 } 814 815 buf = (uint8_t *)cur_page->descriptors; 816 remaining_sz = sizeof(cur_page->descriptors); 817 818 /* Serialize flags */ 819 _spdk_blob_serialize_flags(blob, buf, &remaining_sz); 820 buf += sizeof(struct spdk_blob_md_descriptor_flags); 821 822 /* Serialize xattrs */ 823 rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false, 824 pages, cur_page, page_count, &buf, &remaining_sz); 825 if (rc < 0) { 826 return rc; 827 } 828 829 /* Serialize internal xattrs */ 830 rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true, 831 pages, cur_page, page_count, &buf, &remaining_sz); 832 if (rc < 0) { 833 return rc; 834 } 835 836 /* Serialize extents */ 837 last_cluster = 0; 838 while (last_cluster < blob->active.num_clusters) { 839 _spdk_blob_serialize_extent(blob, last_cluster, &last_cluster, 840 buf, remaining_sz); 841 842 if (last_cluster == blob->active.num_clusters) { 843 break; 844 } 845 846 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, 847 &cur_page); 848 if (rc < 0) { 849 return rc; 850 } 851 852 buf = (uint8_t *)cur_page->descriptors; 853 remaining_sz = sizeof(cur_page->descriptors); 854 } 855 856 return 0; 857 } 858 859 struct spdk_blob_load_ctx { 860 struct spdk_blob *blob; 861 862 struct spdk_blob_md_page *pages; 863 uint32_t num_pages; 864 
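	/* Sequence driving the load, plus the caller's completion; saved here so the
	 * chained metadata page reads can finish the load later. */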
spdk_bs_sequence_t *seq; 865 866 spdk_bs_sequence_cpl cb_fn; 867 void *cb_arg; 868 }; 869 870 static uint32_t 871 _spdk_blob_md_page_calc_crc(void *page) 872 { 873 uint32_t crc; 874 875 crc = BLOB_CRC32C_INITIAL; 876 crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc); 877 crc ^= BLOB_CRC32C_INITIAL; 878 879 return crc; 880 881 } 882 883 static void 884 _spdk_blob_load_final(void *cb_arg, int bserrno) 885 { 886 struct spdk_blob_load_ctx *ctx = cb_arg; 887 struct spdk_blob *blob = ctx->blob; 888 889 _spdk_blob_mark_clean(blob); 890 891 ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno); 892 893 /* Free the memory */ 894 spdk_dma_free(ctx->pages); 895 free(ctx); 896 } 897 898 static void 899 _spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno) 900 { 901 struct spdk_blob_load_ctx *ctx = cb_arg; 902 struct spdk_blob *blob = ctx->blob; 903 904 if (bserrno != 0) { 905 goto error; 906 } 907 908 blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot); 909 910 if (blob->back_bs_dev == NULL) { 911 bserrno = -ENOMEM; 912 goto error; 913 } 914 915 _spdk_blob_load_final(ctx, bserrno); 916 return; 917 918 error: 919 SPDK_ERRLOG("Snapshot fail\n"); 920 _spdk_blob_free(blob); 921 ctx->cb_fn(ctx->seq, NULL, bserrno); 922 spdk_dma_free(ctx->pages); 923 free(ctx); 924 } 925 926 static void 927 _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 928 { 929 struct spdk_blob_load_ctx *ctx = cb_arg; 930 struct spdk_blob *blob = ctx->blob; 931 struct spdk_blob_md_page *page; 932 const void *value; 933 size_t len; 934 int rc; 935 uint32_t crc; 936 937 if (bserrno) { 938 SPDK_ERRLOG("Metadata page read failed: %d\n", bserrno); 939 _spdk_blob_free(blob); 940 ctx->cb_fn(seq, NULL, bserrno); 941 spdk_dma_free(ctx->pages); 942 free(ctx); 943 return; 944 } 945 946 page = &ctx->pages[ctx->num_pages - 1]; 947 crc = _spdk_blob_md_page_calc_crc(page); 948 if (crc != page->crc) { 949 SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages); 950 _spdk_blob_free(blob); 951 ctx->cb_fn(seq, NULL, -EINVAL); 952 spdk_dma_free(ctx->pages); 953 free(ctx); 954 return; 955 } 956 957 if (page->next != SPDK_INVALID_MD_PAGE) { 958 uint32_t next_page = page->next; 959 uint64_t next_lba = _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + next_page); 960 961 962 assert(next_lba < (blob->bs->md_start + blob->bs->md_len)); 963 964 /* Read the next page */ 965 ctx->num_pages++; 966 ctx->pages = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages), 967 sizeof(*page), NULL); 968 if (ctx->pages == NULL) { 969 ctx->cb_fn(seq, ctx->cb_arg, -ENOMEM); 970 free(ctx); 971 return; 972 } 973 974 spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1], 975 next_lba, 976 _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)), 977 _spdk_blob_load_cpl, ctx); 978 return; 979 } 980 981 /* Parse the pages */ 982 rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob); 983 if (rc) { 984 _spdk_blob_free(blob); 985 ctx->cb_fn(seq, NULL, rc); 986 spdk_dma_free(ctx->pages); 987 free(ctx); 988 return; 989 } 990 ctx->seq = seq; 991 992 993 if (spdk_blob_is_thin_provisioned(blob)) { 994 rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true); 995 if (rc == 0) { 996 if (len != sizeof(spdk_blob_id)) { 997 _spdk_blob_free(blob); 998 ctx->cb_fn(seq, NULL, -EINVAL); 999 spdk_dma_free(ctx->pages); 1000 free(ctx); 1001 return; 1002 } 1003 /* open snapshot blob and continue in the callback function */ 1004 blob->parent_id = *(spdk_blob_id *)value; 1005 spdk_bs_open_blob(blob->bs, blob->parent_id, 
1006 _spdk_blob_load_snapshot_cpl, ctx); 1007 return; 1008 } else { 1009 /* add zeroes_dev for thin provisioned blob */ 1010 blob->back_bs_dev = spdk_bs_create_zeroes_dev(); 1011 } 1012 } else { 1013 /* standard blob */ 1014 blob->back_bs_dev = NULL; 1015 } 1016 _spdk_blob_load_final(ctx, bserrno); 1017 } 1018 1019 /* Load a blob from disk given a blobid */ 1020 static void 1021 _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 1022 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 1023 { 1024 struct spdk_blob_load_ctx *ctx; 1025 struct spdk_blob_store *bs; 1026 uint32_t page_num; 1027 uint64_t lba; 1028 1029 _spdk_blob_verify_md_op(blob); 1030 1031 bs = blob->bs; 1032 1033 ctx = calloc(1, sizeof(*ctx)); 1034 if (!ctx) { 1035 cb_fn(seq, cb_arg, -ENOMEM); 1036 return; 1037 } 1038 1039 ctx->blob = blob; 1040 ctx->pages = spdk_dma_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 1041 SPDK_BS_PAGE_SIZE, NULL); 1042 if (!ctx->pages) { 1043 free(ctx); 1044 cb_fn(seq, cb_arg, -ENOMEM); 1045 return; 1046 } 1047 ctx->num_pages = 1; 1048 ctx->cb_fn = cb_fn; 1049 ctx->cb_arg = cb_arg; 1050 1051 page_num = _spdk_bs_blobid_to_page(blob->id); 1052 lba = _spdk_bs_page_to_lba(blob->bs, bs->md_start + page_num); 1053 1054 blob->state = SPDK_BLOB_STATE_LOADING; 1055 1056 spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba, 1057 _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE), 1058 _spdk_blob_load_cpl, ctx); 1059 } 1060 1061 struct spdk_blob_persist_ctx { 1062 struct spdk_blob *blob; 1063 1064 struct spdk_bs_super_block *super; 1065 1066 struct spdk_blob_md_page *pages; 1067 1068 uint64_t idx; 1069 1070 spdk_bs_sequence_t *seq; 1071 spdk_bs_sequence_cpl cb_fn; 1072 void *cb_arg; 1073 }; 1074 1075 static void 1076 _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1077 { 1078 struct spdk_blob_persist_ctx *ctx = cb_arg; 1079 struct spdk_blob *blob = ctx->blob; 1080 1081 if (bserrno == 0) { 1082 _spdk_blob_mark_clean(blob); 1083 } 1084 1085 /* Call user callback */ 1086 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 1087 1088 /* Free the memory */ 1089 spdk_dma_free(ctx->pages); 1090 free(ctx); 1091 } 1092 1093 static void 1094 _spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1095 { 1096 struct spdk_blob_persist_ctx *ctx = cb_arg; 1097 struct spdk_blob *blob = ctx->blob; 1098 struct spdk_blob_store *bs = blob->bs; 1099 void *tmp; 1100 size_t i; 1101 1102 /* Release all clusters that were truncated */ 1103 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 1104 uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]); 1105 1106 /* Nothing to release if it was not allocated */ 1107 if (blob->active.clusters[i] != 0) { 1108 _spdk_bs_release_cluster(bs, cluster_num); 1109 } 1110 } 1111 1112 if (blob->active.num_clusters == 0) { 1113 free(blob->active.clusters); 1114 blob->active.clusters = NULL; 1115 blob->active.cluster_array_size = 0; 1116 } else { 1117 tmp = realloc(blob->active.clusters, sizeof(uint64_t) * blob->active.num_clusters); 1118 assert(tmp != NULL); 1119 blob->active.clusters = tmp; 1120 blob->active.cluster_array_size = blob->active.num_clusters; 1121 } 1122 1123 _spdk_blob_persist_complete(seq, ctx, bserrno); 1124 } 1125 1126 static void 1127 _spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1128 { 1129 struct spdk_blob_persist_ctx *ctx = cb_arg; 1130 struct spdk_blob *blob = ctx->blob; 1131 struct spdk_blob_store *bs = blob->bs; 1132 spdk_bs_batch_t *batch; 1133 
	size_t i;
	uint64_t lba;
	uint32_t lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_clusters_cpl, ctx);

	/* Unmap all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, send it as an unmap. */
		if (lba_count > 0) {
			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, send the unmap now */
	if (lba_count > 0) {
		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static void
_spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place, so
	 * any pages in the clean list must be released from the used_md_pages
	 * bit array.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		spdk_bit_array_clear(bs->used_md_pages, blob->clean.pages[i]);
	}

	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		page_num = _spdk_bs_blobid_to_page(blob->id);
		spdk_bit_array_clear(bs->used_md_pages, page_num);
	}

	/* Move on to unmapping clusters */
	_spdk_blob_persist_unmap_clusters(seq, ctx, 0);
}

static void
_spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	spdk_bs_batch_t *batch;
	size_t i;

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);

	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->clean.pages[i]);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	/* The first page will only be zeroed if this is a delete.
*/ 1235 if (blob->active.num_pages == 0) { 1236 uint32_t page_num; 1237 1238 /* The first page in the metadata goes where the blobid indicates */ 1239 page_num = _spdk_bs_blobid_to_page(blob->id); 1240 lba = _spdk_bs_page_to_lba(bs, bs->md_start + page_num); 1241 1242 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 1243 } 1244 1245 spdk_bs_batch_close(batch); 1246 } 1247 1248 static void 1249 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1250 { 1251 struct spdk_blob_persist_ctx *ctx = cb_arg; 1252 struct spdk_blob *blob = ctx->blob; 1253 struct spdk_blob_store *bs = blob->bs; 1254 uint64_t lba; 1255 uint32_t lba_count; 1256 struct spdk_blob_md_page *page; 1257 1258 if (blob->active.num_pages == 0) { 1259 /* Move on to the next step */ 1260 _spdk_blob_persist_zero_pages(seq, ctx, 0); 1261 return; 1262 } 1263 1264 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page)); 1265 1266 page = &ctx->pages[0]; 1267 /* The first page in the metadata goes where the blobid indicates */ 1268 lba = _spdk_bs_page_to_lba(bs, bs->md_start + _spdk_bs_blobid_to_page(blob->id)); 1269 1270 spdk_bs_sequence_write_dev(seq, page, lba, lba_count, 1271 _spdk_blob_persist_zero_pages, ctx); 1272 } 1273 1274 static void 1275 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1276 { 1277 struct spdk_blob_persist_ctx *ctx = cb_arg; 1278 struct spdk_blob *blob = ctx->blob; 1279 struct spdk_blob_store *bs = blob->bs; 1280 uint64_t lba; 1281 uint32_t lba_count; 1282 struct spdk_blob_md_page *page; 1283 spdk_bs_batch_t *batch; 1284 size_t i; 1285 1286 /* Clusters don't move around in blobs. The list shrinks or grows 1287 * at the end, but no changes ever occur in the middle of the list. 1288 */ 1289 1290 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page)); 1291 1292 batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx); 1293 1294 /* This starts at 1. The root page is not written until 1295 * all of the others are finished 1296 */ 1297 for (i = 1; i < blob->active.num_pages; i++) { 1298 page = &ctx->pages[i]; 1299 assert(page->sequence_num == i); 1300 1301 lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->active.pages[i]); 1302 1303 spdk_bs_batch_write_dev(batch, page, lba, lba_count); 1304 } 1305 1306 spdk_bs_batch_close(batch); 1307 } 1308 1309 static int 1310 _spdk_blob_resize(struct spdk_blob *blob, uint64_t sz) 1311 { 1312 uint64_t i; 1313 uint64_t *tmp; 1314 uint64_t lfc; /* lowest free cluster */ 1315 uint64_t num_clusters; 1316 struct spdk_blob_store *bs; 1317 1318 bs = blob->bs; 1319 1320 _spdk_blob_verify_md_op(blob); 1321 1322 if (blob->active.num_clusters == sz) { 1323 return 0; 1324 } 1325 1326 if (blob->active.num_clusters < blob->active.cluster_array_size) { 1327 /* If this blob was resized to be larger, then smaller, then 1328 * larger without syncing, then the cluster array already 1329 * contains spare assigned clusters we can use. 1330 */ 1331 num_clusters = spdk_min(blob->active.cluster_array_size, 1332 sz); 1333 } else { 1334 num_clusters = blob->active.num_clusters; 1335 } 1336 1337 /* Do two passes - one to verify that we can obtain enough clusters 1338 * and another to actually claim them. 1339 */ 1340 1341 if (spdk_blob_is_thin_provisioned(blob) == false) { 1342 lfc = 0; 1343 for (i = num_clusters; i < sz; i++) { 1344 lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc); 1345 if (lfc == UINT32_MAX) { 1346 /* No more free clusters. 
Cannot satisfy the request */ 1347 return -ENOSPC; 1348 } 1349 lfc++; 1350 } 1351 } 1352 1353 if (sz > num_clusters) { 1354 /* Expand the cluster array if necessary. 1355 * We only shrink the array when persisting. 1356 */ 1357 tmp = realloc(blob->active.clusters, sizeof(uint64_t) * sz); 1358 if (sz > 0 && tmp == NULL) { 1359 return -ENOMEM; 1360 } 1361 memset(tmp + blob->active.cluster_array_size, 0, 1362 sizeof(uint64_t) * (sz - blob->active.cluster_array_size)); 1363 blob->active.clusters = tmp; 1364 blob->active.cluster_array_size = sz; 1365 } 1366 1367 blob->state = SPDK_BLOB_STATE_DIRTY; 1368 1369 if (spdk_blob_is_thin_provisioned(blob) == false) { 1370 lfc = 0; 1371 for (i = num_clusters; i < sz; i++) { 1372 _spdk_bs_allocate_cluster(blob, i, &lfc, true); 1373 lfc++; 1374 } 1375 } 1376 1377 blob->active.num_clusters = sz; 1378 1379 return 0; 1380 } 1381 1382 static void 1383 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx) 1384 { 1385 spdk_bs_sequence_t *seq = ctx->seq; 1386 struct spdk_blob *blob = ctx->blob; 1387 struct spdk_blob_store *bs = blob->bs; 1388 uint64_t i; 1389 uint32_t page_num; 1390 void *tmp; 1391 int rc; 1392 1393 if (blob->active.num_pages == 0) { 1394 /* This is the signal that the blob should be deleted. 1395 * Immediately jump to the clean up routine. */ 1396 assert(blob->clean.num_pages > 0); 1397 ctx->idx = blob->clean.num_pages - 1; 1398 blob->state = SPDK_BLOB_STATE_CLEAN; 1399 _spdk_blob_persist_zero_pages(seq, ctx, 0); 1400 return; 1401 1402 } 1403 1404 /* Generate the new metadata */ 1405 rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 1406 if (rc < 0) { 1407 _spdk_blob_persist_complete(seq, ctx, rc); 1408 return; 1409 } 1410 1411 assert(blob->active.num_pages >= 1); 1412 1413 /* Resize the cache of page indices */ 1414 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 1415 if (!tmp) { 1416 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1417 return; 1418 } 1419 blob->active.pages = tmp; 1420 1421 /* Assign this metadata to pages. This requires two passes - 1422 * one to verify that there are enough pages and a second 1423 * to actually claim them. */ 1424 page_num = 0; 1425 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 1426 for (i = 1; i < blob->active.num_pages; i++) { 1427 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1428 if (page_num == UINT32_MAX) { 1429 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1430 return; 1431 } 1432 page_num++; 1433 } 1434 1435 page_num = 0; 1436 blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id); 1437 for (i = 1; i < blob->active.num_pages; i++) { 1438 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1439 ctx->pages[i - 1].next = page_num; 1440 /* Now that previous metadata page is complete, calculate the crc for it. 
*/ 1441 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1442 blob->active.pages[i] = page_num; 1443 spdk_bit_array_set(bs->used_md_pages, page_num); 1444 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id); 1445 page_num++; 1446 } 1447 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1448 /* Start writing the metadata from last page to first */ 1449 ctx->idx = blob->active.num_pages - 1; 1450 blob->state = SPDK_BLOB_STATE_CLEAN; 1451 _spdk_blob_persist_write_page_chain(seq, ctx, 0); 1452 } 1453 1454 static void 1455 _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1456 { 1457 struct spdk_blob_persist_ctx *ctx = cb_arg; 1458 1459 ctx->blob->bs->clean = 0; 1460 1461 spdk_dma_free(ctx->super); 1462 1463 _spdk_blob_persist_start(ctx); 1464 } 1465 1466 static void 1467 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 1468 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 1469 1470 1471 static void 1472 _spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1473 { 1474 struct spdk_blob_persist_ctx *ctx = cb_arg; 1475 1476 ctx->super->clean = 0; 1477 if (ctx->super->size == 0) { 1478 ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen; 1479 } 1480 1481 _spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx); 1482 } 1483 1484 1485 /* Write a blob to disk */ 1486 static void 1487 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 1488 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 1489 { 1490 struct spdk_blob_persist_ctx *ctx; 1491 1492 _spdk_blob_verify_md_op(blob); 1493 1494 if (blob->state == SPDK_BLOB_STATE_CLEAN) { 1495 cb_fn(seq, cb_arg, 0); 1496 return; 1497 } 1498 1499 ctx = calloc(1, sizeof(*ctx)); 1500 if (!ctx) { 1501 cb_fn(seq, cb_arg, -ENOMEM); 1502 return; 1503 } 1504 ctx->blob = blob; 1505 ctx->seq = seq; 1506 ctx->cb_fn = cb_fn; 1507 ctx->cb_arg = cb_arg; 1508 1509 if (blob->bs->clean) { 1510 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 1511 if (!ctx->super) { 1512 cb_fn(seq, cb_arg, -ENOMEM); 1513 free(ctx); 1514 return; 1515 } 1516 1517 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(blob->bs, 0), 1518 _spdk_bs_byte_to_lba(blob->bs, sizeof(*ctx->super)), 1519 _spdk_blob_persist_dirty, ctx); 1520 } else { 1521 _spdk_blob_persist_start(ctx); 1522 } 1523 } 1524 1525 struct spdk_blob_copy_cluster_ctx { 1526 struct spdk_blob *blob; 1527 uint8_t *buf; 1528 uint64_t page; 1529 uint64_t new_cluster; 1530 spdk_bs_sequence_t *seq; 1531 }; 1532 1533 static void 1534 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 1535 { 1536 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1537 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 1538 TAILQ_HEAD(, spdk_bs_request_set) requests; 1539 spdk_bs_user_op_t *op; 1540 1541 TAILQ_INIT(&requests); 1542 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 1543 1544 while (!TAILQ_EMPTY(&requests)) { 1545 op = TAILQ_FIRST(&requests); 1546 TAILQ_REMOVE(&requests, op, link); 1547 if (bserrno == 0) { 1548 spdk_bs_user_op_execute(op); 1549 } else { 1550 spdk_bs_user_op_abort(op); 1551 } 1552 } 1553 1554 spdk_dma_free(ctx->buf); 1555 free(ctx); 1556 } 1557 1558 static void 1559 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno) 1560 { 1561 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 
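	/* Completion for the cluster-map update requested from the metadata thread;
	 * -EEXIST below is treated as a lost race rather than a failure. */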
1562 1563 if (bserrno) { 1564 uint32_t cluster_number; 1565 1566 if (bserrno == -EEXIST) { 1567 /* The metadata insert failed because another thread 1568 * allocated the cluster first. Free our cluster 1569 * but continue without error. */ 1570 bserrno = 0; 1571 } 1572 1573 cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page); 1574 _spdk_bs_release_cluster(ctx->blob->bs, cluster_number); 1575 } 1576 1577 spdk_bs_sequence_finish(ctx->seq, bserrno); 1578 } 1579 1580 static void 1581 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1582 { 1583 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1584 uint32_t cluster_number; 1585 1586 if (bserrno) { 1587 /* The write failed, so jump to the final completion handler */ 1588 spdk_bs_sequence_finish(seq, bserrno); 1589 return; 1590 } 1591 1592 cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page); 1593 1594 _spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 1595 _spdk_blob_insert_cluster_cpl, ctx); 1596 } 1597 1598 static void 1599 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1600 { 1601 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1602 1603 if (bserrno != 0) { 1604 /* The read failed, so jump to the final completion handler */ 1605 spdk_bs_sequence_finish(seq, bserrno); 1606 return; 1607 } 1608 1609 /* Write whole cluster */ 1610 spdk_bs_sequence_write_dev(seq, ctx->buf, 1611 _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 1612 _spdk_bs_cluster_to_lba(ctx->blob->bs, 1), 1613 _spdk_blob_write_copy_cpl, ctx); 1614 } 1615 1616 static void 1617 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob, 1618 struct spdk_io_channel *_ch, 1619 uint64_t io_unit, spdk_bs_user_op_t *op) 1620 { 1621 struct spdk_bs_cpl cpl; 1622 struct spdk_bs_channel *ch; 1623 struct spdk_blob_copy_cluster_ctx *ctx; 1624 uint32_t cluster_start_page; 1625 uint32_t cluster_number; 1626 int rc; 1627 1628 ch = spdk_io_channel_get_ctx(_ch); 1629 1630 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 1631 /* There are already operations pending. Queue this user op 1632 * and return because it will be re-executed when the outstanding 1633 * cluster allocation completes. */ 1634 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 1635 return; 1636 } 1637 1638 /* Round the io_unit offset down to the first page in the cluster */ 1639 cluster_start_page = _spdk_bs_io_unit_to_cluster_start(blob, io_unit); 1640 1641 /* Calculate which index in the metadata cluster array the corresponding 1642 * cluster is supposed to be at. 
*/ 1643 cluster_number = _spdk_bs_io_unit_to_cluster_number(blob, io_unit); 1644 1645 ctx = calloc(1, sizeof(*ctx)); 1646 if (!ctx) { 1647 spdk_bs_user_op_abort(op); 1648 return; 1649 } 1650 1651 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 1652 1653 ctx->blob = blob; 1654 ctx->page = cluster_start_page; 1655 1656 if (blob->parent_id != SPDK_BLOBID_INVALID) { 1657 ctx->buf = spdk_dma_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, NULL); 1658 if (!ctx->buf) { 1659 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 1660 blob->bs->cluster_sz); 1661 free(ctx); 1662 spdk_bs_user_op_abort(op); 1663 return; 1664 } 1665 } 1666 1667 rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, false); 1668 if (rc != 0) { 1669 spdk_dma_free(ctx->buf); 1670 free(ctx); 1671 spdk_bs_user_op_abort(op); 1672 return; 1673 } 1674 1675 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1676 cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl; 1677 cpl.u.blob_basic.cb_arg = ctx; 1678 1679 ctx->seq = spdk_bs_sequence_start(_ch, &cpl); 1680 if (!ctx->seq) { 1681 _spdk_bs_release_cluster(blob->bs, ctx->new_cluster); 1682 spdk_dma_free(ctx->buf); 1683 free(ctx); 1684 spdk_bs_user_op_abort(op); 1685 return; 1686 } 1687 1688 /* Queue the user op to block other incoming operations */ 1689 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 1690 1691 if (blob->parent_id != SPDK_BLOBID_INVALID) { 1692 /* Read cluster from backing device */ 1693 spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 1694 _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 1695 _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 1696 _spdk_blob_write_copy, ctx); 1697 } else { 1698 _spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 1699 _spdk_blob_insert_cluster_cpl, ctx); 1700 } 1701 } 1702 1703 static void 1704 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 1705 uint64_t *lba, uint32_t *lba_count) 1706 { 1707 *lba_count = length; 1708 1709 if (!_spdk_bs_io_unit_is_allocated(blob, io_unit)) { 1710 assert(blob->back_bs_dev != NULL); 1711 *lba = _spdk_bs_io_unit_to_back_dev_lba(blob, io_unit); 1712 *lba_count = _spdk_bs_io_unit_to_back_dev_lba(blob, *lba_count); 1713 } else { 1714 *lba = _spdk_bs_blob_io_unit_to_lba(blob, io_unit); 1715 } 1716 } 1717 1718 struct op_split_ctx { 1719 struct spdk_blob *blob; 1720 struct spdk_io_channel *channel; 1721 uint64_t io_unit_offset; 1722 uint64_t io_units_remaining; 1723 void *curr_payload; 1724 enum spdk_blob_op_type op_type; 1725 spdk_bs_sequence_t *seq; 1726 }; 1727 1728 static void 1729 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno) 1730 { 1731 struct op_split_ctx *ctx = cb_arg; 1732 struct spdk_blob *blob = ctx->blob; 1733 struct spdk_io_channel *ch = ctx->channel; 1734 enum spdk_blob_op_type op_type = ctx->op_type; 1735 uint8_t *buf = ctx->curr_payload; 1736 uint64_t offset = ctx->io_unit_offset; 1737 uint64_t length = ctx->io_units_remaining; 1738 uint64_t op_length; 1739 1740 if (bserrno != 0 || ctx->io_units_remaining == 0) { 1741 spdk_bs_sequence_finish(ctx->seq, bserrno); 1742 free(ctx); 1743 return; 1744 } 1745 1746 op_length = spdk_min(length, _spdk_bs_num_io_units_to_cluster_boundary(blob, 1747 offset)); 1748 1749 /* Update length and payload for next operation */ 1750 ctx->io_units_remaining -= op_length; 1751 ctx->io_unit_offset += op_length; 1752 if (op_type == 
SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
		ctx->curr_payload += op_length * blob->bs->io_unit_size;
	}

	switch (op_type) {
	case SPDK_BLOB_READ:
		spdk_blob_io_read(blob, ch, buf, offset, op_length,
				  _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_WRITE:
		spdk_blob_io_write(blob, ch, buf, offset, op_length,
				   _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_UNMAP:
		spdk_blob_io_unmap(blob, ch, offset, op_length,
				   _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_WRITE_ZEROES:
		spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
					  _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_READV:
	case SPDK_BLOB_WRITEV:
		SPDK_ERRLOG("readv/writev not valid for %s\n", __func__);
		spdk_bs_sequence_finish(ctx->seq, -EINVAL);
		free(ctx);
		break;
	}
}

static void
_spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
				   void *payload, uint64_t offset, uint64_t length,
				   spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
	struct op_split_ctx *ctx;
	spdk_bs_sequence_t *seq;
	struct spdk_bs_cpl cpl;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(struct op_split_ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = spdk_bs_sequence_start(ch, &cpl);
	if (!seq) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->channel = ch;
	ctx->curr_payload = payload;
	ctx->io_unit_offset = offset;
	ctx->io_units_remaining = length;
	ctx->op_type = op_type;
	ctx->seq = seq;

	_spdk_blob_request_submit_op_split_next(ctx, 0);
}

static void
_spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
				    void *payload, uint64_t offset, uint64_t length,
				    spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
	struct spdk_bs_cpl cpl;
	uint64_t lba;
	uint32_t lba_count;

	assert(blob != NULL);

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);

	if (blob->frozen_refcnt) {
		/* This blob I/O is frozen */
		spdk_bs_user_op_t *op;
		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);

		op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
		if (!op) {
			cb_fn(cb_arg, -ENOMEM);
			return;
		}

		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);

		return;
	}

	switch (op_type) {
	case SPDK_BLOB_READ: {
		spdk_bs_batch_t *batch;

		batch = spdk_bs_batch_open(_ch, &cpl);
		if (!batch) {
			cb_fn(cb_arg, -ENOMEM);
			return;
		}

		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
			/* Read from the blob */
			spdk_bs_batch_read_dev(batch, payload, lba, lba_count);
		} else {
			/* Read from the backing block device */
			spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
		}

		spdk_bs_batch_close(batch);
		break;
	}
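	/* Writes and write-zeroes follow: if the target cluster is already allocated
	 * they go straight to the device as a batch; otherwise the op is queued and a
	 * cluster is allocated (and, for clones, copied from the backing dev) first. */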
case SPDK_BLOB_WRITE: 1876 case SPDK_BLOB_WRITE_ZEROES: { 1877 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 1878 /* Write to the blob */ 1879 spdk_bs_batch_t *batch; 1880 1881 if (lba_count == 0) { 1882 cb_fn(cb_arg, 0); 1883 return; 1884 } 1885 1886 batch = spdk_bs_batch_open(_ch, &cpl); 1887 if (!batch) { 1888 cb_fn(cb_arg, -ENOMEM); 1889 return; 1890 } 1891 1892 if (op_type == SPDK_BLOB_WRITE) { 1893 spdk_bs_batch_write_dev(batch, payload, lba, lba_count); 1894 } else { 1895 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 1896 } 1897 1898 spdk_bs_batch_close(batch); 1899 } else { 1900 /* Queue this operation and allocate the cluster */ 1901 spdk_bs_user_op_t *op; 1902 1903 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 1904 if (!op) { 1905 cb_fn(cb_arg, -ENOMEM); 1906 return; 1907 } 1908 1909 _spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op); 1910 } 1911 break; 1912 } 1913 case SPDK_BLOB_UNMAP: { 1914 spdk_bs_batch_t *batch; 1915 1916 batch = spdk_bs_batch_open(_ch, &cpl); 1917 if (!batch) { 1918 cb_fn(cb_arg, -ENOMEM); 1919 return; 1920 } 1921 1922 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 1923 spdk_bs_batch_unmap_dev(batch, lba, lba_count); 1924 } 1925 1926 spdk_bs_batch_close(batch); 1927 break; 1928 } 1929 case SPDK_BLOB_READV: 1930 case SPDK_BLOB_WRITEV: 1931 SPDK_ERRLOG("readv/write not valid\n"); 1932 cb_fn(cb_arg, -EINVAL); 1933 break; 1934 } 1935 } 1936 1937 static void 1938 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 1939 void *payload, uint64_t offset, uint64_t length, 1940 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 1941 { 1942 assert(blob != NULL); 1943 1944 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 1945 cb_fn(cb_arg, -EPERM); 1946 return; 1947 } 1948 1949 if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 1950 cb_fn(cb_arg, -EINVAL); 1951 return; 1952 } 1953 if (length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset)) { 1954 _spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length, 1955 cb_fn, cb_arg, op_type); 1956 } else { 1957 _spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length, 1958 cb_fn, cb_arg, op_type); 1959 } 1960 } 1961 1962 struct rw_iov_ctx { 1963 struct spdk_blob *blob; 1964 struct spdk_io_channel *channel; 1965 spdk_blob_op_complete cb_fn; 1966 void *cb_arg; 1967 bool read; 1968 int iovcnt; 1969 struct iovec *orig_iov; 1970 uint64_t io_unit_offset; 1971 uint64_t io_units_remaining; 1972 uint64_t io_units_done; 1973 struct iovec iov[0]; 1974 }; 1975 1976 static void 1977 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1978 { 1979 assert(cb_arg == NULL); 1980 spdk_bs_sequence_finish(seq, bserrno); 1981 } 1982 1983 static void 1984 _spdk_rw_iov_split_next(void *cb_arg, int bserrno) 1985 { 1986 struct rw_iov_ctx *ctx = cb_arg; 1987 struct spdk_blob *blob = ctx->blob; 1988 struct iovec *iov, *orig_iov; 1989 int iovcnt; 1990 size_t orig_iovoff; 1991 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 1992 uint64_t byte_count; 1993 1994 if (bserrno != 0 || ctx->io_units_remaining == 0) { 1995 ctx->cb_fn(ctx->cb_arg, bserrno); 1996 free(ctx); 1997 return; 1998 } 1999 2000 io_unit_offset = ctx->io_unit_offset; 2001 io_units_to_boundary = _spdk_bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 2002 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 2003 /* 2004 * Get index 
and offset into the original iov array for our current position in the I/O sequence. 2005 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 2006 * point to the current position in the I/O sequence. 2007 */ 2008 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 2009 orig_iov = &ctx->orig_iov[0]; 2010 orig_iovoff = 0; 2011 while (byte_count > 0) { 2012 if (byte_count >= orig_iov->iov_len) { 2013 byte_count -= orig_iov->iov_len; 2014 orig_iov++; 2015 } else { 2016 orig_iovoff = byte_count; 2017 byte_count = 0; 2018 } 2019 } 2020 2021 /* 2022 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 2023 * bytes of this next I/O remain to be accounted for in the new iov array. 2024 */ 2025 byte_count = io_units_count * blob->bs->io_unit_size; 2026 iov = &ctx->iov[0]; 2027 iovcnt = 0; 2028 while (byte_count > 0) { 2029 assert(iovcnt < ctx->iovcnt); 2030 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 2031 iov->iov_base = orig_iov->iov_base + orig_iovoff; 2032 byte_count -= iov->iov_len; 2033 orig_iovoff = 0; 2034 orig_iov++; 2035 iov++; 2036 iovcnt++; 2037 } 2038 2039 ctx->io_unit_offset += io_units_count; 2040 ctx->io_units_remaining -= io_units_count; 2041 ctx->io_units_done += io_units_count; 2042 iov = &ctx->iov[0]; 2043 2044 if (ctx->read) { 2045 spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2046 io_units_count, _spdk_rw_iov_split_next, ctx); 2047 } else { 2048 spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2049 io_units_count, _spdk_rw_iov_split_next, ctx); 2050 } 2051 } 2052 2053 static void 2054 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2055 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 2056 spdk_blob_op_complete cb_fn, void *cb_arg, bool read) 2057 { 2058 struct spdk_bs_cpl cpl; 2059 2060 assert(blob != NULL); 2061 2062 if (!read && blob->data_ro) { 2063 cb_fn(cb_arg, -EPERM); 2064 return; 2065 } 2066 2067 if (length == 0) { 2068 cb_fn(cb_arg, 0); 2069 return; 2070 } 2071 2072 if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2073 cb_fn(cb_arg, -EINVAL); 2074 return; 2075 } 2076 2077 /* 2078 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 2079 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 2080 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 2081 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 2082 * to allocate a separate iov array and split the I/O such that none of the resulting 2083 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 2084 * but since this case happens very infrequently, any performance impact will be negligible. 2085 * 2086 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 2087 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 2088 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 2089 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
2090 */ 2091 if (spdk_likely(length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset))) { 2092 uint32_t lba_count; 2093 uint64_t lba; 2094 2095 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2096 2097 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2098 cpl.u.blob_basic.cb_fn = cb_fn; 2099 cpl.u.blob_basic.cb_arg = cb_arg; 2100 if (blob->frozen_refcnt) { 2101 /* This blob I/O is frozen */ 2102 spdk_bs_user_op_t *op; 2103 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 2104 2105 op = spdk_bs_user_op_alloc(_channel, &cpl, read, blob, iov, iovcnt, offset, length); 2106 if (!op) { 2107 cb_fn(cb_arg, -ENOMEM); 2108 return; 2109 } 2110 2111 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2112 2113 return; 2114 } 2115 2116 if (read) { 2117 spdk_bs_sequence_t *seq; 2118 2119 seq = spdk_bs_sequence_start(_channel, &cpl); 2120 if (!seq) { 2121 cb_fn(cb_arg, -ENOMEM); 2122 return; 2123 } 2124 2125 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2126 spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2127 } else { 2128 spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 2129 _spdk_rw_iov_done, NULL); 2130 } 2131 } else { 2132 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2133 spdk_bs_sequence_t *seq; 2134 2135 seq = spdk_bs_sequence_start(_channel, &cpl); 2136 if (!seq) { 2137 cb_fn(cb_arg, -ENOMEM); 2138 return; 2139 } 2140 2141 spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2142 } else { 2143 /* Queue this operation and allocate the cluster */ 2144 spdk_bs_user_op_t *op; 2145 2146 op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 2147 length); 2148 if (!op) { 2149 cb_fn(cb_arg, -ENOMEM); 2150 return; 2151 } 2152 2153 _spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op); 2154 } 2155 } 2156 } else { 2157 struct rw_iov_ctx *ctx; 2158 2159 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 2160 if (ctx == NULL) { 2161 cb_fn(cb_arg, -ENOMEM); 2162 return; 2163 } 2164 2165 ctx->blob = blob; 2166 ctx->channel = _channel; 2167 ctx->cb_fn = cb_fn; 2168 ctx->cb_arg = cb_arg; 2169 ctx->read = read; 2170 ctx->orig_iov = iov; 2171 ctx->iovcnt = iovcnt; 2172 ctx->io_unit_offset = offset; 2173 ctx->io_units_remaining = length; 2174 ctx->io_units_done = 0; 2175 2176 _spdk_rw_iov_split_next(ctx, 0); 2177 } 2178 } 2179 2180 static struct spdk_blob * 2181 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 2182 { 2183 struct spdk_blob *blob; 2184 2185 TAILQ_FOREACH(blob, &bs->blobs, link) { 2186 if (blob->id == blobid) { 2187 return blob; 2188 } 2189 } 2190 2191 return NULL; 2192 } 2193 2194 static int 2195 _spdk_bs_channel_create(void *io_device, void *ctx_buf) 2196 { 2197 struct spdk_blob_store *bs = io_device; 2198 struct spdk_bs_channel *channel = ctx_buf; 2199 struct spdk_bs_dev *dev; 2200 uint32_t max_ops = bs->max_channel_ops; 2201 uint32_t i; 2202 2203 dev = bs->dev; 2204 2205 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 2206 if (!channel->req_mem) { 2207 return -1; 2208 } 2209 2210 TAILQ_INIT(&channel->reqs); 2211 2212 for (i = 0; i < max_ops; i++) { 2213 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 2214 } 2215 2216 channel->bs = bs; 2217 channel->dev = dev; 2218 channel->dev_channel = dev->create_channel(dev); 2219 2220 if (!channel->dev_channel) { 2221 SPDK_ERRLOG("Failed to create device 
channel.\n"); 2222 free(channel->req_mem); 2223 return -1; 2224 } 2225 2226 TAILQ_INIT(&channel->need_cluster_alloc); 2227 TAILQ_INIT(&channel->queued_io); 2228 2229 return 0; 2230 } 2231 2232 static void 2233 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf) 2234 { 2235 struct spdk_bs_channel *channel = ctx_buf; 2236 spdk_bs_user_op_t *op; 2237 2238 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 2239 op = TAILQ_FIRST(&channel->need_cluster_alloc); 2240 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 2241 spdk_bs_user_op_abort(op); 2242 } 2243 2244 while (!TAILQ_EMPTY(&channel->queued_io)) { 2245 op = TAILQ_FIRST(&channel->queued_io); 2246 TAILQ_REMOVE(&channel->queued_io, op, link); 2247 spdk_bs_user_op_abort(op); 2248 } 2249 2250 free(channel->req_mem); 2251 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 2252 } 2253 2254 static void 2255 _spdk_bs_dev_destroy(void *io_device) 2256 { 2257 struct spdk_blob_store *bs = io_device; 2258 struct spdk_blob *blob, *blob_tmp; 2259 2260 bs->dev->destroy(bs->dev); 2261 2262 TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) { 2263 TAILQ_REMOVE(&bs->blobs, blob, link); 2264 _spdk_blob_free(blob); 2265 } 2266 2267 pthread_mutex_destroy(&bs->used_clusters_mutex); 2268 2269 spdk_bit_array_free(&bs->used_blobids); 2270 spdk_bit_array_free(&bs->used_md_pages); 2271 spdk_bit_array_free(&bs->used_clusters); 2272 /* 2273 * If this function is called for any reason except a successful unload, 2274 * the unload_cpl type will be NONE and this will be a nop. 2275 */ 2276 spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err); 2277 2278 free(bs); 2279 } 2280 2281 static int 2282 _spdk_bs_blob_list_add(struct spdk_blob *blob) 2283 { 2284 spdk_blob_id snapshot_id; 2285 struct spdk_blob_list *snapshot_entry = NULL; 2286 struct spdk_blob_list *clone_entry = NULL; 2287 2288 assert(blob != NULL); 2289 2290 snapshot_id = blob->parent_id; 2291 if (snapshot_id == SPDK_BLOBID_INVALID) { 2292 return 0; 2293 } 2294 2295 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 2296 if (snapshot_entry->id == snapshot_id) { 2297 break; 2298 } 2299 } 2300 2301 if (snapshot_entry == NULL) { 2302 /* Snapshot not found */ 2303 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 2304 if (snapshot_entry == NULL) { 2305 return -ENOMEM; 2306 } 2307 snapshot_entry->id = snapshot_id; 2308 TAILQ_INIT(&snapshot_entry->clones); 2309 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 2310 } else { 2311 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2312 if (clone_entry->id == blob->id) { 2313 break; 2314 } 2315 } 2316 } 2317 2318 if (clone_entry == NULL) { 2319 /* Clone not found */ 2320 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 2321 if (clone_entry == NULL) { 2322 return -ENOMEM; 2323 } 2324 clone_entry->id = blob->id; 2325 TAILQ_INIT(&clone_entry->clones); 2326 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 2327 snapshot_entry->clone_count++; 2328 } 2329 2330 return 0; 2331 } 2332 2333 static int 2334 _spdk_bs_blob_list_remove(struct spdk_blob *blob) 2335 { 2336 struct spdk_blob_list *snapshot_entry = NULL; 2337 struct spdk_blob_list *clone_entry = NULL; 2338 spdk_blob_id snapshot_id; 2339 2340 assert(blob != NULL); 2341 2342 snapshot_id = blob->parent_id; 2343 if (snapshot_id == SPDK_BLOBID_INVALID) { 2344 return 0; 2345 } 2346 2347 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 2348 if (snapshot_entry->id == snapshot_id) { 2349 break; 2350 } 2351 } 2352 2353 
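/*
 * Snapshot/clone list entries are only created by _spdk_bs_blob_list_add(),
 * which runs for every blob with a valid parent_id. Since parent_id was
 * checked above, the matching snapshot entry (and the clone entry looked up
 * below) should already exist; the asserts capture that assumption.
 */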
assert(snapshot_entry != NULL); 2354 2355 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2356 if (clone_entry->id == blob->id) { 2357 break; 2358 } 2359 } 2360 2361 assert(clone_entry != NULL); 2362 2363 blob->parent_id = SPDK_BLOBID_INVALID; 2364 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2365 free(clone_entry); 2366 2367 snapshot_entry->clone_count--; 2368 2369 return 0; 2370 } 2371 2372 static int 2373 _spdk_bs_blob_list_free(struct spdk_blob_store *bs) 2374 { 2375 struct spdk_blob_list *snapshot_entry; 2376 struct spdk_blob_list *snapshot_entry_tmp; 2377 struct spdk_blob_list *clone_entry; 2378 struct spdk_blob_list *clone_entry_tmp; 2379 2380 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 2381 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 2382 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2383 free(clone_entry); 2384 } 2385 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 2386 free(snapshot_entry); 2387 } 2388 2389 return 0; 2390 } 2391 2392 static void 2393 _spdk_bs_free(struct spdk_blob_store *bs) 2394 { 2395 _spdk_bs_blob_list_free(bs); 2396 2397 spdk_bs_unregister_md_thread(bs); 2398 spdk_io_device_unregister(bs, _spdk_bs_dev_destroy); 2399 } 2400 2401 void 2402 spdk_bs_opts_init(struct spdk_bs_opts *opts) 2403 { 2404 opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ; 2405 opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES; 2406 opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS; 2407 opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS; 2408 memset(&opts->bstype, 0, sizeof(opts->bstype)); 2409 opts->iter_cb_fn = NULL; 2410 opts->iter_cb_arg = NULL; 2411 } 2412 2413 static int 2414 _spdk_bs_opts_verify(struct spdk_bs_opts *opts) 2415 { 2416 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 2417 opts->max_channel_ops == 0) { 2418 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 2419 return -1; 2420 } 2421 2422 return 0; 2423 } 2424 2425 static int 2426 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs) 2427 { 2428 struct spdk_blob_store *bs; 2429 uint64_t dev_size; 2430 int rc; 2431 2432 dev_size = dev->blocklen * dev->blockcnt; 2433 if (dev_size < opts->cluster_sz) { 2434 /* Device size cannot be smaller than cluster size of blobstore */ 2435 SPDK_INFOLOG(SPDK_LOG_BLOB, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 2436 dev_size, opts->cluster_sz); 2437 return -ENOSPC; 2438 } 2439 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 2440 /* Cluster size cannot be smaller than page size */ 2441 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 2442 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 2443 return -EINVAL; 2444 } 2445 bs = calloc(1, sizeof(struct spdk_blob_store)); 2446 if (!bs) { 2447 return -ENOMEM; 2448 } 2449 2450 TAILQ_INIT(&bs->blobs); 2451 TAILQ_INIT(&bs->snapshots); 2452 bs->dev = dev; 2453 bs->md_thread = spdk_get_thread(); 2454 assert(bs->md_thread != NULL); 2455 2456 /* 2457 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an 2458 * even multiple of the cluster size. 
2459 */ 2460 bs->cluster_sz = opts->cluster_sz; 2461 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 2462 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 2463 bs->num_free_clusters = bs->total_clusters; 2464 bs->used_clusters = spdk_bit_array_create(bs->total_clusters); 2465 bs->io_unit_size = dev->blocklen; 2466 if (bs->used_clusters == NULL) { 2467 free(bs); 2468 return -ENOMEM; 2469 } 2470 2471 bs->max_channel_ops = opts->max_channel_ops; 2472 bs->super_blob = SPDK_BLOBID_INVALID; 2473 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 2474 2475 /* The metadata is assumed to be at least 1 page */ 2476 bs->used_md_pages = spdk_bit_array_create(1); 2477 bs->used_blobids = spdk_bit_array_create(0); 2478 2479 pthread_mutex_init(&bs->used_clusters_mutex, NULL); 2480 2481 spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy, 2482 sizeof(struct spdk_bs_channel), "blobstore"); 2483 rc = spdk_bs_register_md_thread(bs); 2484 if (rc == -1) { 2485 spdk_io_device_unregister(bs, NULL); 2486 pthread_mutex_destroy(&bs->used_clusters_mutex); 2487 spdk_bit_array_free(&bs->used_blobids); 2488 spdk_bit_array_free(&bs->used_md_pages); 2489 spdk_bit_array_free(&bs->used_clusters); 2490 free(bs); 2491 /* FIXME: this is a lie but don't know how to get a proper error code here */ 2492 return -ENOMEM; 2493 } 2494 2495 *_bs = bs; 2496 return 0; 2497 } 2498 2499 /* START spdk_bs_load, spdk_bs_load_ctx will used for both load and unload. */ 2500 2501 struct spdk_bs_load_ctx { 2502 struct spdk_blob_store *bs; 2503 struct spdk_bs_super_block *super; 2504 2505 struct spdk_bs_md_mask *mask; 2506 bool in_page_chain; 2507 uint32_t page_index; 2508 uint32_t cur_page; 2509 struct spdk_blob_md_page *page; 2510 bool is_load; 2511 2512 spdk_bs_sequence_t *seq; 2513 spdk_blob_op_with_handle_complete iter_cb_fn; 2514 void *iter_cb_arg; 2515 }; 2516 2517 static void 2518 _spdk_bs_load_ctx_fail(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 2519 { 2520 assert(bserrno != 0); 2521 2522 spdk_dma_free(ctx->super); 2523 spdk_bs_sequence_finish(seq, bserrno); 2524 /* 2525 * Only free the blobstore when a load fails. If an unload fails (for some reason) 2526 * we want to keep the blobstore in case the caller wants to try again. 
2527 */ 2528 if (ctx->is_load) { 2529 _spdk_bs_free(ctx->bs); 2530 } 2531 free(ctx); 2532 } 2533 2534 static void 2535 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask) 2536 { 2537 uint32_t i = 0; 2538 2539 while (true) { 2540 i = spdk_bit_array_find_first_set(array, i); 2541 if (i >= mask->length) { 2542 break; 2543 } 2544 mask->mask[i / 8] |= 1U << (i % 8); 2545 i++; 2546 } 2547 } 2548 2549 static int 2550 _spdk_bs_load_mask(struct spdk_bit_array **array_ptr, struct spdk_bs_md_mask *mask) 2551 { 2552 struct spdk_bit_array *array; 2553 uint32_t i; 2554 2555 if (spdk_bit_array_resize(array_ptr, mask->length) < 0) { 2556 return -ENOMEM; 2557 } 2558 2559 array = *array_ptr; 2560 for (i = 0; i < mask->length; i++) { 2561 if (mask->mask[i / 8] & (1U << (i % 8))) { 2562 spdk_bit_array_set(array, i); 2563 } 2564 } 2565 2566 return 0; 2567 } 2568 2569 static void 2570 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2571 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2572 { 2573 /* Update the values in the super block */ 2574 super->super_blob = bs->super_blob; 2575 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 2576 super->crc = _spdk_blob_md_page_calc_crc(super); 2577 spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0), 2578 _spdk_bs_byte_to_lba(bs, sizeof(*super)), 2579 cb_fn, cb_arg); 2580 } 2581 2582 static void 2583 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2584 { 2585 struct spdk_bs_load_ctx *ctx = arg; 2586 uint64_t mask_size, lba, lba_count; 2587 2588 /* Write out the used clusters mask */ 2589 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 2590 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2591 if (!ctx->mask) { 2592 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2593 return; 2594 } 2595 2596 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 2597 ctx->mask->length = ctx->bs->total_clusters; 2598 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters)); 2599 2600 _spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask); 2601 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 2602 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 2603 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2604 } 2605 2606 static void 2607 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2608 { 2609 struct spdk_bs_load_ctx *ctx = arg; 2610 uint64_t mask_size, lba, lba_count; 2611 2612 if (seq->bserrno) { 2613 _spdk_bs_load_ctx_fail(seq, ctx, seq->bserrno); 2614 return; 2615 } 2616 2617 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 2618 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2619 if (!ctx->mask) { 2620 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2621 return; 2622 } 2623 2624 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 2625 ctx->mask->length = ctx->super->md_len; 2626 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 2627 2628 _spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask); 2629 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 2630 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 2631 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2632 } 2633 2634 static void 2635 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2636 { 2637 
struct spdk_bs_load_ctx *ctx = arg; 2638 uint64_t mask_size, lba, lba_count; 2639 2640 if (ctx->super->used_blobid_mask_len == 0) { 2641 /* 2642 * This is a pre-v3 on-disk format where the blobid mask does not get 2643 * written to disk. 2644 */ 2645 cb_fn(seq, arg, 0); 2646 return; 2647 } 2648 2649 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 2650 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2651 if (!ctx->mask) { 2652 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2653 return; 2654 } 2655 2656 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 2657 ctx->mask->length = ctx->super->md_len; 2658 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 2659 2660 _spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask); 2661 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 2662 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 2663 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2664 } 2665 2666 static void 2667 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 2668 { 2669 struct spdk_bs_load_ctx *ctx = arg; 2670 2671 if (bserrno == 0) { 2672 if (ctx->iter_cb_fn) { 2673 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 2674 } 2675 _spdk_bs_blob_list_add(blob); 2676 spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx); 2677 return; 2678 } 2679 2680 if (bserrno == -ENOENT) { 2681 bserrno = 0; 2682 } else { 2683 /* 2684 * This case needs to be looked at further. Same problem 2685 * exists with applications that rely on explicit blob 2686 * iteration. We should just skip the blob that failed 2687 * to load and continue on to the next one. 2688 */ 2689 SPDK_ERRLOG("Error in iterating blobs\n"); 2690 } 2691 2692 ctx->iter_cb_fn = NULL; 2693 2694 spdk_dma_free(ctx->super); 2695 spdk_dma_free(ctx->mask); 2696 spdk_bs_sequence_finish(ctx->seq, bserrno); 2697 free(ctx); 2698 } 2699 2700 static void 2701 _spdk_bs_load_complete(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 2702 { 2703 ctx->seq = seq; 2704 spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx); 2705 } 2706 2707 static void 2708 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2709 { 2710 struct spdk_bs_load_ctx *ctx = cb_arg; 2711 int rc; 2712 2713 /* The type must be correct */ 2714 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 2715 2716 /* The length of the mask (in bits) must not be greater than 2717 * the length of the buffer (converted to bits) */ 2718 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 2719 2720 /* The length of the mask must be exactly equal to the size 2721 * (in pages) of the metadata region */ 2722 assert(ctx->mask->length == ctx->super->md_len); 2723 2724 rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask); 2725 if (rc < 0) { 2726 spdk_dma_free(ctx->mask); 2727 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2728 return; 2729 } 2730 2731 _spdk_bs_load_complete(seq, ctx, bserrno); 2732 } 2733 2734 static void 2735 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2736 { 2737 struct spdk_bs_load_ctx *ctx = cb_arg; 2738 uint64_t lba, lba_count, mask_size; 2739 int rc; 2740 2741 /* The type must be correct */ 2742 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 2743 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 2744 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * 
sizeof( 2745 struct spdk_blob_md_page) * 8)); 2746 /* The length of the mask must be exactly equal to the total number of clusters */ 2747 assert(ctx->mask->length == ctx->bs->total_clusters); 2748 2749 rc = _spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask); 2750 if (rc < 0) { 2751 spdk_dma_free(ctx->mask); 2752 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2753 return; 2754 } 2755 2756 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters); 2757 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 2758 2759 spdk_dma_free(ctx->mask); 2760 2761 /* Read the used blobids mask */ 2762 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 2763 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2764 if (!ctx->mask) { 2765 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2766 return; 2767 } 2768 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 2769 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 2770 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2771 _spdk_bs_load_used_blobids_cpl, ctx); 2772 } 2773 2774 static void 2775 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2776 { 2777 struct spdk_bs_load_ctx *ctx = cb_arg; 2778 uint64_t lba, lba_count, mask_size; 2779 int rc; 2780 2781 /* The type must be correct */ 2782 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 2783 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 2784 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 2785 8)); 2786 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 2787 assert(ctx->mask->length == ctx->super->md_len); 2788 2789 rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask); 2790 if (rc < 0) { 2791 spdk_dma_free(ctx->mask); 2792 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2793 return; 2794 } 2795 2796 spdk_dma_free(ctx->mask); 2797 2798 /* Read the used clusters mask */ 2799 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 2800 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2801 if (!ctx->mask) { 2802 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2803 return; 2804 } 2805 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 2806 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 2807 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2808 _spdk_bs_load_used_clusters_cpl, ctx); 2809 } 2810 2811 static void 2812 _spdk_bs_load_read_used_pages(spdk_bs_sequence_t *seq, void *cb_arg) 2813 { 2814 struct spdk_bs_load_ctx *ctx = cb_arg; 2815 uint64_t lba, lba_count, mask_size; 2816 2817 /* Read the used pages mask */ 2818 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 2819 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2820 if (!ctx->mask) { 2821 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2822 return; 2823 } 2824 2825 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 2826 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 2827 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2828 _spdk_bs_load_used_pages_cpl, ctx); 2829 } 2830 2831 static int 2832 _spdk_bs_load_replay_md_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_store *bs) 2833 { 2834 struct spdk_blob_md_descriptor *desc; 2835 size_t cur_desc = 0; 2836 2837 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 2838 
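/*
 * Walk the descriptors packed into this metadata page. For replay purposes
 * only extent descriptors matter: every allocated cluster they reference is
 * marked in the used_clusters map and the free-cluster count is decremented
 * (cluster_idx == 0 denotes an unallocated cluster and is skipped). Xattr and
 * flags descriptors are ignored here, and an unknown descriptor type aborts
 * the replay with -EINVAL.
 */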
while (cur_desc < sizeof(page->descriptors)) { 2839 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 2840 if (desc->length == 0) { 2841 /* If padding and length are 0, this terminates the page */ 2842 break; 2843 } 2844 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 2845 struct spdk_blob_md_descriptor_extent *desc_extent; 2846 unsigned int i, j; 2847 unsigned int cluster_count = 0; 2848 uint32_t cluster_idx; 2849 2850 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 2851 2852 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 2853 for (j = 0; j < desc_extent->extents[i].length; j++) { 2854 cluster_idx = desc_extent->extents[i].cluster_idx; 2855 /* 2856 * cluster_idx = 0 means an unallocated cluster - don't mark that 2857 * in the used cluster map. 2858 */ 2859 if (cluster_idx != 0) { 2860 spdk_bit_array_set(bs->used_clusters, cluster_idx + j); 2861 if (bs->num_free_clusters == 0) { 2862 return -ENOSPC; 2863 } 2864 bs->num_free_clusters--; 2865 } 2866 cluster_count++; 2867 } 2868 } 2869 if (cluster_count == 0) { 2870 return -EINVAL; 2871 } 2872 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 2873 /* Skip this item */ 2874 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 2875 /* Skip this item */ 2876 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 2877 /* Skip this item */ 2878 } else { 2879 /* Error */ 2880 return -EINVAL; 2881 } 2882 /* Advance to the next descriptor */ 2883 cur_desc += sizeof(*desc) + desc->length; 2884 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 2885 break; 2886 } 2887 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 2888 } 2889 return 0; 2890 } 2891 2892 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 2893 { 2894 uint32_t crc; 2895 2896 crc = _spdk_blob_md_page_calc_crc(ctx->page); 2897 if (crc != ctx->page->crc) { 2898 return false; 2899 } 2900 2901 if (_spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) { 2902 return false; 2903 } 2904 return true; 2905 } 2906 2907 static void 2908 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 2909 2910 static void 2911 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2912 { 2913 struct spdk_bs_load_ctx *ctx = cb_arg; 2914 2915 _spdk_bs_load_complete(seq, ctx, bserrno); 2916 } 2917 2918 static void 2919 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2920 { 2921 struct spdk_bs_load_ctx *ctx = cb_arg; 2922 2923 spdk_dma_free(ctx->mask); 2924 ctx->mask = NULL; 2925 2926 _spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_load_write_used_clusters_cpl); 2927 } 2928 2929 static void 2930 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2931 { 2932 struct spdk_bs_load_ctx *ctx = cb_arg; 2933 2934 spdk_dma_free(ctx->mask); 2935 ctx->mask = NULL; 2936 2937 _spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_load_write_used_blobids_cpl); 2938 } 2939 2940 static void 2941 _spdk_bs_load_write_used_md(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2942 { 2943 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_load_write_used_pages_cpl); 2944 } 2945 2946 static void 2947 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2948 { 2949 struct spdk_bs_load_ctx *ctx = cb_arg; 2950 uint64_t num_md_clusters; 2951 uint64_t i; 2952 uint32_t page_num; 2953 2954 if (bserrno != 0) { 2955 _spdk_bs_load_ctx_fail(seq, 
ctx, bserrno); 2956 return; 2957 } 2958 2959 page_num = ctx->cur_page; 2960 if (_spdk_bs_load_cur_md_page_valid(ctx) == true) { 2961 if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) { 2962 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 2963 if (ctx->page->sequence_num == 0) { 2964 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 2965 } 2966 if (_spdk_bs_load_replay_md_parse_page(ctx->page, ctx->bs)) { 2967 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 2968 return; 2969 } 2970 if (ctx->page->next != SPDK_INVALID_MD_PAGE) { 2971 ctx->in_page_chain = true; 2972 ctx->cur_page = ctx->page->next; 2973 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 2974 return; 2975 } 2976 } 2977 } 2978 2979 ctx->in_page_chain = false; 2980 2981 do { 2982 ctx->page_index++; 2983 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 2984 2985 if (ctx->page_index < ctx->super->md_len) { 2986 ctx->cur_page = ctx->page_index; 2987 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 2988 } else { 2989 /* Claim all of the clusters used by the metadata */ 2990 num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster); 2991 for (i = 0; i < num_md_clusters; i++) { 2992 _spdk_bs_claim_cluster(ctx->bs, i); 2993 } 2994 spdk_dma_free(ctx->page); 2995 _spdk_bs_load_write_used_md(seq, ctx, bserrno); 2996 } 2997 } 2998 2999 static void 3000 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 3001 { 3002 struct spdk_bs_load_ctx *ctx = cb_arg; 3003 uint64_t lba; 3004 3005 assert(ctx->cur_page < ctx->super->md_len); 3006 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 3007 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 3008 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 3009 _spdk_bs_load_replay_md_cpl, ctx); 3010 } 3011 3012 static void 3013 _spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg) 3014 { 3015 struct spdk_bs_load_ctx *ctx = cb_arg; 3016 3017 ctx->page_index = 0; 3018 ctx->cur_page = 0; 3019 ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE, 3020 SPDK_BS_PAGE_SIZE, 3021 NULL); 3022 if (!ctx->page) { 3023 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3024 return; 3025 } 3026 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 3027 } 3028 3029 static void 3030 _spdk_bs_recover(spdk_bs_sequence_t *seq, void *cb_arg) 3031 { 3032 struct spdk_bs_load_ctx *ctx = cb_arg; 3033 int rc; 3034 3035 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 3036 if (rc < 0) { 3037 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3038 return; 3039 } 3040 3041 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 3042 if (rc < 0) { 3043 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3044 return; 3045 } 3046 3047 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 3048 if (rc < 0) { 3049 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3050 return; 3051 } 3052 3053 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 3054 _spdk_bs_load_replay_md(seq, cb_arg); 3055 } 3056 3057 static void 3058 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3059 { 3060 struct spdk_bs_load_ctx *ctx = cb_arg; 3061 uint32_t crc; 3062 int rc; 3063 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH]; 3064 3065 if (ctx->super->version > SPDK_BS_VERSION || 3066 ctx->super->version < SPDK_BS_INITIAL_VERSION) { 3067 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3068 return; 3069 } 3070 3071 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3072 
sizeof(ctx->super->signature)) != 0) { 3073 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3074 return; 3075 } 3076 3077 crc = _spdk_blob_md_page_calc_crc(ctx->super); 3078 if (crc != ctx->super->crc) { 3079 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3080 return; 3081 } 3082 3083 if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 3084 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n"); 3085 } else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 3086 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless bstype\n"); 3087 } else { 3088 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n"); 3089 SPDK_LOGDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 3090 SPDK_LOGDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 3091 _spdk_bs_load_ctx_fail(seq, ctx, -ENXIO); 3092 return; 3093 } 3094 3095 if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) { 3096 SPDK_NOTICELOG("Size mismatch, dev size: %lu, blobstore size: %lu\n", 3097 ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size); 3098 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3099 return; 3100 } 3101 3102 if (ctx->super->size == 0) { 3103 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 3104 } 3105 3106 if (ctx->super->io_unit_size == 0) { 3107 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 3108 } 3109 3110 /* Parse the super block */ 3111 ctx->bs->clean = 1; 3112 ctx->bs->cluster_sz = ctx->super->cluster_size; 3113 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 3114 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3115 ctx->bs->io_unit_size = ctx->super->io_unit_size; 3116 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 3117 if (rc < 0) { 3118 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3119 return; 3120 } 3121 ctx->bs->md_start = ctx->super->md_start; 3122 ctx->bs->md_len = ctx->super->md_len; 3123 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 3124 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 3125 ctx->bs->super_blob = ctx->super->super_blob; 3126 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 3127 3128 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) { 3129 _spdk_bs_recover(seq, ctx); 3130 } else { 3131 _spdk_bs_load_read_used_pages(seq, ctx); 3132 } 3133 } 3134 3135 void 3136 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 3137 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 3138 { 3139 struct spdk_blob_store *bs; 3140 struct spdk_bs_cpl cpl; 3141 spdk_bs_sequence_t *seq; 3142 struct spdk_bs_load_ctx *ctx; 3143 struct spdk_bs_opts opts = {}; 3144 int err; 3145 3146 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev); 3147 3148 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 3149 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen); 3150 dev->destroy(dev); 3151 cb_fn(cb_arg, NULL, -EINVAL); 3152 return; 3153 } 3154 3155 if (o) { 3156 opts = *o; 3157 } else { 3158 spdk_bs_opts_init(&opts); 3159 } 3160 3161 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 3162 dev->destroy(dev); 3163 cb_fn(cb_arg, NULL, -EINVAL); 3164 return; 3165 } 3166 3167 err = _spdk_bs_alloc(dev, &opts, &bs); 3168 if (err) { 3169 dev->destroy(dev); 3170 cb_fn(cb_arg, NULL, err); 3171 return; 3172 } 3173 3174 
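/*
 * Loading starts by reading the super block. From there
 * _spdk_bs_load_super_cpl() either reads the persisted used_pages,
 * used_clusters and used_blobids masks (clean shutdown on a v3+ format) or
 * falls back to _spdk_bs_recover(), which replays every metadata page to
 * rebuild those masks.
 */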
ctx = calloc(1, sizeof(*ctx)); 3175 if (!ctx) { 3176 _spdk_bs_free(bs); 3177 cb_fn(cb_arg, NULL, -ENOMEM); 3178 return; 3179 } 3180 3181 ctx->bs = bs; 3182 ctx->is_load = true; 3183 ctx->iter_cb_fn = opts.iter_cb_fn; 3184 ctx->iter_cb_arg = opts.iter_cb_arg; 3185 3186 /* Allocate memory for the super block */ 3187 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3188 if (!ctx->super) { 3189 free(ctx); 3190 _spdk_bs_free(bs); 3191 cb_fn(cb_arg, NULL, -ENOMEM); 3192 return; 3193 } 3194 3195 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 3196 cpl.u.bs_handle.cb_fn = cb_fn; 3197 cpl.u.bs_handle.cb_arg = cb_arg; 3198 cpl.u.bs_handle.bs = bs; 3199 3200 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3201 if (!seq) { 3202 spdk_dma_free(ctx->super); 3203 free(ctx); 3204 _spdk_bs_free(bs); 3205 cb_fn(cb_arg, NULL, -ENOMEM); 3206 return; 3207 } 3208 3209 /* Read the super block */ 3210 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3211 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3212 _spdk_bs_load_super_cpl, ctx); 3213 } 3214 3215 /* END spdk_bs_load */ 3216 3217 /* START spdk_bs_dump */ 3218 3219 struct spdk_bs_dump_ctx { 3220 struct spdk_blob_store *bs; 3221 struct spdk_bs_super_block *super; 3222 uint32_t cur_page; 3223 struct spdk_blob_md_page *page; 3224 spdk_bs_sequence_t *seq; 3225 FILE *fp; 3226 spdk_bs_dump_print_xattr print_xattr_fn; 3227 char xattr_name[4096]; 3228 }; 3229 3230 static void 3231 _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno) 3232 { 3233 spdk_dma_free(ctx->super); 3234 3235 /* 3236 * We need to defer calling spdk_bs_call_cpl() until after 3237 * dev destruction, so tuck these away for later use. 3238 */ 3239 ctx->bs->unload_err = bserrno; 3240 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3241 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3242 3243 spdk_bs_sequence_finish(seq, 0); 3244 _spdk_bs_free(ctx->bs); 3245 free(ctx); 3246 } 3247 3248 static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 3249 3250 static void 3251 _spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx) 3252 { 3253 uint32_t page_idx = ctx->cur_page; 3254 struct spdk_blob_md_page *page = ctx->page; 3255 struct spdk_blob_md_descriptor *desc; 3256 size_t cur_desc = 0; 3257 uint32_t crc; 3258 3259 fprintf(ctx->fp, "=========\n"); 3260 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 3261 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 3262 3263 crc = _spdk_blob_md_page_calc_crc(page); 3264 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 3265 3266 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3267 while (cur_desc < sizeof(page->descriptors)) { 3268 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 3269 if (desc->length == 0) { 3270 /* If padding and length are 0, this terminates the page */ 3271 break; 3272 } 3273 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 3274 struct spdk_blob_md_descriptor_extent *desc_extent; 3275 unsigned int i; 3276 3277 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 3278 3279 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 3280 if (desc_extent->extents[i].cluster_idx != 0) { 3281 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 3282 desc_extent->extents[i].cluster_idx); 3283 } else { 3284 fprintf(ctx->fp, "Unallocated Extent - "); 3285 } 3286 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent->extents[i].length); 3287 fprintf(ctx->fp, "\n"); 3288 } 3289 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 3290 struct spdk_blob_md_descriptor_xattr *desc_xattr; 3291 uint32_t i; 3292 3293 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 3294 3295 if (desc_xattr->length != 3296 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 3297 desc_xattr->name_length + desc_xattr->value_length) { 3298 } 3299 3300 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 3301 ctx->xattr_name[desc_xattr->name_length] = '\0'; 3302 fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name); 3303 fprintf(ctx->fp, " value = \""); 3304 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 3305 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 3306 desc_xattr->value_length); 3307 fprintf(ctx->fp, "\"\n"); 3308 for (i = 0; i < desc_xattr->value_length; i++) { 3309 if (i % 16 == 0) { 3310 fprintf(ctx->fp, " "); 3311 } 3312 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 3313 if ((i + 1) % 16 == 0) { 3314 fprintf(ctx->fp, "\n"); 3315 } 3316 } 3317 if (i % 16 != 0) { 3318 fprintf(ctx->fp, "\n"); 3319 } 3320 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 3321 /* TODO */ 3322 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 3323 /* TODO */ 3324 } else { 3325 /* Error */ 3326 } 3327 /* Advance to the next descriptor */ 3328 cur_desc += sizeof(*desc) + desc->length; 3329 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 3330 break; 3331 } 3332 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 3333 } 3334 } 3335 3336 static void 3337 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3338 { 3339 struct spdk_bs_dump_ctx *ctx = cb_arg; 3340 3341 if (bserrno != 0) { 3342 _spdk_bs_dump_finish(seq, ctx, bserrno); 3343 return; 3344 } 3345 3346 if (ctx->page->id != 0) { 3347 _spdk_bs_dump_print_md_page(ctx); 3348 } 3349 3350 ctx->cur_page++; 3351 3352 if (ctx->cur_page < ctx->super->md_len) { 3353 _spdk_bs_dump_read_md_page(seq, cb_arg); 3354 } else { 3355 spdk_dma_free(ctx->page); 3356 _spdk_bs_dump_finish(seq, ctx, 0); 3357 } 3358 } 3359 3360 static void 3361 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 3362 { 3363 struct spdk_bs_dump_ctx *ctx = cb_arg; 3364 uint64_t lba; 3365 3366 assert(ctx->cur_page < ctx->super->md_len); 3367 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 3368 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 3369 _spdk_bs_byte_to_lba(ctx->bs, 
SPDK_BS_PAGE_SIZE), 3370 _spdk_bs_dump_read_md_page_cpl, ctx); 3371 } 3372 3373 static void 3374 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3375 { 3376 struct spdk_bs_dump_ctx *ctx = cb_arg; 3377 3378 fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature); 3379 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3380 sizeof(ctx->super->signature)) != 0) { 3381 fprintf(ctx->fp, "(Mismatch)\n"); 3382 _spdk_bs_dump_finish(seq, ctx, bserrno); 3383 return; 3384 } else { 3385 fprintf(ctx->fp, "(OK)\n"); 3386 } 3387 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 3388 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 3389 (ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 3390 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 3391 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 3392 fprintf(ctx->fp, "Super Blob ID: "); 3393 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 3394 fprintf(ctx->fp, "(None)\n"); 3395 } else { 3396 fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob); 3397 } 3398 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 3399 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 3400 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 3401 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 3402 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 3403 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 3404 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 3405 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 3406 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 3407 3408 ctx->cur_page = 0; 3409 ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE, 3410 SPDK_BS_PAGE_SIZE, 3411 NULL); 3412 if (!ctx->page) { 3413 _spdk_bs_dump_finish(seq, ctx, -ENOMEM); 3414 return; 3415 } 3416 _spdk_bs_dump_read_md_page(seq, cb_arg); 3417 } 3418 3419 void 3420 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 3421 spdk_bs_op_complete cb_fn, void *cb_arg) 3422 { 3423 struct spdk_blob_store *bs; 3424 struct spdk_bs_cpl cpl; 3425 spdk_bs_sequence_t *seq; 3426 struct spdk_bs_dump_ctx *ctx; 3427 struct spdk_bs_opts opts = {}; 3428 int err; 3429 3430 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev); 3431 3432 spdk_bs_opts_init(&opts); 3433 3434 err = _spdk_bs_alloc(dev, &opts, &bs); 3435 if (err) { 3436 dev->destroy(dev); 3437 cb_fn(cb_arg, err); 3438 return; 3439 } 3440 3441 ctx = calloc(1, sizeof(*ctx)); 3442 if (!ctx) { 3443 _spdk_bs_free(bs); 3444 cb_fn(cb_arg, -ENOMEM); 3445 return; 3446 } 3447 3448 ctx->bs = bs; 3449 ctx->fp = fp; 3450 ctx->print_xattr_fn = print_xattr_fn; 3451 3452 /* Allocate memory for the super block */ 3453 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3454 if (!ctx->super) { 3455 free(ctx); 3456 _spdk_bs_free(bs); 3457 cb_fn(cb_arg, -ENOMEM); 3458 return; 3459 } 3460 3461 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3462 cpl.u.bs_basic.cb_fn = cb_fn; 3463 cpl.u.bs_basic.cb_arg = cb_arg; 3464 3465 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3466 if (!seq) { 3467 spdk_dma_free(ctx->super); 3468 
free(ctx); 3469 _spdk_bs_free(bs); 3470 cb_fn(cb_arg, -ENOMEM); 3471 return; 3472 } 3473 3474 /* Read the super block */ 3475 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3476 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3477 _spdk_bs_dump_super_cpl, ctx); 3478 } 3479 3480 /* END spdk_bs_dump */ 3481 3482 /* START spdk_bs_init */ 3483 3484 struct spdk_bs_init_ctx { 3485 struct spdk_blob_store *bs; 3486 struct spdk_bs_super_block *super; 3487 }; 3488 3489 static void 3490 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3491 { 3492 struct spdk_bs_init_ctx *ctx = cb_arg; 3493 3494 spdk_dma_free(ctx->super); 3495 free(ctx); 3496 3497 spdk_bs_sequence_finish(seq, bserrno); 3498 } 3499 3500 static void 3501 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3502 { 3503 struct spdk_bs_init_ctx *ctx = cb_arg; 3504 3505 /* Write super block */ 3506 spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0), 3507 _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 3508 _spdk_bs_init_persist_super_cpl, ctx); 3509 } 3510 3511 void 3512 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 3513 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 3514 { 3515 struct spdk_bs_init_ctx *ctx; 3516 struct spdk_blob_store *bs; 3517 struct spdk_bs_cpl cpl; 3518 spdk_bs_sequence_t *seq; 3519 spdk_bs_batch_t *batch; 3520 uint64_t num_md_lba; 3521 uint64_t num_md_pages; 3522 uint64_t num_md_clusters; 3523 uint32_t i; 3524 struct spdk_bs_opts opts = {}; 3525 int rc; 3526 3527 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev); 3528 3529 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 3530 SPDK_ERRLOG("unsupported dev block length of %d\n", 3531 dev->blocklen); 3532 dev->destroy(dev); 3533 cb_fn(cb_arg, NULL, -EINVAL); 3534 return; 3535 } 3536 3537 if (o) { 3538 opts = *o; 3539 } else { 3540 spdk_bs_opts_init(&opts); 3541 } 3542 3543 if (_spdk_bs_opts_verify(&opts) != 0) { 3544 dev->destroy(dev); 3545 cb_fn(cb_arg, NULL, -EINVAL); 3546 return; 3547 } 3548 3549 rc = _spdk_bs_alloc(dev, &opts, &bs); 3550 if (rc) { 3551 dev->destroy(dev); 3552 cb_fn(cb_arg, NULL, rc); 3553 return; 3554 } 3555 3556 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 3557 /* By default, allocate 1 page per cluster. 3558 * Technically, this over-allocates metadata 3559 * because more metadata will reduce the number 3560 * of usable clusters. This can be addressed with 3561 * more complex math in the future. 
3562 */ 3563 bs->md_len = bs->total_clusters; 3564 } else { 3565 bs->md_len = opts.num_md_pages; 3566 } 3567 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 3568 if (rc < 0) { 3569 _spdk_bs_free(bs); 3570 cb_fn(cb_arg, NULL, -ENOMEM); 3571 return; 3572 } 3573 3574 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 3575 if (rc < 0) { 3576 _spdk_bs_free(bs); 3577 cb_fn(cb_arg, NULL, -ENOMEM); 3578 return; 3579 } 3580 3581 ctx = calloc(1, sizeof(*ctx)); 3582 if (!ctx) { 3583 _spdk_bs_free(bs); 3584 cb_fn(cb_arg, NULL, -ENOMEM); 3585 return; 3586 } 3587 3588 ctx->bs = bs; 3589 3590 /* Allocate memory for the super block */ 3591 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3592 if (!ctx->super) { 3593 free(ctx); 3594 _spdk_bs_free(bs); 3595 cb_fn(cb_arg, NULL, -ENOMEM); 3596 return; 3597 } 3598 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3599 sizeof(ctx->super->signature)); 3600 ctx->super->version = SPDK_BS_VERSION; 3601 ctx->super->length = sizeof(*ctx->super); 3602 ctx->super->super_blob = bs->super_blob; 3603 ctx->super->clean = 0; 3604 ctx->super->cluster_size = bs->cluster_sz; 3605 ctx->super->io_unit_size = bs->io_unit_size; 3606 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 3607 3608 /* Calculate how many pages the metadata consumes at the front 3609 * of the disk. 3610 */ 3611 3612 /* The super block uses 1 page */ 3613 num_md_pages = 1; 3614 3615 /* The used_md_pages mask requires 1 bit per metadata page, rounded 3616 * up to the nearest page, plus a header. 3617 */ 3618 ctx->super->used_page_mask_start = num_md_pages; 3619 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 3620 spdk_divide_round_up(bs->md_len, 8), 3621 SPDK_BS_PAGE_SIZE); 3622 num_md_pages += ctx->super->used_page_mask_len; 3623 3624 /* The used_clusters mask requires 1 bit per cluster, rounded 3625 * up to the nearest page, plus a header. 3626 */ 3627 ctx->super->used_cluster_mask_start = num_md_pages; 3628 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 3629 spdk_divide_round_up(bs->total_clusters, 8), 3630 SPDK_BS_PAGE_SIZE); 3631 num_md_pages += ctx->super->used_cluster_mask_len; 3632 3633 /* The used_blobids mask requires 1 bit per metadata page, rounded 3634 * up to the nearest page, plus a header. 
3635 */ 3636 ctx->super->used_blobid_mask_start = num_md_pages; 3637 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 3638 spdk_divide_round_up(bs->md_len, 8), 3639 SPDK_BS_PAGE_SIZE); 3640 num_md_pages += ctx->super->used_blobid_mask_len; 3641 3642 /* The metadata region size was chosen above */ 3643 ctx->super->md_start = bs->md_start = num_md_pages; 3644 ctx->super->md_len = bs->md_len; 3645 num_md_pages += bs->md_len; 3646 3647 num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages); 3648 3649 ctx->super->size = dev->blockcnt * dev->blocklen; 3650 3651 ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super); 3652 3653 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 3654 if (num_md_clusters > bs->total_clusters) { 3655 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 3656 "please decrease number of pages reserved for metadata " 3657 "or increase cluster size.\n"); 3658 spdk_dma_free(ctx->super); 3659 free(ctx); 3660 _spdk_bs_free(bs); 3661 cb_fn(cb_arg, NULL, -ENOMEM); 3662 return; 3663 } 3664 /* Claim all of the clusters used by the metadata */ 3665 for (i = 0; i < num_md_clusters; i++) { 3666 _spdk_bs_claim_cluster(bs, i); 3667 } 3668 3669 bs->total_data_clusters = bs->num_free_clusters; 3670 3671 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 3672 cpl.u.bs_handle.cb_fn = cb_fn; 3673 cpl.u.bs_handle.cb_arg = cb_arg; 3674 cpl.u.bs_handle.bs = bs; 3675 3676 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3677 if (!seq) { 3678 spdk_dma_free(ctx->super); 3679 free(ctx); 3680 _spdk_bs_free(bs); 3681 cb_fn(cb_arg, NULL, -ENOMEM); 3682 return; 3683 } 3684 3685 batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx); 3686 3687 /* Clear metadata space */ 3688 spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 3689 /* Trim data clusters */ 3690 spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba); 3691 3692 spdk_bs_batch_close(batch); 3693 } 3694 3695 /* END spdk_bs_init */ 3696 3697 /* START spdk_bs_destroy */ 3698 3699 static void 3700 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3701 { 3702 struct spdk_bs_init_ctx *ctx = cb_arg; 3703 struct spdk_blob_store *bs = ctx->bs; 3704 3705 /* 3706 * We need to defer calling spdk_bs_call_cpl() until after 3707 * dev destruction, so tuck these away for later use. 
3708 */ 3709 bs->unload_err = bserrno; 3710 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3711 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3712 3713 spdk_bs_sequence_finish(seq, bserrno); 3714 3715 _spdk_bs_free(bs); 3716 free(ctx); 3717 } 3718 3719 void 3720 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 3721 void *cb_arg) 3722 { 3723 struct spdk_bs_cpl cpl; 3724 spdk_bs_sequence_t *seq; 3725 struct spdk_bs_init_ctx *ctx; 3726 3727 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n"); 3728 3729 if (!TAILQ_EMPTY(&bs->blobs)) { 3730 SPDK_ERRLOG("Blobstore still has open blobs\n"); 3731 cb_fn(cb_arg, -EBUSY); 3732 return; 3733 } 3734 3735 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3736 cpl.u.bs_basic.cb_fn = cb_fn; 3737 cpl.u.bs_basic.cb_arg = cb_arg; 3738 3739 ctx = calloc(1, sizeof(*ctx)); 3740 if (!ctx) { 3741 cb_fn(cb_arg, -ENOMEM); 3742 return; 3743 } 3744 3745 ctx->bs = bs; 3746 3747 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3748 if (!seq) { 3749 free(ctx); 3750 cb_fn(cb_arg, -ENOMEM); 3751 return; 3752 } 3753 3754 /* Write zeroes to the super block */ 3755 spdk_bs_sequence_write_zeroes_dev(seq, 3756 _spdk_bs_page_to_lba(bs, 0), 3757 _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 3758 _spdk_bs_destroy_trim_cpl, ctx); 3759 } 3760 3761 /* END spdk_bs_destroy */ 3762 3763 /* START spdk_bs_unload */ 3764 3765 static void 3766 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3767 { 3768 struct spdk_bs_load_ctx *ctx = cb_arg; 3769 3770 spdk_dma_free(ctx->super); 3771 3772 /* 3773 * We need to defer calling spdk_bs_call_cpl() until after 3774 * dev destruction, so tuck these away for later use. 3775 */ 3776 ctx->bs->unload_err = bserrno; 3777 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3778 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3779 3780 spdk_bs_sequence_finish(seq, bserrno); 3781 3782 _spdk_bs_free(ctx->bs); 3783 free(ctx); 3784 } 3785 3786 static void 3787 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3788 { 3789 struct spdk_bs_load_ctx *ctx = cb_arg; 3790 3791 spdk_dma_free(ctx->mask); 3792 ctx->super->clean = 1; 3793 3794 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx); 3795 } 3796 3797 static void 3798 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3799 { 3800 struct spdk_bs_load_ctx *ctx = cb_arg; 3801 3802 spdk_dma_free(ctx->mask); 3803 ctx->mask = NULL; 3804 3805 _spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_unload_write_used_clusters_cpl); 3806 } 3807 3808 static void 3809 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3810 { 3811 struct spdk_bs_load_ctx *ctx = cb_arg; 3812 3813 spdk_dma_free(ctx->mask); 3814 ctx->mask = NULL; 3815 3816 _spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_unload_write_used_blobids_cpl); 3817 } 3818 3819 static void 3820 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3821 { 3822 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl); 3823 } 3824 3825 void 3826 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 3827 { 3828 struct spdk_bs_cpl cpl; 3829 spdk_bs_sequence_t *seq; 3830 struct spdk_bs_load_ctx *ctx; 3831 3832 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n"); 3833 3834 if (!TAILQ_EMPTY(&bs->blobs)) { 3835 SPDK_ERRLOG("Blobstore still has open blobs\n"); 3836 
cb_fn(cb_arg, -EBUSY); 3837 return; 3838 } 3839 3840 ctx = calloc(1, sizeof(*ctx)); 3841 if (!ctx) { 3842 cb_fn(cb_arg, -ENOMEM); 3843 return; 3844 } 3845 3846 ctx->bs = bs; 3847 ctx->is_load = false; 3848 3849 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3850 if (!ctx->super) { 3851 free(ctx); 3852 cb_fn(cb_arg, -ENOMEM); 3853 return; 3854 } 3855 3856 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3857 cpl.u.bs_basic.cb_fn = cb_fn; 3858 cpl.u.bs_basic.cb_arg = cb_arg; 3859 3860 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3861 if (!seq) { 3862 spdk_dma_free(ctx->super); 3863 free(ctx); 3864 cb_fn(cb_arg, -ENOMEM); 3865 return; 3866 } 3867 3868 /* Read super block */ 3869 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3870 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3871 _spdk_bs_unload_read_super_cpl, ctx); 3872 } 3873 3874 /* END spdk_bs_unload */ 3875 3876 /* START spdk_bs_set_super */ 3877 3878 struct spdk_bs_set_super_ctx { 3879 struct spdk_blob_store *bs; 3880 struct spdk_bs_super_block *super; 3881 }; 3882 3883 static void 3884 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3885 { 3886 struct spdk_bs_set_super_ctx *ctx = cb_arg; 3887 3888 if (bserrno != 0) { 3889 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 3890 } 3891 3892 spdk_dma_free(ctx->super); 3893 3894 spdk_bs_sequence_finish(seq, bserrno); 3895 3896 free(ctx); 3897 } 3898 3899 static void 3900 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3901 { 3902 struct spdk_bs_set_super_ctx *ctx = cb_arg; 3903 3904 if (bserrno != 0) { 3905 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 3906 spdk_dma_free(ctx->super); 3907 spdk_bs_sequence_finish(seq, bserrno); 3908 free(ctx); 3909 return; 3910 } 3911 3912 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx); 3913 } 3914 3915 void 3916 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 3917 spdk_bs_op_complete cb_fn, void *cb_arg) 3918 { 3919 struct spdk_bs_cpl cpl; 3920 spdk_bs_sequence_t *seq; 3921 struct spdk_bs_set_super_ctx *ctx; 3922 3923 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n"); 3924 3925 ctx = calloc(1, sizeof(*ctx)); 3926 if (!ctx) { 3927 cb_fn(cb_arg, -ENOMEM); 3928 return; 3929 } 3930 3931 ctx->bs = bs; 3932 3933 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3934 if (!ctx->super) { 3935 free(ctx); 3936 cb_fn(cb_arg, -ENOMEM); 3937 return; 3938 } 3939 3940 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3941 cpl.u.bs_basic.cb_fn = cb_fn; 3942 cpl.u.bs_basic.cb_arg = cb_arg; 3943 3944 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3945 if (!seq) { 3946 spdk_dma_free(ctx->super); 3947 free(ctx); 3948 cb_fn(cb_arg, -ENOMEM); 3949 return; 3950 } 3951 3952 bs->super_blob = blobid; 3953 3954 /* Read super block */ 3955 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3956 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3957 _spdk_bs_set_super_read_cpl, ctx); 3958 } 3959 3960 /* END spdk_bs_set_super */ 3961 3962 void 3963 spdk_bs_get_super(struct spdk_blob_store *bs, 3964 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 3965 { 3966 if (bs->super_blob == SPDK_BLOBID_INVALID) { 3967 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 3968 } else { 3969 cb_fn(cb_arg, bs->super_blob, 0); 3970 } 3971 } 3972 3973 uint64_t 3974 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 3975 { 3976 return bs->cluster_sz; 3977 } 3978 3979 uint64_t 3980 
spdk_bs_get_page_size(struct spdk_blob_store *bs) 3981 { 3982 return SPDK_BS_PAGE_SIZE; 3983 } 3984 3985 uint64_t 3986 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 3987 { 3988 return bs->io_unit_size; 3989 } 3990 3991 uint64_t 3992 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 3993 { 3994 return bs->num_free_clusters; 3995 } 3996 3997 uint64_t 3998 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 3999 { 4000 return bs->total_data_clusters; 4001 } 4002 4003 static int 4004 spdk_bs_register_md_thread(struct spdk_blob_store *bs) 4005 { 4006 bs->md_channel = spdk_get_io_channel(bs); 4007 if (!bs->md_channel) { 4008 SPDK_ERRLOG("Failed to get IO channel.\n"); 4009 return -1; 4010 } 4011 4012 return 0; 4013 } 4014 4015 static int 4016 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs) 4017 { 4018 spdk_put_io_channel(bs->md_channel); 4019 4020 return 0; 4021 } 4022 4023 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 4024 { 4025 assert(blob != NULL); 4026 4027 return blob->id; 4028 } 4029 4030 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 4031 { 4032 assert(blob != NULL); 4033 4034 return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters); 4035 } 4036 4037 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob) 4038 { 4039 assert(blob != NULL); 4040 4041 return spdk_blob_get_num_pages(blob) * _spdk_bs_io_unit_per_page(blob->bs); 4042 } 4043 4044 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 4045 { 4046 assert(blob != NULL); 4047 4048 return blob->active.num_clusters; 4049 } 4050 4051 /* START spdk_bs_create_blob */ 4052 4053 static void 4054 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4055 { 4056 struct spdk_blob *blob = cb_arg; 4057 4058 _spdk_blob_free(blob); 4059 4060 spdk_bs_sequence_finish(seq, bserrno); 4061 } 4062 4063 static int 4064 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 4065 bool internal) 4066 { 4067 uint64_t i; 4068 size_t value_len = 0; 4069 int rc; 4070 const void *value = NULL; 4071 if (xattrs->count > 0 && xattrs->get_value == NULL) { 4072 return -EINVAL; 4073 } 4074 for (i = 0; i < xattrs->count; i++) { 4075 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 4076 if (value == NULL || value_len == 0) { 4077 return -EINVAL; 4078 } 4079 rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 4080 if (rc < 0) { 4081 return rc; 4082 } 4083 } 4084 return 0; 4085 } 4086 4087 static void 4088 _spdk_blob_set_thin_provision(struct spdk_blob *blob) 4089 { 4090 _spdk_blob_verify_md_op(blob); 4091 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4092 blob->state = SPDK_BLOB_STATE_DIRTY; 4093 } 4094 4095 static void 4096 _spdk_bs_create_blob(struct spdk_blob_store *bs, 4097 const struct spdk_blob_opts *opts, 4098 const struct spdk_blob_xattr_opts *internal_xattrs, 4099 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4100 { 4101 struct spdk_blob *blob; 4102 uint32_t page_idx; 4103 struct spdk_bs_cpl cpl; 4104 struct spdk_blob_opts opts_default; 4105 struct spdk_blob_xattr_opts internal_xattrs_default; 4106 spdk_bs_sequence_t *seq; 4107 spdk_blob_id id; 4108 int rc; 4109 4110 assert(spdk_get_thread() == bs->md_thread); 4111 4112 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 4113 if (page_idx == UINT32_MAX) { 4114 cb_fn(cb_arg, 0, -ENOMEM); 4115 return; 4116 } 4117 spdk_bit_array_set(bs->used_blobids, page_idx); 4118 spdk_bit_array_set(bs->used_md_pages, page_idx); 4119 4120 
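	/*
	 * The blob id is derived from the metadata page index claimed above;
	 * setting the bit in used_md_pages reserves that page for the blob's
	 * first metadata page, and setting it in used_blobids is what makes the
	 * id visible to spdk_bs_open_blob() and the blobstore iterators.
	 */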
id = _spdk_bs_page_to_blobid(page_idx); 4121 4122 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx); 4123 4124 blob = _spdk_blob_alloc(bs, id); 4125 if (!blob) { 4126 cb_fn(cb_arg, 0, -ENOMEM); 4127 return; 4128 } 4129 4130 if (!opts) { 4131 spdk_blob_opts_init(&opts_default); 4132 opts = &opts_default; 4133 } 4134 if (!internal_xattrs) { 4135 _spdk_blob_xattrs_init(&internal_xattrs_default); 4136 internal_xattrs = &internal_xattrs_default; 4137 } 4138 4139 rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false); 4140 if (rc < 0) { 4141 _spdk_blob_free(blob); 4142 cb_fn(cb_arg, 0, rc); 4143 return; 4144 } 4145 4146 rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true); 4147 if (rc < 0) { 4148 _spdk_blob_free(blob); 4149 cb_fn(cb_arg, 0, rc); 4150 return; 4151 } 4152 4153 if (opts->thin_provision) { 4154 _spdk_blob_set_thin_provision(blob); 4155 } 4156 4157 rc = _spdk_blob_resize(blob, opts->num_clusters); 4158 if (rc < 0) { 4159 _spdk_blob_free(blob); 4160 cb_fn(cb_arg, 0, rc); 4161 return; 4162 } 4163 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4164 cpl.u.blobid.cb_fn = cb_fn; 4165 cpl.u.blobid.cb_arg = cb_arg; 4166 cpl.u.blobid.blobid = blob->id; 4167 4168 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4169 if (!seq) { 4170 _spdk_blob_free(blob); 4171 cb_fn(cb_arg, 0, -ENOMEM); 4172 return; 4173 } 4174 4175 _spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob); 4176 } 4177 4178 void spdk_bs_create_blob(struct spdk_blob_store *bs, 4179 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4180 { 4181 _spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 4182 } 4183 4184 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts, 4185 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4186 { 4187 _spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 4188 } 4189 4190 /* END spdk_bs_create_blob */ 4191 4192 /* START blob_cleanup */ 4193 4194 struct spdk_clone_snapshot_ctx { 4195 struct spdk_bs_cpl cpl; 4196 int bserrno; 4197 bool frozen; 4198 4199 struct spdk_io_channel *channel; 4200 4201 /* Current cluster for inflate operation */ 4202 uint64_t cluster; 4203 4204 /* For inflation force allocation of all unallocated clusters and remove 4205 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */ 4206 bool allocate_all; 4207 4208 struct { 4209 spdk_blob_id id; 4210 struct spdk_blob *blob; 4211 } original; 4212 struct { 4213 spdk_blob_id id; 4214 struct spdk_blob *blob; 4215 } new; 4216 4217 /* xattrs specified for snapshot/clones only. They have no impact on 4218 * the original blobs xattrs. 
*/ 4219 const struct spdk_blob_xattr_opts *xattrs; 4220 }; 4221 4222 static void 4223 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 4224 { 4225 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 4226 struct spdk_bs_cpl *cpl = &ctx->cpl; 4227 4228 if (bserrno != 0) { 4229 if (ctx->bserrno != 0) { 4230 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4231 } else { 4232 ctx->bserrno = bserrno; 4233 } 4234 } 4235 4236 switch (cpl->type) { 4237 case SPDK_BS_CPL_TYPE_BLOBID: 4238 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 4239 break; 4240 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 4241 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 4242 break; 4243 default: 4244 SPDK_UNREACHABLE(); 4245 break; 4246 } 4247 4248 free(ctx); 4249 } 4250 4251 static void 4252 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 4253 { 4254 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4255 struct spdk_blob *origblob = ctx->original.blob; 4256 4257 if (bserrno != 0) { 4258 if (ctx->bserrno != 0) { 4259 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 4260 } else { 4261 ctx->bserrno = bserrno; 4262 } 4263 } 4264 4265 ctx->original.id = origblob->id; 4266 spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 4267 } 4268 4269 static void 4270 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 4271 { 4272 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4273 struct spdk_blob *origblob = ctx->original.blob; 4274 4275 if (bserrno != 0) { 4276 if (ctx->bserrno != 0) { 4277 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4278 } else { 4279 ctx->bserrno = bserrno; 4280 } 4281 } 4282 4283 if (ctx->frozen) { 4284 /* Unfreeze any outstanding I/O */ 4285 _spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx); 4286 } else { 4287 _spdk_bs_snapshot_unfreeze_cpl(ctx, 0); 4288 } 4289 4290 } 4291 4292 static void 4293 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno) 4294 { 4295 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4296 struct spdk_blob *newblob = ctx->new.blob; 4297 4298 if (bserrno != 0) { 4299 if (ctx->bserrno != 0) { 4300 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4301 } else { 4302 ctx->bserrno = bserrno; 4303 } 4304 } 4305 4306 ctx->new.id = newblob->id; 4307 spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4308 } 4309 4310 /* END blob_cleanup */ 4311 4312 /* START spdk_bs_create_snapshot */ 4313 4314 static void 4315 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 4316 { 4317 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4318 struct spdk_blob *newblob = ctx->new.blob; 4319 4320 if (bserrno != 0) { 4321 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4322 return; 4323 } 4324 4325 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 4326 bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 4327 if (bserrno != 0) { 4328 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4329 return; 4330 } 4331 4332 _spdk_bs_blob_list_add(ctx->original.blob); 4333 4334 spdk_blob_set_read_only(newblob); 4335 4336 /* sync snapshot metadata */ 4337 spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, cb_arg); 4338 } 4339 4340 static void 4341 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 4342 { 4343 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4344 struct 
spdk_blob *origblob = ctx->original.blob; 4345 struct spdk_blob *newblob = ctx->new.blob; 4346 4347 if (bserrno != 0) { 4348 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4349 return; 4350 } 4351 4352 /* Set internal xattr for snapshot id */ 4353 bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 4354 if (bserrno != 0) { 4355 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4356 return; 4357 } 4358 4359 _spdk_bs_blob_list_remove(origblob); 4360 origblob->parent_id = newblob->id; 4361 4362 /* Create new back_bs_dev for snapshot */ 4363 origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob); 4364 if (origblob->back_bs_dev == NULL) { 4365 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 4366 return; 4367 } 4368 4369 /* set clone blob as thin provisioned */ 4370 _spdk_blob_set_thin_provision(origblob); 4371 4372 _spdk_bs_blob_list_add(newblob); 4373 4374 /* Zero out origblob cluster map */ 4375 memset(origblob->active.clusters, 0, 4376 origblob->active.num_clusters * sizeof(origblob->active.clusters)); 4377 4378 /* sync clone metadata */ 4379 spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx); 4380 } 4381 4382 static void 4383 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc) 4384 { 4385 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4386 struct spdk_blob *origblob = ctx->original.blob; 4387 struct spdk_blob *newblob = ctx->new.blob; 4388 int bserrno; 4389 4390 if (rc != 0) { 4391 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc); 4392 return; 4393 } 4394 4395 ctx->frozen = true; 4396 4397 /* set new back_bs_dev for snapshot */ 4398 newblob->back_bs_dev = origblob->back_bs_dev; 4399 /* Set invalid flags from origblob */ 4400 newblob->invalid_flags = origblob->invalid_flags; 4401 4402 /* inherit parent from original blob if set */ 4403 newblob->parent_id = origblob->parent_id; 4404 if (origblob->parent_id != SPDK_BLOBID_INVALID) { 4405 /* Set internal xattr for snapshot id */ 4406 bserrno = _spdk_blob_set_xattr(newblob, BLOB_SNAPSHOT, 4407 &origblob->parent_id, sizeof(spdk_blob_id), true); 4408 if (bserrno != 0) { 4409 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4410 return; 4411 } 4412 } 4413 4414 /* Copy cluster map to snapshot */ 4415 memcpy(newblob->active.clusters, origblob->active.clusters, 4416 origblob->active.num_clusters * sizeof(origblob->active.clusters)); 4417 4418 /* sync snapshot metadata */ 4419 spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx); 4420 } 4421 4422 static void 4423 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4424 { 4425 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4426 struct spdk_blob *origblob = ctx->original.blob; 4427 struct spdk_blob *newblob = _blob; 4428 4429 if (bserrno != 0) { 4430 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4431 return; 4432 } 4433 4434 ctx->new.blob = newblob; 4435 4436 _spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx); 4437 } 4438 4439 static void 4440 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 4441 { 4442 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4443 struct spdk_blob *origblob = ctx->original.blob; 4444 4445 if (bserrno != 0) { 4446 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4447 return; 4448 } 4449 4450 ctx->new.id = blobid; 4451 ctx->cpl.u.blobid.blobid = blobid; 4452 4453 
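	/*
	 * From here snapshot creation continues as a chain of callbacks: open the
	 * newly created blob, freeze I/O on the original blob, hand the original's
	 * cluster map and back_bs_dev to the snapshot, re-point the original at
	 * the snapshot as its new parent (making it a thin-provisioned clone of
	 * the snapshot), then sync the metadata of both blobs and unfreeze I/O.
	 */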
spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx); 4454 } 4455 4456 4457 static void 4458 _spdk_bs_xattr_snapshot(void *arg, const char *name, 4459 const void **value, size_t *value_len) 4460 { 4461 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 4462 4463 struct spdk_blob *blob = (struct spdk_blob *)arg; 4464 *value = &blob->id; 4465 *value_len = sizeof(blob->id); 4466 } 4467 4468 static void 4469 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4470 { 4471 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4472 struct spdk_blob_opts opts; 4473 struct spdk_blob_xattr_opts internal_xattrs; 4474 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 4475 4476 if (bserrno != 0) { 4477 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4478 return; 4479 } 4480 4481 ctx->original.blob = _blob; 4482 4483 if (_blob->data_ro || _blob->md_ro) { 4484 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read only blob with id %lu\n", 4485 _blob->id); 4486 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4487 return; 4488 } 4489 4490 spdk_blob_opts_init(&opts); 4491 _spdk_blob_xattrs_init(&internal_xattrs); 4492 4493 /* Change the size of new blob to the same as in original blob, 4494 * but do not allocate clusters */ 4495 opts.thin_provision = true; 4496 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 4497 4498 /* If there are any xattrs specified for snapshot, set them now */ 4499 if (ctx->xattrs) { 4500 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 4501 } 4502 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 4503 internal_xattrs.count = 1; 4504 internal_xattrs.ctx = _blob; 4505 internal_xattrs.names = xattrs_names; 4506 internal_xattrs.get_value = _spdk_bs_xattr_snapshot; 4507 4508 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 4509 _spdk_bs_snapshot_newblob_create_cpl, ctx); 4510 } 4511 4512 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 4513 const struct spdk_blob_xattr_opts *snapshot_xattrs, 4514 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4515 { 4516 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4517 4518 if (!ctx) { 4519 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 4520 return; 4521 } 4522 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4523 ctx->cpl.u.blobid.cb_fn = cb_fn; 4524 ctx->cpl.u.blobid.cb_arg = cb_arg; 4525 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 4526 ctx->bserrno = 0; 4527 ctx->frozen = false; 4528 ctx->original.id = blobid; 4529 ctx->xattrs = snapshot_xattrs; 4530 4531 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx); 4532 } 4533 /* END spdk_bs_create_snapshot */ 4534 4535 /* START spdk_bs_create_clone */ 4536 4537 static void 4538 _spdk_bs_xattr_clone(void *arg, const char *name, 4539 const void **value, size_t *value_len) 4540 { 4541 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 4542 4543 struct spdk_blob *blob = (struct spdk_blob *)arg; 4544 *value = &blob->id; 4545 *value_len = sizeof(blob->id); 4546 } 4547 4548 static void 4549 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4550 { 4551 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4552 struct spdk_blob *clone = _blob; 4553 4554 ctx->new.blob = clone; 4555 _spdk_bs_blob_list_add(clone); 4556 4557 spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4558 } 4559 4560 
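/*
 * Illustrative caller-side sketch (not part of the blobstore itself): taking a
 * snapshot of a blob and then creating a writable clone of that snapshot. The
 * callback names and the my_bs/my_blobid variables are assumptions made only
 * for this example; error handling is elided.
 *
 *	static void
 *	clone_done(void *cb_arg, spdk_blob_id clone_id, int bserrno)
 *	{
 *		// clone_id now refers to a thin-provisioned, writable clone
 *	}
 *
 *	static void
 *	snapshot_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			// the snapshot is read-only, so it may be cloned
 *			spdk_bs_create_clone(my_bs, snapshot_id, NULL, clone_done, NULL);
 *		}
 *	}
 *
 *	spdk_bs_create_snapshot(my_bs, my_blobid, NULL, snapshot_done, NULL);
 */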
static void 4561 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 4562 { 4563 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4564 4565 ctx->cpl.u.blobid.blobid = blobid; 4566 spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx); 4567 } 4568 4569 static void 4570 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4571 { 4572 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4573 struct spdk_blob_opts opts; 4574 struct spdk_blob_xattr_opts internal_xattrs; 4575 char *xattr_names[] = { BLOB_SNAPSHOT }; 4576 4577 if (bserrno != 0) { 4578 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4579 return; 4580 } 4581 4582 ctx->original.blob = _blob; 4583 4584 if (!_blob->data_ro || !_blob->md_ro) { 4585 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Clone not from read-only blob\n"); 4586 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4587 return; 4588 } 4589 4590 spdk_blob_opts_init(&opts); 4591 _spdk_blob_xattrs_init(&internal_xattrs); 4592 4593 opts.thin_provision = true; 4594 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 4595 if (ctx->xattrs) { 4596 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 4597 } 4598 4599 /* Set internal xattr BLOB_SNAPSHOT */ 4600 internal_xattrs.count = 1; 4601 internal_xattrs.ctx = _blob; 4602 internal_xattrs.names = xattr_names; 4603 internal_xattrs.get_value = _spdk_bs_xattr_clone; 4604 4605 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 4606 _spdk_bs_clone_newblob_create_cpl, ctx); 4607 } 4608 4609 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 4610 const struct spdk_blob_xattr_opts *clone_xattrs, 4611 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4612 { 4613 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4614 4615 if (!ctx) { 4616 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 4617 return; 4618 } 4619 4620 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4621 ctx->cpl.u.blobid.cb_fn = cb_fn; 4622 ctx->cpl.u.blobid.cb_arg = cb_arg; 4623 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 4624 ctx->bserrno = 0; 4625 ctx->xattrs = clone_xattrs; 4626 ctx->original.id = blobid; 4627 4628 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx); 4629 } 4630 4631 /* END spdk_bs_create_clone */ 4632 4633 /* START spdk_bs_inflate_blob */ 4634 4635 static void 4636 _spdk_bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 4637 { 4638 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4639 struct spdk_blob *_blob = ctx->original.blob; 4640 4641 if (bserrno != 0) { 4642 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4643 return; 4644 } 4645 4646 assert(_parent != NULL); 4647 4648 _spdk_bs_blob_list_remove(_blob); 4649 _blob->parent_id = _parent->id; 4650 _spdk_blob_set_xattr(_blob, BLOB_SNAPSHOT, &_blob->parent_id, 4651 sizeof(spdk_blob_id), true); 4652 4653 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 4654 _blob->back_bs_dev = spdk_bs_create_blob_bs_dev(_parent); 4655 _spdk_bs_blob_list_add(_blob); 4656 4657 spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4658 } 4659 4660 static void 4661 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno) 4662 { 4663 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4664 struct spdk_blob *_blob = ctx->original.blob; 4665 struct spdk_blob *_parent; 4666 4667 if (bserrno 
!= 0) { 4668 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4669 return; 4670 } 4671 4672 if (ctx->allocate_all) { 4673 /* remove thin provisioning */ 4674 _spdk_bs_blob_list_remove(_blob); 4675 _spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 4676 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 4677 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 4678 _blob->back_bs_dev = NULL; 4679 _blob->parent_id = SPDK_BLOBID_INVALID; 4680 } else { 4681 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob; 4682 if (_parent->parent_id != SPDK_BLOBID_INVALID) { 4683 /* We must change the parent of the inflated blob */ 4684 spdk_bs_open_blob(_blob->bs, _parent->parent_id, 4685 _spdk_bs_inflate_blob_set_parent_cpl, ctx); 4686 return; 4687 } 4688 4689 _spdk_bs_blob_list_remove(_blob); 4690 _spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 4691 _blob->parent_id = SPDK_BLOBID_INVALID; 4692 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 4693 _blob->back_bs_dev = spdk_bs_create_zeroes_dev(); 4694 } 4695 4696 _blob->state = SPDK_BLOB_STATE_DIRTY; 4697 spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4698 } 4699 4700 /* Check if cluster needs allocation */ 4701 static inline bool 4702 _spdk_bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all) 4703 { 4704 struct spdk_blob_bs_dev *b; 4705 4706 assert(blob != NULL); 4707 4708 if (blob->active.clusters[cluster] != 0) { 4709 /* Cluster is already allocated */ 4710 return false; 4711 } 4712 4713 if (blob->parent_id == SPDK_BLOBID_INVALID) { 4714 /* Blob have no parent blob */ 4715 return allocate_all; 4716 } 4717 4718 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 4719 return (allocate_all || b->blob->active.clusters[cluster] != 0); 4720 } 4721 4722 static void 4723 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno) 4724 { 4725 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4726 struct spdk_blob *_blob = ctx->original.blob; 4727 uint64_t offset; 4728 4729 if (bserrno != 0) { 4730 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4731 return; 4732 } 4733 4734 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) { 4735 if (_spdk_bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) { 4736 break; 4737 } 4738 } 4739 4740 if (ctx->cluster < _blob->active.num_clusters) { 4741 offset = _spdk_bs_cluster_to_lba(_blob->bs, ctx->cluster); 4742 4743 /* We may safely increment a cluster before write */ 4744 ctx->cluster++; 4745 4746 /* Use zero length write to touch a cluster */ 4747 spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0, 4748 _spdk_bs_inflate_blob_touch_next, ctx); 4749 } else { 4750 _spdk_bs_inflate_blob_done(cb_arg, bserrno); 4751 } 4752 } 4753 4754 static void 4755 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4756 { 4757 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4758 uint64_t lfc; /* lowest free cluster */ 4759 uint64_t i; 4760 4761 if (bserrno != 0) { 4762 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4763 return; 4764 } 4765 ctx->original.blob = _blob; 4766 4767 if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) { 4768 /* This blob have no parent, so we cannot decouple it. 
*/ 4769 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n"); 4770 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4771 return; 4772 } 4773 4774 if (spdk_blob_is_thin_provisioned(_blob) == false) { 4775 /* This is not thin provisioned blob. No need to inflate. */ 4776 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0); 4777 return; 4778 } 4779 4780 /* Do two passes - one to verify that we can obtain enough clusters 4781 * and another to actually claim them. 4782 */ 4783 lfc = 0; 4784 for (i = 0; i < _blob->active.num_clusters; i++) { 4785 if (_spdk_bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) { 4786 lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc); 4787 if (lfc == UINT32_MAX) { 4788 /* No more free clusters. Cannot satisfy the request */ 4789 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 4790 return; 4791 } 4792 lfc++; 4793 } 4794 } 4795 4796 ctx->cluster = 0; 4797 _spdk_bs_inflate_blob_touch_next(ctx, 0); 4798 } 4799 4800 static void 4801 _spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 4802 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 4803 { 4804 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4805 4806 if (!ctx) { 4807 cb_fn(cb_arg, -ENOMEM); 4808 return; 4809 } 4810 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 4811 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 4812 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 4813 ctx->bserrno = 0; 4814 ctx->original.id = blobid; 4815 ctx->channel = channel; 4816 ctx->allocate_all = allocate_all; 4817 4818 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx); 4819 } 4820 4821 void 4822 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 4823 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 4824 { 4825 _spdk_bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 4826 } 4827 4828 void 4829 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 4830 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 4831 { 4832 _spdk_bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 4833 } 4834 /* END spdk_bs_inflate_blob */ 4835 4836 /* START spdk_blob_resize */ 4837 struct spdk_bs_resize_ctx { 4838 spdk_blob_op_complete cb_fn; 4839 void *cb_arg; 4840 struct spdk_blob *blob; 4841 uint64_t sz; 4842 int rc; 4843 }; 4844 4845 static void 4846 _spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc) 4847 { 4848 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 4849 4850 if (rc != 0) { 4851 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 4852 } 4853 4854 if (ctx->rc != 0) { 4855 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 4856 rc = ctx->rc; 4857 } 4858 4859 ctx->blob->resize_in_progress = false; 4860 4861 ctx->cb_fn(ctx->cb_arg, rc); 4862 free(ctx); 4863 } 4864 4865 static void 4866 _spdk_bs_resize_freeze_cpl(void *cb_arg, int rc) 4867 { 4868 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 4869 4870 if (rc != 0) { 4871 ctx->blob->resize_in_progress = false; 4872 ctx->cb_fn(ctx->cb_arg, rc); 4873 free(ctx); 4874 return; 4875 } 4876 4877 ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz); 4878 4879 _spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx); 4880 } 4881 4882 void 4883 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 4884 { 4885 struct spdk_bs_resize_ctx *ctx; 4886 4887 _spdk_blob_verify_md_op(blob); 4888 4889 
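	/*
	 * Resizing is serialized against I/O: the request freezes I/O on the blob,
	 * _spdk_bs_resize_freeze_cpl() performs the actual cluster-map resize, and
	 * _spdk_bs_resize_unfreeze_cpl() unfreezes I/O and reports the result. The
	 * resize_in_progress flag below rejects a second concurrent resize with
	 * -EBUSY.
	 */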
SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz); 4890 4891 if (blob->md_ro) { 4892 cb_fn(cb_arg, -EPERM); 4893 return; 4894 } 4895 4896 if (sz == blob->active.num_clusters) { 4897 cb_fn(cb_arg, 0); 4898 return; 4899 } 4900 4901 if (blob->resize_in_progress) { 4902 cb_fn(cb_arg, -EBUSY); 4903 return; 4904 } 4905 4906 ctx = calloc(1, sizeof(*ctx)); 4907 if (!ctx) { 4908 cb_fn(cb_arg, -ENOMEM); 4909 return; 4910 } 4911 4912 blob->resize_in_progress = true; 4913 ctx->cb_fn = cb_fn; 4914 ctx->cb_arg = cb_arg; 4915 ctx->blob = blob; 4916 ctx->sz = sz; 4917 _spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx); 4918 } 4919 4920 /* END spdk_blob_resize */ 4921 4922 4923 /* START spdk_bs_delete_blob */ 4924 4925 static void 4926 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno) 4927 { 4928 spdk_bs_sequence_t *seq = cb_arg; 4929 4930 spdk_bs_sequence_finish(seq, bserrno); 4931 } 4932 4933 static void 4934 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4935 { 4936 struct spdk_blob *blob = cb_arg; 4937 4938 if (bserrno != 0) { 4939 /* 4940 * We already removed this blob from the blobstore tailq, so 4941 * we need to free it here since this is the last reference 4942 * to it. 4943 */ 4944 _spdk_blob_free(blob); 4945 _spdk_bs_delete_close_cpl(seq, bserrno); 4946 return; 4947 } 4948 4949 /* 4950 * This will immediately decrement the ref_count and call 4951 * the completion routine since the metadata state is clean. 4952 * By calling spdk_blob_close, we reduce the number of call 4953 * points into code that touches the blob->open_ref count 4954 * and the blobstore's blob list. 4955 */ 4956 spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq); 4957 } 4958 4959 static void 4960 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 4961 { 4962 spdk_bs_sequence_t *seq = cb_arg; 4963 struct spdk_blob_list *snapshot = NULL; 4964 uint32_t page_num; 4965 4966 if (bserrno != 0) { 4967 spdk_bs_sequence_finish(seq, bserrno); 4968 return; 4969 } 4970 4971 _spdk_blob_verify_md_op(blob); 4972 4973 if (blob->open_ref > 1) { 4974 /* 4975 * Someone has this blob open (besides this delete context). 4976 * Decrement the ref count directly and return -EBUSY. 4977 */ 4978 blob->open_ref--; 4979 spdk_bs_sequence_finish(seq, -EBUSY); 4980 return; 4981 } 4982 4983 bserrno = _spdk_bs_blob_list_remove(blob); 4984 if (bserrno != 0) { 4985 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Remove blob #%" PRIu64 " from a list\n", blob->id); 4986 spdk_bs_sequence_finish(seq, bserrno); 4987 return; 4988 } 4989 4990 /* 4991 * Remove the blob from the blob_store list now, to ensure it does not 4992 * get returned after this point by _spdk_blob_lookup(). 
 */
	TAILQ_REMOVE(&blob->bs->blobs, blob, link);

	/* If blob is a snapshot then remove it from the list */
	TAILQ_FOREACH(snapshot, &blob->bs->snapshots, link) {
		if (snapshot->id == blob->id) {
			TAILQ_REMOVE(&blob->bs->snapshots, snapshot, link);
			free(snapshot);
			break;
		}
	}

	page_num = _spdk_bs_blobid_to_page(blob->id);
	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->active.num_pages = 0;
	_spdk_blob_resize(blob, 0);

	_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob);
}

void
spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
		    spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;
	struct spdk_blob_list *snapshot_entry = NULL;

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid);

	assert(spdk_get_thread() == bs->md_thread);

	/* Check if this is a snapshot with clones */
	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}
	if (snapshot_entry != NULL) {
		/* If the snapshot has clones, we cannot remove it */
		if (!TAILQ_EMPTY(&snapshot_entry->clones)) {
			SPDK_ERRLOG("Cannot remove snapshot with clones\n");
			cb_fn(cb_arg, -EBUSY);
			return;
		}
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq);
}

/* END spdk_bs_delete_blob */

/* START spdk_bs_open_blob */

static void
_spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	/* If the blob has a CRC error, we just return NULL.
*/ 5064 if (blob == NULL) { 5065 seq->cpl.u.blob_handle.blob = NULL; 5066 spdk_bs_sequence_finish(seq, bserrno); 5067 return; 5068 } 5069 5070 blob->open_ref++; 5071 5072 TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link); 5073 5074 spdk_bs_sequence_finish(seq, bserrno); 5075 } 5076 5077 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 5078 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5079 { 5080 struct spdk_blob *blob; 5081 struct spdk_bs_cpl cpl; 5082 spdk_bs_sequence_t *seq; 5083 uint32_t page_num; 5084 5085 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid); 5086 assert(spdk_get_thread() == bs->md_thread); 5087 5088 page_num = _spdk_bs_blobid_to_page(blobid); 5089 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 5090 /* Invalid blobid */ 5091 cb_fn(cb_arg, NULL, -ENOENT); 5092 return; 5093 } 5094 5095 blob = _spdk_blob_lookup(bs, blobid); 5096 if (blob) { 5097 blob->open_ref++; 5098 cb_fn(cb_arg, blob, 0); 5099 return; 5100 } 5101 5102 blob = _spdk_blob_alloc(bs, blobid); 5103 if (!blob) { 5104 cb_fn(cb_arg, NULL, -ENOMEM); 5105 return; 5106 } 5107 5108 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 5109 cpl.u.blob_handle.cb_fn = cb_fn; 5110 cpl.u.blob_handle.cb_arg = cb_arg; 5111 cpl.u.blob_handle.blob = blob; 5112 5113 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 5114 if (!seq) { 5115 _spdk_blob_free(blob); 5116 cb_fn(cb_arg, NULL, -ENOMEM); 5117 return; 5118 } 5119 5120 _spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob); 5121 } 5122 /* END spdk_bs_open_blob */ 5123 5124 /* START spdk_blob_set_read_only */ 5125 int spdk_blob_set_read_only(struct spdk_blob *blob) 5126 { 5127 _spdk_blob_verify_md_op(blob); 5128 5129 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 5130 5131 blob->state = SPDK_BLOB_STATE_DIRTY; 5132 return 0; 5133 } 5134 /* END spdk_blob_set_read_only */ 5135 5136 /* START spdk_blob_sync_md */ 5137 5138 static void 5139 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5140 { 5141 struct spdk_blob *blob = cb_arg; 5142 5143 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 5144 blob->data_ro = true; 5145 blob->md_ro = true; 5146 } 5147 5148 spdk_bs_sequence_finish(seq, bserrno); 5149 } 5150 5151 static void 5152 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5153 { 5154 struct spdk_bs_cpl cpl; 5155 spdk_bs_sequence_t *seq; 5156 5157 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5158 cpl.u.blob_basic.cb_fn = cb_fn; 5159 cpl.u.blob_basic.cb_arg = cb_arg; 5160 5161 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 5162 if (!seq) { 5163 cb_fn(cb_arg, -ENOMEM); 5164 return; 5165 } 5166 5167 _spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob); 5168 } 5169 5170 void 5171 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5172 { 5173 _spdk_blob_verify_md_op(blob); 5174 5175 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id); 5176 5177 if (blob->md_ro) { 5178 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 5179 cb_fn(cb_arg, 0); 5180 return; 5181 } 5182 5183 _spdk_blob_sync_md(blob, cb_fn, cb_arg); 5184 } 5185 5186 /* END spdk_blob_sync_md */ 5187 5188 struct spdk_blob_insert_cluster_ctx { 5189 struct spdk_thread *thread; 5190 struct spdk_blob *blob; 5191 uint32_t cluster_num; /* cluster index in blob */ 5192 uint32_t cluster; /* cluster on disk */ 5193 int rc; 5194 spdk_blob_op_complete cb_fn; 5195 void *cb_arg; 5196 }; 5197 5198 static void 5199 _spdk_blob_insert_cluster_msg_cpl(void *arg) 5200 
{ 5201 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5202 5203 ctx->cb_fn(ctx->cb_arg, ctx->rc); 5204 free(ctx); 5205 } 5206 5207 static void 5208 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno) 5209 { 5210 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5211 5212 ctx->rc = bserrno; 5213 spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx); 5214 } 5215 5216 static void 5217 _spdk_blob_insert_cluster_msg(void *arg) 5218 { 5219 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5220 5221 ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 5222 if (ctx->rc != 0) { 5223 spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx); 5224 return; 5225 } 5226 5227 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 5228 _spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx); 5229 } 5230 5231 static void 5232 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 5233 uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg) 5234 { 5235 struct spdk_blob_insert_cluster_ctx *ctx; 5236 5237 ctx = calloc(1, sizeof(*ctx)); 5238 if (ctx == NULL) { 5239 cb_fn(cb_arg, -ENOMEM); 5240 return; 5241 } 5242 5243 ctx->thread = spdk_get_thread(); 5244 ctx->blob = blob; 5245 ctx->cluster_num = cluster_num; 5246 ctx->cluster = cluster; 5247 ctx->cb_fn = cb_fn; 5248 ctx->cb_arg = cb_arg; 5249 5250 spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx); 5251 } 5252 5253 /* START spdk_blob_close */ 5254 5255 static void 5256 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5257 { 5258 struct spdk_blob *blob = cb_arg; 5259 5260 if (bserrno == 0) { 5261 blob->open_ref--; 5262 if (blob->open_ref == 0) { 5263 /* 5264 * Blobs with active.num_pages == 0 are deleted blobs. 5265 * these blobs are removed from the blob_store list 5266 * when the deletion process starts - so don't try to 5267 * remove them again. 
5268 */ 5269 if (blob->active.num_pages > 0) { 5270 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 5271 } 5272 _spdk_blob_free(blob); 5273 } 5274 } 5275 5276 spdk_bs_sequence_finish(seq, bserrno); 5277 } 5278 5279 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5280 { 5281 struct spdk_bs_cpl cpl; 5282 spdk_bs_sequence_t *seq; 5283 5284 _spdk_blob_verify_md_op(blob); 5285 5286 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id); 5287 5288 if (blob->open_ref == 0) { 5289 cb_fn(cb_arg, -EBADF); 5290 return; 5291 } 5292 5293 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5294 cpl.u.blob_basic.cb_fn = cb_fn; 5295 cpl.u.blob_basic.cb_arg = cb_arg; 5296 5297 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 5298 if (!seq) { 5299 cb_fn(cb_arg, -ENOMEM); 5300 return; 5301 } 5302 5303 /* Sync metadata */ 5304 _spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob); 5305 } 5306 5307 /* END spdk_blob_close */ 5308 5309 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 5310 { 5311 return spdk_get_io_channel(bs); 5312 } 5313 5314 void spdk_bs_free_io_channel(struct spdk_io_channel *channel) 5315 { 5316 spdk_put_io_channel(channel); 5317 } 5318 5319 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 5320 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 5321 { 5322 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 5323 SPDK_BLOB_UNMAP); 5324 } 5325 5326 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 5327 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 5328 { 5329 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 5330 SPDK_BLOB_WRITE_ZEROES); 5331 } 5332 5333 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 5334 void *payload, uint64_t offset, uint64_t length, 5335 spdk_blob_op_complete cb_fn, void *cb_arg) 5336 { 5337 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 5338 SPDK_BLOB_WRITE); 5339 } 5340 5341 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 5342 void *payload, uint64_t offset, uint64_t length, 5343 spdk_blob_op_complete cb_fn, void *cb_arg) 5344 { 5345 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 5346 SPDK_BLOB_READ); 5347 } 5348 5349 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 5350 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 5351 spdk_blob_op_complete cb_fn, void *cb_arg) 5352 { 5353 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false); 5354 } 5355 5356 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 5357 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 5358 spdk_blob_op_complete cb_fn, void *cb_arg) 5359 { 5360 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true); 5361 } 5362 5363 struct spdk_bs_iter_ctx { 5364 int64_t page_num; 5365 struct spdk_blob_store *bs; 5366 5367 spdk_blob_op_with_handle_complete cb_fn; 5368 void *cb_arg; 5369 }; 5370 5371 static void 5372 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5373 { 5374 struct spdk_bs_iter_ctx *ctx = cb_arg; 5375 struct spdk_blob_store *bs = ctx->bs; 5376 spdk_blob_id id; 5377 5378 if (bserrno == 0) { 5379 
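		/*
		 * Open succeeded: hand the blob to the iterator's callback and stop.
		 * On any error (including the artificial -1 used to kick off
		 * iteration) the code below advances to the next bit set in
		 * used_blobids and opens that blob instead.
		 */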
ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 5380 free(ctx); 5381 return; 5382 } 5383 5384 ctx->page_num++; 5385 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 5386 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 5387 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 5388 free(ctx); 5389 return; 5390 } 5391 5392 id = _spdk_bs_page_to_blobid(ctx->page_num); 5393 5394 spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx); 5395 } 5396 5397 void 5398 spdk_bs_iter_first(struct spdk_blob_store *bs, 5399 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5400 { 5401 struct spdk_bs_iter_ctx *ctx; 5402 5403 ctx = calloc(1, sizeof(*ctx)); 5404 if (!ctx) { 5405 cb_fn(cb_arg, NULL, -ENOMEM); 5406 return; 5407 } 5408 5409 ctx->page_num = -1; 5410 ctx->bs = bs; 5411 ctx->cb_fn = cb_fn; 5412 ctx->cb_arg = cb_arg; 5413 5414 _spdk_bs_iter_cpl(ctx, NULL, -1); 5415 } 5416 5417 static void 5418 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno) 5419 { 5420 struct spdk_bs_iter_ctx *ctx = cb_arg; 5421 5422 _spdk_bs_iter_cpl(ctx, NULL, -1); 5423 } 5424 5425 void 5426 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 5427 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5428 { 5429 struct spdk_bs_iter_ctx *ctx; 5430 5431 assert(blob != NULL); 5432 5433 ctx = calloc(1, sizeof(*ctx)); 5434 if (!ctx) { 5435 cb_fn(cb_arg, NULL, -ENOMEM); 5436 return; 5437 } 5438 5439 ctx->page_num = _spdk_bs_blobid_to_page(blob->id); 5440 ctx->bs = bs; 5441 ctx->cb_fn = cb_fn; 5442 ctx->cb_arg = cb_arg; 5443 5444 /* Close the existing blob */ 5445 spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx); 5446 } 5447 5448 static int 5449 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 5450 uint16_t value_len, bool internal) 5451 { 5452 struct spdk_xattr_tailq *xattrs; 5453 struct spdk_xattr *xattr; 5454 5455 _spdk_blob_verify_md_op(blob); 5456 5457 if (blob->md_ro) { 5458 return -EPERM; 5459 } 5460 5461 if (internal) { 5462 xattrs = &blob->xattrs_internal; 5463 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR; 5464 } else { 5465 xattrs = &blob->xattrs; 5466 } 5467 5468 TAILQ_FOREACH(xattr, xattrs, link) { 5469 if (!strcmp(name, xattr->name)) { 5470 free(xattr->value); 5471 xattr->value_len = value_len; 5472 xattr->value = malloc(value_len); 5473 memcpy(xattr->value, value, value_len); 5474 5475 blob->state = SPDK_BLOB_STATE_DIRTY; 5476 5477 return 0; 5478 } 5479 } 5480 5481 xattr = calloc(1, sizeof(*xattr)); 5482 if (!xattr) { 5483 return -ENOMEM; 5484 } 5485 xattr->name = strdup(name); 5486 xattr->value_len = value_len; 5487 xattr->value = malloc(value_len); 5488 memcpy(xattr->value, value, value_len); 5489 TAILQ_INSERT_TAIL(xattrs, xattr, link); 5490 5491 blob->state = SPDK_BLOB_STATE_DIRTY; 5492 5493 return 0; 5494 } 5495 5496 int 5497 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 5498 uint16_t value_len) 5499 { 5500 return _spdk_blob_set_xattr(blob, name, value, value_len, false); 5501 } 5502 5503 static int 5504 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal) 5505 { 5506 struct spdk_xattr_tailq *xattrs; 5507 struct spdk_xattr *xattr; 5508 5509 _spdk_blob_verify_md_op(blob); 5510 5511 if (blob->md_ro) { 5512 return -EPERM; 5513 } 5514 xattrs = internal ? 
&blob->xattrs_internal : &blob->xattrs; 5515 5516 TAILQ_FOREACH(xattr, xattrs, link) { 5517 if (!strcmp(name, xattr->name)) { 5518 TAILQ_REMOVE(xattrs, xattr, link); 5519 free(xattr->value); 5520 free(xattr->name); 5521 free(xattr); 5522 5523 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 5524 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 5525 } 5526 blob->state = SPDK_BLOB_STATE_DIRTY; 5527 5528 return 0; 5529 } 5530 } 5531 5532 return -ENOENT; 5533 } 5534 5535 int 5536 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 5537 { 5538 return _spdk_blob_remove_xattr(blob, name, false); 5539 } 5540 5541 static int 5542 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 5543 const void **value, size_t *value_len, bool internal) 5544 { 5545 struct spdk_xattr *xattr; 5546 struct spdk_xattr_tailq *xattrs; 5547 5548 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 5549 5550 TAILQ_FOREACH(xattr, xattrs, link) { 5551 if (!strcmp(name, xattr->name)) { 5552 *value = xattr->value; 5553 *value_len = xattr->value_len; 5554 return 0; 5555 } 5556 } 5557 return -ENOENT; 5558 } 5559 5560 int 5561 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 5562 const void **value, size_t *value_len) 5563 { 5564 _spdk_blob_verify_md_op(blob); 5565 5566 return _spdk_blob_get_xattr_value(blob, name, value, value_len, false); 5567 } 5568 5569 struct spdk_xattr_names { 5570 uint32_t count; 5571 const char *names[0]; 5572 }; 5573 5574 static int 5575 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 5576 { 5577 struct spdk_xattr *xattr; 5578 int count = 0; 5579 5580 TAILQ_FOREACH(xattr, xattrs, link) { 5581 count++; 5582 } 5583 5584 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 5585 if (*names == NULL) { 5586 return -ENOMEM; 5587 } 5588 5589 TAILQ_FOREACH(xattr, xattrs, link) { 5590 (*names)->names[(*names)->count++] = xattr->name; 5591 } 5592 5593 return 0; 5594 } 5595 5596 int 5597 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 5598 { 5599 _spdk_blob_verify_md_op(blob); 5600 5601 return _spdk_blob_get_xattr_names(&blob->xattrs, names); 5602 } 5603 5604 uint32_t 5605 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 5606 { 5607 assert(names != NULL); 5608 5609 return names->count; 5610 } 5611 5612 const char * 5613 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 5614 { 5615 if (index >= names->count) { 5616 return NULL; 5617 } 5618 5619 return names->names[index]; 5620 } 5621 5622 void 5623 spdk_xattr_names_free(struct spdk_xattr_names *names) 5624 { 5625 free(names); 5626 } 5627 5628 struct spdk_bs_type 5629 spdk_bs_get_bstype(struct spdk_blob_store *bs) 5630 { 5631 return bs->bstype; 5632 } 5633 5634 void 5635 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 5636 { 5637 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 5638 } 5639 5640 bool 5641 spdk_blob_is_read_only(struct spdk_blob *blob) 5642 { 5643 assert(blob != NULL); 5644 return (blob->data_ro || blob->md_ro); 5645 } 5646 5647 bool 5648 spdk_blob_is_snapshot(struct spdk_blob *blob) 5649 { 5650 struct spdk_blob_list *snapshot_entry; 5651 5652 assert(blob != NULL); 5653 5654 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 5655 if (snapshot_entry->id == blob->id) { 5656 break; 5657 } 5658 } 5659 5660 if (snapshot_entry == NULL) { 5661 return false; 5662 } 5663 5664 return true; 5665 } 5666 5667 bool 5668 
spdk_blob_is_clone(struct spdk_blob *blob) 5669 { 5670 assert(blob != NULL); 5671 5672 if (blob->parent_id != SPDK_BLOBID_INVALID) { 5673 assert(spdk_blob_is_thin_provisioned(blob)); 5674 return true; 5675 } 5676 5677 return false; 5678 } 5679 5680 bool 5681 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 5682 { 5683 assert(blob != NULL); 5684 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 5685 } 5686 5687 spdk_blob_id 5688 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 5689 { 5690 struct spdk_blob_list *snapshot_entry = NULL; 5691 struct spdk_blob_list *clone_entry = NULL; 5692 5693 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5694 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 5695 if (clone_entry->id == blob_id) { 5696 return snapshot_entry->id; 5697 } 5698 } 5699 } 5700 5701 return SPDK_BLOBID_INVALID; 5702 } 5703 5704 int 5705 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 5706 size_t *count) 5707 { 5708 struct spdk_blob_list *snapshot_entry, *clone_entry; 5709 size_t n; 5710 5711 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5712 if (snapshot_entry->id == blobid) { 5713 break; 5714 } 5715 } 5716 if (snapshot_entry == NULL) { 5717 *count = 0; 5718 return 0; 5719 } 5720 5721 if (ids == NULL || *count < snapshot_entry->clone_count) { 5722 *count = snapshot_entry->clone_count; 5723 return -ENOMEM; 5724 } 5725 *count = snapshot_entry->clone_count; 5726 5727 n = 0; 5728 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 5729 ids[n++] = clone_entry->id; 5730 } 5731 5732 return 0; 5733 } 5734 5735 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB) 5736
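/*
 * Illustrative sketch (not part of the blobstore itself) of walking every blob
 * in a loaded blobstore with the iterator above and reading a user xattr from
 * each one. The bs_for_each_done callback name and the "name" xattr key are
 * assumptions made only for this example. Note that spdk_bs_iter_next() closes
 * the blob handle passed to it, and that this code must run on the blobstore's
 * metadata thread.
 *
 *	static void
 *	bs_for_each_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *		const void *value;
 *		size_t value_len;
 *
 *		if (bserrno == -ENOENT) {
 *			return;	// no more blobs
 *		} else if (bserrno != 0) {
 *			return;	// iteration failed
 *		}
 *
 *		if (spdk_blob_get_xattr_value(blob, "name", &value, &value_len) == 0) {
 *			// value/value_len point at the blob's in-memory xattr entry
 *		}
 *
 *		spdk_bs_iter_next(bs, blob, bs_for_each_done, bs);
 *	}
 *
 *	spdk_bs_iter_first(bs, bs_for_each_done, bs);
 */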