/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/likely.h"
#include "spdk/util.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg);

static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
				uint16_t value_len, bool internal);
static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				      const void **value, size_t *value_len, bool internal);
static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void
_spdk_blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static void
_spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
	assert(bs->num_free_clusters > 0);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);

	spdk_bit_array_set(bs->used_clusters, cluster_num);
	bs->num_free_clusters--;
}
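/* bs->used_clusters (one bit per cluster) and bs->num_free_clusters are always
 * updated together. On the allocation path the caller holds
 * bs->used_clusters_mutex around _spdk_bs_claim_cluster() (see
 * _spdk_bs_allocate_cluster() below), while _spdk_bs_release_cluster() takes
 * the mutex itself.
 */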
static int
_spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	_spdk_blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

static int
_spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
			  uint64_t *lowest_free_cluster, bool update_map)
{
	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
			       *lowest_free_cluster);
	if (*lowest_free_cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request */
		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
		return -ENOSPC;
	}

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);

	if (update_map) {
		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
	}

	return 0;
}

static void
_spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);

	pthread_mutex_lock(&bs->used_clusters_mutex);
	spdk_bit_array_clear(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
	pthread_mutex_unlock(&bs->used_clusters_mutex);
}

static void
_spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	opts->num_clusters = 0;
	opts->thin_provision = false;
	_spdk_blob_xattrs_init(&opts->xattrs);
}
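/* Illustrative usage sketch: a caller is expected to initialize the options
 * with spdk_blob_opts_init() before overriding individual fields, e.g.
 * (assuming the spdk_bs_create_blob_ext() entry point from include/spdk/blob.h):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 10;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_cb, cb_arg);
 *
 * where create_cb/cb_arg are the caller's completion callback and context.
 */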
static struct spdk_blob *
_spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);

	return blob;
}

static void
_spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
_spdk_blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);

	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	_spdk_xattrs_free(&blob->xattrs);
	_spdk_xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
_spdk_blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			spdk_bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
_spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static void
_spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}
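/* frozen_refcnt counts nested freeze requests. Only the 0 -> 1 transition
 * sweeps every channel (so in-flight I/O drains and new I/O starts queueing on
 * ch->queued_io), and only the 1 -> 0 transition replays the queued I/O via
 * _spdk_blob_execute_queued_io(); intermediate transitions complete
 * immediately.
 */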
static int
_spdk_blob_mark_clean(struct spdk_blob *blob)
{
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*pages));
	}

	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
_spdk_blob_deserialize_xattr(struct spdk_blob *blob,
			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}
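/* On-disk layout of an xattr descriptor, as consumed above and produced by
 * _spdk_blob_serialize_xattr() below: a fixed header (type, length,
 * name_length, value_length) followed by name_length bytes of name (not
 * NUL-terminated on disk) and value_length bytes of value. desc->length
 * covers everything after the generic type/length pair.
 */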
static int
_spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
			struct spdk_blob_md_descriptor_extent *desc_extent;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;

			if (desc_extent->length == 0 ||
			    (desc_extent->length % sizeof(desc_extent->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
				for (j = 0; j < desc_extent->extents[i].length; j++) {
					if (desc_extent->extents[i].cluster_idx != 0) {
						if (!spdk_bit_array_get(blob->bs->used_clusters,
									desc_extent->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(uint64_t));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
				for (j = 0; j < desc_extent->extents[i].length; j++) {
					if (desc_extent->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
								desc_extent->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}
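/* Extent descriptors are run-length encoded: each entry stores a starting
 * cluster index and a count of consecutive clusters, and cluster_idx == 0
 * marks a run of unallocated clusters, which is only legal for
 * thin-provisioned blobs. For example, clusters placed at indices 10, 11, 12
 * followed by two unallocated clusters decode from
 * { cluster_idx = 10, length = 3 }, { cluster_idx = 0, length = 2 }.
 */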
static int
_spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
		 struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can happen,
	 * for example, if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = _spdk_blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
_spdk_blob_serialize_add_page(const struct spdk_blob *blob,
			      struct spdk_blob_md_page **pages,
			      uint32_t *page_count,
			      struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page;

	assert(pages != NULL);
	assert(page_count != NULL);

	if (*page_count == 0) {
		assert(*pages == NULL);
		*page_count = 1;
		*pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE,
					 SPDK_BS_PAGE_SIZE,
					 NULL);
	} else {
		assert(*pages != NULL);
		(*page_count)++;
		*pages = spdk_dma_realloc(*pages,
					  SPDK_BS_PAGE_SIZE * (*page_count),
					  SPDK_BS_PAGE_SIZE,
					  NULL);
	}

	if (*pages == NULL) {
		*page_count = 0;
		*last_page = NULL;
		return -ENOMEM;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
_spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
			   uint8_t *buf, size_t buf_sz,
			   size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}
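/* Xattr serialization follows a fit-or-report pattern: if the descriptor does
 * not fit in the remaining space of the current metadata page, the function
 * returns -1 and *required_sz tells the caller how much room is needed.
 * _spdk_blob_serialize_xattrs() below reacts by chaining in a fresh page and
 * retrying once.
 */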
static void
_spdk_blob_serialize_extent(const struct spdk_blob *blob,
			    uint64_t start_cluster, uint64_t *next_cluster,
			    uint8_t *buf, size_t buf_sz)
{
	struct spdk_blob_md_descriptor_extent *desc;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]);
	if (buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT;

	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i]) {
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			lba_count += lba_per_cluster;
			continue;
		}
		desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc->extents[extent_idx]);

		if (buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			desc->length = sizeof(desc->extents[0]) * extent_idx;
			*next_cluster = i;
			return;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
	desc->extents[extent_idx].length = lba_count / lba_per_cluster;
	extent_idx++;

	desc->length = sizeof(desc->extents[0]) * extent_idx;
	*next_cluster = blob->active.num_clusters;

	return;
}

static void
_spdk_blob_serialize_flags(const struct spdk_blob *blob,
			   uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
_spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
			    const struct spdk_xattr_tailq *xattrs, bool internal,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = _spdk_blob_serialize_xattr(xattr,
						*buf, *remaining_sz,
						&required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
							   &cur_page);
			if (rc < 0) {
				spdk_dma_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = _spdk_blob_serialize_xattr(xattr,
							*buf, *remaining_sz,
							&required_sz, internal);

			if (rc < 0) {
				spdk_dma_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
_spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
		     uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;
	uint64_t last_cluster;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize extents */
	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		_spdk_blob_serialize_extent(blob, last_cluster, &last_cluster,
					    buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
						   &cur_page);
		if (rc < 0) {
			return rc;
		}

		buf = (uint8_t *)cur_page->descriptors;
		remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}
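/* Resulting metadata layout: the first page always begins with the flags
 * descriptor, followed by user xattrs, internal xattrs and finally the extent
 * descriptors, with additional pages chained in whenever the current page
 * runs out of room. The next pointers and per-page CRCs are filled in later,
 * when the pages are persisted (see _spdk_blob_persist_start()).
 */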
struct spdk_blob_load_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t num_pages;
	spdk_bs_sequence_t *seq;

	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
};

static uint32_t
_spdk_blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}
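/* The CRC is computed over the metadata page minus its final four bytes,
 * which hold the stored value; _spdk_blob_load_cpl() below recomputes it and
 * compares against page->crc before trusting a page read from disk.
 */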
static void
_spdk_blob_load_final(void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	_spdk_blob_mark_clean(blob);

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_dma_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno != 0) {
		goto error;
	}

	blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot);

	if (blob->back_bs_dev == NULL) {
		bserrno = -ENOMEM;
		goto error;
	}

	_spdk_blob_load_final(ctx, bserrno);
	return;

error:
	SPDK_ERRLOG("Snapshot open failed\n");
	_spdk_blob_free(blob);
	ctx->cb_fn(ctx->seq, NULL, bserrno);
	spdk_dma_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	const void *value;
	size_t len;
	int rc;
	uint32_t crc;

	if (bserrno) {
		SPDK_ERRLOG("Metadata page read failed: %d\n", bserrno);
		_spdk_blob_free(blob);
		ctx->cb_fn(seq, NULL, bserrno);
		spdk_dma_free(ctx->pages);
		free(ctx);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = _spdk_blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
		_spdk_blob_free(blob);
		ctx->cb_fn(seq, NULL, -EINVAL);
		spdk_dma_free(ctx->pages);
		free(ctx);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		uint32_t next_page = page->next;
		uint64_t next_lba = _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + next_page);

		assert(next_lba < (blob->bs->md_start + blob->bs->md_len));

		/* Read the next page */
		ctx->num_pages++;
		ctx->pages = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
					      sizeof(*page), NULL);
		if (ctx->pages == NULL) {
			ctx->cb_fn(seq, ctx->cb_arg, -ENOMEM);
			free(ctx);
			return;
		}

		spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
					  next_lba,
					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
					  _spdk_blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		_spdk_blob_free(blob);
		ctx->cb_fn(seq, NULL, rc);
		spdk_dma_free(ctx->pages);
		free(ctx);
		return;
	}
	ctx->seq = seq;

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				_spdk_blob_free(blob);
				ctx->cb_fn(seq, NULL, -EINVAL);
				spdk_dma_free(ctx->pages);
				free(ctx);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  _spdk_blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = spdk_bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	_spdk_blob_load_final(ctx, bserrno);
}

/* Load a blob from disk given a blobid */
static void
_spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	_spdk_blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_dma_realloc(ctx->pages, SPDK_BS_PAGE_SIZE,
				      SPDK_BS_PAGE_SIZE, NULL);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	page_num = _spdk_bs_blobid_to_page(blob->id);
	lba = _spdk_bs_page_to_lba(blob->bs, bs->md_start + page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
				  _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
				  _spdk_blob_load_cpl, ctx);
}
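/* Load flow summary: the first metadata page is located directly from the
 * blobid, each page is CRC-checked, and while page->next is valid the buffer
 * is grown and the next page is read. Once the whole chain is in memory it is
 * parsed; a thin-provisioned blob then either opens its parent snapshot
 * (BLOB_SNAPSHOT internal xattr) or falls back to a zeroes device as its
 * backing bs_dev.
 */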
struct spdk_blob_persist_ctx {
	struct spdk_blob *blob;

	struct spdk_bs_super_block *super;

	struct spdk_blob_md_page *pages;

	uint64_t idx;

	spdk_bs_sequence_t *seq;
	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
};

static void
_spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		_spdk_blob_mark_clean(blob);
	}

	/* Call user callback */
	ctx->cb_fn(seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_dma_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	void *tmp;
	size_t i;

	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			_spdk_bs_release_cluster(bs, cluster_num);
		}
	}

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else {
		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	_spdk_blob_persist_complete(seq, ctx, bserrno);
}

static void
_spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	spdk_bs_batch_t *batch;
	size_t i;
	uint64_t lba;
	uint32_t lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_clusters_cpl, ctx);

	/* Unmap all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, send it as an unmap. */
		if (lba_count > 0) {
			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, send the unmap now */
	if (lba_count > 0) {
		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}
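/* Illustrative example of the run merging above, assuming 64 LBAs per
 * cluster: truncated clusters at LBAs 64, 128, 192 and 1024 produce one unmap
 * covering LBAs 64..255 and one covering 1024..1087, while unallocated
 * clusters (LBA 0) never generate an unmap.
 */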
static void
_spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		spdk_bit_array_clear(bs->used_md_pages, blob->clean.pages[i]);
	}

	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		page_num = _spdk_bs_blobid_to_page(blob->id);
		spdk_bit_array_clear(bs->used_md_pages, page_num);
	}

	/* Move on to unmapping clusters */
	_spdk_blob_persist_unmap_clusters(seq, ctx, 0);
}

static void
_spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	spdk_bs_batch_t *batch;
	size_t i;

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);

	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->clean.pages[i]);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	/* The first page will only be zeroed if this is a delete.
*/ 1235 if (blob->active.num_pages == 0) { 1236 uint32_t page_num; 1237 1238 /* The first page in the metadata goes where the blobid indicates */ 1239 page_num = _spdk_bs_blobid_to_page(blob->id); 1240 lba = _spdk_bs_page_to_lba(bs, bs->md_start + page_num); 1241 1242 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 1243 } 1244 1245 spdk_bs_batch_close(batch); 1246 } 1247 1248 static void 1249 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1250 { 1251 struct spdk_blob_persist_ctx *ctx = cb_arg; 1252 struct spdk_blob *blob = ctx->blob; 1253 struct spdk_blob_store *bs = blob->bs; 1254 uint64_t lba; 1255 uint32_t lba_count; 1256 struct spdk_blob_md_page *page; 1257 1258 if (blob->active.num_pages == 0) { 1259 /* Move on to the next step */ 1260 _spdk_blob_persist_zero_pages(seq, ctx, 0); 1261 return; 1262 } 1263 1264 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page)); 1265 1266 page = &ctx->pages[0]; 1267 /* The first page in the metadata goes where the blobid indicates */ 1268 lba = _spdk_bs_page_to_lba(bs, bs->md_start + _spdk_bs_blobid_to_page(blob->id)); 1269 1270 spdk_bs_sequence_write_dev(seq, page, lba, lba_count, 1271 _spdk_blob_persist_zero_pages, ctx); 1272 } 1273 1274 static void 1275 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1276 { 1277 struct spdk_blob_persist_ctx *ctx = cb_arg; 1278 struct spdk_blob *blob = ctx->blob; 1279 struct spdk_blob_store *bs = blob->bs; 1280 uint64_t lba; 1281 uint32_t lba_count; 1282 struct spdk_blob_md_page *page; 1283 spdk_bs_batch_t *batch; 1284 size_t i; 1285 1286 /* Clusters don't move around in blobs. The list shrinks or grows 1287 * at the end, but no changes ever occur in the middle of the list. 1288 */ 1289 1290 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page)); 1291 1292 batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx); 1293 1294 /* This starts at 1. The root page is not written until 1295 * all of the others are finished 1296 */ 1297 for (i = 1; i < blob->active.num_pages; i++) { 1298 page = &ctx->pages[i]; 1299 assert(page->sequence_num == i); 1300 1301 lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->active.pages[i]); 1302 1303 spdk_bs_batch_write_dev(batch, page, lba, lba_count); 1304 } 1305 1306 spdk_bs_batch_close(batch); 1307 } 1308 1309 static int 1310 _spdk_blob_resize(struct spdk_blob *blob, uint64_t sz) 1311 { 1312 uint64_t i; 1313 uint64_t *tmp; 1314 uint64_t lfc; /* lowest free cluster */ 1315 uint64_t num_clusters; 1316 struct spdk_blob_store *bs; 1317 1318 bs = blob->bs; 1319 1320 _spdk_blob_verify_md_op(blob); 1321 1322 if (blob->active.num_clusters == sz) { 1323 return 0; 1324 } 1325 1326 if (blob->active.num_clusters < blob->active.cluster_array_size) { 1327 /* If this blob was resized to be larger, then smaller, then 1328 * larger without syncing, then the cluster array already 1329 * contains spare assigned clusters we can use. 1330 */ 1331 num_clusters = spdk_min(blob->active.cluster_array_size, 1332 sz); 1333 } else { 1334 num_clusters = blob->active.num_clusters; 1335 } 1336 1337 /* Do two passes - one to verify that we can obtain enough clusters 1338 * and another to actually claim them. 1339 */ 1340 1341 if (spdk_blob_is_thin_provisioned(blob) == false) { 1342 lfc = 0; 1343 for (i = num_clusters; i < sz; i++) { 1344 lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc); 1345 if (lfc == UINT32_MAX) { 1346 /* No more free clusters. 
Cannot satisfy the request */ 1347 return -ENOSPC; 1348 } 1349 lfc++; 1350 } 1351 } 1352 1353 if (sz > num_clusters) { 1354 /* Expand the cluster array if necessary. 1355 * We only shrink the array when persisting. 1356 */ 1357 tmp = realloc(blob->active.clusters, sizeof(uint64_t) * sz); 1358 if (sz > 0 && tmp == NULL) { 1359 return -ENOMEM; 1360 } 1361 memset(tmp + blob->active.cluster_array_size, 0, 1362 sizeof(uint64_t) * (sz - blob->active.cluster_array_size)); 1363 blob->active.clusters = tmp; 1364 blob->active.cluster_array_size = sz; 1365 } 1366 1367 blob->state = SPDK_BLOB_STATE_DIRTY; 1368 1369 if (spdk_blob_is_thin_provisioned(blob) == false) { 1370 lfc = 0; 1371 for (i = num_clusters; i < sz; i++) { 1372 _spdk_bs_allocate_cluster(blob, i, &lfc, true); 1373 lfc++; 1374 } 1375 } 1376 1377 blob->active.num_clusters = sz; 1378 1379 return 0; 1380 } 1381 1382 static void 1383 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx) 1384 { 1385 spdk_bs_sequence_t *seq = ctx->seq; 1386 struct spdk_blob *blob = ctx->blob; 1387 struct spdk_blob_store *bs = blob->bs; 1388 uint64_t i; 1389 uint32_t page_num; 1390 void *tmp; 1391 int rc; 1392 1393 if (blob->active.num_pages == 0) { 1394 /* This is the signal that the blob should be deleted. 1395 * Immediately jump to the clean up routine. */ 1396 assert(blob->clean.num_pages > 0); 1397 ctx->idx = blob->clean.num_pages - 1; 1398 blob->state = SPDK_BLOB_STATE_CLEAN; 1399 _spdk_blob_persist_zero_pages(seq, ctx, 0); 1400 return; 1401 1402 } 1403 1404 /* Generate the new metadata */ 1405 rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 1406 if (rc < 0) { 1407 _spdk_blob_persist_complete(seq, ctx, rc); 1408 return; 1409 } 1410 1411 assert(blob->active.num_pages >= 1); 1412 1413 /* Resize the cache of page indices */ 1414 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 1415 if (!tmp) { 1416 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1417 return; 1418 } 1419 blob->active.pages = tmp; 1420 1421 /* Assign this metadata to pages. This requires two passes - 1422 * one to verify that there are enough pages and a second 1423 * to actually claim them. */ 1424 page_num = 0; 1425 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 1426 for (i = 1; i < blob->active.num_pages; i++) { 1427 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1428 if (page_num == UINT32_MAX) { 1429 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1430 return; 1431 } 1432 page_num++; 1433 } 1434 1435 page_num = 0; 1436 blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id); 1437 for (i = 1; i < blob->active.num_pages; i++) { 1438 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1439 ctx->pages[i - 1].next = page_num; 1440 /* Now that previous metadata page is complete, calculate the crc for it. 
*/ 1441 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1442 blob->active.pages[i] = page_num; 1443 spdk_bit_array_set(bs->used_md_pages, page_num); 1444 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id); 1445 page_num++; 1446 } 1447 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1448 /* Start writing the metadata from last page to first */ 1449 ctx->idx = blob->active.num_pages - 1; 1450 blob->state = SPDK_BLOB_STATE_CLEAN; 1451 _spdk_blob_persist_write_page_chain(seq, ctx, 0); 1452 } 1453 1454 static void 1455 _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1456 { 1457 struct spdk_blob_persist_ctx *ctx = cb_arg; 1458 1459 ctx->blob->bs->clean = 0; 1460 1461 spdk_dma_free(ctx->super); 1462 1463 _spdk_blob_persist_start(ctx); 1464 } 1465 1466 static void 1467 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 1468 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 1469 1470 1471 static void 1472 _spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1473 { 1474 struct spdk_blob_persist_ctx *ctx = cb_arg; 1475 1476 ctx->super->clean = 0; 1477 if (ctx->super->size == 0) { 1478 ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen; 1479 } 1480 1481 _spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx); 1482 } 1483 1484 1485 /* Write a blob to disk */ 1486 static void 1487 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 1488 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 1489 { 1490 struct spdk_blob_persist_ctx *ctx; 1491 1492 _spdk_blob_verify_md_op(blob); 1493 1494 if (blob->state == SPDK_BLOB_STATE_CLEAN) { 1495 cb_fn(seq, cb_arg, 0); 1496 return; 1497 } 1498 1499 ctx = calloc(1, sizeof(*ctx)); 1500 if (!ctx) { 1501 cb_fn(seq, cb_arg, -ENOMEM); 1502 return; 1503 } 1504 ctx->blob = blob; 1505 ctx->seq = seq; 1506 ctx->cb_fn = cb_fn; 1507 ctx->cb_arg = cb_arg; 1508 1509 if (blob->bs->clean) { 1510 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 1511 if (!ctx->super) { 1512 cb_fn(seq, cb_arg, -ENOMEM); 1513 free(ctx); 1514 return; 1515 } 1516 1517 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(blob->bs, 0), 1518 _spdk_bs_byte_to_lba(blob->bs, sizeof(*ctx->super)), 1519 _spdk_blob_persist_dirty, ctx); 1520 } else { 1521 _spdk_blob_persist_start(ctx); 1522 } 1523 } 1524 1525 struct spdk_blob_copy_cluster_ctx { 1526 struct spdk_blob *blob; 1527 uint8_t *buf; 1528 uint64_t page; 1529 uint64_t new_cluster; 1530 spdk_bs_sequence_t *seq; 1531 }; 1532 1533 static void 1534 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 1535 { 1536 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1537 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 1538 TAILQ_HEAD(, spdk_bs_request_set) requests; 1539 spdk_bs_user_op_t *op; 1540 1541 TAILQ_INIT(&requests); 1542 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 1543 1544 while (!TAILQ_EMPTY(&requests)) { 1545 op = TAILQ_FIRST(&requests); 1546 TAILQ_REMOVE(&requests, op, link); 1547 if (bserrno == 0) { 1548 spdk_bs_user_op_execute(op); 1549 } else { 1550 spdk_bs_user_op_abort(op); 1551 } 1552 } 1553 1554 spdk_dma_free(ctx->buf); 1555 free(ctx); 1556 } 1557 1558 static void 1559 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno) 1560 { 1561 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 
1562 1563 if (bserrno) { 1564 uint32_t cluster_number; 1565 1566 if (bserrno == -EEXIST) { 1567 /* The metadata insert failed because another thread 1568 * allocated the cluster first. Free our cluster 1569 * but continue without error. */ 1570 bserrno = 0; 1571 } 1572 1573 cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page); 1574 _spdk_bs_release_cluster(ctx->blob->bs, cluster_number); 1575 } 1576 1577 spdk_bs_sequence_finish(ctx->seq, bserrno); 1578 } 1579 1580 static void 1581 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1582 { 1583 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1584 uint32_t cluster_number; 1585 1586 if (bserrno) { 1587 /* The write failed, so jump to the final completion handler */ 1588 spdk_bs_sequence_finish(seq, bserrno); 1589 return; 1590 } 1591 1592 cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page); 1593 1594 _spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 1595 _spdk_blob_insert_cluster_cpl, ctx); 1596 } 1597 1598 static void 1599 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1600 { 1601 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 1602 1603 if (bserrno != 0) { 1604 /* The read failed, so jump to the final completion handler */ 1605 spdk_bs_sequence_finish(seq, bserrno); 1606 return; 1607 } 1608 1609 /* Write whole cluster */ 1610 spdk_bs_sequence_write_dev(seq, ctx->buf, 1611 _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 1612 _spdk_bs_cluster_to_lba(ctx->blob->bs, 1), 1613 _spdk_blob_write_copy_cpl, ctx); 1614 } 1615 1616 static void 1617 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob, 1618 struct spdk_io_channel *_ch, 1619 uint64_t io_unit, spdk_bs_user_op_t *op) 1620 { 1621 struct spdk_bs_cpl cpl; 1622 struct spdk_bs_channel *ch; 1623 struct spdk_blob_copy_cluster_ctx *ctx; 1624 uint32_t cluster_start_page; 1625 uint32_t cluster_number; 1626 int rc; 1627 1628 ch = spdk_io_channel_get_ctx(_ch); 1629 1630 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 1631 /* There are already operations pending. Queue this user op 1632 * and return because it will be re-executed when the outstanding 1633 * cluster allocation completes. */ 1634 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 1635 return; 1636 } 1637 1638 /* Round the io_unit offset down to the first page in the cluster */ 1639 cluster_start_page = _spdk_bs_io_unit_to_cluster_start(blob, io_unit); 1640 1641 /* Calculate which index in the metadata cluster array the corresponding 1642 * cluster is supposed to be at. 
*/ 1643 cluster_number = _spdk_bs_io_unit_to_cluster_number(blob, io_unit); 1644 1645 ctx = calloc(1, sizeof(*ctx)); 1646 if (!ctx) { 1647 spdk_bs_user_op_abort(op); 1648 return; 1649 } 1650 1651 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 1652 1653 ctx->blob = blob; 1654 ctx->page = cluster_start_page; 1655 1656 if (blob->parent_id != SPDK_BLOBID_INVALID) { 1657 ctx->buf = spdk_dma_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, NULL); 1658 if (!ctx->buf) { 1659 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 1660 blob->bs->cluster_sz); 1661 free(ctx); 1662 spdk_bs_user_op_abort(op); 1663 return; 1664 } 1665 } 1666 1667 rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, false); 1668 if (rc != 0) { 1669 spdk_dma_free(ctx->buf); 1670 free(ctx); 1671 spdk_bs_user_op_abort(op); 1672 return; 1673 } 1674 1675 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1676 cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl; 1677 cpl.u.blob_basic.cb_arg = ctx; 1678 1679 ctx->seq = spdk_bs_sequence_start(_ch, &cpl); 1680 if (!ctx->seq) { 1681 _spdk_bs_release_cluster(blob->bs, ctx->new_cluster); 1682 spdk_dma_free(ctx->buf); 1683 free(ctx); 1684 spdk_bs_user_op_abort(op); 1685 return; 1686 } 1687 1688 /* Queue the user op to block other incoming operations */ 1689 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 1690 1691 if (blob->parent_id != SPDK_BLOBID_INVALID) { 1692 /* Read cluster from backing device */ 1693 spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 1694 _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 1695 _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 1696 _spdk_blob_write_copy, ctx); 1697 } else { 1698 _spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 1699 _spdk_blob_insert_cluster_cpl, ctx); 1700 } 1701 } 1702 1703 static void 1704 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 1705 uint64_t *lba, uint32_t *lba_count) 1706 { 1707 *lba_count = length; 1708 1709 if (!_spdk_bs_io_unit_is_allocated(blob, io_unit)) { 1710 assert(blob->back_bs_dev != NULL); 1711 *lba = _spdk_bs_io_unit_to_back_dev_lba(blob, io_unit); 1712 *lba_count = _spdk_bs_io_unit_to_back_dev_lba(blob, *lba_count); 1713 } else { 1714 *lba = _spdk_bs_blob_io_unit_to_lba(blob, io_unit); 1715 } 1716 } 1717 1718 struct op_split_ctx { 1719 struct spdk_blob *blob; 1720 struct spdk_io_channel *channel; 1721 uint64_t io_unit_offset; 1722 uint64_t io_units_remaining; 1723 void *curr_payload; 1724 enum spdk_blob_op_type op_type; 1725 spdk_bs_sequence_t *seq; 1726 }; 1727 1728 static void 1729 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno) 1730 { 1731 struct op_split_ctx *ctx = cb_arg; 1732 struct spdk_blob *blob = ctx->blob; 1733 struct spdk_io_channel *ch = ctx->channel; 1734 enum spdk_blob_op_type op_type = ctx->op_type; 1735 uint8_t *buf = ctx->curr_payload; 1736 uint64_t offset = ctx->io_unit_offset; 1737 uint64_t length = ctx->io_units_remaining; 1738 uint64_t op_length; 1739 1740 if (bserrno != 0 || ctx->io_units_remaining == 0) { 1741 spdk_bs_sequence_finish(ctx->seq, bserrno); 1742 free(ctx); 1743 return; 1744 } 1745 1746 op_length = spdk_min(length, _spdk_bs_num_io_units_to_cluster_boundary(blob, 1747 offset)); 1748 1749 /* Update length and payload for next operation */ 1750 ctx->io_units_remaining -= op_length; 1751 ctx->io_unit_offset += op_length; 1752 if (op_type == 
SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 1753 ctx->curr_payload += op_length * blob->bs->io_unit_size; 1754 } 1755 1756 switch (op_type) { 1757 case SPDK_BLOB_READ: 1758 spdk_blob_io_read(blob, ch, buf, offset, op_length, 1759 _spdk_blob_request_submit_op_split_next, ctx); 1760 break; 1761 case SPDK_BLOB_WRITE: 1762 spdk_blob_io_write(blob, ch, buf, offset, op_length, 1763 _spdk_blob_request_submit_op_split_next, ctx); 1764 break; 1765 case SPDK_BLOB_UNMAP: 1766 spdk_blob_io_unmap(blob, ch, offset, op_length, 1767 _spdk_blob_request_submit_op_split_next, ctx); 1768 break; 1769 case SPDK_BLOB_WRITE_ZEROES: 1770 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 1771 _spdk_blob_request_submit_op_split_next, ctx); 1772 break; 1773 case SPDK_BLOB_READV: 1774 case SPDK_BLOB_WRITEV: 1775 SPDK_ERRLOG("readv/write not valid for %s\n", __func__); 1776 spdk_bs_sequence_finish(ctx->seq, -EINVAL); 1777 free(ctx); 1778 break; 1779 } 1780 } 1781 1782 static void 1783 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 1784 void *payload, uint64_t offset, uint64_t length, 1785 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 1786 { 1787 struct op_split_ctx *ctx; 1788 spdk_bs_sequence_t *seq; 1789 struct spdk_bs_cpl cpl; 1790 1791 assert(blob != NULL); 1792 1793 ctx = calloc(1, sizeof(struct op_split_ctx)); 1794 if (ctx == NULL) { 1795 cb_fn(cb_arg, -ENOMEM); 1796 return; 1797 } 1798 1799 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1800 cpl.u.blob_basic.cb_fn = cb_fn; 1801 cpl.u.blob_basic.cb_arg = cb_arg; 1802 1803 seq = spdk_bs_sequence_start(ch, &cpl); 1804 if (!seq) { 1805 free(ctx); 1806 cb_fn(cb_arg, -ENOMEM); 1807 return; 1808 } 1809 1810 ctx->blob = blob; 1811 ctx->channel = ch; 1812 ctx->curr_payload = payload; 1813 ctx->io_unit_offset = offset; 1814 ctx->io_units_remaining = length; 1815 ctx->op_type = op_type; 1816 ctx->seq = seq; 1817 1818 _spdk_blob_request_submit_op_split_next(ctx, 0); 1819 } 1820 1821 static void 1822 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 1823 void *payload, uint64_t offset, uint64_t length, 1824 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 1825 { 1826 struct spdk_bs_cpl cpl; 1827 uint64_t lba; 1828 uint32_t lba_count; 1829 1830 assert(blob != NULL); 1831 1832 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1833 cpl.u.blob_basic.cb_fn = cb_fn; 1834 cpl.u.blob_basic.cb_arg = cb_arg; 1835 1836 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 1837 1838 if (blob->frozen_refcnt) { 1839 /* This blob I/O is frozen */ 1840 spdk_bs_user_op_t *op; 1841 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 1842 1843 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 1844 if (!op) { 1845 cb_fn(cb_arg, -ENOMEM); 1846 return; 1847 } 1848 1849 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 1850 1851 return; 1852 } 1853 1854 switch (op_type) { 1855 case SPDK_BLOB_READ: { 1856 spdk_bs_batch_t *batch; 1857 1858 batch = spdk_bs_batch_open(_ch, &cpl); 1859 if (!batch) { 1860 cb_fn(cb_arg, -ENOMEM); 1861 return; 1862 } 1863 1864 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 1865 /* Read from the blob */ 1866 spdk_bs_batch_read_dev(batch, payload, lba, lba_count); 1867 } else { 1868 /* Read from the backing block device */ 1869 spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 1870 } 1871 1872 spdk_bs_batch_close(batch); 1873 break; 1874 } 1875 
case SPDK_BLOB_WRITE: 1876 case SPDK_BLOB_WRITE_ZEROES: { 1877 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 1878 /* Write to the blob */ 1879 spdk_bs_batch_t *batch; 1880 1881 if (lba_count == 0) { 1882 cb_fn(cb_arg, 0); 1883 return; 1884 } 1885 1886 batch = spdk_bs_batch_open(_ch, &cpl); 1887 if (!batch) { 1888 cb_fn(cb_arg, -ENOMEM); 1889 return; 1890 } 1891 1892 if (op_type == SPDK_BLOB_WRITE) { 1893 spdk_bs_batch_write_dev(batch, payload, lba, lba_count); 1894 } else { 1895 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 1896 } 1897 1898 spdk_bs_batch_close(batch); 1899 } else { 1900 /* Queue this operation and allocate the cluster */ 1901 spdk_bs_user_op_t *op; 1902 1903 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 1904 if (!op) { 1905 cb_fn(cb_arg, -ENOMEM); 1906 return; 1907 } 1908 1909 _spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op); 1910 } 1911 break; 1912 } 1913 case SPDK_BLOB_UNMAP: { 1914 spdk_bs_batch_t *batch; 1915 1916 batch = spdk_bs_batch_open(_ch, &cpl); 1917 if (!batch) { 1918 cb_fn(cb_arg, -ENOMEM); 1919 return; 1920 } 1921 1922 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 1923 spdk_bs_batch_unmap_dev(batch, lba, lba_count); 1924 } 1925 1926 spdk_bs_batch_close(batch); 1927 break; 1928 } 1929 case SPDK_BLOB_READV: 1930 case SPDK_BLOB_WRITEV: 1931 SPDK_ERRLOG("readv/write not valid\n"); 1932 cb_fn(cb_arg, -EINVAL); 1933 break; 1934 } 1935 } 1936 1937 static void 1938 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 1939 void *payload, uint64_t offset, uint64_t length, 1940 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 1941 { 1942 assert(blob != NULL); 1943 1944 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 1945 cb_fn(cb_arg, -EPERM); 1946 return; 1947 } 1948 1949 if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 1950 cb_fn(cb_arg, -EINVAL); 1951 return; 1952 } 1953 if (length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset)) { 1954 _spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length, 1955 cb_fn, cb_arg, op_type); 1956 } else { 1957 _spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length, 1958 cb_fn, cb_arg, op_type); 1959 } 1960 } 1961 1962 struct rw_iov_ctx { 1963 struct spdk_blob *blob; 1964 struct spdk_io_channel *channel; 1965 spdk_blob_op_complete cb_fn; 1966 void *cb_arg; 1967 bool read; 1968 int iovcnt; 1969 struct iovec *orig_iov; 1970 uint64_t io_unit_offset; 1971 uint64_t io_units_remaining; 1972 uint64_t io_units_done; 1973 struct iovec iov[0]; 1974 }; 1975 1976 static void 1977 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1978 { 1979 assert(cb_arg == NULL); 1980 spdk_bs_sequence_finish(seq, bserrno); 1981 } 1982 1983 static void 1984 _spdk_rw_iov_split_next(void *cb_arg, int bserrno) 1985 { 1986 struct rw_iov_ctx *ctx = cb_arg; 1987 struct spdk_blob *blob = ctx->blob; 1988 struct iovec *iov, *orig_iov; 1989 int iovcnt; 1990 size_t orig_iovoff; 1991 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 1992 uint64_t byte_count; 1993 1994 if (bserrno != 0 || ctx->io_units_remaining == 0) { 1995 ctx->cb_fn(ctx->cb_arg, bserrno); 1996 free(ctx); 1997 return; 1998 } 1999 2000 io_unit_offset = ctx->io_unit_offset; 2001 io_units_to_boundary = _spdk_bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 2002 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 2003 /* 2004 * Get index 
and offset into the original iov array for our current position in the I/O sequence. 2005 * byte_count will keep track of how many bytes remain until orig_iov and orig_iovoff 2006 * point to the current position in the I/O sequence. 2007 */ 2008 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 2009 orig_iov = &ctx->orig_iov[0]; 2010 orig_iovoff = 0; 2011 while (byte_count > 0) { 2012 if (byte_count >= orig_iov->iov_len) { 2013 byte_count -= orig_iov->iov_len; 2014 orig_iov++; 2015 } else { 2016 orig_iovoff = byte_count; 2017 byte_count = 0; 2018 } 2019 } 2020 2021 /* 2022 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 2023 * bytes of this next I/O remain to be accounted for in the new iov array. 2024 */ 2025 byte_count = io_units_count * blob->bs->io_unit_size; 2026 iov = &ctx->iov[0]; 2027 iovcnt = 0; 2028 while (byte_count > 0) { 2029 assert(iovcnt < ctx->iovcnt); 2030 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 2031 iov->iov_base = orig_iov->iov_base + orig_iovoff; 2032 byte_count -= iov->iov_len; 2033 orig_iovoff = 0; 2034 orig_iov++; 2035 iov++; 2036 iovcnt++; 2037 } 2038 2039 ctx->io_unit_offset += io_units_count; 2040 ctx->io_units_remaining -= io_units_count; 2041 ctx->io_units_done += io_units_count; 2042 iov = &ctx->iov[0]; 2043 2044 if (ctx->read) { 2045 spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2046 io_units_count, _spdk_rw_iov_split_next, ctx); 2047 } else { 2048 spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2049 io_units_count, _spdk_rw_iov_split_next, ctx); 2050 } 2051 } 2052 2053 static void 2054 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2055 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 2056 spdk_blob_op_complete cb_fn, void *cb_arg, bool read) 2057 { 2058 struct spdk_bs_cpl cpl; 2059 2060 assert(blob != NULL); 2061 2062 if (!read && blob->data_ro) { 2063 cb_fn(cb_arg, -EPERM); 2064 return; 2065 } 2066 2067 if (length == 0) { 2068 cb_fn(cb_arg, 0); 2069 return; 2070 } 2071 2072 if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2073 cb_fn(cb_arg, -EINVAL); 2074 return; 2075 } 2076 2077 /* 2078 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 2079 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 2080 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 2081 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 2082 * to allocate a separate iov array and split the I/O such that none of the resulting 2083 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 2084 * but since this case happens very infrequently, any performance impact will be negligible. 2085 * 2086 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 2087 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 2088 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 2089 * when the batch was completed, to allow for freeing the memory for the iov arrays.
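*
* As a sketch with assumed sizes: with 256 io_units per cluster, a writev of
* 300 io_units starting at io_unit offset 200 is split into a 56 io_unit I/O
* (up to the cluster boundary) followed by a 244 io_unit I/O, and for each
* sub-I/O _spdk_rw_iov_split_next() rebuilds a small iov array that describes
* only the bytes belonging to that sub-I/O.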
2090 */ 2091 if (spdk_likely(length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset))) { 2092 uint32_t lba_count; 2093 uint64_t lba; 2094 2095 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2096 2097 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2098 cpl.u.blob_basic.cb_fn = cb_fn; 2099 cpl.u.blob_basic.cb_arg = cb_arg; 2100 if (blob->frozen_refcnt) { 2101 /* This blob I/O is frozen */ 2102 spdk_bs_user_op_t *op; 2103 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 2104 2105 op = spdk_bs_user_op_alloc(_channel, &cpl, read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, length); 2106 if (!op) { 2107 cb_fn(cb_arg, -ENOMEM); 2108 return; 2109 } 2110 2111 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2112 2113 return; 2114 } 2115 2116 if (read) { 2117 spdk_bs_sequence_t *seq; 2118 2119 seq = spdk_bs_sequence_start(_channel, &cpl); 2120 if (!seq) { 2121 cb_fn(cb_arg, -ENOMEM); 2122 return; 2123 } 2124 2125 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2126 spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2127 } else { 2128 spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 2129 _spdk_rw_iov_done, NULL); 2130 } 2131 } else { 2132 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2133 spdk_bs_sequence_t *seq; 2134 2135 seq = spdk_bs_sequence_start(_channel, &cpl); 2136 if (!seq) { 2137 cb_fn(cb_arg, -ENOMEM); 2138 return; 2139 } 2140 2141 spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2142 } else { 2143 /* Queue this operation and allocate the cluster */ 2144 spdk_bs_user_op_t *op; 2145 2146 op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 2147 length); 2148 if (!op) { 2149 cb_fn(cb_arg, -ENOMEM); 2150 return; 2151 } 2152 2153 _spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op); 2154 } 2155 } 2156 } else { 2157 struct rw_iov_ctx *ctx; 2158 2159 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 2160 if (ctx == NULL) { 2161 cb_fn(cb_arg, -ENOMEM); 2162 return; 2163 } 2164 2165 ctx->blob = blob; 2166 ctx->channel = _channel; 2167 ctx->cb_fn = cb_fn; 2168 ctx->cb_arg = cb_arg; 2169 ctx->read = read; 2170 ctx->orig_iov = iov; 2171 ctx->iovcnt = iovcnt; 2172 ctx->io_unit_offset = offset; 2173 ctx->io_units_remaining = length; 2174 ctx->io_units_done = 0; 2175 2176 _spdk_rw_iov_split_next(ctx, 0); 2177 } 2178 } 2179 2180 static struct spdk_blob * 2181 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 2182 { 2183 struct spdk_blob *blob; 2184 2185 TAILQ_FOREACH(blob, &bs->blobs, link) { 2186 if (blob->id == blobid) { 2187 return blob; 2188 } 2189 } 2190 2191 return NULL; 2192 } 2193 2194 static int 2195 _spdk_bs_channel_create(void *io_device, void *ctx_buf) 2196 { 2197 struct spdk_blob_store *bs = io_device; 2198 struct spdk_bs_channel *channel = ctx_buf; 2199 struct spdk_bs_dev *dev; 2200 uint32_t max_ops = bs->max_channel_ops; 2201 uint32_t i; 2202 2203 dev = bs->dev; 2204 2205 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 2206 if (!channel->req_mem) { 2207 return -1; 2208 } 2209 2210 TAILQ_INIT(&channel->reqs); 2211 2212 for (i = 0; i < max_ops; i++) { 2213 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 2214 } 2215 2216 channel->bs = bs; 2217 channel->dev = dev; 2218 channel->dev_channel = dev->create_channel(dev); 2219 2220 if (!channel->dev_channel) { 2221 SPDK_ERRLOG("Failed to create device
channel.\n"); 2222 free(channel->req_mem); 2223 return -1; 2224 } 2225 2226 TAILQ_INIT(&channel->need_cluster_alloc); 2227 TAILQ_INIT(&channel->queued_io); 2228 2229 return 0; 2230 } 2231 2232 static void 2233 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf) 2234 { 2235 struct spdk_bs_channel *channel = ctx_buf; 2236 spdk_bs_user_op_t *op; 2237 2238 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 2239 op = TAILQ_FIRST(&channel->need_cluster_alloc); 2240 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 2241 spdk_bs_user_op_abort(op); 2242 } 2243 2244 while (!TAILQ_EMPTY(&channel->queued_io)) { 2245 op = TAILQ_FIRST(&channel->queued_io); 2246 TAILQ_REMOVE(&channel->queued_io, op, link); 2247 spdk_bs_user_op_abort(op); 2248 } 2249 2250 free(channel->req_mem); 2251 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 2252 } 2253 2254 static void 2255 _spdk_bs_dev_destroy(void *io_device) 2256 { 2257 struct spdk_blob_store *bs = io_device; 2258 struct spdk_blob *blob, *blob_tmp; 2259 2260 bs->dev->destroy(bs->dev); 2261 2262 TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) { 2263 TAILQ_REMOVE(&bs->blobs, blob, link); 2264 _spdk_blob_free(blob); 2265 } 2266 2267 pthread_mutex_destroy(&bs->used_clusters_mutex); 2268 2269 spdk_bit_array_free(&bs->used_blobids); 2270 spdk_bit_array_free(&bs->used_md_pages); 2271 spdk_bit_array_free(&bs->used_clusters); 2272 /* 2273 * If this function is called for any reason except a successful unload, 2274 * the unload_cpl type will be NONE and this will be a nop. 2275 */ 2276 spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err); 2277 2278 free(bs); 2279 } 2280 2281 static int 2282 _spdk_bs_blob_list_add(struct spdk_blob *blob) 2283 { 2284 spdk_blob_id snapshot_id; 2285 struct spdk_blob_list *snapshot_entry = NULL; 2286 struct spdk_blob_list *clone_entry = NULL; 2287 2288 assert(blob != NULL); 2289 2290 snapshot_id = blob->parent_id; 2291 if (snapshot_id == SPDK_BLOBID_INVALID) { 2292 return 0; 2293 } 2294 2295 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 2296 if (snapshot_entry->id == snapshot_id) { 2297 break; 2298 } 2299 } 2300 2301 if (snapshot_entry == NULL) { 2302 /* Snapshot not found */ 2303 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 2304 if (snapshot_entry == NULL) { 2305 return -ENOMEM; 2306 } 2307 snapshot_entry->id = snapshot_id; 2308 TAILQ_INIT(&snapshot_entry->clones); 2309 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 2310 } else { 2311 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2312 if (clone_entry->id == blob->id) { 2313 break; 2314 } 2315 } 2316 } 2317 2318 if (clone_entry == NULL) { 2319 /* Clone not found */ 2320 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 2321 if (clone_entry == NULL) { 2322 return -ENOMEM; 2323 } 2324 clone_entry->id = blob->id; 2325 TAILQ_INIT(&clone_entry->clones); 2326 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 2327 snapshot_entry->clone_count++; 2328 } 2329 2330 return 0; 2331 } 2332 2333 static int 2334 _spdk_bs_blob_list_remove(struct spdk_blob *blob) 2335 { 2336 struct spdk_blob_list *snapshot_entry = NULL; 2337 struct spdk_blob_list *clone_entry = NULL; 2338 spdk_blob_id snapshot_id; 2339 2340 assert(blob != NULL); 2341 2342 snapshot_id = blob->parent_id; 2343 if (snapshot_id == SPDK_BLOBID_INVALID) { 2344 return 0; 2345 } 2346 2347 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 2348 if (snapshot_entry->id == snapshot_id) { 2349 break; 2350 } 2351 } 2352 2353 
assert(snapshot_entry != NULL); 2354 2355 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2356 if (clone_entry->id == blob->id) { 2357 break; 2358 } 2359 } 2360 2361 assert(clone_entry != NULL); 2362 2363 blob->parent_id = SPDK_BLOBID_INVALID; 2364 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2365 free(clone_entry); 2366 2367 snapshot_entry->clone_count--; 2368 2369 return 0; 2370 } 2371 2372 static int 2373 _spdk_bs_blob_list_free(struct spdk_blob_store *bs) 2374 { 2375 struct spdk_blob_list *snapshot_entry; 2376 struct spdk_blob_list *snapshot_entry_tmp; 2377 struct spdk_blob_list *clone_entry; 2378 struct spdk_blob_list *clone_entry_tmp; 2379 2380 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 2381 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 2382 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2383 free(clone_entry); 2384 } 2385 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 2386 free(snapshot_entry); 2387 } 2388 2389 return 0; 2390 } 2391 2392 static void 2393 _spdk_bs_free(struct spdk_blob_store *bs) 2394 { 2395 _spdk_bs_blob_list_free(bs); 2396 2397 spdk_bs_unregister_md_thread(bs); 2398 spdk_io_device_unregister(bs, _spdk_bs_dev_destroy); 2399 } 2400 2401 void 2402 spdk_bs_opts_init(struct spdk_bs_opts *opts) 2403 { 2404 opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ; 2405 opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES; 2406 opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS; 2407 opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS; 2408 memset(&opts->bstype, 0, sizeof(opts->bstype)); 2409 opts->iter_cb_fn = NULL; 2410 opts->iter_cb_arg = NULL; 2411 } 2412 2413 static int 2414 _spdk_bs_opts_verify(struct spdk_bs_opts *opts) 2415 { 2416 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 2417 opts->max_channel_ops == 0) { 2418 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 2419 return -1; 2420 } 2421 2422 return 0; 2423 } 2424 2425 static int 2426 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs) 2427 { 2428 struct spdk_blob_store *bs; 2429 uint64_t dev_size; 2430 int rc; 2431 2432 dev_size = dev->blocklen * dev->blockcnt; 2433 if (dev_size < opts->cluster_sz) { 2434 /* Device size cannot be smaller than cluster size of blobstore */ 2435 SPDK_INFOLOG(SPDK_LOG_BLOB, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 2436 dev_size, opts->cluster_sz); 2437 return -ENOSPC; 2438 } 2439 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 2440 /* Cluster size cannot be smaller than page size */ 2441 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 2442 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 2443 return -EINVAL; 2444 } 2445 bs = calloc(1, sizeof(struct spdk_blob_store)); 2446 if (!bs) { 2447 return -ENOMEM; 2448 } 2449 2450 TAILQ_INIT(&bs->blobs); 2451 TAILQ_INIT(&bs->snapshots); 2452 bs->dev = dev; 2453 bs->md_thread = spdk_get_thread(); 2454 assert(bs->md_thread != NULL); 2455 2456 /* 2457 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an 2458 * even multiple of the cluster size. 
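* For example (hypothetical device): blocklen = 512 and blockcnt = 2000001 with
* a 1 MiB cluster gives 2048 blocks per cluster, so the integer division below
* yields 976 whole clusters and the trailing partial cluster is ignored.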
2459 */ 2460 bs->cluster_sz = opts->cluster_sz; 2461 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 2462 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 2463 bs->num_free_clusters = bs->total_clusters; 2464 bs->used_clusters = spdk_bit_array_create(bs->total_clusters); 2465 bs->io_unit_size = dev->blocklen; 2466 if (bs->used_clusters == NULL) { 2467 free(bs); 2468 return -ENOMEM; 2469 } 2470 2471 bs->max_channel_ops = opts->max_channel_ops; 2472 bs->super_blob = SPDK_BLOBID_INVALID; 2473 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 2474 2475 /* The metadata is assumed to be at least 1 page */ 2476 bs->used_md_pages = spdk_bit_array_create(1); 2477 bs->used_blobids = spdk_bit_array_create(0); 2478 2479 pthread_mutex_init(&bs->used_clusters_mutex, NULL); 2480 2481 spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy, 2482 sizeof(struct spdk_bs_channel), "blobstore"); 2483 rc = spdk_bs_register_md_thread(bs); 2484 if (rc == -1) { 2485 spdk_io_device_unregister(bs, NULL); 2486 pthread_mutex_destroy(&bs->used_clusters_mutex); 2487 spdk_bit_array_free(&bs->used_blobids); 2488 spdk_bit_array_free(&bs->used_md_pages); 2489 spdk_bit_array_free(&bs->used_clusters); 2490 free(bs); 2491 /* FIXME: -ENOMEM is not necessarily the real cause; spdk_bs_register_md_thread() does not report a specific error code. */ 2492 return -ENOMEM; 2493 } 2494 2495 *_bs = bs; 2496 return 0; 2497 } 2498 2499 /* START spdk_bs_load, spdk_bs_load_ctx is used for both load and unload. */ 2500 2501 struct spdk_bs_load_ctx { 2502 struct spdk_blob_store *bs; 2503 struct spdk_bs_super_block *super; 2504 2505 struct spdk_bs_md_mask *mask; 2506 bool in_page_chain; 2507 uint32_t page_index; 2508 uint32_t cur_page; 2509 struct spdk_blob_md_page *page; 2510 2511 spdk_bs_sequence_t *seq; 2512 spdk_blob_op_with_handle_complete iter_cb_fn; 2513 void *iter_cb_arg; 2514 }; 2515 2516 static void 2517 _spdk_bs_load_ctx_fail(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 2518 { 2519 assert(bserrno != 0); 2520 2521 spdk_dma_free(ctx->super); 2522 spdk_bs_sequence_finish(seq, bserrno); 2523 _spdk_bs_free(ctx->bs); 2524 free(ctx); 2525 } 2526 2527 static void 2528 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask) 2529 { 2530 uint32_t i = 0; 2531 2532 while (true) { 2533 i = spdk_bit_array_find_first_set(array, i); 2534 if (i >= mask->length) { 2535 break; 2536 } 2537 mask->mask[i / 8] |= 1U << (i % 8); 2538 i++; 2539 } 2540 } 2541 2542 static int 2543 _spdk_bs_load_mask(struct spdk_bit_array **array_ptr, struct spdk_bs_md_mask *mask) 2544 { 2545 struct spdk_bit_array *array; 2546 uint32_t i; 2547 2548 if (spdk_bit_array_resize(array_ptr, mask->length) < 0) { 2549 return -ENOMEM; 2550 } 2551 2552 array = *array_ptr; 2553 for (i = 0; i < mask->length; i++) { 2554 if (mask->mask[i / 8] & (1U << (i % 8))) { 2555 spdk_bit_array_set(array, i); 2556 } 2557 } 2558 2559 return 0; 2560 } 2561 2562 static void 2563 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2564 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2565 { 2566 /* Update the values in the super block */ 2567 super->super_blob = bs->super_blob; 2568 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 2569 super->crc = _spdk_blob_md_page_calc_crc(super); 2570 spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0), 2571 _spdk_bs_byte_to_lba(bs, sizeof(*super)), 2572 cb_fn, cb_arg); 2573 } 2574 2575 static void 2576
_spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2577 { 2578 struct spdk_bs_load_ctx *ctx = arg; 2579 uint64_t mask_size, lba, lba_count; 2580 2581 /* Write out the used clusters mask */ 2582 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 2583 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2584 if (!ctx->mask) { 2585 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2586 return; 2587 } 2588 2589 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 2590 ctx->mask->length = ctx->bs->total_clusters; 2591 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters)); 2592 2593 _spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask); 2594 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 2595 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 2596 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2597 } 2598 2599 static void 2600 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2601 { 2602 struct spdk_bs_load_ctx *ctx = arg; 2603 uint64_t mask_size, lba, lba_count; 2604 2605 if (seq->bserrno) { 2606 _spdk_bs_load_ctx_fail(seq, ctx, seq->bserrno); 2607 return; 2608 } 2609 2610 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 2611 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2612 if (!ctx->mask) { 2613 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2614 return; 2615 } 2616 2617 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 2618 ctx->mask->length = ctx->super->md_len; 2619 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 2620 2621 _spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask); 2622 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 2623 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 2624 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2625 } 2626 2627 static void 2628 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 2629 { 2630 struct spdk_bs_load_ctx *ctx = arg; 2631 uint64_t mask_size, lba, lba_count; 2632 2633 if (ctx->super->used_blobid_mask_len == 0) { 2634 /* 2635 * This is a pre-v3 on-disk format where the blobid mask does not get 2636 * written to disk. 
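* In that case the used_blobids bit array is reconstructed from the metadata
* pages the next time the blobstore is loaded (see _spdk_bs_recover()).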
2637 */ 2638 cb_fn(seq, arg, 0); 2639 return; 2640 } 2641 2642 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 2643 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2644 if (!ctx->mask) { 2645 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2646 return; 2647 } 2648 2649 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 2650 ctx->mask->length = ctx->super->md_len; 2651 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 2652 2653 _spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask); 2654 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 2655 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 2656 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 2657 } 2658 2659 static void 2660 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 2661 { 2662 struct spdk_bs_load_ctx *ctx = arg; 2663 2664 if (bserrno == 0) { 2665 if (ctx->iter_cb_fn) { 2666 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 2667 } 2668 _spdk_bs_blob_list_add(blob); 2669 spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx); 2670 return; 2671 } 2672 2673 if (bserrno == -ENOENT) { 2674 bserrno = 0; 2675 } else { 2676 /* 2677 * This case needs to be looked at further. Same problem 2678 * exists with applications that rely on explicit blob 2679 * iteration. We should just skip the blob that failed 2680 * to load and continue on to the next one. 2681 */ 2682 SPDK_ERRLOG("Error in iterating blobs\n"); 2683 } 2684 2685 ctx->iter_cb_fn = NULL; 2686 2687 spdk_dma_free(ctx->super); 2688 spdk_dma_free(ctx->mask); 2689 spdk_bs_sequence_finish(ctx->seq, bserrno); 2690 free(ctx); 2691 } 2692 2693 static void 2694 _spdk_bs_load_complete(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno) 2695 { 2696 ctx->seq = seq; 2697 spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx); 2698 } 2699 2700 static void 2701 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2702 { 2703 struct spdk_bs_load_ctx *ctx = cb_arg; 2704 int rc; 2705 2706 /* The type must be correct */ 2707 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 2708 2709 /* The length of the mask (in bits) must not be greater than 2710 * the length of the buffer (converted to bits) */ 2711 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 2712 2713 /* The length of the mask must be exactly equal to the size 2714 * (in pages) of the metadata region */ 2715 assert(ctx->mask->length == ctx->super->md_len); 2716 2717 rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask); 2718 if (rc < 0) { 2719 spdk_dma_free(ctx->mask); 2720 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2721 return; 2722 } 2723 2724 _spdk_bs_load_complete(seq, ctx, bserrno); 2725 } 2726 2727 static void 2728 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2729 { 2730 struct spdk_bs_load_ctx *ctx = cb_arg; 2731 uint64_t lba, lba_count, mask_size; 2732 int rc; 2733 2734 /* The type must be correct */ 2735 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 2736 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 2737 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 2738 struct spdk_blob_md_page) * 8)); 2739 /* The length of the mask must be exactly equal to the total number of clusters */ 2740 assert(ctx->mask->length == ctx->bs->total_clusters); 2741 2742 rc = 
_spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask); 2743 if (rc < 0) { 2744 spdk_dma_free(ctx->mask); 2745 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2746 return; 2747 } 2748 2749 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters); 2750 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 2751 2752 spdk_dma_free(ctx->mask); 2753 2754 /* Read the used blobids mask */ 2755 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 2756 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2757 if (!ctx->mask) { 2758 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2759 return; 2760 } 2761 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 2762 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 2763 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2764 _spdk_bs_load_used_blobids_cpl, ctx); 2765 } 2766 2767 static void 2768 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2769 { 2770 struct spdk_bs_load_ctx *ctx = cb_arg; 2771 uint64_t lba, lba_count, mask_size; 2772 int rc; 2773 2774 /* The type must be correct */ 2775 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 2776 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 2777 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 2778 8)); 2779 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 2780 assert(ctx->mask->length == ctx->super->md_len); 2781 2782 rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask); 2783 if (rc < 0) { 2784 spdk_dma_free(ctx->mask); 2785 _spdk_bs_load_ctx_fail(seq, ctx, rc); 2786 return; 2787 } 2788 2789 spdk_dma_free(ctx->mask); 2790 2791 /* Read the used clusters mask */ 2792 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 2793 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2794 if (!ctx->mask) { 2795 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2796 return; 2797 } 2798 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 2799 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 2800 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2801 _spdk_bs_load_used_clusters_cpl, ctx); 2802 } 2803 2804 static void 2805 _spdk_bs_load_read_used_pages(spdk_bs_sequence_t *seq, void *cb_arg) 2806 { 2807 struct spdk_bs_load_ctx *ctx = cb_arg; 2808 uint64_t lba, lba_count, mask_size; 2809 2810 /* Read the used pages mask */ 2811 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 2812 ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL); 2813 if (!ctx->mask) { 2814 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 2815 return; 2816 } 2817 2818 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 2819 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 2820 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 2821 _spdk_bs_load_used_pages_cpl, ctx); 2822 } 2823 2824 static int 2825 _spdk_bs_load_replay_md_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_store *bs) 2826 { 2827 struct spdk_blob_md_descriptor *desc; 2828 size_t cur_desc = 0; 2829 2830 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 2831 while (cur_desc < sizeof(page->descriptors)) { 2832 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 2833 if (desc->length == 0) { 2834 /* If padding and length are 0, this terminates the page */ 2835 
break; 2836 } 2837 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 2838 struct spdk_blob_md_descriptor_extent *desc_extent; 2839 unsigned int i, j; 2840 unsigned int cluster_count = 0; 2841 uint32_t cluster_idx; 2842 2843 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 2844 2845 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 2846 for (j = 0; j < desc_extent->extents[i].length; j++) { 2847 cluster_idx = desc_extent->extents[i].cluster_idx; 2848 /* 2849 * cluster_idx = 0 means an unallocated cluster - don't mark that 2850 * in the used cluster map. 2851 */ 2852 if (cluster_idx != 0) { 2853 spdk_bit_array_set(bs->used_clusters, cluster_idx + j); 2854 if (bs->num_free_clusters == 0) { 2855 return -ENOSPC; 2856 } 2857 bs->num_free_clusters--; 2858 } 2859 cluster_count++; 2860 } 2861 } 2862 if (cluster_count == 0) { 2863 return -EINVAL; 2864 } 2865 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 2866 /* Skip this item */ 2867 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 2868 /* Skip this item */ 2869 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 2870 /* Skip this item */ 2871 } else { 2872 /* Error */ 2873 return -EINVAL; 2874 } 2875 /* Advance to the next descriptor */ 2876 cur_desc += sizeof(*desc) + desc->length; 2877 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 2878 break; 2879 } 2880 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 2881 } 2882 return 0; 2883 } 2884 2885 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 2886 { 2887 uint32_t crc; 2888 2889 crc = _spdk_blob_md_page_calc_crc(ctx->page); 2890 if (crc != ctx->page->crc) { 2891 return false; 2892 } 2893 2894 if (_spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) { 2895 return false; 2896 } 2897 return true; 2898 } 2899 2900 static void 2901 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 2902 2903 static void 2904 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2905 { 2906 struct spdk_bs_load_ctx *ctx = cb_arg; 2907 2908 _spdk_bs_load_complete(seq, ctx, bserrno); 2909 } 2910 2911 static void 2912 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2913 { 2914 struct spdk_bs_load_ctx *ctx = cb_arg; 2915 2916 spdk_dma_free(ctx->mask); 2917 ctx->mask = NULL; 2918 2919 _spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_load_write_used_clusters_cpl); 2920 } 2921 2922 static void 2923 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2924 { 2925 struct spdk_bs_load_ctx *ctx = cb_arg; 2926 2927 spdk_dma_free(ctx->mask); 2928 ctx->mask = NULL; 2929 2930 _spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_load_write_used_blobids_cpl); 2931 } 2932 2933 static void 2934 _spdk_bs_load_write_used_md(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2935 { 2936 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_load_write_used_pages_cpl); 2937 } 2938 2939 static void 2940 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2941 { 2942 struct spdk_bs_load_ctx *ctx = cb_arg; 2943 uint64_t num_md_clusters; 2944 uint64_t i; 2945 uint32_t page_num; 2946 2947 if (bserrno != 0) { 2948 _spdk_bs_load_ctx_fail(seq, ctx, bserrno); 2949 return; 2950 } 2951 2952 page_num = ctx->cur_page; 2953 if (_spdk_bs_load_cur_md_page_valid(ctx) == true) { 2954 if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) { 2955 
spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 2956 if (ctx->page->sequence_num == 0) { 2957 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 2958 } 2959 if (_spdk_bs_load_replay_md_parse_page(ctx->page, ctx->bs)) { 2960 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 2961 return; 2962 } 2963 if (ctx->page->next != SPDK_INVALID_MD_PAGE) { 2964 ctx->in_page_chain = true; 2965 ctx->cur_page = ctx->page->next; 2966 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 2967 return; 2968 } 2969 } 2970 } 2971 2972 ctx->in_page_chain = false; 2973 2974 do { 2975 ctx->page_index++; 2976 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 2977 2978 if (ctx->page_index < ctx->super->md_len) { 2979 ctx->cur_page = ctx->page_index; 2980 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 2981 } else { 2982 /* Claim all of the clusters used by the metadata */ 2983 num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster); 2984 for (i = 0; i < num_md_clusters; i++) { 2985 _spdk_bs_claim_cluster(ctx->bs, i); 2986 } 2987 spdk_dma_free(ctx->page); 2988 _spdk_bs_load_write_used_md(seq, ctx, bserrno); 2989 } 2990 } 2991 2992 static void 2993 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 2994 { 2995 struct spdk_bs_load_ctx *ctx = cb_arg; 2996 uint64_t lba; 2997 2998 assert(ctx->cur_page < ctx->super->md_len); 2999 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 3000 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 3001 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 3002 _spdk_bs_load_replay_md_cpl, ctx); 3003 } 3004 3005 static void 3006 _spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg) 3007 { 3008 struct spdk_bs_load_ctx *ctx = cb_arg; 3009 3010 ctx->page_index = 0; 3011 ctx->cur_page = 0; 3012 ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE, 3013 SPDK_BS_PAGE_SIZE, 3014 NULL); 3015 if (!ctx->page) { 3016 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3017 return; 3018 } 3019 _spdk_bs_load_replay_cur_md_page(seq, cb_arg); 3020 } 3021 3022 static void 3023 _spdk_bs_recover(spdk_bs_sequence_t *seq, void *cb_arg) 3024 { 3025 struct spdk_bs_load_ctx *ctx = cb_arg; 3026 int rc; 3027 3028 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 3029 if (rc < 0) { 3030 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3031 return; 3032 } 3033 3034 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 3035 if (rc < 0) { 3036 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3037 return; 3038 } 3039 3040 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 3041 if (rc < 0) { 3042 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3043 return; 3044 } 3045 3046 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 3047 _spdk_bs_load_replay_md(seq, cb_arg); 3048 } 3049 3050 static void 3051 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3052 { 3053 struct spdk_bs_load_ctx *ctx = cb_arg; 3054 uint32_t crc; 3055 int rc; 3056 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH]; 3057 3058 if (ctx->super->version > SPDK_BS_VERSION || 3059 ctx->super->version < SPDK_BS_INITIAL_VERSION) { 3060 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3061 return; 3062 } 3063 3064 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3065 sizeof(ctx->super->signature)) != 0) { 3066 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3067 return; 3068 } 3069 3070 crc = _spdk_blob_md_page_calc_crc(ctx->super); 3071 if (crc != ctx->super->crc) { 3072 
_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3073 return; 3074 } 3075 3076 if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 3077 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n"); 3078 } else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 3079 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n"); 3080 } else { 3081 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n"); 3082 SPDK_LOGDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 3083 SPDK_LOGDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 3084 _spdk_bs_load_ctx_fail(seq, ctx, -ENXIO); 3085 return; 3086 } 3087 3088 if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) { 3089 SPDK_NOTICELOG("Size mismatch, dev size: %lu, blobstore size: %lu\n", 3090 ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size); 3091 _spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ); 3092 return; 3093 } 3094 3095 if (ctx->super->size == 0) { 3096 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 3097 } 3098 3099 if (ctx->super->io_unit_size == 0) { 3100 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 3101 } 3102 3103 /* Parse the super block */ 3104 ctx->bs->clean = 1; 3105 ctx->bs->cluster_sz = ctx->super->cluster_size; 3106 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 3107 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3108 ctx->bs->io_unit_size = ctx->super->io_unit_size; 3109 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 3110 if (rc < 0) { 3111 _spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM); 3112 return; 3113 } 3114 ctx->bs->md_start = ctx->super->md_start; 3115 ctx->bs->md_len = ctx->super->md_len; 3116 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 3117 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 3118 ctx->bs->super_blob = ctx->super->super_blob; 3119 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 3120 3121 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) { 3122 _spdk_bs_recover(seq, ctx); 3123 } else { 3124 _spdk_bs_load_read_used_pages(seq, ctx); 3125 } 3126 } 3127 3128 void 3129 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 3130 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 3131 { 3132 struct spdk_blob_store *bs; 3133 struct spdk_bs_cpl cpl; 3134 spdk_bs_sequence_t *seq; 3135 struct spdk_bs_load_ctx *ctx; 3136 struct spdk_bs_opts opts = {}; 3137 int err; 3138 3139 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev); 3140 3141 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 3142 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen); 3143 dev->destroy(dev); 3144 cb_fn(cb_arg, NULL, -EINVAL); 3145 return; 3146 } 3147 3148 if (o) { 3149 opts = *o; 3150 } else { 3151 spdk_bs_opts_init(&opts); 3152 } 3153 3154 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 3155 dev->destroy(dev); 3156 cb_fn(cb_arg, NULL, -EINVAL); 3157 return; 3158 } 3159 3160 err = _spdk_bs_alloc(dev, &opts, &bs); 3161 if (err) { 3162 dev->destroy(dev); 3163 cb_fn(cb_arg, NULL, err); 3164 return; 3165 } 3166 3167 ctx = calloc(1, sizeof(*ctx)); 3168 if (!ctx) { 3169 _spdk_bs_free(bs); 3170 cb_fn(cb_arg, NULL, -ENOMEM); 3171 return; 3172 } 3173 3174 ctx->bs = bs; 3175 ctx->iter_cb_fn = opts.iter_cb_fn; 3176
ctx->iter_cb_arg = opts.iter_cb_arg; 3177 3178 /* Allocate memory for the super block */ 3179 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3180 if (!ctx->super) { 3181 free(ctx); 3182 _spdk_bs_free(bs); 3183 cb_fn(cb_arg, NULL, -ENOMEM); 3184 return; 3185 } 3186 3187 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 3188 cpl.u.bs_handle.cb_fn = cb_fn; 3189 cpl.u.bs_handle.cb_arg = cb_arg; 3190 cpl.u.bs_handle.bs = bs; 3191 3192 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3193 if (!seq) { 3194 spdk_dma_free(ctx->super); 3195 free(ctx); 3196 _spdk_bs_free(bs); 3197 cb_fn(cb_arg, NULL, -ENOMEM); 3198 return; 3199 } 3200 3201 /* Read the super block */ 3202 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3203 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3204 _spdk_bs_load_super_cpl, ctx); 3205 } 3206 3207 /* END spdk_bs_load */ 3208 3209 /* START spdk_bs_dump */ 3210 3211 struct spdk_bs_dump_ctx { 3212 struct spdk_blob_store *bs; 3213 struct spdk_bs_super_block *super; 3214 uint32_t cur_page; 3215 struct spdk_blob_md_page *page; 3216 spdk_bs_sequence_t *seq; 3217 FILE *fp; 3218 spdk_bs_dump_print_xattr print_xattr_fn; 3219 char xattr_name[4096]; 3220 }; 3221 3222 static void 3223 _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno) 3224 { 3225 spdk_dma_free(ctx->super); 3226 3227 /* 3228 * We need to defer calling spdk_bs_call_cpl() until after 3229 * dev destruction, so tuck these away for later use. 3230 */ 3231 ctx->bs->unload_err = bserrno; 3232 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3233 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3234 3235 spdk_bs_sequence_finish(seq, 0); 3236 _spdk_bs_free(ctx->bs); 3237 free(ctx); 3238 } 3239 3240 static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 3241 3242 static void 3243 _spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx) 3244 { 3245 uint32_t page_idx = ctx->cur_page; 3246 struct spdk_blob_md_page *page = ctx->page; 3247 struct spdk_blob_md_descriptor *desc; 3248 size_t cur_desc = 0; 3249 uint32_t crc; 3250 3251 fprintf(ctx->fp, "=========\n"); 3252 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 3253 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 3254 3255 crc = _spdk_blob_md_page_calc_crc(page); 3256 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? 
"OK" : "Mismatch"); 3257 3258 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3259 while (cur_desc < sizeof(page->descriptors)) { 3260 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 3261 if (desc->length == 0) { 3262 /* If padding and length are 0, this terminates the page */ 3263 break; 3264 } 3265 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 3266 struct spdk_blob_md_descriptor_extent *desc_extent; 3267 unsigned int i; 3268 3269 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 3270 3271 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 3272 if (desc_extent->extents[i].cluster_idx != 0) { 3273 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 3274 desc_extent->extents[i].cluster_idx); 3275 } else { 3276 fprintf(ctx->fp, "Unallocated Extent - "); 3277 } 3278 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent->extents[i].length); 3279 fprintf(ctx->fp, "\n"); 3280 } 3281 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 3282 struct spdk_blob_md_descriptor_xattr *desc_xattr; 3283 uint32_t i; 3284 3285 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 3286 3287 if (desc_xattr->length != 3288 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 3289 desc_xattr->name_length + desc_xattr->value_length) { 3290 } 3291 3292 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 3293 ctx->xattr_name[desc_xattr->name_length] = '\0'; 3294 fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name); 3295 fprintf(ctx->fp, " value = \""); 3296 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 3297 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 3298 desc_xattr->value_length); 3299 fprintf(ctx->fp, "\"\n"); 3300 for (i = 0; i < desc_xattr->value_length; i++) { 3301 if (i % 16 == 0) { 3302 fprintf(ctx->fp, " "); 3303 } 3304 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 3305 if ((i + 1) % 16 == 0) { 3306 fprintf(ctx->fp, "\n"); 3307 } 3308 } 3309 if (i % 16 != 0) { 3310 fprintf(ctx->fp, "\n"); 3311 } 3312 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 3313 /* TODO */ 3314 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 3315 /* TODO */ 3316 } else { 3317 /* Error */ 3318 } 3319 /* Advance to the next descriptor */ 3320 cur_desc += sizeof(*desc) + desc->length; 3321 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 3322 break; 3323 } 3324 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 3325 } 3326 } 3327 3328 static void 3329 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3330 { 3331 struct spdk_bs_dump_ctx *ctx = cb_arg; 3332 3333 if (bserrno != 0) { 3334 _spdk_bs_dump_finish(seq, ctx, bserrno); 3335 return; 3336 } 3337 3338 if (ctx->page->id != 0) { 3339 _spdk_bs_dump_print_md_page(ctx); 3340 } 3341 3342 ctx->cur_page++; 3343 3344 if (ctx->cur_page < ctx->super->md_len) { 3345 _spdk_bs_dump_read_md_page(seq, cb_arg); 3346 } else { 3347 spdk_dma_free(ctx->page); 3348 _spdk_bs_dump_finish(seq, ctx, 0); 3349 } 3350 } 3351 3352 static void 3353 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 3354 { 3355 struct spdk_bs_dump_ctx *ctx = cb_arg; 3356 uint64_t lba; 3357 3358 assert(ctx->cur_page < ctx->super->md_len); 3359 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 3360 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 3361 _spdk_bs_byte_to_lba(ctx->bs, 
SPDK_BS_PAGE_SIZE), 3362 _spdk_bs_dump_read_md_page_cpl, ctx); 3363 } 3364 3365 static void 3366 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3367 { 3368 struct spdk_bs_dump_ctx *ctx = cb_arg; 3369 3370 fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature); 3371 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3372 sizeof(ctx->super->signature)) != 0) { 3373 fprintf(ctx->fp, "(Mismatch)\n"); 3374 _spdk_bs_dump_finish(seq, ctx, bserrno); 3375 return; 3376 } else { 3377 fprintf(ctx->fp, "(OK)\n"); 3378 } 3379 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 3380 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 3381 (ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch"); 3382 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 3383 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 3384 fprintf(ctx->fp, "Super Blob ID: "); 3385 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 3386 fprintf(ctx->fp, "(None)\n"); 3387 } else { 3388 fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob); 3389 } 3390 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 3391 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 3392 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 3393 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 3394 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 3395 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 3396 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 3397 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 3398 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 3399 3400 ctx->cur_page = 0; 3401 ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE, 3402 SPDK_BS_PAGE_SIZE, 3403 NULL); 3404 if (!ctx->page) { 3405 _spdk_bs_dump_finish(seq, ctx, -ENOMEM); 3406 return; 3407 } 3408 _spdk_bs_dump_read_md_page(seq, cb_arg); 3409 } 3410 3411 void 3412 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 3413 spdk_bs_op_complete cb_fn, void *cb_arg) 3414 { 3415 struct spdk_blob_store *bs; 3416 struct spdk_bs_cpl cpl; 3417 spdk_bs_sequence_t *seq; 3418 struct spdk_bs_dump_ctx *ctx; 3419 struct spdk_bs_opts opts = {}; 3420 int err; 3421 3422 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev); 3423 3424 spdk_bs_opts_init(&opts); 3425 3426 err = _spdk_bs_alloc(dev, &opts, &bs); 3427 if (err) { 3428 dev->destroy(dev); 3429 cb_fn(cb_arg, err); 3430 return; 3431 } 3432 3433 ctx = calloc(1, sizeof(*ctx)); 3434 if (!ctx) { 3435 _spdk_bs_free(bs); 3436 cb_fn(cb_arg, -ENOMEM); 3437 return; 3438 } 3439 3440 ctx->bs = bs; 3441 ctx->fp = fp; 3442 ctx->print_xattr_fn = print_xattr_fn; 3443 3444 /* Allocate memory for the super block */ 3445 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3446 if (!ctx->super) { 3447 free(ctx); 3448 _spdk_bs_free(bs); 3449 cb_fn(cb_arg, -ENOMEM); 3450 return; 3451 } 3452 3453 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3454 cpl.u.bs_basic.cb_fn = cb_fn; 3455 cpl.u.bs_basic.cb_arg = cb_arg; 3456 3457 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3458 if (!seq) { 3459 spdk_dma_free(ctx->super); 3460 
free(ctx); 3461 _spdk_bs_free(bs); 3462 cb_fn(cb_arg, -ENOMEM); 3463 return; 3464 } 3465 3466 /* Read the super block */ 3467 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3468 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3469 _spdk_bs_dump_super_cpl, ctx); 3470 } 3471 3472 /* END spdk_bs_dump */ 3473 3474 /* START spdk_bs_init */ 3475 3476 struct spdk_bs_init_ctx { 3477 struct spdk_blob_store *bs; 3478 struct spdk_bs_super_block *super; 3479 }; 3480 3481 static void 3482 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3483 { 3484 struct spdk_bs_init_ctx *ctx = cb_arg; 3485 3486 spdk_dma_free(ctx->super); 3487 free(ctx); 3488 3489 spdk_bs_sequence_finish(seq, bserrno); 3490 } 3491 3492 static void 3493 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3494 { 3495 struct spdk_bs_init_ctx *ctx = cb_arg; 3496 3497 /* Write super block */ 3498 spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0), 3499 _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 3500 _spdk_bs_init_persist_super_cpl, ctx); 3501 } 3502 3503 void 3504 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 3505 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 3506 { 3507 struct spdk_bs_init_ctx *ctx; 3508 struct spdk_blob_store *bs; 3509 struct spdk_bs_cpl cpl; 3510 spdk_bs_sequence_t *seq; 3511 spdk_bs_batch_t *batch; 3512 uint64_t num_md_lba; 3513 uint64_t num_md_pages; 3514 uint64_t num_md_clusters; 3515 uint32_t i; 3516 struct spdk_bs_opts opts = {}; 3517 int rc; 3518 3519 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev); 3520 3521 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 3522 SPDK_ERRLOG("unsupported dev block length of %d\n", 3523 dev->blocklen); 3524 dev->destroy(dev); 3525 cb_fn(cb_arg, NULL, -EINVAL); 3526 return; 3527 } 3528 3529 if (o) { 3530 opts = *o; 3531 } else { 3532 spdk_bs_opts_init(&opts); 3533 } 3534 3535 if (_spdk_bs_opts_verify(&opts) != 0) { 3536 dev->destroy(dev); 3537 cb_fn(cb_arg, NULL, -EINVAL); 3538 return; 3539 } 3540 3541 rc = _spdk_bs_alloc(dev, &opts, &bs); 3542 if (rc) { 3543 dev->destroy(dev); 3544 cb_fn(cb_arg, NULL, rc); 3545 return; 3546 } 3547 3548 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 3549 /* By default, allocate 1 page per cluster. 3550 * Technically, this over-allocates metadata 3551 * because more metadata will reduce the number 3552 * of usable clusters. This can be addressed with 3553 * more complex math in the future. 
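* As a rough illustration with assumed numbers: a 100 GiB device using a 1 MiB
* cluster size has 102400 clusters, so this default reserves 102400 metadata
* pages, i.e. 400 MiB at 4 KiB per page.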
3554 */ 3555 bs->md_len = bs->total_clusters; 3556 } else { 3557 bs->md_len = opts.num_md_pages; 3558 } 3559 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 3560 if (rc < 0) { 3561 _spdk_bs_free(bs); 3562 cb_fn(cb_arg, NULL, -ENOMEM); 3563 return; 3564 } 3565 3566 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 3567 if (rc < 0) { 3568 _spdk_bs_free(bs); 3569 cb_fn(cb_arg, NULL, -ENOMEM); 3570 return; 3571 } 3572 3573 ctx = calloc(1, sizeof(*ctx)); 3574 if (!ctx) { 3575 _spdk_bs_free(bs); 3576 cb_fn(cb_arg, NULL, -ENOMEM); 3577 return; 3578 } 3579 3580 ctx->bs = bs; 3581 3582 /* Allocate memory for the super block */ 3583 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3584 if (!ctx->super) { 3585 free(ctx); 3586 _spdk_bs_free(bs); 3587 cb_fn(cb_arg, NULL, -ENOMEM); 3588 return; 3589 } 3590 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 3591 sizeof(ctx->super->signature)); 3592 ctx->super->version = SPDK_BS_VERSION; 3593 ctx->super->length = sizeof(*ctx->super); 3594 ctx->super->super_blob = bs->super_blob; 3595 ctx->super->clean = 0; 3596 ctx->super->cluster_size = bs->cluster_sz; 3597 ctx->super->io_unit_size = bs->io_unit_size; 3598 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 3599 3600 /* Calculate how many pages the metadata consumes at the front 3601 * of the disk. 3602 */ 3603 3604 /* The super block uses 1 page */ 3605 num_md_pages = 1; 3606 3607 /* The used_md_pages mask requires 1 bit per metadata page, rounded 3608 * up to the nearest page, plus a header. 3609 */ 3610 ctx->super->used_page_mask_start = num_md_pages; 3611 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 3612 spdk_divide_round_up(bs->md_len, 8), 3613 SPDK_BS_PAGE_SIZE); 3614 num_md_pages += ctx->super->used_page_mask_len; 3615 3616 /* The used_clusters mask requires 1 bit per cluster, rounded 3617 * up to the nearest page, plus a header. 3618 */ 3619 ctx->super->used_cluster_mask_start = num_md_pages; 3620 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 3621 spdk_divide_round_up(bs->total_clusters, 8), 3622 SPDK_BS_PAGE_SIZE); 3623 num_md_pages += ctx->super->used_cluster_mask_len; 3624 3625 /* The used_blobids mask requires 1 bit per metadata page, rounded 3626 * up to the nearest page, plus a header. 
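* For instance (hypothetical sizing): with md_len = 102400 metadata pages the
* bit array needs 12800 bytes, so with the spdk_bs_md_mask header included the
* mask rounds up to 4 pages of 4 KiB.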
3627 */ 3628 ctx->super->used_blobid_mask_start = num_md_pages; 3629 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 3630 spdk_divide_round_up(bs->md_len, 8), 3631 SPDK_BS_PAGE_SIZE); 3632 num_md_pages += ctx->super->used_blobid_mask_len; 3633 3634 /* The metadata region size was chosen above */ 3635 ctx->super->md_start = bs->md_start = num_md_pages; 3636 ctx->super->md_len = bs->md_len; 3637 num_md_pages += bs->md_len; 3638 3639 num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages); 3640 3641 ctx->super->size = dev->blockcnt * dev->blocklen; 3642 3643 ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super); 3644 3645 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 3646 if (num_md_clusters > bs->total_clusters) { 3647 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 3648 "please decrease number of pages reserved for metadata " 3649 "or increase cluster size.\n"); 3650 spdk_dma_free(ctx->super); 3651 free(ctx); 3652 _spdk_bs_free(bs); 3653 cb_fn(cb_arg, NULL, -ENOMEM); 3654 return; 3655 } 3656 /* Claim all of the clusters used by the metadata */ 3657 for (i = 0; i < num_md_clusters; i++) { 3658 _spdk_bs_claim_cluster(bs, i); 3659 } 3660 3661 bs->total_data_clusters = bs->num_free_clusters; 3662 3663 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 3664 cpl.u.bs_handle.cb_fn = cb_fn; 3665 cpl.u.bs_handle.cb_arg = cb_arg; 3666 cpl.u.bs_handle.bs = bs; 3667 3668 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3669 if (!seq) { 3670 spdk_dma_free(ctx->super); 3671 free(ctx); 3672 _spdk_bs_free(bs); 3673 cb_fn(cb_arg, NULL, -ENOMEM); 3674 return; 3675 } 3676 3677 batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx); 3678 3679 /* Clear metadata space */ 3680 spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 3681 /* Trim data clusters */ 3682 spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba); 3683 3684 spdk_bs_batch_close(batch); 3685 } 3686 3687 /* END spdk_bs_init */ 3688 3689 /* START spdk_bs_destroy */ 3690 3691 static void 3692 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3693 { 3694 struct spdk_bs_init_ctx *ctx = cb_arg; 3695 struct spdk_blob_store *bs = ctx->bs; 3696 3697 /* 3698 * We need to defer calling spdk_bs_call_cpl() until after 3699 * dev destruction, so tuck these away for later use. 
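* _spdk_bs_free() below unregisters the io_device; the saved completion is then
* invoked from _spdk_bs_dev_destroy() only after dev->destroy() has run, so the
* caller never observes the completion while the device is still in use.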
3700 */ 3701 bs->unload_err = bserrno; 3702 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3703 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3704 3705 spdk_bs_sequence_finish(seq, bserrno); 3706 3707 _spdk_bs_free(bs); 3708 free(ctx); 3709 } 3710 3711 void 3712 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 3713 void *cb_arg) 3714 { 3715 struct spdk_bs_cpl cpl; 3716 spdk_bs_sequence_t *seq; 3717 struct spdk_bs_init_ctx *ctx; 3718 3719 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n"); 3720 3721 if (!TAILQ_EMPTY(&bs->blobs)) { 3722 SPDK_ERRLOG("Blobstore still has open blobs\n"); 3723 cb_fn(cb_arg, -EBUSY); 3724 return; 3725 } 3726 3727 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3728 cpl.u.bs_basic.cb_fn = cb_fn; 3729 cpl.u.bs_basic.cb_arg = cb_arg; 3730 3731 ctx = calloc(1, sizeof(*ctx)); 3732 if (!ctx) { 3733 cb_fn(cb_arg, -ENOMEM); 3734 return; 3735 } 3736 3737 ctx->bs = bs; 3738 3739 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3740 if (!seq) { 3741 free(ctx); 3742 cb_fn(cb_arg, -ENOMEM); 3743 return; 3744 } 3745 3746 /* Write zeroes to the super block */ 3747 spdk_bs_sequence_write_zeroes_dev(seq, 3748 _spdk_bs_page_to_lba(bs, 0), 3749 _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 3750 _spdk_bs_destroy_trim_cpl, ctx); 3751 } 3752 3753 /* END spdk_bs_destroy */ 3754 3755 /* START spdk_bs_unload */ 3756 3757 static void 3758 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3759 { 3760 struct spdk_bs_load_ctx *ctx = cb_arg; 3761 3762 spdk_dma_free(ctx->super); 3763 3764 /* 3765 * We need to defer calling spdk_bs_call_cpl() until after 3766 * dev destruction, so tuck these away for later use. 3767 */ 3768 ctx->bs->unload_err = bserrno; 3769 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 3770 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 3771 3772 spdk_bs_sequence_finish(seq, bserrno); 3773 3774 _spdk_bs_free(ctx->bs); 3775 free(ctx); 3776 } 3777 3778 static void 3779 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3780 { 3781 struct spdk_bs_load_ctx *ctx = cb_arg; 3782 3783 spdk_dma_free(ctx->mask); 3784 ctx->super->clean = 1; 3785 3786 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx); 3787 } 3788 3789 static void 3790 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3791 { 3792 struct spdk_bs_load_ctx *ctx = cb_arg; 3793 3794 spdk_dma_free(ctx->mask); 3795 ctx->mask = NULL; 3796 3797 _spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_unload_write_used_clusters_cpl); 3798 } 3799 3800 static void 3801 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3802 { 3803 struct spdk_bs_load_ctx *ctx = cb_arg; 3804 3805 spdk_dma_free(ctx->mask); 3806 ctx->mask = NULL; 3807 3808 _spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_unload_write_used_blobids_cpl); 3809 } 3810 3811 static void 3812 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3813 { 3814 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl); 3815 } 3816 3817 void 3818 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 3819 { 3820 struct spdk_bs_cpl cpl; 3821 spdk_bs_sequence_t *seq; 3822 struct spdk_bs_load_ctx *ctx; 3823 3824 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n"); 3825 3826 if (!TAILQ_EMPTY(&bs->blobs)) { 3827 SPDK_ERRLOG("Blobstore still has open blobs\n"); 3828 
cb_fn(cb_arg, -EBUSY); 3829 return; 3830 } 3831 3832 ctx = calloc(1, sizeof(*ctx)); 3833 if (!ctx) { 3834 cb_fn(cb_arg, -ENOMEM); 3835 return; 3836 } 3837 3838 ctx->bs = bs; 3839 3840 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3841 if (!ctx->super) { 3842 free(ctx); 3843 cb_fn(cb_arg, -ENOMEM); 3844 return; 3845 } 3846 3847 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3848 cpl.u.bs_basic.cb_fn = cb_fn; 3849 cpl.u.bs_basic.cb_arg = cb_arg; 3850 3851 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3852 if (!seq) { 3853 spdk_dma_free(ctx->super); 3854 free(ctx); 3855 cb_fn(cb_arg, -ENOMEM); 3856 return; 3857 } 3858 3859 /* Read super block */ 3860 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3861 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3862 _spdk_bs_unload_read_super_cpl, ctx); 3863 } 3864 3865 /* END spdk_bs_unload */ 3866 3867 /* START spdk_bs_set_super */ 3868 3869 struct spdk_bs_set_super_ctx { 3870 struct spdk_blob_store *bs; 3871 struct spdk_bs_super_block *super; 3872 }; 3873 3874 static void 3875 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3876 { 3877 struct spdk_bs_set_super_ctx *ctx = cb_arg; 3878 3879 if (bserrno != 0) { 3880 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 3881 } 3882 3883 spdk_dma_free(ctx->super); 3884 3885 spdk_bs_sequence_finish(seq, bserrno); 3886 3887 free(ctx); 3888 } 3889 3890 static void 3891 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3892 { 3893 struct spdk_bs_set_super_ctx *ctx = cb_arg; 3894 3895 if (bserrno != 0) { 3896 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 3897 spdk_dma_free(ctx->super); 3898 spdk_bs_sequence_finish(seq, bserrno); 3899 free(ctx); 3900 return; 3901 } 3902 3903 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx); 3904 } 3905 3906 void 3907 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 3908 spdk_bs_op_complete cb_fn, void *cb_arg) 3909 { 3910 struct spdk_bs_cpl cpl; 3911 spdk_bs_sequence_t *seq; 3912 struct spdk_bs_set_super_ctx *ctx; 3913 3914 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n"); 3915 3916 ctx = calloc(1, sizeof(*ctx)); 3917 if (!ctx) { 3918 cb_fn(cb_arg, -ENOMEM); 3919 return; 3920 } 3921 3922 ctx->bs = bs; 3923 3924 ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 3925 if (!ctx->super) { 3926 free(ctx); 3927 cb_fn(cb_arg, -ENOMEM); 3928 return; 3929 } 3930 3931 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 3932 cpl.u.bs_basic.cb_fn = cb_fn; 3933 cpl.u.bs_basic.cb_arg = cb_arg; 3934 3935 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 3936 if (!seq) { 3937 spdk_dma_free(ctx->super); 3938 free(ctx); 3939 cb_fn(cb_arg, -ENOMEM); 3940 return; 3941 } 3942 3943 bs->super_blob = blobid; 3944 3945 /* Read super block */ 3946 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 3947 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 3948 _spdk_bs_set_super_read_cpl, ctx); 3949 } 3950 3951 /* END spdk_bs_set_super */ 3952 3953 void 3954 spdk_bs_get_super(struct spdk_blob_store *bs, 3955 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 3956 { 3957 if (bs->super_blob == SPDK_BLOBID_INVALID) { 3958 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 3959 } else { 3960 cb_fn(cb_arg, bs->super_blob, 0); 3961 } 3962 } 3963 3964 uint64_t 3965 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 3966 { 3967 return bs->cluster_sz; 3968 } 3969 3970 uint64_t 3971 spdk_bs_get_page_size(struct 
spdk_blob_store *bs) 3972 { 3973 return SPDK_BS_PAGE_SIZE; 3974 } 3975 3976 uint64_t 3977 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 3978 { 3979 return bs->io_unit_size; 3980 } 3981 3982 uint64_t 3983 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 3984 { 3985 return bs->num_free_clusters; 3986 } 3987 3988 uint64_t 3989 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 3990 { 3991 return bs->total_data_clusters; 3992 } 3993 3994 static int 3995 spdk_bs_register_md_thread(struct spdk_blob_store *bs) 3996 { 3997 bs->md_channel = spdk_get_io_channel(bs); 3998 if (!bs->md_channel) { 3999 SPDK_ERRLOG("Failed to get IO channel.\n"); 4000 return -1; 4001 } 4002 4003 return 0; 4004 } 4005 4006 static int 4007 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs) 4008 { 4009 spdk_put_io_channel(bs->md_channel); 4010 4011 return 0; 4012 } 4013 4014 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 4015 { 4016 assert(blob != NULL); 4017 4018 return blob->id; 4019 } 4020 4021 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 4022 { 4023 assert(blob != NULL); 4024 4025 return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters); 4026 } 4027 4028 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob) 4029 { 4030 assert(blob != NULL); 4031 4032 return spdk_blob_get_num_pages(blob) * _spdk_bs_io_unit_per_page(blob->bs); 4033 } 4034 4035 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 4036 { 4037 assert(blob != NULL); 4038 4039 return blob->active.num_clusters; 4040 } 4041 4042 /* START spdk_bs_create_blob */ 4043 4044 static void 4045 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4046 { 4047 struct spdk_blob *blob = cb_arg; 4048 4049 _spdk_blob_free(blob); 4050 4051 spdk_bs_sequence_finish(seq, bserrno); 4052 } 4053 4054 static int 4055 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 4056 bool internal) 4057 { 4058 uint64_t i; 4059 size_t value_len = 0; 4060 int rc; 4061 const void *value = NULL; 4062 if (xattrs->count > 0 && xattrs->get_value == NULL) { 4063 return -EINVAL; 4064 } 4065 for (i = 0; i < xattrs->count; i++) { 4066 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 4067 if (value == NULL || value_len == 0) { 4068 return -EINVAL; 4069 } 4070 rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 4071 if (rc < 0) { 4072 return rc; 4073 } 4074 } 4075 return 0; 4076 } 4077 4078 static void 4079 _spdk_blob_set_thin_provision(struct spdk_blob *blob) 4080 { 4081 _spdk_blob_verify_md_op(blob); 4082 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 4083 blob->state = SPDK_BLOB_STATE_DIRTY; 4084 } 4085 4086 static void 4087 _spdk_bs_create_blob(struct spdk_blob_store *bs, 4088 const struct spdk_blob_opts *opts, 4089 const struct spdk_blob_xattr_opts *internal_xattrs, 4090 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4091 { 4092 struct spdk_blob *blob; 4093 uint32_t page_idx; 4094 struct spdk_bs_cpl cpl; 4095 struct spdk_blob_opts opts_default; 4096 struct spdk_blob_xattr_opts internal_xattrs_default; 4097 spdk_bs_sequence_t *seq; 4098 spdk_blob_id id; 4099 int rc; 4100 4101 assert(spdk_get_thread() == bs->md_thread); 4102 4103 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 4104 if (page_idx == UINT32_MAX) { 4105 cb_fn(cb_arg, 0, -ENOMEM); 4106 return; 4107 } 4108 spdk_bit_array_set(bs->used_blobids, page_idx); 4109 spdk_bit_array_set(bs->used_md_pages, page_idx); 4110 4111 id = 
_spdk_bs_page_to_blobid(page_idx); 4112 4113 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx); 4114 4115 blob = _spdk_blob_alloc(bs, id); 4116 if (!blob) { 4117 cb_fn(cb_arg, 0, -ENOMEM); 4118 return; 4119 } 4120 4121 if (!opts) { 4122 spdk_blob_opts_init(&opts_default); 4123 opts = &opts_default; 4124 } 4125 if (!internal_xattrs) { 4126 _spdk_blob_xattrs_init(&internal_xattrs_default); 4127 internal_xattrs = &internal_xattrs_default; 4128 } 4129 4130 rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false); 4131 if (rc < 0) { 4132 _spdk_blob_free(blob); 4133 cb_fn(cb_arg, 0, rc); 4134 return; 4135 } 4136 4137 rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true); 4138 if (rc < 0) { 4139 _spdk_blob_free(blob); 4140 cb_fn(cb_arg, 0, rc); 4141 return; 4142 } 4143 4144 if (opts->thin_provision) { 4145 _spdk_blob_set_thin_provision(blob); 4146 } 4147 4148 rc = _spdk_blob_resize(blob, opts->num_clusters); 4149 if (rc < 0) { 4150 _spdk_blob_free(blob); 4151 cb_fn(cb_arg, 0, rc); 4152 return; 4153 } 4154 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4155 cpl.u.blobid.cb_fn = cb_fn; 4156 cpl.u.blobid.cb_arg = cb_arg; 4157 cpl.u.blobid.blobid = blob->id; 4158 4159 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4160 if (!seq) { 4161 _spdk_blob_free(blob); 4162 cb_fn(cb_arg, 0, -ENOMEM); 4163 return; 4164 } 4165 4166 _spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob); 4167 } 4168 4169 void spdk_bs_create_blob(struct spdk_blob_store *bs, 4170 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4171 { 4172 _spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 4173 } 4174 4175 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts, 4176 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4177 { 4178 _spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 4179 } 4180 4181 /* END spdk_bs_create_blob */ 4182 4183 /* START blob_cleanup */ 4184 4185 struct spdk_clone_snapshot_ctx { 4186 struct spdk_bs_cpl cpl; 4187 int bserrno; 4188 bool frozen; 4189 4190 struct spdk_io_channel *channel; 4191 4192 /* Current cluster for inflate operation */ 4193 uint64_t cluster; 4194 4195 /* For inflation force allocation of all unallocated clusters and remove 4196 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */ 4197 bool allocate_all; 4198 4199 struct { 4200 spdk_blob_id id; 4201 struct spdk_blob *blob; 4202 } original; 4203 struct { 4204 spdk_blob_id id; 4205 struct spdk_blob *blob; 4206 } new; 4207 4208 /* xattrs specified for snapshot/clones only. They have no impact on 4209 * the original blobs xattrs. 
*/ 4210 const struct spdk_blob_xattr_opts *xattrs; 4211 }; 4212 4213 static void 4214 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 4215 { 4216 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 4217 struct spdk_bs_cpl *cpl = &ctx->cpl; 4218 4219 if (bserrno != 0) { 4220 if (ctx->bserrno != 0) { 4221 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4222 } else { 4223 ctx->bserrno = bserrno; 4224 } 4225 } 4226 4227 switch (cpl->type) { 4228 case SPDK_BS_CPL_TYPE_BLOBID: 4229 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 4230 break; 4231 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 4232 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 4233 break; 4234 default: 4235 SPDK_UNREACHABLE(); 4236 break; 4237 } 4238 4239 free(ctx); 4240 } 4241 4242 static void 4243 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 4244 { 4245 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4246 struct spdk_blob *origblob = ctx->original.blob; 4247 4248 if (bserrno != 0) { 4249 if (ctx->bserrno != 0) { 4250 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 4251 } else { 4252 ctx->bserrno = bserrno; 4253 } 4254 } 4255 4256 ctx->original.id = origblob->id; 4257 spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 4258 } 4259 4260 static void 4261 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 4262 { 4263 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4264 struct spdk_blob *origblob = ctx->original.blob; 4265 4266 if (bserrno != 0) { 4267 if (ctx->bserrno != 0) { 4268 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4269 } else { 4270 ctx->bserrno = bserrno; 4271 } 4272 } 4273 4274 if (ctx->frozen) { 4275 /* Unfreeze any outstanding I/O */ 4276 _spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx); 4277 } else { 4278 _spdk_bs_snapshot_unfreeze_cpl(ctx, 0); 4279 } 4280 4281 } 4282 4283 static void 4284 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno) 4285 { 4286 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4287 struct spdk_blob *newblob = ctx->new.blob; 4288 4289 if (bserrno != 0) { 4290 if (ctx->bserrno != 0) { 4291 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 4292 } else { 4293 ctx->bserrno = bserrno; 4294 } 4295 } 4296 4297 ctx->new.id = newblob->id; 4298 spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4299 } 4300 4301 /* END blob_cleanup */ 4302 4303 /* START spdk_bs_create_snapshot */ 4304 4305 static void 4306 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 4307 { 4308 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4309 struct spdk_blob *newblob = ctx->new.blob; 4310 4311 if (bserrno != 0) { 4312 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4313 return; 4314 } 4315 4316 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 4317 bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 4318 if (bserrno != 0) { 4319 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4320 return; 4321 } 4322 4323 _spdk_bs_blob_list_add(ctx->original.blob); 4324 4325 spdk_blob_set_read_only(newblob); 4326 4327 /* sync snapshot metadata */ 4328 spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, cb_arg); 4329 } 4330 4331 static void 4332 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 4333 { 4334 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4335 struct 
spdk_blob *origblob = ctx->original.blob; 4336 struct spdk_blob *newblob = ctx->new.blob; 4337 4338 if (bserrno != 0) { 4339 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4340 return; 4341 } 4342 4343 /* Set internal xattr for snapshot id */ 4344 bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 4345 if (bserrno != 0) { 4346 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4347 return; 4348 } 4349 4350 _spdk_bs_blob_list_remove(origblob); 4351 origblob->parent_id = newblob->id; 4352 4353 /* Create new back_bs_dev for snapshot */ 4354 origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob); 4355 if (origblob->back_bs_dev == NULL) { 4356 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 4357 return; 4358 } 4359 4360 /* set clone blob as thin provisioned */ 4361 _spdk_blob_set_thin_provision(origblob); 4362 4363 _spdk_bs_blob_list_add(newblob); 4364 4365 /* Zero out origblob cluster map */ 4366 memset(origblob->active.clusters, 0, 4367 origblob->active.num_clusters * sizeof(*origblob->active.clusters)); 4368 4369 /* sync clone metadata */ 4370 spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx); 4371 } 4372 4373 static void 4374 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc) 4375 { 4376 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4377 struct spdk_blob *origblob = ctx->original.blob; 4378 struct spdk_blob *newblob = ctx->new.blob; 4379 int bserrno; 4380 4381 if (rc != 0) { 4382 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc); 4383 return; 4384 } 4385 4386 ctx->frozen = true; 4387 4388 /* set new back_bs_dev for snapshot */ 4389 newblob->back_bs_dev = origblob->back_bs_dev; 4390 /* Set invalid flags from origblob */ 4391 newblob->invalid_flags = origblob->invalid_flags; 4392 4393 /* inherit parent from original blob if set */ 4394 newblob->parent_id = origblob->parent_id; 4395 if (origblob->parent_id != SPDK_BLOBID_INVALID) { 4396 /* Set internal xattr for the inherited parent snapshot id */ 4397 bserrno = _spdk_blob_set_xattr(newblob, BLOB_SNAPSHOT, 4398 &origblob->parent_id, sizeof(spdk_blob_id), true); 4399 if (bserrno != 0) { 4400 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 4401 return; 4402 } 4403 } 4404 4405 /* Copy cluster map to snapshot */ 4406 memcpy(newblob->active.clusters, origblob->active.clusters, 4407 origblob->active.num_clusters * sizeof(*origblob->active.clusters)); 4408 4409 /* sync snapshot metadata */ 4410 spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx); 4411 } 4412 4413 static void 4414 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4415 { 4416 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4417 struct spdk_blob *origblob = ctx->original.blob; 4418 struct spdk_blob *newblob = _blob; 4419 4420 if (bserrno != 0) { 4421 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4422 return; 4423 } 4424 4425 ctx->new.blob = newblob; 4426 4427 _spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx); 4428 } 4429 4430 static void 4431 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 4432 { 4433 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4434 struct spdk_blob *origblob = ctx->original.blob; 4435 4436 if (bserrno != 0) { 4437 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4438 return; 4439 } 4440 4441 ctx->new.id = blobid; 4442 ctx->cpl.u.blobid.blobid = blobid; 4443 4444
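	/*
	 * Open the newly created snapshot blob. Its open completion freezes
	 * I/O on the original blob before the original's cluster map, flags
	 * and parent reference are copied into the snapshot.
	 */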
spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx); 4445 } 4446 4447 4448 static void 4449 _spdk_bs_xattr_snapshot(void *arg, const char *name, 4450 const void **value, size_t *value_len) 4451 { 4452 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 4453 4454 struct spdk_blob *blob = (struct spdk_blob *)arg; 4455 *value = &blob->id; 4456 *value_len = sizeof(blob->id); 4457 } 4458 4459 static void 4460 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4461 { 4462 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4463 struct spdk_blob_opts opts; 4464 struct spdk_blob_xattr_opts internal_xattrs; 4465 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 4466 4467 if (bserrno != 0) { 4468 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4469 return; 4470 } 4471 4472 ctx->original.blob = _blob; 4473 4474 if (_blob->data_ro || _blob->md_ro) { 4475 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read only blob with id %lu\n", 4476 _blob->id); 4477 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4478 return; 4479 } 4480 4481 spdk_blob_opts_init(&opts); 4482 _spdk_blob_xattrs_init(&internal_xattrs); 4483 4484 /* Change the size of new blob to the same as in original blob, 4485 * but do not allocate clusters */ 4486 opts.thin_provision = true; 4487 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 4488 4489 /* If there are any xattrs specified for snapshot, set them now */ 4490 if (ctx->xattrs) { 4491 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 4492 } 4493 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 4494 internal_xattrs.count = 1; 4495 internal_xattrs.ctx = _blob; 4496 internal_xattrs.names = xattrs_names; 4497 internal_xattrs.get_value = _spdk_bs_xattr_snapshot; 4498 4499 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 4500 _spdk_bs_snapshot_newblob_create_cpl, ctx); 4501 } 4502 4503 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 4504 const struct spdk_blob_xattr_opts *snapshot_xattrs, 4505 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4506 { 4507 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4508 4509 if (!ctx) { 4510 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 4511 return; 4512 } 4513 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4514 ctx->cpl.u.blobid.cb_fn = cb_fn; 4515 ctx->cpl.u.blobid.cb_arg = cb_arg; 4516 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 4517 ctx->bserrno = 0; 4518 ctx->frozen = false; 4519 ctx->original.id = blobid; 4520 ctx->xattrs = snapshot_xattrs; 4521 4522 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx); 4523 } 4524 /* END spdk_bs_create_snapshot */ 4525 4526 /* START spdk_bs_create_clone */ 4527 4528 static void 4529 _spdk_bs_xattr_clone(void *arg, const char *name, 4530 const void **value, size_t *value_len) 4531 { 4532 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 4533 4534 struct spdk_blob *blob = (struct spdk_blob *)arg; 4535 *value = &blob->id; 4536 *value_len = sizeof(blob->id); 4537 } 4538 4539 static void 4540 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4541 { 4542 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4543 struct spdk_blob *clone = _blob; 4544 4545 ctx->new.blob = clone; 4546 _spdk_bs_blob_list_add(clone); 4547 4548 spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4549 } 4550 4551 
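/*
 * The clone path (spdk_bs_create_clone() below) mirrors the snapshot path
 * above: it opens the read-only snapshot, creates a thin provisioned blob of
 * the same size whose internal BLOB_SNAPSHOT xattr points back at that
 * snapshot, opens the new blob once so it gets registered on the snapshot's
 * clone list, and then unwinds through the shared cleanup helpers. A minimal
 * caller-side sketch, assuming a blobstore handle and an existing read-only
 * snapshot id (the callback name and its context are illustrative only, not
 * part of this file):
 *
 *	static void
 *	clone_done(void *cb_arg, spdk_blob_id clone_id, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			SPDK_NOTICELOG("clone 0x%" PRIx64 " created\n", clone_id);
 *		}
 *	}
 *
 *	spdk_bs_create_clone(bs, snapshot_id, NULL, clone_done, NULL);
 */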
static void 4552 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 4553 { 4554 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4555 4556 ctx->cpl.u.blobid.blobid = blobid; 4557 spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx); 4558 } 4559 4560 static void 4561 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4562 { 4563 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4564 struct spdk_blob_opts opts; 4565 struct spdk_blob_xattr_opts internal_xattrs; 4566 char *xattr_names[] = { BLOB_SNAPSHOT }; 4567 4568 if (bserrno != 0) { 4569 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4570 return; 4571 } 4572 4573 ctx->original.blob = _blob; 4574 4575 if (!_blob->data_ro || !_blob->md_ro) { 4576 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Clone not from read-only blob\n"); 4577 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4578 return; 4579 } 4580 4581 spdk_blob_opts_init(&opts); 4582 _spdk_blob_xattrs_init(&internal_xattrs); 4583 4584 opts.thin_provision = true; 4585 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 4586 if (ctx->xattrs) { 4587 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 4588 } 4589 4590 /* Set internal xattr BLOB_SNAPSHOT */ 4591 internal_xattrs.count = 1; 4592 internal_xattrs.ctx = _blob; 4593 internal_xattrs.names = xattr_names; 4594 internal_xattrs.get_value = _spdk_bs_xattr_clone; 4595 4596 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 4597 _spdk_bs_clone_newblob_create_cpl, ctx); 4598 } 4599 4600 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 4601 const struct spdk_blob_xattr_opts *clone_xattrs, 4602 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4603 { 4604 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4605 4606 if (!ctx) { 4607 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 4608 return; 4609 } 4610 4611 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 4612 ctx->cpl.u.blobid.cb_fn = cb_fn; 4613 ctx->cpl.u.blobid.cb_arg = cb_arg; 4614 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 4615 ctx->bserrno = 0; 4616 ctx->xattrs = clone_xattrs; 4617 ctx->original.id = blobid; 4618 4619 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx); 4620 } 4621 4622 /* END spdk_bs_create_clone */ 4623 4624 /* START spdk_bs_inflate_blob */ 4625 4626 static void 4627 _spdk_bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 4628 { 4629 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4630 struct spdk_blob *_blob = ctx->original.blob; 4631 4632 if (bserrno != 0) { 4633 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4634 return; 4635 } 4636 4637 assert(_parent != NULL); 4638 4639 _spdk_bs_blob_list_remove(_blob); 4640 _blob->parent_id = _parent->id; 4641 _spdk_blob_set_xattr(_blob, BLOB_SNAPSHOT, &_blob->parent_id, 4642 sizeof(spdk_blob_id), true); 4643 4644 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 4645 _blob->back_bs_dev = spdk_bs_create_blob_bs_dev(_parent); 4646 _spdk_bs_blob_list_add(_blob); 4647 4648 spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4649 } 4650 4651 static void 4652 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno) 4653 { 4654 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4655 struct spdk_blob *_blob = ctx->original.blob; 4656 struct spdk_blob *_parent; 4657 4658 if (bserrno 
!= 0) { 4659 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4660 return; 4661 } 4662 4663 if (ctx->allocate_all) { 4664 /* remove thin provisioning */ 4665 _spdk_bs_blob_list_remove(_blob); 4666 _spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 4667 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 4668 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 4669 _blob->back_bs_dev = NULL; 4670 _blob->parent_id = SPDK_BLOBID_INVALID; 4671 } else { 4672 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob; 4673 if (_parent->parent_id != SPDK_BLOBID_INVALID) { 4674 /* We must change the parent of the inflated blob */ 4675 spdk_bs_open_blob(_blob->bs, _parent->parent_id, 4676 _spdk_bs_inflate_blob_set_parent_cpl, ctx); 4677 return; 4678 } 4679 4680 _spdk_bs_blob_list_remove(_blob); 4681 _spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 4682 _blob->parent_id = SPDK_BLOBID_INVALID; 4683 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 4684 _blob->back_bs_dev = spdk_bs_create_zeroes_dev(); 4685 } 4686 4687 _blob->state = SPDK_BLOB_STATE_DIRTY; 4688 spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 4689 } 4690 4691 /* Check if cluster needs allocation */ 4692 static inline bool 4693 _spdk_bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all) 4694 { 4695 struct spdk_blob_bs_dev *b; 4696 4697 assert(blob != NULL); 4698 4699 if (blob->active.clusters[cluster] != 0) { 4700 /* Cluster is already allocated */ 4701 return false; 4702 } 4703 4704 if (blob->parent_id == SPDK_BLOBID_INVALID) { 4705 /* Blob has no parent blob */ 4706 return allocate_all; 4707 } 4708 4709 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 4710 return (allocate_all || b->blob->active.clusters[cluster] != 0); 4711 } 4712 4713 static void 4714 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno) 4715 { 4716 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4717 struct spdk_blob *_blob = ctx->original.blob; 4718 uint64_t offset; 4719 4720 if (bserrno != 0) { 4721 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 4722 return; 4723 } 4724 4725 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) { 4726 if (_spdk_bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) { 4727 break; 4728 } 4729 } 4730 4731 if (ctx->cluster < _blob->active.num_clusters) { 4732 offset = _spdk_bs_cluster_to_lba(_blob->bs, ctx->cluster); 4733 4734 /* We can safely increment the cluster index before issuing the write */ 4735 ctx->cluster++; 4736 4737 /* Use a zero-length write to touch the cluster */ 4738 spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0, 4739 _spdk_bs_inflate_blob_touch_next, ctx); 4740 } else { 4741 _spdk_bs_inflate_blob_done(cb_arg, bserrno); 4742 } 4743 } 4744 4745 static void 4746 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 4747 { 4748 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 4749 uint64_t lfc; /* lowest free cluster */ 4750 uint64_t i; 4751 4752 if (bserrno != 0) { 4753 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 4754 return; 4755 } 4756 ctx->original.blob = _blob; 4757 4758 if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) { 4759 /* This blob has no parent, so we cannot decouple it.
*/ 4760 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n"); 4761 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 4762 return; 4763 } 4764 4765 if (spdk_blob_is_thin_provisioned(_blob) == false) { 4766 /* This is not thin provisioned blob. No need to inflate. */ 4767 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0); 4768 return; 4769 } 4770 4771 /* Do two passes - one to verify that we can obtain enough clusters 4772 * and another to actually claim them. 4773 */ 4774 lfc = 0; 4775 for (i = 0; i < _blob->active.num_clusters; i++) { 4776 if (_spdk_bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) { 4777 lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc); 4778 if (lfc == UINT32_MAX) { 4779 /* No more free clusters. Cannot satisfy the request */ 4780 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 4781 return; 4782 } 4783 lfc++; 4784 } 4785 } 4786 4787 ctx->cluster = 0; 4788 _spdk_bs_inflate_blob_touch_next(ctx, 0); 4789 } 4790 4791 static void 4792 _spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 4793 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 4794 { 4795 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 4796 4797 if (!ctx) { 4798 cb_fn(cb_arg, -ENOMEM); 4799 return; 4800 } 4801 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 4802 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 4803 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 4804 ctx->bserrno = 0; 4805 ctx->original.id = blobid; 4806 ctx->channel = channel; 4807 ctx->allocate_all = allocate_all; 4808 4809 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx); 4810 } 4811 4812 void 4813 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 4814 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 4815 { 4816 _spdk_bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 4817 } 4818 4819 void 4820 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 4821 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 4822 { 4823 _spdk_bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 4824 } 4825 /* END spdk_bs_inflate_blob */ 4826 4827 /* START spdk_blob_resize */ 4828 struct spdk_bs_resize_ctx { 4829 spdk_blob_op_complete cb_fn; 4830 void *cb_arg; 4831 struct spdk_blob *blob; 4832 uint64_t sz; 4833 int rc; 4834 }; 4835 4836 static void 4837 _spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc) 4838 { 4839 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 4840 4841 if (rc != 0) { 4842 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 4843 } 4844 4845 if (ctx->rc != 0) { 4846 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 4847 rc = ctx->rc; 4848 } 4849 4850 ctx->blob->resize_in_progress = false; 4851 4852 ctx->cb_fn(ctx->cb_arg, rc); 4853 free(ctx); 4854 } 4855 4856 static void 4857 _spdk_bs_resize_freeze_cpl(void *cb_arg, int rc) 4858 { 4859 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 4860 4861 if (rc != 0) { 4862 ctx->blob->resize_in_progress = false; 4863 ctx->cb_fn(ctx->cb_arg, rc); 4864 free(ctx); 4865 return; 4866 } 4867 4868 ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz); 4869 4870 _spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx); 4871 } 4872 4873 void 4874 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 4875 { 4876 struct spdk_bs_resize_ctx *ctx; 4877 4878 _spdk_blob_verify_md_op(blob); 4879 4880 
SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz); 4881 4882 if (blob->md_ro) { 4883 cb_fn(cb_arg, -EPERM); 4884 return; 4885 } 4886 4887 if (sz == blob->active.num_clusters) { 4888 cb_fn(cb_arg, 0); 4889 return; 4890 } 4891 4892 if (blob->resize_in_progress) { 4893 cb_fn(cb_arg, -EBUSY); 4894 return; 4895 } 4896 4897 ctx = calloc(1, sizeof(*ctx)); 4898 if (!ctx) { 4899 cb_fn(cb_arg, -ENOMEM); 4900 return; 4901 } 4902 4903 blob->resize_in_progress = true; 4904 ctx->cb_fn = cb_fn; 4905 ctx->cb_arg = cb_arg; 4906 ctx->blob = blob; 4907 ctx->sz = sz; 4908 _spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx); 4909 } 4910 4911 /* END spdk_blob_resize */ 4912 4913 4914 /* START spdk_bs_delete_blob */ 4915 4916 static void 4917 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno) 4918 { 4919 spdk_bs_sequence_t *seq = cb_arg; 4920 4921 spdk_bs_sequence_finish(seq, bserrno); 4922 } 4923 4924 static void 4925 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4926 { 4927 struct spdk_blob *blob = cb_arg; 4928 4929 if (bserrno != 0) { 4930 /* 4931 * We already removed this blob from the blobstore tailq, so 4932 * we need to free it here since this is the last reference 4933 * to it. 4934 */ 4935 _spdk_blob_free(blob); 4936 _spdk_bs_delete_close_cpl(seq, bserrno); 4937 return; 4938 } 4939 4940 /* 4941 * This will immediately decrement the ref_count and call 4942 * the completion routine since the metadata state is clean. 4943 * By calling spdk_blob_close, we reduce the number of call 4944 * points into code that touches the blob->open_ref count 4945 * and the blobstore's blob list. 4946 */ 4947 spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq); 4948 } 4949 4950 static void 4951 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 4952 { 4953 spdk_bs_sequence_t *seq = cb_arg; 4954 struct spdk_blob_list *snapshot = NULL; 4955 uint32_t page_num; 4956 4957 if (bserrno != 0) { 4958 spdk_bs_sequence_finish(seq, bserrno); 4959 return; 4960 } 4961 4962 _spdk_blob_verify_md_op(blob); 4963 4964 if (blob->open_ref > 1) { 4965 /* 4966 * Someone has this blob open (besides this delete context). 4967 * Decrement the ref count directly and return -EBUSY. 4968 */ 4969 blob->open_ref--; 4970 spdk_bs_sequence_finish(seq, -EBUSY); 4971 return; 4972 } 4973 4974 bserrno = _spdk_bs_blob_list_remove(blob); 4975 if (bserrno != 0) { 4976 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Remove blob #%" PRIu64 " from a list\n", blob->id); 4977 spdk_bs_sequence_finish(seq, bserrno); 4978 return; 4979 } 4980 4981 /* 4982 * Remove the blob from the blob_store list now, to ensure it does not 4983 * get returned after this point by _spdk_blob_lookup(). 
4984 */ 4985 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 4986 4987 /* If blob is a snapshot then remove it from the list */ 4988 TAILQ_FOREACH(snapshot, &blob->bs->snapshots, link) { 4989 if (snapshot->id == blob->id) { 4990 TAILQ_REMOVE(&blob->bs->snapshots, snapshot, link); 4991 free(snapshot); 4992 break; 4993 } 4994 } 4995 4996 page_num = _spdk_bs_blobid_to_page(blob->id); 4997 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 4998 blob->state = SPDK_BLOB_STATE_DIRTY; 4999 blob->active.num_pages = 0; 5000 _spdk_blob_resize(blob, 0); 5001 5002 _spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob); 5003 } 5004 5005 void 5006 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 5007 spdk_blob_op_complete cb_fn, void *cb_arg) 5008 { 5009 struct spdk_bs_cpl cpl; 5010 spdk_bs_sequence_t *seq; 5011 struct spdk_blob_list *snapshot_entry = NULL; 5012 5013 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid); 5014 5015 assert(spdk_get_thread() == bs->md_thread); 5016 5017 /* Check if this is a snapshot with clones */ 5018 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5019 if (snapshot_entry->id == blobid) { 5020 break; 5021 } 5022 } 5023 if (snapshot_entry != NULL) { 5024 /* If the snapshot has clones, we cannot remove it */ 5025 if (!TAILQ_EMPTY(&snapshot_entry->clones)) { 5026 SPDK_ERRLOG("Cannot remove snapshot with clones\n"); 5027 cb_fn(cb_arg, -EBUSY); 5028 return; 5029 } 5030 } 5031 5032 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5033 cpl.u.blob_basic.cb_fn = cb_fn; 5034 cpl.u.blob_basic.cb_arg = cb_arg; 5035 5036 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 5037 if (!seq) { 5038 cb_fn(cb_arg, -ENOMEM); 5039 return; 5040 } 5041 5042 spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq); 5043 } 5044 5045 /* END spdk_bs_delete_blob */ 5046 5047 /* START spdk_bs_open_blob */ 5048 5049 static void 5050 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5051 { 5052 struct spdk_blob *blob = cb_arg; 5053 5054 /* If the blob has a CRC error, just return NULL.
*/ 5055 if (blob == NULL) { 5056 seq->cpl.u.blob_handle.blob = NULL; 5057 spdk_bs_sequence_finish(seq, bserrno); 5058 return; 5059 } 5060 5061 blob->open_ref++; 5062 5063 TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link); 5064 5065 spdk_bs_sequence_finish(seq, bserrno); 5066 } 5067 5068 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 5069 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5070 { 5071 struct spdk_blob *blob; 5072 struct spdk_bs_cpl cpl; 5073 spdk_bs_sequence_t *seq; 5074 uint32_t page_num; 5075 5076 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid); 5077 assert(spdk_get_thread() == bs->md_thread); 5078 5079 page_num = _spdk_bs_blobid_to_page(blobid); 5080 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 5081 /* Invalid blobid */ 5082 cb_fn(cb_arg, NULL, -ENOENT); 5083 return; 5084 } 5085 5086 blob = _spdk_blob_lookup(bs, blobid); 5087 if (blob) { 5088 blob->open_ref++; 5089 cb_fn(cb_arg, blob, 0); 5090 return; 5091 } 5092 5093 blob = _spdk_blob_alloc(bs, blobid); 5094 if (!blob) { 5095 cb_fn(cb_arg, NULL, -ENOMEM); 5096 return; 5097 } 5098 5099 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 5100 cpl.u.blob_handle.cb_fn = cb_fn; 5101 cpl.u.blob_handle.cb_arg = cb_arg; 5102 cpl.u.blob_handle.blob = blob; 5103 5104 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 5105 if (!seq) { 5106 _spdk_blob_free(blob); 5107 cb_fn(cb_arg, NULL, -ENOMEM); 5108 return; 5109 } 5110 5111 _spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob); 5112 } 5113 /* END spdk_bs_open_blob */ 5114 5115 /* START spdk_blob_set_read_only */ 5116 int spdk_blob_set_read_only(struct spdk_blob *blob) 5117 { 5118 _spdk_blob_verify_md_op(blob); 5119 5120 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 5121 5122 blob->state = SPDK_BLOB_STATE_DIRTY; 5123 return 0; 5124 } 5125 /* END spdk_blob_set_read_only */ 5126 5127 /* START spdk_blob_sync_md */ 5128 5129 static void 5130 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5131 { 5132 struct spdk_blob *blob = cb_arg; 5133 5134 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 5135 blob->data_ro = true; 5136 blob->md_ro = true; 5137 } 5138 5139 spdk_bs_sequence_finish(seq, bserrno); 5140 } 5141 5142 static void 5143 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5144 { 5145 struct spdk_bs_cpl cpl; 5146 spdk_bs_sequence_t *seq; 5147 5148 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5149 cpl.u.blob_basic.cb_fn = cb_fn; 5150 cpl.u.blob_basic.cb_arg = cb_arg; 5151 5152 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 5153 if (!seq) { 5154 cb_fn(cb_arg, -ENOMEM); 5155 return; 5156 } 5157 5158 _spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob); 5159 } 5160 5161 void 5162 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5163 { 5164 _spdk_blob_verify_md_op(blob); 5165 5166 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id); 5167 5168 if (blob->md_ro) { 5169 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 5170 cb_fn(cb_arg, 0); 5171 return; 5172 } 5173 5174 _spdk_blob_sync_md(blob, cb_fn, cb_arg); 5175 } 5176 5177 /* END spdk_blob_sync_md */ 5178 5179 struct spdk_blob_insert_cluster_ctx { 5180 struct spdk_thread *thread; 5181 struct spdk_blob *blob; 5182 uint32_t cluster_num; /* cluster index in blob */ 5183 uint32_t cluster; /* cluster on disk */ 5184 int rc; 5185 spdk_blob_op_complete cb_fn; 5186 void *cb_arg; 5187 }; 5188 5189 static void 5190 _spdk_blob_insert_cluster_msg_cpl(void *arg) 5191 
{ 5192 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5193 5194 ctx->cb_fn(ctx->cb_arg, ctx->rc); 5195 free(ctx); 5196 } 5197 5198 static void 5199 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno) 5200 { 5201 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5202 5203 ctx->rc = bserrno; 5204 spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx); 5205 } 5206 5207 static void 5208 _spdk_blob_insert_cluster_msg(void *arg) 5209 { 5210 struct spdk_blob_insert_cluster_ctx *ctx = arg; 5211 5212 ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 5213 if (ctx->rc != 0) { 5214 spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx); 5215 return; 5216 } 5217 5218 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 5219 _spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx); 5220 } 5221 5222 static void 5223 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 5224 uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg) 5225 { 5226 struct spdk_blob_insert_cluster_ctx *ctx; 5227 5228 ctx = calloc(1, sizeof(*ctx)); 5229 if (ctx == NULL) { 5230 cb_fn(cb_arg, -ENOMEM); 5231 return; 5232 } 5233 5234 ctx->thread = spdk_get_thread(); 5235 ctx->blob = blob; 5236 ctx->cluster_num = cluster_num; 5237 ctx->cluster = cluster; 5238 ctx->cb_fn = cb_fn; 5239 ctx->cb_arg = cb_arg; 5240 5241 spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx); 5242 } 5243 5244 /* START spdk_blob_close */ 5245 5246 static void 5247 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5248 { 5249 struct spdk_blob *blob = cb_arg; 5250 5251 if (bserrno == 0) { 5252 blob->open_ref--; 5253 if (blob->open_ref == 0) { 5254 /* 5255 * Blobs with active.num_pages == 0 are deleted blobs. 5256 * these blobs are removed from the blob_store list 5257 * when the deletion process starts - so don't try to 5258 * remove them again. 
5259 */ 5260 if (blob->active.num_pages > 0) { 5261 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 5262 } 5263 _spdk_blob_free(blob); 5264 } 5265 } 5266 5267 spdk_bs_sequence_finish(seq, bserrno); 5268 } 5269 5270 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 5271 { 5272 struct spdk_bs_cpl cpl; 5273 spdk_bs_sequence_t *seq; 5274 5275 _spdk_blob_verify_md_op(blob); 5276 5277 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id); 5278 5279 if (blob->open_ref == 0) { 5280 cb_fn(cb_arg, -EBADF); 5281 return; 5282 } 5283 5284 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5285 cpl.u.blob_basic.cb_fn = cb_fn; 5286 cpl.u.blob_basic.cb_arg = cb_arg; 5287 5288 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 5289 if (!seq) { 5290 cb_fn(cb_arg, -ENOMEM); 5291 return; 5292 } 5293 5294 /* Sync metadata */ 5295 _spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob); 5296 } 5297 5298 /* END spdk_blob_close */ 5299 5300 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 5301 { 5302 return spdk_get_io_channel(bs); 5303 } 5304 5305 void spdk_bs_free_io_channel(struct spdk_io_channel *channel) 5306 { 5307 spdk_put_io_channel(channel); 5308 } 5309 5310 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 5311 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 5312 { 5313 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 5314 SPDK_BLOB_UNMAP); 5315 } 5316 5317 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 5318 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 5319 { 5320 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 5321 SPDK_BLOB_WRITE_ZEROES); 5322 } 5323 5324 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 5325 void *payload, uint64_t offset, uint64_t length, 5326 spdk_blob_op_complete cb_fn, void *cb_arg) 5327 { 5328 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 5329 SPDK_BLOB_WRITE); 5330 } 5331 5332 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 5333 void *payload, uint64_t offset, uint64_t length, 5334 spdk_blob_op_complete cb_fn, void *cb_arg) 5335 { 5336 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 5337 SPDK_BLOB_READ); 5338 } 5339 5340 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 5341 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 5342 spdk_blob_op_complete cb_fn, void *cb_arg) 5343 { 5344 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false); 5345 } 5346 5347 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 5348 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 5349 spdk_blob_op_complete cb_fn, void *cb_arg) 5350 { 5351 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true); 5352 } 5353 5354 struct spdk_bs_iter_ctx { 5355 int64_t page_num; 5356 struct spdk_blob_store *bs; 5357 5358 spdk_blob_op_with_handle_complete cb_fn; 5359 void *cb_arg; 5360 }; 5361 5362 static void 5363 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5364 { 5365 struct spdk_bs_iter_ctx *ctx = cb_arg; 5366 struct spdk_blob_store *bs = ctx->bs; 5367 spdk_blob_id id; 5368 5369 if (bserrno == 0) { 5370 
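		/* A blob was opened successfully - deliver it to the caller, who resumes the walk with spdk_bs_iter_next(). */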
ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 5371 free(ctx); 5372 return; 5373 } 5374 5375 ctx->page_num++; 5376 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 5377 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 5378 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 5379 free(ctx); 5380 return; 5381 } 5382 5383 id = _spdk_bs_page_to_blobid(ctx->page_num); 5384 5385 spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx); 5386 } 5387 5388 void 5389 spdk_bs_iter_first(struct spdk_blob_store *bs, 5390 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5391 { 5392 struct spdk_bs_iter_ctx *ctx; 5393 5394 ctx = calloc(1, sizeof(*ctx)); 5395 if (!ctx) { 5396 cb_fn(cb_arg, NULL, -ENOMEM); 5397 return; 5398 } 5399 5400 ctx->page_num = -1; 5401 ctx->bs = bs; 5402 ctx->cb_fn = cb_fn; 5403 ctx->cb_arg = cb_arg; 5404 5405 _spdk_bs_iter_cpl(ctx, NULL, -1); 5406 } 5407 5408 static void 5409 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno) 5410 { 5411 struct spdk_bs_iter_ctx *ctx = cb_arg; 5412 5413 _spdk_bs_iter_cpl(ctx, NULL, -1); 5414 } 5415 5416 void 5417 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 5418 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 5419 { 5420 struct spdk_bs_iter_ctx *ctx; 5421 5422 assert(blob != NULL); 5423 5424 ctx = calloc(1, sizeof(*ctx)); 5425 if (!ctx) { 5426 cb_fn(cb_arg, NULL, -ENOMEM); 5427 return; 5428 } 5429 5430 ctx->page_num = _spdk_bs_blobid_to_page(blob->id); 5431 ctx->bs = bs; 5432 ctx->cb_fn = cb_fn; 5433 ctx->cb_arg = cb_arg; 5434 5435 /* Close the existing blob */ 5436 spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx); 5437 } 5438 5439 static int 5440 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 5441 uint16_t value_len, bool internal) 5442 { 5443 struct spdk_xattr_tailq *xattrs; 5444 struct spdk_xattr *xattr; 5445 5446 _spdk_blob_verify_md_op(blob); 5447 5448 if (blob->md_ro) { 5449 return -EPERM; 5450 } 5451 5452 if (internal) { 5453 xattrs = &blob->xattrs_internal; 5454 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR; 5455 } else { 5456 xattrs = &blob->xattrs; 5457 } 5458 5459 TAILQ_FOREACH(xattr, xattrs, link) { 5460 if (!strcmp(name, xattr->name)) { 5461 free(xattr->value); 5462 xattr->value_len = value_len; 5463 xattr->value = malloc(value_len); 5464 memcpy(xattr->value, value, value_len); 5465 5466 blob->state = SPDK_BLOB_STATE_DIRTY; 5467 5468 return 0; 5469 } 5470 } 5471 5472 xattr = calloc(1, sizeof(*xattr)); 5473 if (!xattr) { 5474 return -ENOMEM; 5475 } 5476 xattr->name = strdup(name); 5477 xattr->value_len = value_len; 5478 xattr->value = malloc(value_len); 5479 memcpy(xattr->value, value, value_len); 5480 TAILQ_INSERT_TAIL(xattrs, xattr, link); 5481 5482 blob->state = SPDK_BLOB_STATE_DIRTY; 5483 5484 return 0; 5485 } 5486 5487 int 5488 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 5489 uint16_t value_len) 5490 { 5491 return _spdk_blob_set_xattr(blob, name, value, value_len, false); 5492 } 5493 5494 static int 5495 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal) 5496 { 5497 struct spdk_xattr_tailq *xattrs; 5498 struct spdk_xattr *xattr; 5499 5500 _spdk_blob_verify_md_op(blob); 5501 5502 if (blob->md_ro) { 5503 return -EPERM; 5504 } 5505 xattrs = internal ? 
&blob->xattrs_internal : &blob->xattrs; 5506 5507 TAILQ_FOREACH(xattr, xattrs, link) { 5508 if (!strcmp(name, xattr->name)) { 5509 TAILQ_REMOVE(xattrs, xattr, link); 5510 free(xattr->value); 5511 free(xattr->name); 5512 free(xattr); 5513 5514 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 5515 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 5516 } 5517 blob->state = SPDK_BLOB_STATE_DIRTY; 5518 5519 return 0; 5520 } 5521 } 5522 5523 return -ENOENT; 5524 } 5525 5526 int 5527 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 5528 { 5529 return _spdk_blob_remove_xattr(blob, name, false); 5530 } 5531 5532 static int 5533 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 5534 const void **value, size_t *value_len, bool internal) 5535 { 5536 struct spdk_xattr *xattr; 5537 struct spdk_xattr_tailq *xattrs; 5538 5539 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 5540 5541 TAILQ_FOREACH(xattr, xattrs, link) { 5542 if (!strcmp(name, xattr->name)) { 5543 *value = xattr->value; 5544 *value_len = xattr->value_len; 5545 return 0; 5546 } 5547 } 5548 return -ENOENT; 5549 } 5550 5551 int 5552 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 5553 const void **value, size_t *value_len) 5554 { 5555 _spdk_blob_verify_md_op(blob); 5556 5557 return _spdk_blob_get_xattr_value(blob, name, value, value_len, false); 5558 } 5559 5560 struct spdk_xattr_names { 5561 uint32_t count; 5562 const char *names[0]; 5563 }; 5564 5565 static int 5566 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 5567 { 5568 struct spdk_xattr *xattr; 5569 int count = 0; 5570 5571 TAILQ_FOREACH(xattr, xattrs, link) { 5572 count++; 5573 } 5574 5575 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 5576 if (*names == NULL) { 5577 return -ENOMEM; 5578 } 5579 5580 TAILQ_FOREACH(xattr, xattrs, link) { 5581 (*names)->names[(*names)->count++] = xattr->name; 5582 } 5583 5584 return 0; 5585 } 5586 5587 int 5588 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 5589 { 5590 _spdk_blob_verify_md_op(blob); 5591 5592 return _spdk_blob_get_xattr_names(&blob->xattrs, names); 5593 } 5594 5595 uint32_t 5596 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 5597 { 5598 assert(names != NULL); 5599 5600 return names->count; 5601 } 5602 5603 const char * 5604 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 5605 { 5606 if (index >= names->count) { 5607 return NULL; 5608 } 5609 5610 return names->names[index]; 5611 } 5612 5613 void 5614 spdk_xattr_names_free(struct spdk_xattr_names *names) 5615 { 5616 free(names); 5617 } 5618 5619 struct spdk_bs_type 5620 spdk_bs_get_bstype(struct spdk_blob_store *bs) 5621 { 5622 return bs->bstype; 5623 } 5624 5625 void 5626 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 5627 { 5628 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 5629 } 5630 5631 bool 5632 spdk_blob_is_read_only(struct spdk_blob *blob) 5633 { 5634 assert(blob != NULL); 5635 return (blob->data_ro || blob->md_ro); 5636 } 5637 5638 bool 5639 spdk_blob_is_snapshot(struct spdk_blob *blob) 5640 { 5641 struct spdk_blob_list *snapshot_entry; 5642 5643 assert(blob != NULL); 5644 5645 TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) { 5646 if (snapshot_entry->id == blob->id) { 5647 break; 5648 } 5649 } 5650 5651 if (snapshot_entry == NULL) { 5652 return false; 5653 } 5654 5655 return true; 5656 } 5657 5658 bool 5659 
spdk_blob_is_clone(struct spdk_blob *blob) 5660 { 5661 assert(blob != NULL); 5662 5663 if (blob->parent_id != SPDK_BLOBID_INVALID) { 5664 assert(spdk_blob_is_thin_provisioned(blob)); 5665 return true; 5666 } 5667 5668 return false; 5669 } 5670 5671 bool 5672 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 5673 { 5674 assert(blob != NULL); 5675 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 5676 } 5677 5678 spdk_blob_id 5679 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 5680 { 5681 struct spdk_blob_list *snapshot_entry = NULL; 5682 struct spdk_blob_list *clone_entry = NULL; 5683 5684 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5685 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 5686 if (clone_entry->id == blob_id) { 5687 return snapshot_entry->id; 5688 } 5689 } 5690 } 5691 5692 return SPDK_BLOBID_INVALID; 5693 } 5694 5695 int 5696 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 5697 size_t *count) 5698 { 5699 struct spdk_blob_list *snapshot_entry, *clone_entry; 5700 size_t n; 5701 5702 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 5703 if (snapshot_entry->id == blobid) { 5704 break; 5705 } 5706 } 5707 if (snapshot_entry == NULL) { 5708 *count = 0; 5709 return 0; 5710 } 5711 5712 if (ids == NULL || *count < snapshot_entry->clone_count) { 5713 *count = snapshot_entry->clone_count; 5714 return -ENOMEM; 5715 } 5716 *count = snapshot_entry->clone_count; 5717 5718 n = 0; 5719 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 5720 ids[n++] = clone_entry->id; 5721 } 5722 5723 return 0; 5724 } 5725 5726 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB) 5727
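/*
 * A minimal sketch of walking every blob with the iterator API defined above
 * (the callback name and its argument are illustrative only, not part of this
 * file). Iteration is asynchronous: each step either delivers an open blob or
 * finishes with -ENOENT once used_blobids has no more bits set, and
 * spdk_bs_iter_next() closes the previous blob before opening the next one.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *
 *		if (bserrno != 0) {
 *			return;
 *		}
 *		SPDK_NOTICELOG("blob 0x%" PRIx64 ", %" PRIu64 " clusters\n",
 *			       spdk_blob_get_id(blob), spdk_blob_get_num_clusters(blob));
 *		spdk_bs_iter_next(bs, blob, iter_cb, bs);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, bs);
 */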