/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, spdk_blob_op_complete cb_fn, void *cb_arg);

static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
				uint16_t value_len, bool internal);
static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				      const void **value, size_t *value_len, bool internal);
static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				     spdk_blob_op_complete cb_fn, void *cb_arg);

static void
_spdk_blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
_spdk_bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

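/*
 * Bookkeeping helpers for blobstore metadata pages and clusters. Both are
 * tracked in bit arrays on the blobstore (used_md_pages and used_clusters);
 * the asserts below catch double claims and double releases.
 */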
static void
_spdk_bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
_spdk_bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static void
_spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
	assert(bs->num_free_clusters > 0);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);

	spdk_bit_array_set(bs->used_clusters, cluster_num);
	bs->num_free_clusters--;
}

static int
_spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	_spdk_blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

static int
_spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
			  uint64_t *lowest_free_cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = 0;

	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
			       *lowest_free_cluster);
	if (*lowest_free_cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request */
		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = _spdk_bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
				return -ENOSPC;
			}
			_spdk_bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);

	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);

	if (update_map) {
		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
_spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);

	pthread_mutex_lock(&bs->used_clusters_mutex);
	spdk_bit_array_clear(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
	pthread_mutex_unlock(&bs->used_clusters_mutex);
}

static void
_spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	opts->num_clusters = 0;
	opts->thin_provision = false;
	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
	_spdk_blob_xattrs_init(&opts->xattrs);
	opts->use_extent_table = true;
}

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts)
{
	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
}

static struct spdk_blob *
_spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);

	return blob;
}

static void
_spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
_spdk_blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	_spdk_xattrs_free(&blob->xattrs);
	_spdk_xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
_spdk_blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			spdk_bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
_spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static void
_spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static int
_spdk_blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

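/*
 * Rebuild an in-memory xattr from its on-disk descriptor. The descriptor holds
 * the name length, the value length, and then the name and value bytes back to
 * back; the descriptor length is validated before anything is copied.
 */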
static int
_spdk_blob_deserialize_xattr(struct spdk_blob *blob,
			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}

static int
_spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* Extent Table already present in the md,
				 * both descriptors should never be present at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_array_get(blob->bs->used_clusters,
									desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD,
				 * both should never be present at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match number
				 * from previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			blob->extent_table_found = true;

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.extent_pages = tmp;
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;
			unsigned int cluster_count = 0;
			size_t cluster_idx_length;

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD,
				 * both should never be present at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_array_get(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should match
			 * the current size of the blob.
			 * If changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
_spdk_blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (_spdk_bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return _spdk_blob_parse_page(extent_page, blob);
}

static int
_spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
		 struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = _spdk_blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
_spdk_blob_serialize_add_page(const struct spdk_blob *blob,
			      struct spdk_blob_md_page **pages,
			      uint32_t *page_count,
			      struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page;

	assert(pages != NULL);
	assert(page_count != NULL);

	if (*page_count == 0) {
		assert(*pages == NULL);
		*page_count = 1;
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	} else {
		assert(*pages != NULL);
		(*page_count)++;
		*pages = spdk_realloc(*pages,
				      SPDK_BS_PAGE_SIZE * (*page_count),
				      SPDK_BS_PAGE_SIZE);
	}

	if (*pages == NULL) {
		*page_count = 0;
		*last_page = NULL;
		return -ENOMEM;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
_spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
			   uint8_t *buf, size_t buf_sz,
			   size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
_spdk_blob_serialize_extent_table_entry(const struct spdk_blob *blob,
					uint64_t start_ep, uint64_t *next_ep,
					uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}

static int
_spdk_blob_serialize_extent_table(const struct spdk_blob *blob,
				  struct spdk_blob_md_page **pages,
				  struct spdk_blob_md_page *cur_page,
				  uint32_t *page_count, uint8_t **buf,
				  size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs when num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		_spdk_blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
							remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
_spdk_blob_serialize_extent_rle(const struct spdk_blob *blob,
				uint64_t start_cluster, uint64_t *next_cluster,
				uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
_spdk_blob_serialize_extents_rle(const struct spdk_blob *blob,
				 struct spdk_blob_md_page **pages,
				 struct spdk_blob_md_page *cur_page,
				 uint32_t *page_count, uint8_t **buf,
				 size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		_spdk_blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
_spdk_blob_serialize_extent_page(const struct spdk_blob *blob,
				 uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
_spdk_blob_serialize_flags(const struct spdk_blob *blob,
			   uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
_spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
			    const struct spdk_xattr_tailq *xattrs, bool internal,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = _spdk_blob_serialize_xattr(xattr,
						*buf, *remaining_sz,
						&required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
							   &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = _spdk_blob_serialize_xattr(xattr,
							*buf, *remaining_sz,
							&required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
_spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
		     uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = _spdk_blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = _spdk_blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static uint32_t
_spdk_blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
_spdk_blob_load_final(void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		_spdk_blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot fail\n");
	}

	_spdk_blob_load_final(ctx, bserrno);
}

static void _spdk_blob_update_clear_method(struct spdk_blob *blob);

static void
_spdk_blob_load_backing_dev(void *cb_arg)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	const void			*value;
	size_t				len;
	int				rc;

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				_spdk_blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  _spdk_blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = spdk_bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	_spdk_blob_load_final(ctx, 0);
}

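/*
 * Completion for reading a single extent page while loading a blob. The first
 * call only allocates the page buffer; later calls verify the CRC of the page
 * just read and parse it. The loop below issues a read for the next allocated
 * extent page, while unallocated extent pages (entry 0) simply grow the
 * cluster array of a thin provisioned blob. Once all extent pages are handled,
 * loading continues with the backing device setup.
 */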
static void
_spdk_blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	uint64_t			i;
	uint32_t			crc;
	uint64_t			lba;
	void				*tmp;
	uint64_t			sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		_spdk_blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE, NULL, SPDK_ENV_SOCKET_ID_ANY,
					  SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			_spdk_blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = _spdk_blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			_spdk_blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			_spdk_blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = _spdk_blob_parse_extent_page(page, blob);
		if (bserrno) {
			_spdk_blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = _spdk_bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
						  _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
						  _spdk_blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				_spdk_blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	_spdk_blob_load_backing_dev(ctx);
}

static void
_spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	int				rc;
	uint32_t			crc;

	if (bserrno) {
		SPDK_ERRLOG("Metadata page read failed: %d\n", bserrno);
		_spdk_blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = _spdk_blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
		_spdk_blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		uint32_t next_page = page->next;
		uint64_t next_lba = _spdk_bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		ctx->num_pages++;
		ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
					  sizeof(*page));
		if (ctx->pages == NULL) {
			_spdk_blob_load_final(ctx, -ENOMEM);
			return;
		}

		spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
					  next_lba,
					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
					  _spdk_blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		_spdk_blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support
		 * for the extent table. No extent_* descriptors means that the blob has a length of 0
		 * and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	_spdk_blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		_spdk_blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		_spdk_blob_load_backing_dev(ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
_spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	_spdk_blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = _spdk_bs_blobid_to_page(blob->id);
	lba = _spdk_bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
				  _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
				  _spdk_blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_bs_super_block	*super;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static void
spdk_bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
			uint32_t lba_count)
{
	switch (ctx->blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

static void
_spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		_spdk_blob_mark_clean(blob);
	}

	/* Call user callback */
	ctx->cb_fn(seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			_spdk_bs_release_cluster(bs, cluster_num);
		}
	}

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;

		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	/* TODO: Add path to persist clear extent pages. */
	_spdk_blob_persist_complete(seq, ctx, bserrno);
}

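/*
 * Batch up clear operations (unmap or write_zeroes, per the blob's
 * clear_method) for every contiguous run of clusters that was truncated off
 * the blob. The batch completion above then releases those clusters.
 */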
static void
_spdk_blob_persist_clear_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	spdk_bs_batch_t			*batch;
	size_t				i;
	uint64_t			lba;
	uint32_t			lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_clear_clusters_cpl, ctx);

	/* Clear all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, clear them now */
		if (lba_count > 0) {
			spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, clear them now */
	if (lba_count > 0) {
		spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static void
_spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		_spdk_bs_release_md_page(bs, blob->clean.pages[i]);
	}

	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		page_num = _spdk_bs_blobid_to_page(blob->id);
		_spdk_bs_release_md_page(bs, page_num);
	}

	/* Move on to clearing clusters */
	_spdk_blob_persist_clear_clusters(seq, ctx, 0);
}

static void
_spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	uint64_t			lba;
	uint32_t			lba_count;
	spdk_bs_batch_t			*batch;
	size_t				i;

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);

	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		lba = _spdk_bs_md_page_to_lba(bs, blob->clean.pages[i]);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	/* The first page will only be zeroed if this is a delete. */
	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		/* The first page in the metadata goes where the blobid indicates */
		page_num = _spdk_bs_blobid_to_page(blob->id);
		lba = _spdk_bs_md_page_to_lba(bs, page_num);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static void
_spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	uint64_t			lba;
	uint32_t			lba_count;
	struct spdk_blob_md_page	*page;

	if (blob->active.num_pages == 0) {
		/* Move on to the next step */
		_spdk_blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));

	page = &ctx->pages[0];
	/* The first page in the metadata goes where the blobid indicates */
	lba = _spdk_bs_md_page_to_lba(bs, _spdk_bs_blobid_to_page(blob->id));

	spdk_bs_sequence_write_dev(seq, page, lba, lba_count,
				   _spdk_blob_persist_zero_pages, ctx);
}

static void
_spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	uint64_t			lba;
	uint32_t			lba_count;
	struct spdk_blob_md_page	*page;
	spdk_bs_batch_t			*batch;
	size_t				i;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);

	/* This starts at 1. The root page is not written until
	 * all of the others are finished
	 */
	for (i = 1; i < blob->active.num_pages; i++) {
		page = &ctx->pages[i];
		assert(page->sequence_num == i);

		lba = _spdk_bs_md_page_to_lba(bs, blob->active.pages[i]);

		spdk_bs_batch_write_dev(batch, page, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static int
_spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
{
	uint64_t	i;
	uint64_t	*tmp;
	uint64_t	lfc;	/* lowest free cluster */
	uint32_t	lfmd;	/* lowest free md page */
	uint64_t	num_clusters;
	uint32_t	*ep_tmp;
	uint64_t	new_num_ep = 0, current_num_ep = 0;
	struct spdk_blob_store *bs;

	bs = blob->bs;

	_spdk_blob_verify_md_op(blob);

	if (blob->active.num_clusters == sz) {
		return 0;
	}

	if (blob->active.num_clusters < blob->active.cluster_array_size) {
		/* If this blob was resized to be larger, then smaller, then
		 * larger without syncing, then the cluster array already
		 * contains spare assigned clusters we can use.
		 */
		num_clusters = spdk_min(blob->active.cluster_array_size,
					sz);
	} else {
		num_clusters = blob->active.num_clusters;
	}

	if (blob->use_extent_table) {
		/* Round up since every cluster beyond the current Extent Table size
		 * requires a new extent page. */
		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and md pages, another to actually claim them.
	 */

	if (spdk_blob_is_thin_provisioned(blob) == false) {
		lfc = 0;
		for (i = num_clusters; i < sz; i++) {
			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
			if (lfc == UINT32_MAX) {
				/* No more free clusters. Cannot satisfy the request */
				return -ENOSPC;
			}
			lfc++;
		}
		lfmd = 0;
		for (i = current_num_ep; i < new_num_ep; i++) {
			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
			if (lfmd == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				return -ENOSPC;
			}
		}
	}

	if (sz > num_clusters) {
		/* Expand the cluster array if necessary.
		 * We only shrink the array when persisting.
1850 */ 1851 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 1852 if (sz > 0 && tmp == NULL) { 1853 return -ENOMEM; 1854 } 1855 memset(tmp + blob->active.cluster_array_size, 0, 1856 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 1857 blob->active.clusters = tmp; 1858 blob->active.cluster_array_size = sz; 1859 1860 /* Expand the extents table, only if enough clusters were added */ 1861 if (new_num_ep > current_num_ep && blob->use_extent_table) { 1862 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 1863 if (new_num_ep > 0 && ep_tmp == NULL) { 1864 return -ENOMEM; 1865 } 1866 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 1867 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 1868 blob->active.extent_pages = ep_tmp; 1869 blob->active.extent_pages_array_size = new_num_ep; 1870 } 1871 } 1872 1873 blob->state = SPDK_BLOB_STATE_DIRTY; 1874 1875 if (spdk_blob_is_thin_provisioned(blob) == false) { 1876 lfc = 0; 1877 lfmd = 0; 1878 for (i = num_clusters; i < sz; i++) { 1879 _spdk_bs_allocate_cluster(blob, i, &lfc, &lfmd, true); 1880 lfc++; 1881 lfmd++; 1882 } 1883 } 1884 1885 blob->active.num_clusters = sz; 1886 blob->active.num_extent_pages = new_num_ep; 1887 1888 return 0; 1889 } 1890 1891 static void 1892 _spdk_blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 1893 { 1894 spdk_bs_sequence_t *seq = ctx->seq; 1895 struct spdk_blob *blob = ctx->blob; 1896 struct spdk_blob_store *bs = blob->bs; 1897 uint64_t i; 1898 uint32_t page_num; 1899 void *tmp; 1900 int rc; 1901 1902 /* Generate the new metadata */ 1903 rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 1904 if (rc < 0) { 1905 _spdk_blob_persist_complete(seq, ctx, rc); 1906 return; 1907 } 1908 1909 assert(blob->active.num_pages >= 1); 1910 1911 /* Resize the cache of page indices */ 1912 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 1913 if (!tmp) { 1914 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1915 return; 1916 } 1917 blob->active.pages = tmp; 1918 1919 /* Assign this metadata to pages. This requires two passes - 1920 * one to verify that there are enough pages and a second 1921 * to actually claim them. */ 1922 page_num = 0; 1923 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 1924 for (i = 1; i < blob->active.num_pages; i++) { 1925 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1926 if (page_num == UINT32_MAX) { 1927 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1928 return; 1929 } 1930 page_num++; 1931 } 1932 1933 page_num = 0; 1934 blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id); 1935 for (i = 1; i < blob->active.num_pages; i++) { 1936 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1937 ctx->pages[i - 1].next = page_num; 1938 /* Now that previous metadata page is complete, calculate the crc for it. 
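		 * The crc must be computed only after the page's next pointer has been
		 * filled in above, since next is part of the checksummed data; the last
		 * page in the chain gets its crc right after this loop.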
*/ 1939 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1940 blob->active.pages[i] = page_num; 1941 _spdk_bs_claim_md_page(bs, page_num); 1942 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id); 1943 page_num++; 1944 } 1945 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1946 /* Start writing the metadata from last page to first */ 1947 blob->state = SPDK_BLOB_STATE_CLEAN; 1948 _spdk_blob_persist_write_page_chain(seq, ctx, 0); 1949 } 1950 1951 static void _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, 1952 int bserrno); 1953 1954 static void 1955 _spdk_blob_persist_write_extent_page(uint32_t extent, uint64_t cluster_num, 1956 struct spdk_blob_persist_ctx *ctx) 1957 { 1958 spdk_bs_sequence_t *seq = ctx->seq; 1959 uint32_t page_count = 0; 1960 struct spdk_blob *blob = ctx->blob; 1961 int rc; 1962 1963 rc = _spdk_blob_serialize_add_page(blob, &ctx->extent_page, &page_count, &ctx->extent_page); 1964 if (rc < 0) { 1965 assert(false); 1966 return; 1967 } 1968 1969 _spdk_blob_serialize_extent_page(blob, cluster_num, ctx->extent_page); 1970 1971 ctx->extent_page->crc = _spdk_blob_md_page_calc_crc(ctx->extent_page); 1972 1973 spdk_bs_sequence_write_dev(seq, ctx->extent_page, _spdk_bs_md_page_to_lba(blob->bs, extent), 1974 _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 1975 _spdk_blob_persist_write_extent_pages, ctx); 1976 } 1977 1978 static void 1979 _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1980 { 1981 struct spdk_blob_persist_ctx *ctx = cb_arg; 1982 struct spdk_blob *blob = ctx->blob; 1983 size_t i; 1984 uint32_t extent_page_id; 1985 1986 if (ctx->extent_page != NULL) { 1987 spdk_free(ctx->extent_page); 1988 ctx->extent_page = NULL; 1989 } 1990 1991 /* Only write out changed extent pages */ 1992 for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) { 1993 extent_page_id = blob->active.extent_pages[i]; 1994 if (extent_page_id == 0) { 1995 /* No Extent Page to persist */ 1996 assert(spdk_blob_is_thin_provisioned(blob)); 1997 continue; 1998 } 1999 /* Writing out new extent page for the first time. Either active extent pages is larger 2000 * than clean extent pages or there was no extent page assigned due to thin provisioning. */ 2001 if (i >= blob->clean.extent_pages_array_size || blob->clean.extent_pages[i] == 0) { 2002 blob->state = SPDK_BLOB_STATE_DIRTY; 2003 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2004 ctx->next_extent_page = i + 1; 2005 _spdk_blob_persist_write_extent_page(extent_page_id, i * SPDK_EXTENTS_PER_EP, ctx); 2006 return; 2007 } 2008 assert(blob->clean.extent_pages[i] != 0); 2009 } 2010 2011 _spdk_blob_persist_generate_new_md(ctx); 2012 } 2013 2014 static void 2015 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx) 2016 { 2017 spdk_bs_sequence_t *seq = ctx->seq; 2018 struct spdk_blob *blob = ctx->blob; 2019 2020 if (blob->active.num_pages == 0) { 2021 /* This is the signal that the blob should be deleted. 2022 * Immediately jump to the clean up routine. 
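		 * Deletion is indicated by active.num_pages == 0 while the clean copy
		 * still has pages; the zero-pages step below wipes the on-disk metadata
		 * pages and then moves on to releasing the pages and clusters.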
*/ 2023 assert(blob->clean.num_pages > 0); 2024 blob->state = SPDK_BLOB_STATE_CLEAN; 2025 _spdk_blob_persist_zero_pages(seq, ctx, 0); 2026 return; 2027 2028 } 2029 2030 _spdk_blob_persist_write_extent_pages(seq, ctx, 0); 2031 } 2032 2033 static void 2034 _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2035 { 2036 struct spdk_blob_persist_ctx *ctx = cb_arg; 2037 2038 ctx->blob->bs->clean = 0; 2039 2040 spdk_free(ctx->super); 2041 2042 _spdk_blob_persist_start(ctx); 2043 } 2044 2045 static void 2046 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2047 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2048 2049 2050 static void 2051 _spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2052 { 2053 struct spdk_blob_persist_ctx *ctx = cb_arg; 2054 2055 ctx->super->clean = 0; 2056 if (ctx->super->size == 0) { 2057 ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen; 2058 } 2059 2060 _spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx); 2061 } 2062 2063 2064 /* Write a blob to disk */ 2065 static void 2066 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2067 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2068 { 2069 struct spdk_blob_persist_ctx *ctx; 2070 2071 _spdk_blob_verify_md_op(blob); 2072 2073 if (blob->state == SPDK_BLOB_STATE_CLEAN) { 2074 cb_fn(seq, cb_arg, 0); 2075 return; 2076 } 2077 2078 ctx = calloc(1, sizeof(*ctx)); 2079 if (!ctx) { 2080 cb_fn(seq, cb_arg, -ENOMEM); 2081 return; 2082 } 2083 ctx->blob = blob; 2084 ctx->seq = seq; 2085 ctx->cb_fn = cb_fn; 2086 ctx->cb_arg = cb_arg; 2087 ctx->next_extent_page = 0; 2088 2089 if (blob->bs->clean) { 2090 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2091 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2092 if (!ctx->super) { 2093 cb_fn(seq, cb_arg, -ENOMEM); 2094 free(ctx); 2095 return; 2096 } 2097 2098 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(blob->bs, 0), 2099 _spdk_bs_byte_to_lba(blob->bs, sizeof(*ctx->super)), 2100 _spdk_blob_persist_dirty, ctx); 2101 } else { 2102 _spdk_blob_persist_start(ctx); 2103 } 2104 } 2105 2106 struct spdk_blob_copy_cluster_ctx { 2107 struct spdk_blob *blob; 2108 uint8_t *buf; 2109 uint64_t page; 2110 uint64_t new_cluster; 2111 uint32_t new_extent_page; 2112 spdk_bs_sequence_t *seq; 2113 }; 2114 2115 static void 2116 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2117 { 2118 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2119 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2120 TAILQ_HEAD(, spdk_bs_request_set) requests; 2121 spdk_bs_user_op_t *op; 2122 2123 TAILQ_INIT(&requests); 2124 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2125 2126 while (!TAILQ_EMPTY(&requests)) { 2127 op = TAILQ_FIRST(&requests); 2128 TAILQ_REMOVE(&requests, op, link); 2129 if (bserrno == 0) { 2130 spdk_bs_user_op_execute(op); 2131 } else { 2132 spdk_bs_user_op_abort(op); 2133 } 2134 } 2135 2136 spdk_free(ctx->buf); 2137 free(ctx); 2138 } 2139 2140 static void 2141 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2142 { 2143 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2144 2145 if (bserrno) { 2146 if (bserrno == -EEXIST) { 2147 /* The metadata insert failed because another thread 2148 * allocated the cluster first. Free our cluster 2149 * but continue without error. 
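			 * The speculatively claimed cluster (and extent page, if one was
			 * reserved) is released just below, and the queued I/O is then
			 * re-driven against the cluster the other thread installed.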
*/ 2150 bserrno = 0; 2151 } 2152 _spdk_bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2153 if (ctx->new_extent_page != 0) { 2154 _spdk_bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2155 } 2156 } 2157 2158 spdk_bs_sequence_finish(ctx->seq, bserrno); 2159 } 2160 2161 static void 2162 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2163 { 2164 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2165 uint32_t cluster_number; 2166 2167 if (bserrno) { 2168 /* The write failed, so jump to the final completion handler */ 2169 spdk_bs_sequence_finish(seq, bserrno); 2170 return; 2171 } 2172 2173 cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page); 2174 2175 _spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2176 ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx); 2177 } 2178 2179 static void 2180 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2181 { 2182 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2183 2184 if (bserrno != 0) { 2185 /* The read failed, so jump to the final completion handler */ 2186 spdk_bs_sequence_finish(seq, bserrno); 2187 return; 2188 } 2189 2190 /* Write whole cluster */ 2191 spdk_bs_sequence_write_dev(seq, ctx->buf, 2192 _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2193 _spdk_bs_cluster_to_lba(ctx->blob->bs, 1), 2194 _spdk_blob_write_copy_cpl, ctx); 2195 } 2196 2197 static void 2198 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2199 struct spdk_io_channel *_ch, 2200 uint64_t io_unit, spdk_bs_user_op_t *op) 2201 { 2202 struct spdk_bs_cpl cpl; 2203 struct spdk_bs_channel *ch; 2204 struct spdk_blob_copy_cluster_ctx *ctx; 2205 uint32_t cluster_start_page; 2206 uint32_t cluster_number; 2207 int rc; 2208 2209 ch = spdk_io_channel_get_ctx(_ch); 2210 2211 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2212 /* There are already operations pending. Queue this user op 2213 * and return because it will be re-executed when the outstanding 2214 * cluster allocation completes. */ 2215 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2216 return; 2217 } 2218 2219 /* Round the io_unit offset down to the first page in the cluster */ 2220 cluster_start_page = _spdk_bs_io_unit_to_cluster_start(blob, io_unit); 2221 2222 /* Calculate which index in the metadata cluster array the corresponding 2223 * cluster is supposed to be at. 
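	 * For example, with a 1 MiB cluster and a 512-byte io unit (2048 io units
	 * per cluster), io_unit 5000 maps to cluster index 2.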
*/ 2224 cluster_number = _spdk_bs_io_unit_to_cluster_number(blob, io_unit); 2225 2226 ctx = calloc(1, sizeof(*ctx)); 2227 if (!ctx) { 2228 spdk_bs_user_op_abort(op); 2229 return; 2230 } 2231 2232 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2233 2234 ctx->blob = blob; 2235 ctx->page = cluster_start_page; 2236 2237 if (blob->parent_id != SPDK_BLOBID_INVALID) { 2238 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2239 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2240 if (!ctx->buf) { 2241 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2242 blob->bs->cluster_sz); 2243 free(ctx); 2244 spdk_bs_user_op_abort(op); 2245 return; 2246 } 2247 } 2248 2249 rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2250 false); 2251 if (rc != 0) { 2252 spdk_free(ctx->buf); 2253 free(ctx); 2254 spdk_bs_user_op_abort(op); 2255 return; 2256 } 2257 2258 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2259 cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl; 2260 cpl.u.blob_basic.cb_arg = ctx; 2261 2262 ctx->seq = spdk_bs_sequence_start(_ch, &cpl); 2263 if (!ctx->seq) { 2264 _spdk_bs_release_cluster(blob->bs, ctx->new_cluster); 2265 spdk_free(ctx->buf); 2266 free(ctx); 2267 spdk_bs_user_op_abort(op); 2268 return; 2269 } 2270 2271 /* Queue the user op to block other incoming operations */ 2272 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2273 2274 if (blob->parent_id != SPDK_BLOBID_INVALID) { 2275 /* Read cluster from backing device */ 2276 spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2277 _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2278 _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2279 _spdk_blob_write_copy, ctx); 2280 } else { 2281 _spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2282 ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx); 2283 } 2284 } 2285 2286 static void 2287 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2288 uint64_t *lba, uint32_t *lba_count) 2289 { 2290 *lba_count = length; 2291 2292 if (!_spdk_bs_io_unit_is_allocated(blob, io_unit)) { 2293 assert(blob->back_bs_dev != NULL); 2294 *lba = _spdk_bs_io_unit_to_back_dev_lba(blob, io_unit); 2295 *lba_count = _spdk_bs_io_unit_to_back_dev_lba(blob, *lba_count); 2296 } else { 2297 *lba = _spdk_bs_blob_io_unit_to_lba(blob, io_unit); 2298 } 2299 } 2300 2301 struct op_split_ctx { 2302 struct spdk_blob *blob; 2303 struct spdk_io_channel *channel; 2304 uint64_t io_unit_offset; 2305 uint64_t io_units_remaining; 2306 void *curr_payload; 2307 enum spdk_blob_op_type op_type; 2308 spdk_bs_sequence_t *seq; 2309 }; 2310 2311 static void 2312 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2313 { 2314 struct op_split_ctx *ctx = cb_arg; 2315 struct spdk_blob *blob = ctx->blob; 2316 struct spdk_io_channel *ch = ctx->channel; 2317 enum spdk_blob_op_type op_type = ctx->op_type; 2318 uint8_t *buf = ctx->curr_payload; 2319 uint64_t offset = ctx->io_unit_offset; 2320 uint64_t length = ctx->io_units_remaining; 2321 uint64_t op_length; 2322 2323 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2324 spdk_bs_sequence_finish(ctx->seq, bserrno); 2325 free(ctx); 2326 return; 2327 } 2328 2329 op_length = spdk_min(length, _spdk_bs_num_io_units_to_cluster_boundary(blob, 2330 offset)); 2331 2332 /* Update length and payload for next operation */ 2333 ctx->io_units_remaining 
-= op_length; 2334 ctx->io_unit_offset += op_length; 2335 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 2336 ctx->curr_payload += op_length * blob->bs->io_unit_size; 2337 } 2338 2339 switch (op_type) { 2340 case SPDK_BLOB_READ: 2341 spdk_blob_io_read(blob, ch, buf, offset, op_length, 2342 _spdk_blob_request_submit_op_split_next, ctx); 2343 break; 2344 case SPDK_BLOB_WRITE: 2345 spdk_blob_io_write(blob, ch, buf, offset, op_length, 2346 _spdk_blob_request_submit_op_split_next, ctx); 2347 break; 2348 case SPDK_BLOB_UNMAP: 2349 spdk_blob_io_unmap(blob, ch, offset, op_length, 2350 _spdk_blob_request_submit_op_split_next, ctx); 2351 break; 2352 case SPDK_BLOB_WRITE_ZEROES: 2353 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 2354 _spdk_blob_request_submit_op_split_next, ctx); 2355 break; 2356 case SPDK_BLOB_READV: 2357 case SPDK_BLOB_WRITEV: 2358 SPDK_ERRLOG("readv/write not valid\n"); 2359 spdk_bs_sequence_finish(ctx->seq, -EINVAL); 2360 free(ctx); 2361 break; 2362 } 2363 } 2364 2365 static void 2366 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 2367 void *payload, uint64_t offset, uint64_t length, 2368 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2369 { 2370 struct op_split_ctx *ctx; 2371 spdk_bs_sequence_t *seq; 2372 struct spdk_bs_cpl cpl; 2373 2374 assert(blob != NULL); 2375 2376 ctx = calloc(1, sizeof(struct op_split_ctx)); 2377 if (ctx == NULL) { 2378 cb_fn(cb_arg, -ENOMEM); 2379 return; 2380 } 2381 2382 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2383 cpl.u.blob_basic.cb_fn = cb_fn; 2384 cpl.u.blob_basic.cb_arg = cb_arg; 2385 2386 seq = spdk_bs_sequence_start(ch, &cpl); 2387 if (!seq) { 2388 free(ctx); 2389 cb_fn(cb_arg, -ENOMEM); 2390 return; 2391 } 2392 2393 ctx->blob = blob; 2394 ctx->channel = ch; 2395 ctx->curr_payload = payload; 2396 ctx->io_unit_offset = offset; 2397 ctx->io_units_remaining = length; 2398 ctx->op_type = op_type; 2399 ctx->seq = seq; 2400 2401 _spdk_blob_request_submit_op_split_next(ctx, 0); 2402 } 2403 2404 static void 2405 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 2406 void *payload, uint64_t offset, uint64_t length, 2407 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2408 { 2409 struct spdk_bs_cpl cpl; 2410 uint64_t lba; 2411 uint32_t lba_count; 2412 2413 assert(blob != NULL); 2414 2415 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2416 cpl.u.blob_basic.cb_fn = cb_fn; 2417 cpl.u.blob_basic.cb_arg = cb_arg; 2418 2419 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2420 2421 if (blob->frozen_refcnt) { 2422 /* This blob I/O is frozen */ 2423 spdk_bs_user_op_t *op; 2424 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 2425 2426 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2427 if (!op) { 2428 cb_fn(cb_arg, -ENOMEM); 2429 return; 2430 } 2431 2432 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2433 2434 return; 2435 } 2436 2437 switch (op_type) { 2438 case SPDK_BLOB_READ: { 2439 spdk_bs_batch_t *batch; 2440 2441 batch = spdk_bs_batch_open(_ch, &cpl); 2442 if (!batch) { 2443 cb_fn(cb_arg, -ENOMEM); 2444 return; 2445 } 2446 2447 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2448 /* Read from the blob */ 2449 spdk_bs_batch_read_dev(batch, payload, lba, lba_count); 2450 } else { 2451 /* Read from the backing block device */ 2452 spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 2453 } 2454 
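		/* In either case the read was queued on the batch above; closing the
		 * batch below lets the user completion fire once the I/O finishes. */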
2455 spdk_bs_batch_close(batch); 2456 break; 2457 } 2458 case SPDK_BLOB_WRITE: 2459 case SPDK_BLOB_WRITE_ZEROES: { 2460 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2461 /* Write to the blob */ 2462 spdk_bs_batch_t *batch; 2463 2464 if (lba_count == 0) { 2465 cb_fn(cb_arg, 0); 2466 return; 2467 } 2468 2469 batch = spdk_bs_batch_open(_ch, &cpl); 2470 if (!batch) { 2471 cb_fn(cb_arg, -ENOMEM); 2472 return; 2473 } 2474 2475 if (op_type == SPDK_BLOB_WRITE) { 2476 spdk_bs_batch_write_dev(batch, payload, lba, lba_count); 2477 } else { 2478 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 2479 } 2480 2481 spdk_bs_batch_close(batch); 2482 } else { 2483 /* Queue this operation and allocate the cluster */ 2484 spdk_bs_user_op_t *op; 2485 2486 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2487 if (!op) { 2488 cb_fn(cb_arg, -ENOMEM); 2489 return; 2490 } 2491 2492 _spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op); 2493 } 2494 break; 2495 } 2496 case SPDK_BLOB_UNMAP: { 2497 spdk_bs_batch_t *batch; 2498 2499 batch = spdk_bs_batch_open(_ch, &cpl); 2500 if (!batch) { 2501 cb_fn(cb_arg, -ENOMEM); 2502 return; 2503 } 2504 2505 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2506 spdk_bs_batch_unmap_dev(batch, lba, lba_count); 2507 } 2508 2509 spdk_bs_batch_close(batch); 2510 break; 2511 } 2512 case SPDK_BLOB_READV: 2513 case SPDK_BLOB_WRITEV: 2514 SPDK_ERRLOG("readv/write not valid\n"); 2515 cb_fn(cb_arg, -EINVAL); 2516 break; 2517 } 2518 } 2519 2520 static void 2521 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2522 void *payload, uint64_t offset, uint64_t length, 2523 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2524 { 2525 assert(blob != NULL); 2526 2527 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 2528 cb_fn(cb_arg, -EPERM); 2529 return; 2530 } 2531 2532 if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2533 cb_fn(cb_arg, -EINVAL); 2534 return; 2535 } 2536 if (length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset)) { 2537 _spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length, 2538 cb_fn, cb_arg, op_type); 2539 } else { 2540 _spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length, 2541 cb_fn, cb_arg, op_type); 2542 } 2543 } 2544 2545 struct rw_iov_ctx { 2546 struct spdk_blob *blob; 2547 struct spdk_io_channel *channel; 2548 spdk_blob_op_complete cb_fn; 2549 void *cb_arg; 2550 bool read; 2551 int iovcnt; 2552 struct iovec *orig_iov; 2553 uint64_t io_unit_offset; 2554 uint64_t io_units_remaining; 2555 uint64_t io_units_done; 2556 struct iovec iov[0]; 2557 }; 2558 2559 static void 2560 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2561 { 2562 assert(cb_arg == NULL); 2563 spdk_bs_sequence_finish(seq, bserrno); 2564 } 2565 2566 static void 2567 _spdk_rw_iov_split_next(void *cb_arg, int bserrno) 2568 { 2569 struct rw_iov_ctx *ctx = cb_arg; 2570 struct spdk_blob *blob = ctx->blob; 2571 struct iovec *iov, *orig_iov; 2572 int iovcnt; 2573 size_t orig_iovoff; 2574 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 2575 uint64_t byte_count; 2576 2577 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2578 ctx->cb_fn(ctx->cb_arg, bserrno); 2579 free(ctx); 2580 return; 2581 } 2582 2583 io_unit_offset = ctx->io_unit_offset; 2584 io_units_to_boundary = _spdk_bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 2585 io_units_count = 
spdk_min(ctx->io_units_remaining, io_units_to_boundary); 2586 /* 2587 * Get index and offset into the original iov array for our current position in the I/O sequence. 2588 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 2589 * point to the current position in the I/O sequence. 2590 */ 2591 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 2592 orig_iov = &ctx->orig_iov[0]; 2593 orig_iovoff = 0; 2594 while (byte_count > 0) { 2595 if (byte_count >= orig_iov->iov_len) { 2596 byte_count -= orig_iov->iov_len; 2597 orig_iov++; 2598 } else { 2599 orig_iovoff = byte_count; 2600 byte_count = 0; 2601 } 2602 } 2603 2604 /* 2605 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 2606 * bytes of this next I/O remain to be accounted for in the new iov array. 2607 */ 2608 byte_count = io_units_count * blob->bs->io_unit_size; 2609 iov = &ctx->iov[0]; 2610 iovcnt = 0; 2611 while (byte_count > 0) { 2612 assert(iovcnt < ctx->iovcnt); 2613 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 2614 iov->iov_base = orig_iov->iov_base + orig_iovoff; 2615 byte_count -= iov->iov_len; 2616 orig_iovoff = 0; 2617 orig_iov++; 2618 iov++; 2619 iovcnt++; 2620 } 2621 2622 ctx->io_unit_offset += io_units_count; 2623 ctx->io_units_remaining -= io_units_count; 2624 ctx->io_units_done += io_units_count; 2625 iov = &ctx->iov[0]; 2626 2627 if (ctx->read) { 2628 spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2629 io_units_count, _spdk_rw_iov_split_next, ctx); 2630 } else { 2631 spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2632 io_units_count, _spdk_rw_iov_split_next, ctx); 2633 } 2634 } 2635 2636 static void 2637 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2638 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 2639 spdk_blob_op_complete cb_fn, void *cb_arg, bool read) 2640 { 2641 struct spdk_bs_cpl cpl; 2642 2643 assert(blob != NULL); 2644 2645 if (!read && blob->data_ro) { 2646 cb_fn(cb_arg, -EPERM); 2647 return; 2648 } 2649 2650 if (length == 0) { 2651 cb_fn(cb_arg, 0); 2652 return; 2653 } 2654 2655 if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2656 cb_fn(cb_arg, -EINVAL); 2657 return; 2658 } 2659 2660 /* 2661 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 2662 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 2663 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 2664 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 2665 * to allocate a separate iov array and split the I/O such that none of the resulting 2666 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 2667 * but since this case happens very infrequently, any performance impact will be negligible. 2668 * 2669 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 2670 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 2671 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 2672 * when the batch was completed, to allow for freeing the memory for the iov arrays. 
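	 * As a concrete example of the split path, a writev that touches parts of
	 * three clusters is re-issued by _spdk_rw_iov_split_next() as three smaller
	 * writevs, each with an iov array rebuilt to cover only the io units up to
	 * the next cluster boundary.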
2673 */ 2674 if (spdk_likely(length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset))) { 2675 uint32_t lba_count; 2676 uint64_t lba; 2677 2678 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2679 cpl.u.blob_basic.cb_fn = cb_fn; 2680 cpl.u.blob_basic.cb_arg = cb_arg; 2681 2682 if (blob->frozen_refcnt) { 2683 /* This blob I/O is frozen */ 2684 enum spdk_blob_op_type op_type; 2685 spdk_bs_user_op_t *op; 2686 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 2687 2688 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 2689 op = spdk_bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 2690 if (!op) { 2691 cb_fn(cb_arg, -ENOMEM); 2692 return; 2693 } 2694 2695 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2696 2697 return; 2698 } 2699 2700 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2701 2702 if (read) { 2703 spdk_bs_sequence_t *seq; 2704 2705 seq = spdk_bs_sequence_start(_channel, &cpl); 2706 if (!seq) { 2707 cb_fn(cb_arg, -ENOMEM); 2708 return; 2709 } 2710 2711 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2712 spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2713 } else { 2714 spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 2715 _spdk_rw_iov_done, NULL); 2716 } 2717 } else { 2718 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2719 spdk_bs_sequence_t *seq; 2720 2721 seq = spdk_bs_sequence_start(_channel, &cpl); 2722 if (!seq) { 2723 cb_fn(cb_arg, -ENOMEM); 2724 return; 2725 } 2726 2727 spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2728 } else { 2729 /* Queue this operation and allocate the cluster */ 2730 spdk_bs_user_op_t *op; 2731 2732 op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 2733 length); 2734 if (!op) { 2735 cb_fn(cb_arg, -ENOMEM); 2736 return; 2737 } 2738 2739 _spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op); 2740 } 2741 } 2742 } else { 2743 struct rw_iov_ctx *ctx; 2744 2745 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 2746 if (ctx == NULL) { 2747 cb_fn(cb_arg, -ENOMEM); 2748 return; 2749 } 2750 2751 ctx->blob = blob; 2752 ctx->channel = _channel; 2753 ctx->cb_fn = cb_fn; 2754 ctx->cb_arg = cb_arg; 2755 ctx->read = read; 2756 ctx->orig_iov = iov; 2757 ctx->iovcnt = iovcnt; 2758 ctx->io_unit_offset = offset; 2759 ctx->io_units_remaining = length; 2760 ctx->io_units_done = 0; 2761 2762 _spdk_rw_iov_split_next(ctx, 0); 2763 } 2764 } 2765 2766 static struct spdk_blob * 2767 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 2768 { 2769 struct spdk_blob *blob; 2770 2771 TAILQ_FOREACH(blob, &bs->blobs, link) { 2772 if (blob->id == blobid) { 2773 return blob; 2774 } 2775 } 2776 2777 return NULL; 2778 } 2779 2780 static void 2781 _spdk_blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 2782 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 2783 { 2784 assert(blob != NULL); 2785 *snapshot_entry = NULL; 2786 *clone_entry = NULL; 2787 2788 if (blob->parent_id == SPDK_BLOBID_INVALID) { 2789 return; 2790 } 2791 2792 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 2793 if ((*snapshot_entry)->id == blob->parent_id) { 2794 break; 2795 } 2796 } 2797 2798 if (*snapshot_entry != NULL) { 2799 TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) { 2800 if ((*clone_entry)->id == blob->id) { 2801 break; 2802 } 2803 } 2804 2805 
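		/* A blob with a valid parent_id is registered on its snapshot's clone
		 * list by _spdk_bs_blob_list_add(), so the lookup above is expected to
		 * find a matching entry. */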
assert(clone_entry != NULL); 2806 } 2807 } 2808 2809 static int 2810 _spdk_bs_channel_create(void *io_device, void *ctx_buf) 2811 { 2812 struct spdk_blob_store *bs = io_device; 2813 struct spdk_bs_channel *channel = ctx_buf; 2814 struct spdk_bs_dev *dev; 2815 uint32_t max_ops = bs->max_channel_ops; 2816 uint32_t i; 2817 2818 dev = bs->dev; 2819 2820 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 2821 if (!channel->req_mem) { 2822 return -1; 2823 } 2824 2825 TAILQ_INIT(&channel->reqs); 2826 2827 for (i = 0; i < max_ops; i++) { 2828 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 2829 } 2830 2831 channel->bs = bs; 2832 channel->dev = dev; 2833 channel->dev_channel = dev->create_channel(dev); 2834 2835 if (!channel->dev_channel) { 2836 SPDK_ERRLOG("Failed to create device channel.\n"); 2837 free(channel->req_mem); 2838 return -1; 2839 } 2840 2841 TAILQ_INIT(&channel->need_cluster_alloc); 2842 TAILQ_INIT(&channel->queued_io); 2843 2844 return 0; 2845 } 2846 2847 static void 2848 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf) 2849 { 2850 struct spdk_bs_channel *channel = ctx_buf; 2851 spdk_bs_user_op_t *op; 2852 2853 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 2854 op = TAILQ_FIRST(&channel->need_cluster_alloc); 2855 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 2856 spdk_bs_user_op_abort(op); 2857 } 2858 2859 while (!TAILQ_EMPTY(&channel->queued_io)) { 2860 op = TAILQ_FIRST(&channel->queued_io); 2861 TAILQ_REMOVE(&channel->queued_io, op, link); 2862 spdk_bs_user_op_abort(op); 2863 } 2864 2865 free(channel->req_mem); 2866 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 2867 } 2868 2869 static void 2870 _spdk_bs_dev_destroy(void *io_device) 2871 { 2872 struct spdk_blob_store *bs = io_device; 2873 struct spdk_blob *blob, *blob_tmp; 2874 2875 bs->dev->destroy(bs->dev); 2876 2877 TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) { 2878 TAILQ_REMOVE(&bs->blobs, blob, link); 2879 _spdk_blob_free(blob); 2880 } 2881 2882 pthread_mutex_destroy(&bs->used_clusters_mutex); 2883 2884 spdk_bit_array_free(&bs->used_blobids); 2885 spdk_bit_array_free(&bs->used_md_pages); 2886 spdk_bit_array_free(&bs->used_clusters); 2887 /* 2888 * If this function is called for any reason except a successful unload, 2889 * the unload_cpl type will be NONE and this will be a nop. 
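	 * On a successful unload the completion stored here is invoked below with
	 * bs->unload_err as its status.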
2890 */ 2891 spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err); 2892 2893 free(bs); 2894 } 2895 2896 static int 2897 _spdk_bs_blob_list_add(struct spdk_blob *blob) 2898 { 2899 spdk_blob_id snapshot_id; 2900 struct spdk_blob_list *snapshot_entry = NULL; 2901 struct spdk_blob_list *clone_entry = NULL; 2902 2903 assert(blob != NULL); 2904 2905 snapshot_id = blob->parent_id; 2906 if (snapshot_id == SPDK_BLOBID_INVALID) { 2907 return 0; 2908 } 2909 2910 snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, snapshot_id); 2911 if (snapshot_entry == NULL) { 2912 /* Snapshot not found */ 2913 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 2914 if (snapshot_entry == NULL) { 2915 return -ENOMEM; 2916 } 2917 snapshot_entry->id = snapshot_id; 2918 TAILQ_INIT(&snapshot_entry->clones); 2919 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 2920 } else { 2921 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2922 if (clone_entry->id == blob->id) { 2923 break; 2924 } 2925 } 2926 } 2927 2928 if (clone_entry == NULL) { 2929 /* Clone not found */ 2930 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 2931 if (clone_entry == NULL) { 2932 return -ENOMEM; 2933 } 2934 clone_entry->id = blob->id; 2935 TAILQ_INIT(&clone_entry->clones); 2936 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 2937 snapshot_entry->clone_count++; 2938 } 2939 2940 return 0; 2941 } 2942 2943 static void 2944 _spdk_bs_blob_list_remove(struct spdk_blob *blob) 2945 { 2946 struct spdk_blob_list *snapshot_entry = NULL; 2947 struct spdk_blob_list *clone_entry = NULL; 2948 2949 _spdk_blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 2950 2951 if (snapshot_entry == NULL) { 2952 return; 2953 } 2954 2955 blob->parent_id = SPDK_BLOBID_INVALID; 2956 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2957 free(clone_entry); 2958 2959 snapshot_entry->clone_count--; 2960 } 2961 2962 static int 2963 _spdk_bs_blob_list_free(struct spdk_blob_store *bs) 2964 { 2965 struct spdk_blob_list *snapshot_entry; 2966 struct spdk_blob_list *snapshot_entry_tmp; 2967 struct spdk_blob_list *clone_entry; 2968 struct spdk_blob_list *clone_entry_tmp; 2969 2970 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 2971 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 2972 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 2973 free(clone_entry); 2974 } 2975 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 2976 free(snapshot_entry); 2977 } 2978 2979 return 0; 2980 } 2981 2982 static void 2983 _spdk_bs_free(struct spdk_blob_store *bs) 2984 { 2985 _spdk_bs_blob_list_free(bs); 2986 2987 spdk_bs_unregister_md_thread(bs); 2988 spdk_io_device_unregister(bs, _spdk_bs_dev_destroy); 2989 } 2990 2991 void 2992 spdk_bs_opts_init(struct spdk_bs_opts *opts) 2993 { 2994 opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ; 2995 opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES; 2996 opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS; 2997 opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS; 2998 opts->clear_method = BS_CLEAR_WITH_UNMAP; 2999 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3000 opts->iter_cb_fn = NULL; 3001 opts->iter_cb_arg = NULL; 3002 } 3003 3004 static int 3005 _spdk_bs_opts_verify(struct spdk_bs_opts *opts) 3006 { 3007 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3008 opts->max_channel_ops == 0) { 3009 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3010 return -1; 3011 } 3012 3013 
	return 0;
}

static int
_spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs)
{
	struct spdk_blob_store *bs;
	uint64_t dev_size;
	int rc;

	dev_size = dev->blocklen * dev->blockcnt;
	if (dev_size < opts->cluster_sz) {
		/* Device size cannot be smaller than cluster size of blobstore */
		SPDK_INFOLOG(SPDK_LOG_BLOB, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
			     dev_size, opts->cluster_sz);
		return -ENOSPC;
	}
	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
		/* Cluster size cannot be smaller than page size */
		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
		return -EINVAL;
	}
	bs = calloc(1, sizeof(struct spdk_blob_store));
	if (!bs) {
		return -ENOMEM;
	}

	TAILQ_INIT(&bs->blobs);
	TAILQ_INIT(&bs->snapshots);
	bs->dev = dev;
	bs->md_thread = spdk_get_thread();
	assert(bs->md_thread != NULL);

	/*
	 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an
	 * even multiple of the cluster size.
	 */
	bs->cluster_sz = opts->cluster_sz;
	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
	bs->num_free_clusters = bs->total_clusters;
	bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
	bs->io_unit_size = dev->blocklen;
	if (bs->used_clusters == NULL) {
		free(bs);
		return -ENOMEM;
	}

	bs->max_channel_ops = opts->max_channel_ops;
	bs->super_blob = SPDK_BLOBID_INVALID;
	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));

	/* The metadata is assumed to be at least 1 page */
	bs->used_md_pages = spdk_bit_array_create(1);
	bs->used_blobids = spdk_bit_array_create(0);

	pthread_mutex_init(&bs->used_clusters_mutex, NULL);

	spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy,
				sizeof(struct spdk_bs_channel), "blobstore");
	rc = spdk_bs_register_md_thread(bs);
	if (rc == -1) {
		spdk_io_device_unregister(bs, NULL);
		pthread_mutex_destroy(&bs->used_clusters_mutex);
		spdk_bit_array_free(&bs->used_blobids);
		spdk_bit_array_free(&bs->used_md_pages);
		spdk_bit_array_free(&bs->used_clusters);
		free(bs);
		/* FIXME: this is a lie but don't know how to get a proper error code here */
		return -ENOMEM;
	}

	*_bs = bs;
	return 0;
}

/* START spdk_bs_load, spdk_bs_load_ctx will be used for both load and unload.
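 *
 * A minimal caller sketch (assumes dev points at an already-initialized
 * struct spdk_bs_dev and load_cb has the spdk_bs_op_with_handle_complete
 * signature):
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts);
 *	spdk_bs_load(dev, &opts, load_cb, cb_arg);
 *
 * On success, load_cb receives the new struct spdk_blob_store pointer.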
*/ 3091 3092 struct spdk_bs_load_ctx { 3093 struct spdk_blob_store *bs; 3094 struct spdk_bs_super_block *super; 3095 3096 struct spdk_bs_md_mask *mask; 3097 bool in_page_chain; 3098 uint32_t page_index; 3099 uint32_t cur_page; 3100 struct spdk_blob_md_page *page; 3101 3102 uint64_t num_extent_pages; 3103 uint32_t *extent_pages; 3104 3105 spdk_bs_sequence_t *seq; 3106 spdk_blob_op_with_handle_complete iter_cb_fn; 3107 void *iter_cb_arg; 3108 struct spdk_blob *blob; 3109 spdk_blob_id blobid; 3110 }; 3111 3112 static void 3113 _spdk_bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3114 { 3115 assert(bserrno != 0); 3116 3117 spdk_free(ctx->super); 3118 spdk_bs_sequence_finish(ctx->seq, bserrno); 3119 _spdk_bs_free(ctx->bs); 3120 free(ctx); 3121 } 3122 3123 static void 3124 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask) 3125 { 3126 uint32_t i = 0; 3127 3128 while (true) { 3129 i = spdk_bit_array_find_first_set(array, i); 3130 if (i >= mask->length) { 3131 break; 3132 } 3133 mask->mask[i / 8] |= 1U << (i % 8); 3134 i++; 3135 } 3136 } 3137 3138 static int 3139 _spdk_bs_load_mask(struct spdk_bit_array **array_ptr, struct spdk_bs_md_mask *mask) 3140 { 3141 struct spdk_bit_array *array; 3142 uint32_t i; 3143 3144 if (spdk_bit_array_resize(array_ptr, mask->length) < 0) { 3145 return -ENOMEM; 3146 } 3147 3148 array = *array_ptr; 3149 for (i = 0; i < mask->length; i++) { 3150 if (mask->mask[i / 8] & (1U << (i % 8))) { 3151 spdk_bit_array_set(array, i); 3152 } 3153 } 3154 3155 return 0; 3156 } 3157 3158 static void 3159 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3160 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3161 { 3162 /* Update the values in the super block */ 3163 super->super_blob = bs->super_blob; 3164 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3165 super->crc = _spdk_blob_md_page_calc_crc(super); 3166 spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0), 3167 _spdk_bs_byte_to_lba(bs, sizeof(*super)), 3168 cb_fn, cb_arg); 3169 } 3170 3171 static void 3172 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3173 { 3174 struct spdk_bs_load_ctx *ctx = arg; 3175 uint64_t mask_size, lba, lba_count; 3176 3177 /* Write out the used clusters mask */ 3178 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3179 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3180 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3181 if (!ctx->mask) { 3182 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3183 return; 3184 } 3185 3186 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 3187 ctx->mask->length = ctx->bs->total_clusters; 3188 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters)); 3189 3190 _spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask); 3191 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3192 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3193 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3194 } 3195 3196 static void 3197 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3198 { 3199 struct spdk_bs_load_ctx *ctx = arg; 3200 uint64_t mask_size, lba, lba_count; 3201 3202 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3203 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3204 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3205 if (!ctx->mask) { 3206 _spdk_bs_load_ctx_fail(ctx, 
-ENOMEM); 3207 return; 3208 } 3209 3210 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 3211 ctx->mask->length = ctx->super->md_len; 3212 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 3213 3214 _spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask); 3215 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3216 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3217 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3218 } 3219 3220 static void 3221 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3222 { 3223 struct spdk_bs_load_ctx *ctx = arg; 3224 uint64_t mask_size, lba, lba_count; 3225 3226 if (ctx->super->used_blobid_mask_len == 0) { 3227 /* 3228 * This is a pre-v3 on-disk format where the blobid mask does not get 3229 * written to disk. 3230 */ 3231 cb_fn(seq, arg, 0); 3232 return; 3233 } 3234 3235 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3236 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3237 SPDK_MALLOC_DMA); 3238 if (!ctx->mask) { 3239 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3240 return; 3241 } 3242 3243 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 3244 ctx->mask->length = ctx->super->md_len; 3245 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 3246 3247 _spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask); 3248 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3249 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3250 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3251 } 3252 3253 static void 3254 _spdk_blob_set_thin_provision(struct spdk_blob *blob) 3255 { 3256 _spdk_blob_verify_md_op(blob); 3257 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 3258 blob->state = SPDK_BLOB_STATE_DIRTY; 3259 } 3260 3261 static void 3262 _spdk_blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 3263 { 3264 _spdk_blob_verify_md_op(blob); 3265 blob->clear_method = clear_method; 3266 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 3267 blob->state = SPDK_BLOB_STATE_DIRTY; 3268 } 3269 3270 static void _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 3271 3272 static void 3273 _spdk_bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 3274 { 3275 struct spdk_bs_load_ctx *ctx = cb_arg; 3276 spdk_blob_id id; 3277 int64_t page_num; 3278 3279 /* Iterate to next blob (we can't use spdk_bs_iter_next function as our 3280 * last blob has been removed */ 3281 page_num = _spdk_bs_blobid_to_page(ctx->blobid); 3282 page_num++; 3283 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 3284 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 3285 _spdk_bs_load_iter(ctx, NULL, -ENOENT); 3286 return; 3287 } 3288 3289 id = _spdk_bs_page_to_blobid(page_num); 3290 3291 spdk_bs_open_blob(ctx->bs, id, _spdk_bs_load_iter, ctx); 3292 } 3293 3294 static void 3295 _spdk_bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 3296 { 3297 struct spdk_bs_load_ctx *ctx = cb_arg; 3298 3299 if (bserrno != 0) { 3300 SPDK_ERRLOG("Failed to close corrupted blob\n"); 3301 spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx); 3302 return; 3303 } 3304 3305 spdk_bs_delete_blob(ctx->bs, ctx->blobid, _spdk_bs_delete_corrupted_blob_cpl, ctx); 3306 } 3307 3308 static void 3309 _spdk_bs_delete_corrupted_blob(void *cb_arg, int bserrno) 3310 { 
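	/* Called once the clone of a corrupted snapshot has been closed; the
	 * snapshot's cluster map and extent pages are cleared below and the
	 * snapshot itself is then closed and deleted via
	 * _spdk_bs_delete_corrupted_close_cb(). */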
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint64_t i;

	if (bserrno != 0) {
		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
		return;
	}

	/* Snapshot and clone have the same copy of cluster map and extent pages
	 * at this point. Let's clear both for snapshot now,
	 * so that it won't be cleared for clone later when we remove snapshot.
	 * Also set thin provision to pass data corruption check */
	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
		ctx->blob->active.clusters[i] = 0;
	}
	for (i = 0; i < ctx->blob->active.num_extent_pages; i++) {
		ctx->blob->active.extent_pages[i] = 0;
	}

	ctx->blob->md_ro = false;

	_spdk_blob_set_thin_provision(ctx->blob);

	ctx->blobid = ctx->blob->id;

	spdk_blob_close(ctx->blob, _spdk_bs_delete_corrupted_close_cb, ctx);
}

static void
_spdk_bs_update_corrupted_blob(void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
		return;
	}

	ctx->blob->md_ro = false;
	_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
	_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true);
	spdk_blob_set_read_only(ctx->blob);

	if (ctx->iter_cb_fn) {
		ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
	}
	_spdk_bs_blob_list_add(ctx->blob);

	spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
}

static void
_spdk_bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
		return;
	}

	if (blob->parent_id == ctx->blob->id) {
		/* Power failure occurred before updating clone (snapshot delete case)
		 * or after updating clone (creating snapshot case) - keep snapshot */
		spdk_blob_close(blob, _spdk_bs_update_corrupted_blob, ctx);
	} else {
		/* Power failure occurred after updating clone (snapshot delete case)
		 * or before updating clone (creating snapshot case) - remove snapshot */
		spdk_blob_close(blob, _spdk_bs_delete_corrupted_blob, ctx);
	}
}

static void
_spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = arg;
	const void *value;
	size_t len;
	int rc = 0;

	if (bserrno == 0) {
		/* Examine blob if it is corrupted after power failure. Fix
		 * the ones that can be fixed and remove any other corrupted
		 * ones.
If it is not corrupted just process it */ 3398 rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 3399 if (rc != 0) { 3400 rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 3401 if (rc != 0) { 3402 /* Not corrupted - process it and continue with iterating through blobs */ 3403 if (ctx->iter_cb_fn) { 3404 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 3405 } 3406 _spdk_bs_blob_list_add(blob); 3407 spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx); 3408 return; 3409 } 3410 3411 } 3412 3413 assert(len == sizeof(spdk_blob_id)); 3414 3415 ctx->blob = blob; 3416 3417 /* Open clone to check if we are able to fix this blob or should we remove it */ 3418 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, _spdk_bs_examine_clone, ctx); 3419 return; 3420 } else if (bserrno == -ENOENT) { 3421 bserrno = 0; 3422 } else { 3423 /* 3424 * This case needs to be looked at further. Same problem 3425 * exists with applications that rely on explicit blob 3426 * iteration. We should just skip the blob that failed 3427 * to load and continue on to the next one. 3428 */ 3429 SPDK_ERRLOG("Error in iterating blobs\n"); 3430 } 3431 3432 ctx->iter_cb_fn = NULL; 3433 3434 spdk_free(ctx->super); 3435 spdk_free(ctx->mask); 3436 spdk_bs_sequence_finish(ctx->seq, bserrno); 3437 free(ctx); 3438 } 3439 3440 static void 3441 _spdk_bs_load_complete(struct spdk_bs_load_ctx *ctx) 3442 { 3443 spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx); 3444 } 3445 3446 static void 3447 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3448 { 3449 struct spdk_bs_load_ctx *ctx = cb_arg; 3450 int rc; 3451 3452 /* The type must be correct */ 3453 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 3454 3455 /* The length of the mask (in bits) must not be greater than 3456 * the length of the buffer (converted to bits) */ 3457 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 3458 3459 /* The length of the mask must be exactly equal to the size 3460 * (in pages) of the metadata region */ 3461 assert(ctx->mask->length == ctx->super->md_len); 3462 3463 rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask); 3464 if (rc < 0) { 3465 spdk_free(ctx->mask); 3466 _spdk_bs_load_ctx_fail(ctx, rc); 3467 return; 3468 } 3469 3470 _spdk_bs_load_complete(ctx); 3471 } 3472 3473 static void 3474 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3475 { 3476 struct spdk_bs_load_ctx *ctx = cb_arg; 3477 uint64_t lba, lba_count, mask_size; 3478 int rc; 3479 3480 if (bserrno != 0) { 3481 _spdk_bs_load_ctx_fail(ctx, bserrno); 3482 return; 3483 } 3484 3485 /* The type must be correct */ 3486 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 3487 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 3488 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 3489 struct spdk_blob_md_page) * 8)); 3490 /* The length of the mask must be exactly equal to the total number of clusters */ 3491 assert(ctx->mask->length == ctx->bs->total_clusters); 3492 3493 rc = _spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask); 3494 if (rc < 0) { 3495 spdk_free(ctx->mask); 3496 _spdk_bs_load_ctx_fail(ctx, rc); 3497 return; 3498 } 3499 3500 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters); 3501 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 3502 3503 spdk_free(ctx->mask); 3504 
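	/* The used_md_pages and used_clusters masks have been applied at this
	 * point; the blobid mask is the last of the three persisted masks read
	 * during load. */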
3505 /* Read the used blobids mask */ 3506 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3507 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3508 SPDK_MALLOC_DMA); 3509 if (!ctx->mask) { 3510 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3511 return; 3512 } 3513 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3514 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3515 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 3516 _spdk_bs_load_used_blobids_cpl, ctx); 3517 } 3518 3519 static void 3520 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3521 { 3522 struct spdk_bs_load_ctx *ctx = cb_arg; 3523 uint64_t lba, lba_count, mask_size; 3524 int rc; 3525 3526 if (bserrno != 0) { 3527 _spdk_bs_load_ctx_fail(ctx, bserrno); 3528 return; 3529 } 3530 3531 /* The type must be correct */ 3532 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 3533 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 3534 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 3535 8)); 3536 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 3537 assert(ctx->mask->length == ctx->super->md_len); 3538 3539 rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask); 3540 if (rc < 0) { 3541 spdk_free(ctx->mask); 3542 _spdk_bs_load_ctx_fail(ctx, rc); 3543 return; 3544 } 3545 3546 spdk_free(ctx->mask); 3547 3548 /* Read the used clusters mask */ 3549 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3550 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3551 SPDK_MALLOC_DMA); 3552 if (!ctx->mask) { 3553 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3554 return; 3555 } 3556 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3557 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3558 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 3559 _spdk_bs_load_used_clusters_cpl, ctx); 3560 } 3561 3562 static void 3563 _spdk_bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 3564 { 3565 uint64_t lba, lba_count, mask_size; 3566 3567 /* Read the used pages mask */ 3568 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3569 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3570 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3571 if (!ctx->mask) { 3572 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3573 return; 3574 } 3575 3576 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3577 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3578 spdk_bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 3579 _spdk_bs_load_used_pages_cpl, ctx); 3580 } 3581 3582 static int 3583 _spdk_bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx) 3584 { 3585 struct spdk_blob_store *bs = ctx->bs; 3586 struct spdk_blob_md_page *page = ctx->page; 3587 struct spdk_blob_md_descriptor *desc; 3588 size_t cur_desc = 0; 3589 3590 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3591 while (cur_desc < sizeof(page->descriptors)) { 3592 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 3593 if (desc->length == 0) { 3594 /* If padding and length are 0, this terminates the page */ 3595 break; 3596 } 3597 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 3598 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 3599 unsigned int i, j; 
3600 unsigned int cluster_count = 0; 3601 uint32_t cluster_idx; 3602 3603 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 3604 3605 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 3606 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 3607 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 3608 /* 3609 * cluster_idx = 0 means an unallocated cluster - don't mark that 3610 * in the used cluster map. 3611 */ 3612 if (cluster_idx != 0) { 3613 spdk_bit_array_set(bs->used_clusters, cluster_idx + j); 3614 if (bs->num_free_clusters == 0) { 3615 return -ENOSPC; 3616 } 3617 bs->num_free_clusters--; 3618 } 3619 cluster_count++; 3620 } 3621 } 3622 if (cluster_count == 0) { 3623 return -EINVAL; 3624 } 3625 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 3626 struct spdk_blob_md_descriptor_extent_page *desc_extent; 3627 uint32_t i; 3628 uint32_t cluster_count = 0; 3629 uint32_t cluster_idx; 3630 size_t cluster_idx_length; 3631 3632 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 3633 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 3634 3635 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 3636 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 3637 return -EINVAL; 3638 } 3639 3640 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 3641 cluster_idx = desc_extent->cluster_idx[i]; 3642 /* 3643 * cluster_idx = 0 means an unallocated cluster - don't mark that 3644 * in the used cluster map. 3645 */ 3646 if (cluster_idx != 0) { 3647 if (cluster_idx < desc_extent->start_cluster_idx && 3648 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 3649 return -EINVAL; 3650 } 3651 spdk_bit_array_set(bs->used_clusters, cluster_idx); 3652 if (bs->num_free_clusters == 0) { 3653 return -ENOSPC; 3654 } 3655 bs->num_free_clusters--; 3656 } 3657 cluster_count++; 3658 } 3659 3660 if (cluster_count == 0) { 3661 return -EINVAL; 3662 } 3663 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 3664 /* Skip this item */ 3665 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 3666 /* Skip this item */ 3667 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 3668 /* Skip this item */ 3669 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 3670 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 3671 uint32_t num_extent_pages = ctx->num_extent_pages; 3672 uint32_t i; 3673 size_t extent_pages_length; 3674 void *tmp; 3675 3676 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 3677 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 3678 3679 if (desc_extent_table->length == 0 || 3680 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 3681 return -EINVAL; 3682 } 3683 3684 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 3685 if (desc_extent_table->extent_page[i].page_idx != 0) { 3686 if (desc_extent_table->extent_page[i].num_pages != 1) { 3687 return -EINVAL; 3688 } 3689 num_extent_pages += 1; 3690 } 3691 } 3692 3693 if (num_extent_pages > 0) { 3694 tmp = realloc(ctx->extent_pages, num_extent_pages * sizeof(uint32_t)); 3695 if (tmp == NULL) { 3696 return -ENOMEM; 3697 } 3698 ctx->extent_pages = tmp; 3699 3700 /* Extent table entries contain md page numbers for extent pages. 
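				 * Each nonzero page_idx gathered into ctx->extent_pages here is
				 * read back later in the replay so the clusters it references can
				 * be marked as used.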
3701 * Zeroes represent unallocated extent pages, those are run-length-encoded. 3702 */ 3703 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 3704 if (desc_extent_table->extent_page[i].page_idx != 0) { 3705 ctx->extent_pages[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 3706 ctx->num_extent_pages += 1; 3707 } 3708 } 3709 } 3710 } else { 3711 /* Error */ 3712 return -EINVAL; 3713 } 3714 /* Advance to the next descriptor */ 3715 cur_desc += sizeof(*desc) + desc->length; 3716 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 3717 break; 3718 } 3719 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 3720 } 3721 return 0; 3722 } 3723 3724 static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 3725 { 3726 uint32_t crc; 3727 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3728 size_t desc_len; 3729 3730 crc = _spdk_blob_md_page_calc_crc(page); 3731 if (crc != page->crc) { 3732 return false; 3733 } 3734 3735 /* Extent page should always be of sequence num 0. */ 3736 if (page->sequence_num != 0) { 3737 return false; 3738 } 3739 3740 /* Descriptor type must be EXTENT_PAGE. */ 3741 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 3742 return false; 3743 } 3744 3745 /* Descriptor length cannot exceed the page. */ 3746 desc_len = sizeof(*desc) + desc->length; 3747 if (desc_len > sizeof(page->descriptors)) { 3748 return false; 3749 } 3750 3751 /* It has to be the only descriptor in the page. */ 3752 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 3753 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 3754 if (desc->length != 0) { 3755 return false; 3756 } 3757 } 3758 3759 return true; 3760 } 3761 3762 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 3763 { 3764 uint32_t crc; 3765 3766 crc = _spdk_blob_md_page_calc_crc(ctx->page); 3767 if (crc != ctx->page->crc) { 3768 return false; 3769 } 3770 3771 /* First page of a sequence should match the blobid. 
*/ 3772 if (ctx->page->sequence_num == 0 && 3773 _spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) { 3774 return false; 3775 } 3776 return true; 3777 } 3778 3779 static void 3780 _spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx); 3781 3782 static void 3783 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3784 { 3785 struct spdk_bs_load_ctx *ctx = cb_arg; 3786 3787 if (bserrno != 0) { 3788 _spdk_bs_load_ctx_fail(ctx, bserrno); 3789 return; 3790 } 3791 3792 _spdk_bs_load_complete(ctx); 3793 } 3794 3795 static void 3796 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3797 { 3798 struct spdk_bs_load_ctx *ctx = cb_arg; 3799 3800 spdk_free(ctx->mask); 3801 ctx->mask = NULL; 3802 3803 if (bserrno != 0) { 3804 _spdk_bs_load_ctx_fail(ctx, bserrno); 3805 return; 3806 } 3807 3808 _spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_load_write_used_clusters_cpl); 3809 } 3810 3811 static void 3812 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3813 { 3814 struct spdk_bs_load_ctx *ctx = cb_arg; 3815 3816 spdk_free(ctx->mask); 3817 ctx->mask = NULL; 3818 3819 if (bserrno != 0) { 3820 _spdk_bs_load_ctx_fail(ctx, bserrno); 3821 return; 3822 } 3823 3824 _spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_load_write_used_blobids_cpl); 3825 } 3826 3827 static void 3828 _spdk_bs_load_write_used_md(struct spdk_bs_load_ctx *ctx) 3829 { 3830 _spdk_bs_write_used_md(ctx->seq, ctx, _spdk_bs_load_write_used_pages_cpl); 3831 } 3832 3833 static void 3834 _spdk_bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx) 3835 { 3836 uint64_t num_md_clusters; 3837 uint64_t i; 3838 3839 ctx->in_page_chain = false; 3840 3841 do { 3842 ctx->page_index++; 3843 } while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true); 3844 3845 if (ctx->page_index < ctx->super->md_len) { 3846 ctx->cur_page = ctx->page_index; 3847 _spdk_bs_load_replay_cur_md_page(ctx); 3848 } else { 3849 /* Claim all of the clusters used by the metadata */ 3850 num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster); 3851 for (i = 0; i < num_md_clusters; i++) { 3852 _spdk_bs_claim_cluster(ctx->bs, i); 3853 } 3854 spdk_free(ctx->page); 3855 _spdk_bs_load_write_used_md(ctx); 3856 } 3857 } 3858 3859 static void _spdk_bs_load_replay_extent_page(spdk_bs_sequence_t *seq, uint32_t page, void *cb_arg); 3860 3861 static void 3862 _spdk_bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3863 { 3864 struct spdk_bs_load_ctx *ctx = cb_arg; 3865 uint32_t page_num; 3866 3867 if (bserrno != 0) { 3868 _spdk_bs_load_ctx_fail(ctx, bserrno); 3869 return; 3870 } 3871 3872 /* Extent pages are only read when present within in chain md. 3873 * Integrity of md is not right if that page was not a valid extent page. 
*/ 3874 if (_spdk_bs_load_cur_extent_page_valid(ctx->page) != true) { 3875 _spdk_bs_load_ctx_fail(ctx, -EILSEQ); 3876 return; 3877 } 3878 3879 page_num = ctx->extent_pages[ctx->num_extent_pages - 1]; 3880 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 3881 if (_spdk_bs_load_replay_md_parse_page(ctx)) { 3882 _spdk_bs_load_ctx_fail(ctx, -EILSEQ); 3883 return; 3884 } 3885 3886 ctx->num_extent_pages--; 3887 if (ctx->num_extent_pages > 0) { 3888 _spdk_bs_load_replay_extent_page(seq, ctx->extent_pages[ctx->num_extent_pages - 1], ctx); 3889 return; 3890 } 3891 3892 free(ctx->extent_pages); 3893 ctx->extent_pages = NULL; 3894 3895 _spdk_bs_load_replay_md_chain_cpl(ctx); 3896 } 3897 3898 static void 3899 _spdk_bs_load_replay_extent_page(spdk_bs_sequence_t *seq, uint32_t page, void *cb_arg) 3900 { 3901 struct spdk_bs_load_ctx *ctx = cb_arg; 3902 uint64_t lba; 3903 3904 assert(page < ctx->super->md_len); 3905 lba = _spdk_bs_md_page_to_lba(ctx->bs, page); 3906 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 3907 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 3908 _spdk_bs_load_replay_extent_page_cpl, ctx); 3909 } 3910 3911 static void 3912 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3913 { 3914 struct spdk_bs_load_ctx *ctx = cb_arg; 3915 uint32_t page_num; 3916 3917 if (bserrno != 0) { 3918 _spdk_bs_load_ctx_fail(ctx, bserrno); 3919 return; 3920 } 3921 3922 page_num = ctx->cur_page; 3923 if (_spdk_bs_load_cur_md_page_valid(ctx) == true) { 3924 if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) { 3925 _spdk_bs_claim_md_page(ctx->bs, page_num); 3926 if (ctx->page->sequence_num == 0) { 3927 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 3928 } 3929 if (_spdk_bs_load_replay_md_parse_page(ctx)) { 3930 _spdk_bs_load_ctx_fail(ctx, -EILSEQ); 3931 return; 3932 } 3933 if (ctx->page->next != SPDK_INVALID_MD_PAGE) { 3934 ctx->in_page_chain = true; 3935 ctx->cur_page = ctx->page->next; 3936 _spdk_bs_load_replay_cur_md_page(ctx); 3937 return; 3938 } 3939 if (ctx->num_extent_pages != 0) { 3940 /* Extent pages are read from last to first, 3941 * decreasing the num_extent_pages as they are read. 
*/ 3942 _spdk_bs_load_replay_extent_page(seq, ctx->extent_pages[ctx->num_extent_pages - 1], ctx); 3943 return; 3944 } 3945 } 3946 } 3947 _spdk_bs_load_replay_md_chain_cpl(ctx); 3948 } 3949 3950 static void 3951 _spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx) 3952 { 3953 uint64_t lba; 3954 3955 assert(ctx->cur_page < ctx->super->md_len); 3956 lba = _spdk_bs_md_page_to_lba(ctx->bs, ctx->cur_page); 3957 spdk_bs_sequence_read_dev(ctx->seq, ctx->page, lba, 3958 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 3959 _spdk_bs_load_replay_md_cpl, ctx); 3960 } 3961 3962 static void 3963 _spdk_bs_load_replay_md(struct spdk_bs_load_ctx *ctx) 3964 { 3965 ctx->page_index = 0; 3966 ctx->cur_page = 0; 3967 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE, 3968 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3969 if (!ctx->page) { 3970 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3971 return; 3972 } 3973 _spdk_bs_load_replay_cur_md_page(ctx); 3974 } 3975 3976 static void 3977 _spdk_bs_recover(struct spdk_bs_load_ctx *ctx) 3978 { 3979 int rc; 3980 3981 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len); 3982 if (rc < 0) { 3983 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3984 return; 3985 } 3986 3987 rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len); 3988 if (rc < 0) { 3989 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3990 return; 3991 } 3992 3993 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 3994 if (rc < 0) { 3995 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3996 return; 3997 } 3998 3999 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 4000 _spdk_bs_load_replay_md(ctx); 4001 } 4002 4003 static void 4004 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4005 { 4006 struct spdk_bs_load_ctx *ctx = cb_arg; 4007 uint32_t crc; 4008 int rc; 4009 static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH]; 4010 4011 if (ctx->super->version > SPDK_BS_VERSION || 4012 ctx->super->version < SPDK_BS_INITIAL_VERSION) { 4013 _spdk_bs_load_ctx_fail(ctx, -EILSEQ); 4014 return; 4015 } 4016 4017 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 4018 sizeof(ctx->super->signature)) != 0) { 4019 _spdk_bs_load_ctx_fail(ctx, -EILSEQ); 4020 return; 4021 } 4022 4023 crc = _spdk_blob_md_page_calc_crc(ctx->super); 4024 if (crc != ctx->super->crc) { 4025 _spdk_bs_load_ctx_fail(ctx, -EILSEQ); 4026 return; 4027 } 4028 4029 if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 4030 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n"); 4031 } else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) { 4032 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless bstype\n"); 4033 } else { 4034 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n"); 4035 SPDK_LOGDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 4036 SPDK_LOGDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH); 4037 _spdk_bs_load_ctx_fail(ctx, -ENXIO); 4038 return; 4039 } 4040 4041 if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) { 4042 SPDK_NOTICELOG("Size mismatch, dev size: %lu, blobstore size: %lu\n", 4043 ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size); 4044 _spdk_bs_load_ctx_fail(ctx, -EILSEQ); 4045 return; 4046 } 4047 4048 if (ctx->super->size == 0) { 4049 ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen; 4050 } 4051 4052 if (ctx->super->io_unit_size 
== 0) { 4053 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4054 } 4055 4056 /* Parse the super block */ 4057 ctx->bs->clean = 1; 4058 ctx->bs->cluster_sz = ctx->super->cluster_size; 4059 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4060 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4061 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4062 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 4063 if (rc < 0) { 4064 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 4065 return; 4066 } 4067 ctx->bs->md_start = ctx->super->md_start; 4068 ctx->bs->md_len = ctx->super->md_len; 4069 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4070 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4071 ctx->bs->super_blob = ctx->super->super_blob; 4072 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4073 4074 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) { 4075 _spdk_bs_recover(ctx); 4076 } else { 4077 _spdk_bs_load_read_used_pages(ctx); 4078 } 4079 } 4080 4081 void 4082 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4083 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4084 { 4085 struct spdk_blob_store *bs; 4086 struct spdk_bs_cpl cpl; 4087 struct spdk_bs_load_ctx *ctx; 4088 struct spdk_bs_opts opts = {}; 4089 int err; 4090 4091 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev); 4092 4093 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4094 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen); 4095 dev->destroy(dev); 4096 cb_fn(cb_arg, NULL, -EINVAL); 4097 return; 4098 } 4099 4100 if (o) { 4101 opts = *o; 4102 } else { 4103 spdk_bs_opts_init(&opts); 4104 } 4105 4106 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 4107 dev->destroy(dev); 4108 cb_fn(cb_arg, NULL, -EINVAL); 4109 return; 4110 } 4111 4112 err = _spdk_bs_alloc(dev, &opts, &bs); 4113 if (err) { 4114 dev->destroy(dev); 4115 cb_fn(cb_arg, NULL, err); 4116 return; 4117 } 4118 4119 ctx = calloc(1, sizeof(*ctx)); 4120 if (!ctx) { 4121 _spdk_bs_free(bs); 4122 cb_fn(cb_arg, NULL, -ENOMEM); 4123 return; 4124 } 4125 4126 ctx->bs = bs; 4127 ctx->iter_cb_fn = opts.iter_cb_fn; 4128 ctx->iter_cb_arg = opts.iter_cb_arg; 4129 4130 /* Allocate memory for the super block */ 4131 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 4132 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4133 if (!ctx->super) { 4134 free(ctx); 4135 _spdk_bs_free(bs); 4136 cb_fn(cb_arg, NULL, -ENOMEM); 4137 return; 4138 } 4139 4140 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4141 cpl.u.bs_handle.cb_fn = cb_fn; 4142 cpl.u.bs_handle.cb_arg = cb_arg; 4143 cpl.u.bs_handle.bs = bs; 4144 4145 ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4146 if (!ctx->seq) { 4147 spdk_free(ctx->super); 4148 free(ctx); 4149 _spdk_bs_free(bs); 4150 cb_fn(cb_arg, NULL, -ENOMEM); 4151 return; 4152 } 4153 4154 /* Read the super block */ 4155 spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 4156 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 4157 _spdk_bs_load_super_cpl, ctx); 4158 } 4159 4160 /* END spdk_bs_load */ 4161 4162 /* START spdk_bs_dump */ 4163 4164 struct spdk_bs_dump_ctx { 4165 struct spdk_blob_store *bs; 4166 struct spdk_bs_super_block *super; 4167 uint32_t cur_page; 4168 struct spdk_blob_md_page *page; 4169 spdk_bs_sequence_t *seq; 4170 FILE *fp; 4171 spdk_bs_dump_print_xattr print_xattr_fn; 4172 char xattr_name[4096]; 4173 }; 
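/* Dump flow: spdk_bs_dump() reads the super block, prints its fields, then walks
 * every page of the metadata region, printing the descriptors of each page that
 * belongs to a blob (page->id != 0) before finishing the sequence and freeing
 * the blobstore.
 *
 * A minimal usage sketch; the device, xattr printer and callback names below are
 * illustrative only and not part of this file:
 *
 *   static void dump_done(void *cb_arg, int bserrno) { ... }
 *   spdk_bs_dump(bs_dev, stdout, my_print_xattr, dump_done, NULL);
 */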
4174 4175 static void 4176 _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno) 4177 { 4178 spdk_free(ctx->super); 4179 4180 /* 4181 * We need to defer calling spdk_bs_call_cpl() until after 4182 * dev destruction, so tuck these away for later use. 4183 */ 4184 ctx->bs->unload_err = bserrno; 4185 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 4186 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 4187 4188 spdk_bs_sequence_finish(seq, 0); 4189 _spdk_bs_free(ctx->bs); 4190 free(ctx); 4191 } 4192 4193 static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg); 4194 4195 static void 4196 _spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx) 4197 { 4198 uint32_t page_idx = ctx->cur_page; 4199 struct spdk_blob_md_page *page = ctx->page; 4200 struct spdk_blob_md_descriptor *desc; 4201 size_t cur_desc = 0; 4202 uint32_t crc; 4203 4204 fprintf(ctx->fp, "=========\n"); 4205 fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx); 4206 fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id); 4207 4208 crc = _spdk_blob_md_page_calc_crc(page); 4209 fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch"); 4210 4211 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 4212 while (cur_desc < sizeof(page->descriptors)) { 4213 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 4214 if (desc->length == 0) { 4215 /* If padding and length are 0, this terminates the page */ 4216 break; 4217 } 4218 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 4219 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 4220 unsigned int i; 4221 4222 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 4223 4224 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 4225 if (desc_extent_rle->extents[i].cluster_idx != 0) { 4226 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 4227 desc_extent_rle->extents[i].cluster_idx); 4228 } else { 4229 fprintf(ctx->fp, "Unallocated Extent - "); 4230 } 4231 fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length); 4232 fprintf(ctx->fp, "\n"); 4233 } 4234 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 4235 struct spdk_blob_md_descriptor_extent_page *desc_extent; 4236 unsigned int i; 4237 4238 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 4239 4240 for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) { 4241 if (desc_extent->cluster_idx[i] != 0) { 4242 fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32, 4243 desc_extent->cluster_idx[i]); 4244 } else { 4245 fprintf(ctx->fp, "Unallocated Extent"); 4246 } 4247 fprintf(ctx->fp, "\n"); 4248 } 4249 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 4250 struct spdk_blob_md_descriptor_xattr *desc_xattr; 4251 uint32_t i; 4252 4253 desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc; 4254 4255 if (desc_xattr->length != 4256 sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) + 4257 desc_xattr->name_length + desc_xattr->value_length) { 4258 } 4259 4260 memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length); 4261 ctx->xattr_name[desc_xattr->name_length] = '\0'; 4262 fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name); 4263 fprintf(ctx->fp, " value = \""); 4264 ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name, 4265 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 
4266 desc_xattr->value_length); 4267 fprintf(ctx->fp, "\"\n"); 4268 for (i = 0; i < desc_xattr->value_length; i++) { 4269 if (i % 16 == 0) { 4270 fprintf(ctx->fp, " "); 4271 } 4272 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 4273 if ((i + 1) % 16 == 0) { 4274 fprintf(ctx->fp, "\n"); 4275 } 4276 } 4277 if (i % 16 != 0) { 4278 fprintf(ctx->fp, "\n"); 4279 } 4280 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4281 /* TODO */ 4282 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4283 /* TODO */ 4284 } else { 4285 /* Error */ 4286 } 4287 /* Advance to the next descriptor */ 4288 cur_desc += sizeof(*desc) + desc->length; 4289 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4290 break; 4291 } 4292 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4293 } 4294 } 4295 4296 static void 4297 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4298 { 4299 struct spdk_bs_dump_ctx *ctx = cb_arg; 4300 4301 if (bserrno != 0) { 4302 _spdk_bs_dump_finish(seq, ctx, bserrno); 4303 return; 4304 } 4305 4306 if (ctx->page->id != 0) { 4307 _spdk_bs_dump_print_md_page(ctx); 4308 } 4309 4310 ctx->cur_page++; 4311 4312 if (ctx->cur_page < ctx->super->md_len) { 4313 _spdk_bs_dump_read_md_page(seq, ctx); 4314 } else { 4315 spdk_free(ctx->page); 4316 _spdk_bs_dump_finish(seq, ctx, 0); 4317 } 4318 } 4319 4320 static void 4321 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 4322 { 4323 struct spdk_bs_dump_ctx *ctx = cb_arg; 4324 uint64_t lba; 4325 4326 assert(ctx->cur_page < ctx->super->md_len); 4327 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 4328 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 4329 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4330 _spdk_bs_dump_read_md_page_cpl, ctx); 4331 } 4332 4333 static void 4334 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4335 { 4336 struct spdk_bs_dump_ctx *ctx = cb_arg; 4337 4338 fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature); 4339 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 4340 sizeof(ctx->super->signature)) != 0) { 4341 fprintf(ctx->fp, "(Mismatch)\n"); 4342 _spdk_bs_dump_finish(seq, ctx, bserrno); 4343 return; 4344 } else { 4345 fprintf(ctx->fp, "(OK)\n"); 4346 } 4347 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 4348 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 4349 (ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? 
"OK" : "Mismatch"); 4350 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 4351 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 4352 fprintf(ctx->fp, "Super Blob ID: "); 4353 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 4354 fprintf(ctx->fp, "(None)\n"); 4355 } else { 4356 fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob); 4357 } 4358 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 4359 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 4360 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 4361 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 4362 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 4363 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 4364 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 4365 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 4366 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 4367 4368 ctx->cur_page = 0; 4369 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE, 4370 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4371 if (!ctx->page) { 4372 _spdk_bs_dump_finish(seq, ctx, -ENOMEM); 4373 return; 4374 } 4375 _spdk_bs_dump_read_md_page(seq, ctx); 4376 } 4377 4378 void 4379 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 4380 spdk_bs_op_complete cb_fn, void *cb_arg) 4381 { 4382 struct spdk_blob_store *bs; 4383 struct spdk_bs_cpl cpl; 4384 spdk_bs_sequence_t *seq; 4385 struct spdk_bs_dump_ctx *ctx; 4386 struct spdk_bs_opts opts = {}; 4387 int err; 4388 4389 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev); 4390 4391 spdk_bs_opts_init(&opts); 4392 4393 err = _spdk_bs_alloc(dev, &opts, &bs); 4394 if (err) { 4395 dev->destroy(dev); 4396 cb_fn(cb_arg, err); 4397 return; 4398 } 4399 4400 ctx = calloc(1, sizeof(*ctx)); 4401 if (!ctx) { 4402 _spdk_bs_free(bs); 4403 cb_fn(cb_arg, -ENOMEM); 4404 return; 4405 } 4406 4407 ctx->bs = bs; 4408 ctx->fp = fp; 4409 ctx->print_xattr_fn = print_xattr_fn; 4410 4411 /* Allocate memory for the super block */ 4412 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 4413 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4414 if (!ctx->super) { 4415 free(ctx); 4416 _spdk_bs_free(bs); 4417 cb_fn(cb_arg, -ENOMEM); 4418 return; 4419 } 4420 4421 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 4422 cpl.u.bs_basic.cb_fn = cb_fn; 4423 cpl.u.bs_basic.cb_arg = cb_arg; 4424 4425 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4426 if (!seq) { 4427 spdk_free(ctx->super); 4428 free(ctx); 4429 _spdk_bs_free(bs); 4430 cb_fn(cb_arg, -ENOMEM); 4431 return; 4432 } 4433 4434 /* Read the super block */ 4435 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 4436 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 4437 _spdk_bs_dump_super_cpl, ctx); 4438 } 4439 4440 /* END spdk_bs_dump */ 4441 4442 /* START spdk_bs_init */ 4443 4444 struct spdk_bs_init_ctx { 4445 struct spdk_blob_store *bs; 4446 struct spdk_bs_super_block *super; 4447 }; 4448 4449 static void 4450 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4451 { 4452 struct spdk_bs_init_ctx *ctx = cb_arg; 4453 4454 spdk_free(ctx->super); 4455 free(ctx); 
4456 4457 spdk_bs_sequence_finish(seq, bserrno); 4458 } 4459 4460 static void 4461 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4462 { 4463 struct spdk_bs_init_ctx *ctx = cb_arg; 4464 4465 /* Write super block */ 4466 spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0), 4467 _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 4468 _spdk_bs_init_persist_super_cpl, ctx); 4469 } 4470 4471 void 4472 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4473 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4474 { 4475 struct spdk_bs_init_ctx *ctx; 4476 struct spdk_blob_store *bs; 4477 struct spdk_bs_cpl cpl; 4478 spdk_bs_sequence_t *seq; 4479 spdk_bs_batch_t *batch; 4480 uint64_t num_md_lba; 4481 uint64_t num_md_pages; 4482 uint64_t num_md_clusters; 4483 uint32_t i; 4484 struct spdk_bs_opts opts = {}; 4485 int rc; 4486 4487 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev); 4488 4489 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4490 SPDK_ERRLOG("unsupported dev block length of %d\n", 4491 dev->blocklen); 4492 dev->destroy(dev); 4493 cb_fn(cb_arg, NULL, -EINVAL); 4494 return; 4495 } 4496 4497 if (o) { 4498 opts = *o; 4499 } else { 4500 spdk_bs_opts_init(&opts); 4501 } 4502 4503 if (_spdk_bs_opts_verify(&opts) != 0) { 4504 dev->destroy(dev); 4505 cb_fn(cb_arg, NULL, -EINVAL); 4506 return; 4507 } 4508 4509 rc = _spdk_bs_alloc(dev, &opts, &bs); 4510 if (rc) { 4511 dev->destroy(dev); 4512 cb_fn(cb_arg, NULL, rc); 4513 return; 4514 } 4515 4516 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 4517 /* By default, allocate 1 page per cluster. 4518 * Technically, this over-allocates metadata 4519 * because more metadata will reduce the number 4520 * of usable clusters. This can be addressed with 4521 * more complex math in the future. 4522 */ 4523 bs->md_len = bs->total_clusters; 4524 } else { 4525 bs->md_len = opts.num_md_pages; 4526 } 4527 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 4528 if (rc < 0) { 4529 _spdk_bs_free(bs); 4530 cb_fn(cb_arg, NULL, -ENOMEM); 4531 return; 4532 } 4533 4534 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 4535 if (rc < 0) { 4536 _spdk_bs_free(bs); 4537 cb_fn(cb_arg, NULL, -ENOMEM); 4538 return; 4539 } 4540 4541 ctx = calloc(1, sizeof(*ctx)); 4542 if (!ctx) { 4543 _spdk_bs_free(bs); 4544 cb_fn(cb_arg, NULL, -ENOMEM); 4545 return; 4546 } 4547 4548 ctx->bs = bs; 4549 4550 /* Allocate memory for the super block */ 4551 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 4552 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4553 if (!ctx->super) { 4554 free(ctx); 4555 _spdk_bs_free(bs); 4556 cb_fn(cb_arg, NULL, -ENOMEM); 4557 return; 4558 } 4559 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 4560 sizeof(ctx->super->signature)); 4561 ctx->super->version = SPDK_BS_VERSION; 4562 ctx->super->length = sizeof(*ctx->super); 4563 ctx->super->super_blob = bs->super_blob; 4564 ctx->super->clean = 0; 4565 ctx->super->cluster_size = bs->cluster_sz; 4566 ctx->super->io_unit_size = bs->io_unit_size; 4567 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 4568 4569 /* Calculate how many pages the metadata consumes at the front 4570 * of the disk. 4571 */ 4572 4573 /* The super block uses 1 page */ 4574 num_md_pages = 1; 4575 4576 /* The used_md_pages mask requires 1 bit per metadata page, rounded 4577 * up to the nearest page, plus a header. 
4578 */ 4579 ctx->super->used_page_mask_start = num_md_pages; 4580 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 4581 spdk_divide_round_up(bs->md_len, 8), 4582 SPDK_BS_PAGE_SIZE); 4583 num_md_pages += ctx->super->used_page_mask_len; 4584 4585 /* The used_clusters mask requires 1 bit per cluster, rounded 4586 * up to the nearest page, plus a header. 4587 */ 4588 ctx->super->used_cluster_mask_start = num_md_pages; 4589 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 4590 spdk_divide_round_up(bs->total_clusters, 8), 4591 SPDK_BS_PAGE_SIZE); 4592 num_md_pages += ctx->super->used_cluster_mask_len; 4593 4594 /* The used_blobids mask requires 1 bit per metadata page, rounded 4595 * up to the nearest page, plus a header. 4596 */ 4597 ctx->super->used_blobid_mask_start = num_md_pages; 4598 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 4599 spdk_divide_round_up(bs->md_len, 8), 4600 SPDK_BS_PAGE_SIZE); 4601 num_md_pages += ctx->super->used_blobid_mask_len; 4602 4603 /* The metadata region size was chosen above */ 4604 ctx->super->md_start = bs->md_start = num_md_pages; 4605 ctx->super->md_len = bs->md_len; 4606 num_md_pages += bs->md_len; 4607 4608 num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages); 4609 4610 ctx->super->size = dev->blockcnt * dev->blocklen; 4611 4612 ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super); 4613 4614 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 4615 if (num_md_clusters > bs->total_clusters) { 4616 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 4617 "please decrease number of pages reserved for metadata " 4618 "or increase cluster size.\n"); 4619 spdk_free(ctx->super); 4620 free(ctx); 4621 _spdk_bs_free(bs); 4622 cb_fn(cb_arg, NULL, -ENOMEM); 4623 return; 4624 } 4625 /* Claim all of the clusters used by the metadata */ 4626 for (i = 0; i < num_md_clusters; i++) { 4627 _spdk_bs_claim_cluster(bs, i); 4628 } 4629 4630 bs->total_data_clusters = bs->num_free_clusters; 4631 4632 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4633 cpl.u.bs_handle.cb_fn = cb_fn; 4634 cpl.u.bs_handle.cb_arg = cb_arg; 4635 cpl.u.bs_handle.bs = bs; 4636 4637 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4638 if (!seq) { 4639 spdk_free(ctx->super); 4640 free(ctx); 4641 _spdk_bs_free(bs); 4642 cb_fn(cb_arg, NULL, -ENOMEM); 4643 return; 4644 } 4645 4646 batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx); 4647 4648 /* Clear metadata space */ 4649 spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 4650 4651 switch (opts.clear_method) { 4652 case BS_CLEAR_WITH_UNMAP: 4653 /* Trim data clusters */ 4654 spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba); 4655 break; 4656 case BS_CLEAR_WITH_WRITE_ZEROES: 4657 /* Write_zeroes to data clusters */ 4658 spdk_bs_batch_write_zeroes_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba); 4659 break; 4660 case BS_CLEAR_WITH_NONE: 4661 default: 4662 break; 4663 } 4664 4665 spdk_bs_batch_close(batch); 4666 } 4667 4668 /* END spdk_bs_init */ 4669 4670 /* START spdk_bs_destroy */ 4671 4672 static void 4673 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4674 { 4675 struct spdk_bs_init_ctx *ctx = cb_arg; 4676 struct spdk_blob_store *bs = ctx->bs; 4677 4678 /* 4679 * We need to defer calling spdk_bs_call_cpl() until after 4680 * dev destruction, so tuck these away for later use. 
4681 */ 4682 bs->unload_err = bserrno; 4683 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 4684 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 4685 4686 spdk_bs_sequence_finish(seq, bserrno); 4687 4688 _spdk_bs_free(bs); 4689 free(ctx); 4690 } 4691 4692 void 4693 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 4694 void *cb_arg) 4695 { 4696 struct spdk_bs_cpl cpl; 4697 spdk_bs_sequence_t *seq; 4698 struct spdk_bs_init_ctx *ctx; 4699 4700 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n"); 4701 4702 if (!TAILQ_EMPTY(&bs->blobs)) { 4703 SPDK_ERRLOG("Blobstore still has open blobs\n"); 4704 cb_fn(cb_arg, -EBUSY); 4705 return; 4706 } 4707 4708 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 4709 cpl.u.bs_basic.cb_fn = cb_fn; 4710 cpl.u.bs_basic.cb_arg = cb_arg; 4711 4712 ctx = calloc(1, sizeof(*ctx)); 4713 if (!ctx) { 4714 cb_fn(cb_arg, -ENOMEM); 4715 return; 4716 } 4717 4718 ctx->bs = bs; 4719 4720 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4721 if (!seq) { 4722 free(ctx); 4723 cb_fn(cb_arg, -ENOMEM); 4724 return; 4725 } 4726 4727 /* Write zeroes to the super block */ 4728 spdk_bs_sequence_write_zeroes_dev(seq, 4729 _spdk_bs_page_to_lba(bs, 0), 4730 _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 4731 _spdk_bs_destroy_trim_cpl, ctx); 4732 } 4733 4734 /* END spdk_bs_destroy */ 4735 4736 /* START spdk_bs_unload */ 4737 4738 static void 4739 _spdk_bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 4740 { 4741 spdk_bs_sequence_t *seq = ctx->seq; 4742 4743 spdk_free(ctx->super); 4744 4745 /* 4746 * We need to defer calling spdk_bs_call_cpl() until after 4747 * dev destruction, so tuck these away for later use. 4748 */ 4749 ctx->bs->unload_err = bserrno; 4750 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 4751 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 4752 4753 spdk_bs_sequence_finish(seq, bserrno); 4754 4755 _spdk_bs_free(ctx->bs); 4756 free(ctx); 4757 } 4758 4759 static void 4760 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4761 { 4762 struct spdk_bs_load_ctx *ctx = cb_arg; 4763 4764 _spdk_bs_unload_finish(ctx, bserrno); 4765 } 4766 4767 static void 4768 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4769 { 4770 struct spdk_bs_load_ctx *ctx = cb_arg; 4771 4772 spdk_free(ctx->mask); 4773 4774 if (bserrno != 0) { 4775 _spdk_bs_unload_finish(ctx, bserrno); 4776 return; 4777 } 4778 4779 ctx->super->clean = 1; 4780 4781 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx); 4782 } 4783 4784 static void 4785 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4786 { 4787 struct spdk_bs_load_ctx *ctx = cb_arg; 4788 4789 spdk_free(ctx->mask); 4790 ctx->mask = NULL; 4791 4792 if (bserrno != 0) { 4793 _spdk_bs_unload_finish(ctx, bserrno); 4794 return; 4795 } 4796 4797 _spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_unload_write_used_clusters_cpl); 4798 } 4799 4800 static void 4801 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4802 { 4803 struct spdk_bs_load_ctx *ctx = cb_arg; 4804 4805 spdk_free(ctx->mask); 4806 ctx->mask = NULL; 4807 4808 if (bserrno != 0) { 4809 _spdk_bs_unload_finish(ctx, bserrno); 4810 return; 4811 } 4812 4813 _spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_unload_write_used_blobids_cpl); 4814 } 4815 4816 static void 4817 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4818 { 4819 
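/* The super block has been re-read at unload time; from here the in-memory
 * allocation state is persisted before the blobstore is marked clean:
 * the used_md_pages mask first, then used_blobids, then used_clusters, and
 * finally the super block itself is rewritten with clean = 1.
 */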
struct spdk_bs_load_ctx *ctx = cb_arg; 4820 4821 if (bserrno != 0) { 4822 _spdk_bs_unload_finish(ctx, bserrno); 4823 return; 4824 } 4825 4826 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl); 4827 } 4828 4829 void 4830 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 4831 { 4832 struct spdk_bs_cpl cpl; 4833 struct spdk_bs_load_ctx *ctx; 4834 4835 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n"); 4836 4837 if (!TAILQ_EMPTY(&bs->blobs)) { 4838 SPDK_ERRLOG("Blobstore still has open blobs\n"); 4839 cb_fn(cb_arg, -EBUSY); 4840 return; 4841 } 4842 4843 ctx = calloc(1, sizeof(*ctx)); 4844 if (!ctx) { 4845 cb_fn(cb_arg, -ENOMEM); 4846 return; 4847 } 4848 4849 ctx->bs = bs; 4850 4851 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 4852 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4853 if (!ctx->super) { 4854 free(ctx); 4855 cb_fn(cb_arg, -ENOMEM); 4856 return; 4857 } 4858 4859 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 4860 cpl.u.bs_basic.cb_fn = cb_fn; 4861 cpl.u.bs_basic.cb_arg = cb_arg; 4862 4863 ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4864 if (!ctx->seq) { 4865 spdk_free(ctx->super); 4866 free(ctx); 4867 cb_fn(cb_arg, -ENOMEM); 4868 return; 4869 } 4870 4871 /* Read super block */ 4872 spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 4873 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 4874 _spdk_bs_unload_read_super_cpl, ctx); 4875 } 4876 4877 /* END spdk_bs_unload */ 4878 4879 /* START spdk_bs_set_super */ 4880 4881 struct spdk_bs_set_super_ctx { 4882 struct spdk_blob_store *bs; 4883 struct spdk_bs_super_block *super; 4884 }; 4885 4886 static void 4887 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4888 { 4889 struct spdk_bs_set_super_ctx *ctx = cb_arg; 4890 4891 if (bserrno != 0) { 4892 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 4893 } 4894 4895 spdk_free(ctx->super); 4896 4897 spdk_bs_sequence_finish(seq, bserrno); 4898 4899 free(ctx); 4900 } 4901 4902 static void 4903 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4904 { 4905 struct spdk_bs_set_super_ctx *ctx = cb_arg; 4906 4907 if (bserrno != 0) { 4908 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 4909 spdk_free(ctx->super); 4910 spdk_bs_sequence_finish(seq, bserrno); 4911 free(ctx); 4912 return; 4913 } 4914 4915 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx); 4916 } 4917 4918 void 4919 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 4920 spdk_bs_op_complete cb_fn, void *cb_arg) 4921 { 4922 struct spdk_bs_cpl cpl; 4923 spdk_bs_sequence_t *seq; 4924 struct spdk_bs_set_super_ctx *ctx; 4925 4926 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n"); 4927 4928 ctx = calloc(1, sizeof(*ctx)); 4929 if (!ctx) { 4930 cb_fn(cb_arg, -ENOMEM); 4931 return; 4932 } 4933 4934 ctx->bs = bs; 4935 4936 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 4937 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4938 if (!ctx->super) { 4939 free(ctx); 4940 cb_fn(cb_arg, -ENOMEM); 4941 return; 4942 } 4943 4944 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 4945 cpl.u.bs_basic.cb_fn = cb_fn; 4946 cpl.u.bs_basic.cb_arg = cb_arg; 4947 4948 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4949 if (!seq) { 4950 spdk_free(ctx->super); 4951 free(ctx); 4952 cb_fn(cb_arg, -ENOMEM); 4953 return; 4954 } 4955 4956 bs->super_blob = blobid; 4957 4958 /* Read super block */ 4959 spdk_bs_sequence_read_dev(seq, 
ctx->super, _spdk_bs_page_to_lba(bs, 0), 4960 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 4961 _spdk_bs_set_super_read_cpl, ctx); 4962 } 4963 4964 /* END spdk_bs_set_super */ 4965 4966 void 4967 spdk_bs_get_super(struct spdk_blob_store *bs, 4968 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 4969 { 4970 if (bs->super_blob == SPDK_BLOBID_INVALID) { 4971 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 4972 } else { 4973 cb_fn(cb_arg, bs->super_blob, 0); 4974 } 4975 } 4976 4977 uint64_t 4978 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 4979 { 4980 return bs->cluster_sz; 4981 } 4982 4983 uint64_t 4984 spdk_bs_get_page_size(struct spdk_blob_store *bs) 4985 { 4986 return SPDK_BS_PAGE_SIZE; 4987 } 4988 4989 uint64_t 4990 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 4991 { 4992 return bs->io_unit_size; 4993 } 4994 4995 uint64_t 4996 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 4997 { 4998 return bs->num_free_clusters; 4999 } 5000 5001 uint64_t 5002 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 5003 { 5004 return bs->total_data_clusters; 5005 } 5006 5007 static int 5008 spdk_bs_register_md_thread(struct spdk_blob_store *bs) 5009 { 5010 bs->md_channel = spdk_get_io_channel(bs); 5011 if (!bs->md_channel) { 5012 SPDK_ERRLOG("Failed to get IO channel.\n"); 5013 return -1; 5014 } 5015 5016 return 0; 5017 } 5018 5019 static int 5020 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs) 5021 { 5022 spdk_put_io_channel(bs->md_channel); 5023 5024 return 0; 5025 } 5026 5027 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 5028 { 5029 assert(blob != NULL); 5030 5031 return blob->id; 5032 } 5033 5034 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 5035 { 5036 assert(blob != NULL); 5037 5038 return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters); 5039 } 5040 5041 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob) 5042 { 5043 assert(blob != NULL); 5044 5045 return spdk_blob_get_num_pages(blob) * _spdk_bs_io_unit_per_page(blob->bs); 5046 } 5047 5048 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 5049 { 5050 assert(blob != NULL); 5051 5052 return blob->active.num_clusters; 5053 } 5054 5055 /* START spdk_bs_create_blob */ 5056 5057 static void 5058 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5059 { 5060 struct spdk_blob *blob = cb_arg; 5061 5062 _spdk_blob_free(blob); 5063 5064 spdk_bs_sequence_finish(seq, bserrno); 5065 } 5066 5067 static int 5068 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 5069 bool internal) 5070 { 5071 uint64_t i; 5072 size_t value_len = 0; 5073 int rc; 5074 const void *value = NULL; 5075 if (xattrs->count > 0 && xattrs->get_value == NULL) { 5076 return -EINVAL; 5077 } 5078 for (i = 0; i < xattrs->count; i++) { 5079 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 5080 if (value == NULL || value_len == 0) { 5081 return -EINVAL; 5082 } 5083 rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 5084 if (rc < 0) { 5085 return rc; 5086 } 5087 } 5088 return 0; 5089 } 5090 5091 static void 5092 _spdk_bs_create_blob(struct spdk_blob_store *bs, 5093 const struct spdk_blob_opts *opts, 5094 const struct spdk_blob_xattr_opts *internal_xattrs, 5095 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5096 { 5097 struct spdk_blob *blob; 5098 uint32_t page_idx; 5099 struct spdk_bs_cpl cpl; 5100 struct spdk_blob_opts opts_default; 5101 struct spdk_blob_xattr_opts internal_xattrs_default; 5102 
spdk_bs_sequence_t *seq; 5103 spdk_blob_id id; 5104 int rc; 5105 5106 assert(spdk_get_thread() == bs->md_thread); 5107 5108 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 5109 if (page_idx == UINT32_MAX) { 5110 cb_fn(cb_arg, 0, -ENOMEM); 5111 return; 5112 } 5113 spdk_bit_array_set(bs->used_blobids, page_idx); 5114 _spdk_bs_claim_md_page(bs, page_idx); 5115 5116 id = _spdk_bs_page_to_blobid(page_idx); 5117 5118 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx); 5119 5120 blob = _spdk_blob_alloc(bs, id); 5121 if (!blob) { 5122 cb_fn(cb_arg, 0, -ENOMEM); 5123 return; 5124 } 5125 5126 if (!opts) { 5127 spdk_blob_opts_init(&opts_default); 5128 opts = &opts_default; 5129 } 5130 5131 blob->use_extent_table = opts->use_extent_table; 5132 if (blob->use_extent_table) { 5133 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 5134 } 5135 5136 if (!internal_xattrs) { 5137 _spdk_blob_xattrs_init(&internal_xattrs_default); 5138 internal_xattrs = &internal_xattrs_default; 5139 } 5140 5141 rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false); 5142 if (rc < 0) { 5143 _spdk_blob_free(blob); 5144 cb_fn(cb_arg, 0, rc); 5145 return; 5146 } 5147 5148 rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true); 5149 if (rc < 0) { 5150 _spdk_blob_free(blob); 5151 cb_fn(cb_arg, 0, rc); 5152 return; 5153 } 5154 5155 if (opts->thin_provision) { 5156 _spdk_blob_set_thin_provision(blob); 5157 } 5158 5159 _spdk_blob_set_clear_method(blob, opts->clear_method); 5160 5161 rc = _spdk_blob_resize(blob, opts->num_clusters); 5162 if (rc < 0) { 5163 _spdk_blob_free(blob); 5164 cb_fn(cb_arg, 0, rc); 5165 return; 5166 } 5167 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 5168 cpl.u.blobid.cb_fn = cb_fn; 5169 cpl.u.blobid.cb_arg = cb_arg; 5170 cpl.u.blobid.blobid = blob->id; 5171 5172 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 5173 if (!seq) { 5174 _spdk_blob_free(blob); 5175 cb_fn(cb_arg, 0, -ENOMEM); 5176 return; 5177 } 5178 5179 _spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob); 5180 } 5181 5182 void spdk_bs_create_blob(struct spdk_blob_store *bs, 5183 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5184 { 5185 _spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 5186 } 5187 5188 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts, 5189 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5190 { 5191 _spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 5192 } 5193 5194 /* END spdk_bs_create_blob */ 5195 5196 /* START blob_cleanup */ 5197 5198 struct spdk_clone_snapshot_ctx { 5199 struct spdk_bs_cpl cpl; 5200 int bserrno; 5201 bool frozen; 5202 5203 struct spdk_io_channel *channel; 5204 5205 /* Current cluster for inflate operation */ 5206 uint64_t cluster; 5207 5208 /* For inflation force allocation of all unallocated clusters and remove 5209 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */ 5210 bool allocate_all; 5211 5212 struct { 5213 spdk_blob_id id; 5214 struct spdk_blob *blob; 5215 } original; 5216 struct { 5217 spdk_blob_id id; 5218 struct spdk_blob *blob; 5219 } new; 5220 5221 /* xattrs specified for snapshot/clones only. They have no impact on 5222 * the original blobs xattrs. 
*/ 5223 const struct spdk_blob_xattr_opts *xattrs; 5224 }; 5225 5226 static void 5227 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 5228 { 5229 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 5230 struct spdk_bs_cpl *cpl = &ctx->cpl; 5231 5232 if (bserrno != 0) { 5233 if (ctx->bserrno != 0) { 5234 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 5235 } else { 5236 ctx->bserrno = bserrno; 5237 } 5238 } 5239 5240 switch (cpl->type) { 5241 case SPDK_BS_CPL_TYPE_BLOBID: 5242 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 5243 break; 5244 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 5245 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 5246 break; 5247 default: 5248 SPDK_UNREACHABLE(); 5249 break; 5250 } 5251 5252 free(ctx); 5253 } 5254 5255 static void 5256 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 5257 { 5258 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5259 struct spdk_blob *origblob = ctx->original.blob; 5260 5261 if (bserrno != 0) { 5262 if (ctx->bserrno != 0) { 5263 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 5264 } else { 5265 ctx->bserrno = bserrno; 5266 } 5267 } 5268 5269 ctx->original.id = origblob->id; 5270 origblob->locked_operation_in_progress = false; 5271 5272 spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 5273 } 5274 5275 static void 5276 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 5277 { 5278 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5279 struct spdk_blob *origblob = ctx->original.blob; 5280 5281 if (bserrno != 0) { 5282 if (ctx->bserrno != 0) { 5283 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 5284 } else { 5285 ctx->bserrno = bserrno; 5286 } 5287 } 5288 5289 if (ctx->frozen) { 5290 /* Unfreeze any outstanding I/O */ 5291 _spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx); 5292 } else { 5293 _spdk_bs_snapshot_unfreeze_cpl(ctx, 0); 5294 } 5295 5296 } 5297 5298 static void 5299 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno) 5300 { 5301 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5302 struct spdk_blob *newblob = ctx->new.blob; 5303 5304 if (bserrno != 0) { 5305 if (ctx->bserrno != 0) { 5306 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 5307 } else { 5308 ctx->bserrno = bserrno; 5309 } 5310 } 5311 5312 ctx->new.id = newblob->id; 5313 spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 5314 } 5315 5316 /* END blob_cleanup */ 5317 5318 /* START spdk_bs_create_snapshot */ 5319 5320 static void 5321 _spdk_bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 5322 { 5323 uint64_t *cluster_temp; 5324 uint32_t *extent_page_temp; 5325 5326 cluster_temp = blob1->active.clusters; 5327 blob1->active.clusters = blob2->active.clusters; 5328 blob2->active.clusters = cluster_temp; 5329 5330 extent_page_temp = blob1->active.extent_pages; 5331 blob1->active.extent_pages = blob2->active.extent_pages; 5332 blob2->active.extent_pages = extent_page_temp; 5333 } 5334 5335 static void 5336 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 5337 { 5338 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5339 struct spdk_blob *origblob = ctx->original.blob; 5340 struct spdk_blob *newblob = ctx->new.blob; 5341 5342 if (bserrno != 0) { 5343 _spdk_bs_snapshot_swap_cluster_maps(newblob, origblob); 5344 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5345 
return; 5346 } 5347 5348 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 5349 bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 5350 if (bserrno != 0) { 5351 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5352 return; 5353 } 5354 5355 _spdk_bs_blob_list_add(ctx->original.blob); 5356 5357 spdk_blob_set_read_only(newblob); 5358 5359 /* sync snapshot metadata */ 5360 spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 5361 } 5362 5363 static void 5364 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 5365 { 5366 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5367 struct spdk_blob *origblob = ctx->original.blob; 5368 struct spdk_blob *newblob = ctx->new.blob; 5369 5370 if (bserrno != 0) { 5371 /* return cluster map back to original */ 5372 _spdk_bs_snapshot_swap_cluster_maps(newblob, origblob); 5373 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 5374 return; 5375 } 5376 5377 /* Set internal xattr for snapshot id */ 5378 bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 5379 if (bserrno != 0) { 5380 /* return cluster map back to original */ 5381 _spdk_bs_snapshot_swap_cluster_maps(newblob, origblob); 5382 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 5383 return; 5384 } 5385 5386 _spdk_bs_blob_list_remove(origblob); 5387 origblob->parent_id = newblob->id; 5388 5389 /* Create new back_bs_dev for snapshot */ 5390 origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob); 5391 if (origblob->back_bs_dev == NULL) { 5392 /* return cluster map back to original */ 5393 _spdk_bs_snapshot_swap_cluster_maps(newblob, origblob); 5394 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 5395 return; 5396 } 5397 5398 /* set clone blob as thin provisioned */ 5399 _spdk_blob_set_thin_provision(origblob); 5400 5401 _spdk_bs_blob_list_add(newblob); 5402 5403 /* sync clone metadata */ 5404 spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx); 5405 } 5406 5407 static void 5408 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc) 5409 { 5410 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5411 struct spdk_blob *origblob = ctx->original.blob; 5412 struct spdk_blob *newblob = ctx->new.blob; 5413 int bserrno; 5414 5415 if (rc != 0) { 5416 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc); 5417 return; 5418 } 5419 5420 ctx->frozen = true; 5421 5422 /* set new back_bs_dev for snapshot */ 5423 newblob->back_bs_dev = origblob->back_bs_dev; 5424 /* Set invalid flags from origblob */ 5425 newblob->invalid_flags = origblob->invalid_flags; 5426 5427 /* inherit parent from original blob if set */ 5428 newblob->parent_id = origblob->parent_id; 5429 if (origblob->parent_id != SPDK_BLOBID_INVALID) { 5430 /* Set internal xattr for snapshot id */ 5431 bserrno = _spdk_blob_set_xattr(newblob, BLOB_SNAPSHOT, 5432 &origblob->parent_id, sizeof(spdk_blob_id), true); 5433 if (bserrno != 0) { 5434 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 5435 return; 5436 } 5437 } 5438 5439 /* swap cluster maps */ 5440 _spdk_bs_snapshot_swap_cluster_maps(newblob, origblob); 5441 5442 /* Set the clear method on the new blob to match the original. 
*/ 5443 _spdk_blob_set_clear_method(newblob, origblob->clear_method); 5444 5445 /* sync snapshot metadata */ 5446 spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx); 5447 } 5448 5449 static void 5450 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5451 { 5452 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5453 struct spdk_blob *origblob = ctx->original.blob; 5454 struct spdk_blob *newblob = _blob; 5455 5456 if (bserrno != 0) { 5457 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5458 return; 5459 } 5460 5461 ctx->new.blob = newblob; 5462 assert(spdk_blob_is_thin_provisioned(newblob)); 5463 assert(spdk_mem_all_zero(newblob->active.clusters, 5464 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 5465 assert(spdk_mem_all_zero(newblob->active.extent_pages, 5466 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 5467 5468 _spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx); 5469 } 5470 5471 static void 5472 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 5473 { 5474 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5475 struct spdk_blob *origblob = ctx->original.blob; 5476 5477 if (bserrno != 0) { 5478 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5479 return; 5480 } 5481 5482 ctx->new.id = blobid; 5483 ctx->cpl.u.blobid.blobid = blobid; 5484 5485 spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx); 5486 } 5487 5488 5489 static void 5490 _spdk_bs_xattr_snapshot(void *arg, const char *name, 5491 const void **value, size_t *value_len) 5492 { 5493 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 5494 5495 struct spdk_blob *blob = (struct spdk_blob *)arg; 5496 *value = &blob->id; 5497 *value_len = sizeof(blob->id); 5498 } 5499 5500 static void 5501 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5502 { 5503 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5504 struct spdk_blob_opts opts; 5505 struct spdk_blob_xattr_opts internal_xattrs; 5506 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 5507 5508 if (bserrno != 0) { 5509 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 5510 return; 5511 } 5512 5513 ctx->original.blob = _blob; 5514 5515 if (_blob->data_ro || _blob->md_ro) { 5516 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read only blob with id %lu\n", 5517 _blob->id); 5518 ctx->bserrno = -EINVAL; 5519 spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 5520 return; 5521 } 5522 5523 if (_blob->locked_operation_in_progress) { 5524 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot - another operation in progress\n"); 5525 ctx->bserrno = -EBUSY; 5526 spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 5527 return; 5528 } 5529 5530 _blob->locked_operation_in_progress = true; 5531 5532 spdk_blob_opts_init(&opts); 5533 _spdk_blob_xattrs_init(&internal_xattrs); 5534 5535 /* Change the size of new blob to the same as in original blob, 5536 * but do not allocate clusters */ 5537 opts.thin_provision = true; 5538 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 5539 opts.use_extent_table = _blob->use_extent_table; 5540 5541 /* If there are any xattrs specified for snapshot, set them now */ 5542 if (ctx->xattrs) { 5543 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 5544 } 5545 
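/* The new snapshot blob is created with an internal SNAPSHOT_IN_PROGRESS xattr
 * holding the original blob's id; it is removed again in
 * _spdk_bs_snapshot_origblob_sync_cpl() once the snapshot has been fully created.
 */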
/* Set internal xattr SNAPSHOT_IN_PROGRESS */ 5546 internal_xattrs.count = 1; 5547 internal_xattrs.ctx = _blob; 5548 internal_xattrs.names = xattrs_names; 5549 internal_xattrs.get_value = _spdk_bs_xattr_snapshot; 5550 5551 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 5552 _spdk_bs_snapshot_newblob_create_cpl, ctx); 5553 } 5554 5555 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 5556 const struct spdk_blob_xattr_opts *snapshot_xattrs, 5557 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5558 { 5559 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 5560 5561 if (!ctx) { 5562 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 5563 return; 5564 } 5565 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 5566 ctx->cpl.u.blobid.cb_fn = cb_fn; 5567 ctx->cpl.u.blobid.cb_arg = cb_arg; 5568 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 5569 ctx->bserrno = 0; 5570 ctx->frozen = false; 5571 ctx->original.id = blobid; 5572 ctx->xattrs = snapshot_xattrs; 5573 5574 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx); 5575 } 5576 /* END spdk_bs_create_snapshot */ 5577 5578 /* START spdk_bs_create_clone */ 5579 5580 static void 5581 _spdk_bs_xattr_clone(void *arg, const char *name, 5582 const void **value, size_t *value_len) 5583 { 5584 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 5585 5586 struct spdk_blob *blob = (struct spdk_blob *)arg; 5587 *value = &blob->id; 5588 *value_len = sizeof(blob->id); 5589 } 5590 5591 static void 5592 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5593 { 5594 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5595 struct spdk_blob *clone = _blob; 5596 5597 ctx->new.blob = clone; 5598 _spdk_bs_blob_list_add(clone); 5599 5600 spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 5601 } 5602 5603 static void 5604 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 5605 { 5606 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5607 5608 ctx->cpl.u.blobid.blobid = blobid; 5609 spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx); 5610 } 5611 5612 static void 5613 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5614 { 5615 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5616 struct spdk_blob_opts opts; 5617 struct spdk_blob_xattr_opts internal_xattrs; 5618 char *xattr_names[] = { BLOB_SNAPSHOT }; 5619 5620 if (bserrno != 0) { 5621 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 5622 return; 5623 } 5624 5625 ctx->original.blob = _blob; 5626 5627 if (!_blob->data_ro || !_blob->md_ro) { 5628 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Clone not from read-only blob\n"); 5629 ctx->bserrno = -EINVAL; 5630 spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 5631 return; 5632 } 5633 5634 if (_blob->locked_operation_in_progress) { 5635 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone - another operation in progress\n"); 5636 ctx->bserrno = -EBUSY; 5637 spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 5638 return; 5639 } 5640 5641 _blob->locked_operation_in_progress = true; 5642 5643 spdk_blob_opts_init(&opts); 5644 _spdk_blob_xattrs_init(&internal_xattrs); 5645 5646 opts.thin_provision = true; 5647 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 5648 opts.use_extent_table = _blob->use_extent_table; 5649 if (ctx->xattrs) 
{ 5650 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 5651 } 5652 5653 /* Set internal xattr BLOB_SNAPSHOT */ 5654 internal_xattrs.count = 1; 5655 internal_xattrs.ctx = _blob; 5656 internal_xattrs.names = xattr_names; 5657 internal_xattrs.get_value = _spdk_bs_xattr_clone; 5658 5659 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 5660 _spdk_bs_clone_newblob_create_cpl, ctx); 5661 } 5662 5663 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid, 5664 const struct spdk_blob_xattr_opts *clone_xattrs, 5665 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5666 { 5667 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 5668 5669 if (!ctx) { 5670 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 5671 return; 5672 } 5673 5674 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 5675 ctx->cpl.u.blobid.cb_fn = cb_fn; 5676 ctx->cpl.u.blobid.cb_arg = cb_arg; 5677 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 5678 ctx->bserrno = 0; 5679 ctx->xattrs = clone_xattrs; 5680 ctx->original.id = blobid; 5681 5682 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx); 5683 } 5684 5685 /* END spdk_bs_create_clone */ 5686 5687 /* START spdk_bs_inflate_blob */ 5688 5689 static void 5690 _spdk_bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno) 5691 { 5692 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5693 struct spdk_blob *_blob = ctx->original.blob; 5694 5695 if (bserrno != 0) { 5696 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5697 return; 5698 } 5699 5700 assert(_parent != NULL); 5701 5702 _spdk_bs_blob_list_remove(_blob); 5703 _blob->parent_id = _parent->id; 5704 _spdk_blob_set_xattr(_blob, BLOB_SNAPSHOT, &_blob->parent_id, 5705 sizeof(spdk_blob_id), true); 5706 5707 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 5708 _blob->back_bs_dev = spdk_bs_create_blob_bs_dev(_parent); 5709 _spdk_bs_blob_list_add(_blob); 5710 5711 spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 5712 } 5713 5714 static void 5715 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno) 5716 { 5717 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5718 struct spdk_blob *_blob = ctx->original.blob; 5719 struct spdk_blob *_parent; 5720 5721 if (bserrno != 0) { 5722 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5723 return; 5724 } 5725 5726 if (ctx->allocate_all) { 5727 /* remove thin provisioning */ 5728 _spdk_bs_blob_list_remove(_blob); 5729 _spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 5730 _blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV; 5731 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 5732 _blob->back_bs_dev = NULL; 5733 _blob->parent_id = SPDK_BLOBID_INVALID; 5734 } else { 5735 _parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob; 5736 if (_parent->parent_id != SPDK_BLOBID_INVALID) { 5737 /* We must change the parent of the inflated blob */ 5738 spdk_bs_open_blob(_blob->bs, _parent->parent_id, 5739 _spdk_bs_inflate_blob_set_parent_cpl, ctx); 5740 return; 5741 } 5742 5743 _spdk_bs_blob_list_remove(_blob); 5744 _spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true); 5745 _blob->parent_id = SPDK_BLOBID_INVALID; 5746 _blob->back_bs_dev->destroy(_blob->back_bs_dev); 5747 _blob->back_bs_dev = spdk_bs_create_zeroes_dev(); 5748 } 5749 5750 _blob->state = SPDK_BLOB_STATE_DIRTY; 5751 spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 5752 } 5753 5754 /* Check if cluster needs 
allocation */ 5755 static inline bool 5756 _spdk_bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all) 5757 { 5758 struct spdk_blob_bs_dev *b; 5759 5760 assert(blob != NULL); 5761 5762 if (blob->active.clusters[cluster] != 0) { 5763 /* Cluster is already allocated */ 5764 return false; 5765 } 5766 5767 if (blob->parent_id == SPDK_BLOBID_INVALID) { 5768 /* Blob has no parent blob */ 5769 return allocate_all; 5770 } 5771 5772 b = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 5773 return (allocate_all || b->blob->active.clusters[cluster] != 0); 5774 } 5775 5776 static void 5777 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno) 5778 { 5779 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5780 struct spdk_blob *_blob = ctx->original.blob; 5781 uint64_t offset; 5782 5783 if (bserrno != 0) { 5784 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5785 return; 5786 } 5787 5788 for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) { 5789 if (_spdk_bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) { 5790 break; 5791 } 5792 } 5793 5794 if (ctx->cluster < _blob->active.num_clusters) { 5795 offset = _spdk_bs_cluster_to_lba(_blob->bs, ctx->cluster); 5796 5797 /* We may safely increment a cluster before write */ 5798 ctx->cluster++; 5799 5800 /* Use zero length write to touch a cluster */ 5801 spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0, 5802 _spdk_bs_inflate_blob_touch_next, ctx); 5803 } else { 5804 _spdk_bs_inflate_blob_done(cb_arg, bserrno); 5805 } 5806 } 5807 5808 static void 5809 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5810 { 5811 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5812 uint64_t lfc; /* lowest free cluster */ 5813 uint64_t i; 5814 5815 if (bserrno != 0) { 5816 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 5817 return; 5818 } 5819 5820 ctx->original.blob = _blob; 5821 5822 if (_blob->locked_operation_in_progress) { 5823 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot inflate blob - another operation in progress\n"); 5824 ctx->bserrno = -EBUSY; 5825 spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 5826 return; 5827 } 5828 5829 _blob->locked_operation_in_progress = true; 5830 5831 if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) { 5832 /* This blob has no parent, so we cannot decouple it. */ 5833 SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n"); 5834 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL); 5835 return; 5836 } 5837 5838 if (spdk_blob_is_thin_provisioned(_blob) == false) { 5839 /* This is not a thin provisioned blob. No need to inflate. */ 5840 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0); 5841 return; 5842 } 5843 5844 /* Do two passes - one to verify that we can obtain enough clusters 5845 * and another to actually claim them. 5846 */ 5847 lfc = 0; 5848 for (i = 0; i < _blob->active.num_clusters; i++) { 5849 if (_spdk_bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) { 5850 lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc); 5851 if (lfc == UINT32_MAX) { 5852 /* No more free clusters.
Cannot satisfy the request */ 5853 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC); 5854 return; 5855 } 5856 lfc++; 5857 } 5858 } 5859 5860 ctx->cluster = 0; 5861 _spdk_bs_inflate_blob_touch_next(ctx, 0); 5862 } 5863 5864 static void 5865 _spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 5866 spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg) 5867 { 5868 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 5869 5870 if (!ctx) { 5871 cb_fn(cb_arg, -ENOMEM); 5872 return; 5873 } 5874 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 5875 ctx->cpl.u.bs_basic.cb_fn = cb_fn; 5876 ctx->cpl.u.bs_basic.cb_arg = cb_arg; 5877 ctx->bserrno = 0; 5878 ctx->original.id = blobid; 5879 ctx->channel = channel; 5880 ctx->allocate_all = allocate_all; 5881 5882 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx); 5883 } 5884 5885 void 5886 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 5887 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 5888 { 5889 _spdk_bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg); 5890 } 5891 5892 void 5893 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel, 5894 spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg) 5895 { 5896 _spdk_bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg); 5897 } 5898 /* END spdk_bs_inflate_blob */ 5899 5900 /* START spdk_blob_resize */ 5901 struct spdk_bs_resize_ctx { 5902 spdk_blob_op_complete cb_fn; 5903 void *cb_arg; 5904 struct spdk_blob *blob; 5905 uint64_t sz; 5906 int rc; 5907 }; 5908 5909 static void 5910 _spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc) 5911 { 5912 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 5913 5914 if (rc != 0) { 5915 SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc); 5916 } 5917 5918 if (ctx->rc != 0) { 5919 SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc); 5920 rc = ctx->rc; 5921 } 5922 5923 ctx->blob->locked_operation_in_progress = false; 5924 5925 ctx->cb_fn(ctx->cb_arg, rc); 5926 free(ctx); 5927 } 5928 5929 static void 5930 _spdk_bs_resize_freeze_cpl(void *cb_arg, int rc) 5931 { 5932 struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg; 5933 5934 if (rc != 0) { 5935 ctx->blob->locked_operation_in_progress = false; 5936 ctx->cb_fn(ctx->cb_arg, rc); 5937 free(ctx); 5938 return; 5939 } 5940 5941 ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz); 5942 5943 _spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx); 5944 } 5945 5946 void 5947 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg) 5948 { 5949 struct spdk_bs_resize_ctx *ctx; 5950 5951 _spdk_blob_verify_md_op(blob); 5952 5953 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz); 5954 5955 if (blob->md_ro) { 5956 cb_fn(cb_arg, -EPERM); 5957 return; 5958 } 5959 5960 if (sz == blob->active.num_clusters) { 5961 cb_fn(cb_arg, 0); 5962 return; 5963 } 5964 5965 if (blob->locked_operation_in_progress) { 5966 cb_fn(cb_arg, -EBUSY); 5967 return; 5968 } 5969 5970 ctx = calloc(1, sizeof(*ctx)); 5971 if (!ctx) { 5972 cb_fn(cb_arg, -ENOMEM); 5973 return; 5974 } 5975 5976 blob->locked_operation_in_progress = true; 5977 ctx->cb_fn = cb_fn; 5978 ctx->cb_arg = cb_arg; 5979 ctx->blob = blob; 5980 ctx->sz = sz; 5981 _spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx); 5982 } 5983 5984 /* END spdk_blob_resize */ 5985 5986 5987 /* START 
spdk_bs_delete_blob */ 5988 5989 static void 5990 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno) 5991 { 5992 spdk_bs_sequence_t *seq = cb_arg; 5993 5994 spdk_bs_sequence_finish(seq, bserrno); 5995 } 5996 5997 static void 5998 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5999 { 6000 struct spdk_blob *blob = cb_arg; 6001 6002 if (bserrno != 0) { 6003 /* 6004 * We already removed this blob from the blobstore tailq, so 6005 * we need to free it here since this is the last reference 6006 * to it. 6007 */ 6008 _spdk_blob_free(blob); 6009 _spdk_bs_delete_close_cpl(seq, bserrno); 6010 return; 6011 } 6012 6013 /* 6014 * This will immediately decrement the ref_count and call 6015 * the completion routine since the metadata state is clean. 6016 * By calling spdk_blob_close, we reduce the number of call 6017 * points into code that touches the blob->open_ref count 6018 * and the blobstore's blob list. 6019 */ 6020 spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq); 6021 } 6022 6023 struct delete_snapshot_ctx { 6024 struct spdk_blob_list *parent_snapshot_entry; 6025 struct spdk_blob *snapshot; 6026 bool snapshot_md_ro; 6027 struct spdk_blob *clone; 6028 bool clone_md_ro; 6029 spdk_blob_op_with_handle_complete cb_fn; 6030 void *cb_arg; 6031 int bserrno; 6032 }; 6033 6034 static void 6035 _spdk_delete_blob_cleanup_finish(void *cb_arg, int bserrno) 6036 { 6037 struct delete_snapshot_ctx *ctx = cb_arg; 6038 6039 if (bserrno != 0) { 6040 SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno); 6041 } 6042 6043 assert(ctx != NULL); 6044 6045 if (bserrno != 0 && ctx->bserrno == 0) { 6046 ctx->bserrno = bserrno; 6047 } 6048 6049 ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno); 6050 free(ctx); 6051 } 6052 6053 static void 6054 _spdk_delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno) 6055 { 6056 struct delete_snapshot_ctx *ctx = cb_arg; 6057 6058 if (bserrno != 0) { 6059 ctx->bserrno = bserrno; 6060 SPDK_ERRLOG("Clone cleanup error %d\n", bserrno); 6061 } 6062 6063 /* open_ref == 1 means that only the deletion context has opened this snapshot 6064 * open_ref == 2 means that the clone has opened this snapshot as well, 6065 * so we have to add it back to the blobs list */ 6066 if (ctx->snapshot->open_ref == 2) { 6067 TAILQ_INSERT_HEAD(&ctx->snapshot->bs->blobs, ctx->snapshot, link); 6068 } 6069 6070 ctx->snapshot->locked_operation_in_progress = false; 6071 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 6072 6073 spdk_blob_close(ctx->snapshot, _spdk_delete_blob_cleanup_finish, ctx); 6074 } 6075 6076 static void 6077 _spdk_delete_snapshot_cleanup_clone(void *cb_arg, int bserrno) 6078 { 6079 struct delete_snapshot_ctx *ctx = cb_arg; 6080 6081 ctx->clone->locked_operation_in_progress = false; 6082 ctx->clone->md_ro = ctx->clone_md_ro; 6083 6084 spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx); 6085 } 6086 6087 static void 6088 _spdk_delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 6089 { 6090 struct delete_snapshot_ctx *ctx = cb_arg; 6091 6092 if (bserrno) { 6093 ctx->bserrno = bserrno; 6094 _spdk_delete_snapshot_cleanup_clone(ctx, 0); 6095 return; 6096 } 6097 6098 ctx->clone->locked_operation_in_progress = false; 6099 spdk_blob_close(ctx->clone, _spdk_delete_blob_cleanup_finish, ctx); 6100 } 6101 6102 static void 6103 _spdk_delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno) 6104 { 6105 struct delete_snapshot_ctx *ctx = cb_arg; 6106 struct spdk_blob_list *parent_snapshot_entry = NULL; 6107 struct spdk_blob_list *snapshot_entry = NULL;
6108 struct spdk_blob_list *clone_entry = NULL; 6109 struct spdk_blob_list *snapshot_clone_entry = NULL; 6110 6111 if (bserrno) { 6112 SPDK_ERRLOG("Failed to sync MD on blob\n"); 6113 ctx->bserrno = bserrno; 6114 _spdk_delete_snapshot_cleanup_clone(ctx, 0); 6115 return; 6116 } 6117 6118 /* Get snapshot entry for the snapshot we want to remove */ 6119 snapshot_entry = _spdk_bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id); 6120 6121 assert(snapshot_entry != NULL); 6122 6123 /* Remove clone entry in this snapshot (at this point there can be only one clone) */ 6124 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 6125 assert(clone_entry != NULL); 6126 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 6127 snapshot_entry->clone_count--; 6128 assert(TAILQ_EMPTY(&snapshot_entry->clones)); 6129 6130 if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) { 6131 /* This snapshot is at the same time a clone of another snapshot - we need to 6132 * update parent snapshot (remove current clone, add new one inherited from 6133 * the snapshot that is being removed) */ 6134 6135 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 6136 * snapshot that we are removing */ 6137 _spdk_blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry, 6138 &snapshot_clone_entry); 6139 6140 /* Switch clone entry in parent snapshot */ 6141 TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link); 6142 TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link); 6143 free(snapshot_clone_entry); 6144 } else { 6145 /* No parent snapshot - just remove clone entry */ 6146 free(clone_entry); 6147 } 6148 6149 /* Restore md_ro flags */ 6150 ctx->clone->md_ro = ctx->clone_md_ro; 6151 ctx->snapshot->md_ro = ctx->snapshot_md_ro; 6152 6153 _spdk_blob_unfreeze_io(ctx->clone, _spdk_delete_snapshot_unfreeze_cpl, ctx); 6154 } 6155 6156 static void 6157 _spdk_delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno) 6158 { 6159 struct delete_snapshot_ctx *ctx = cb_arg; 6160 uint64_t i; 6161 6162 ctx->snapshot->md_ro = false; 6163 6164 if (bserrno) { 6165 SPDK_ERRLOG("Failed to sync MD on clone\n"); 6166 ctx->bserrno = bserrno; 6167 6168 /* Restore snapshot to previous state */ 6169 bserrno = _spdk_blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true); 6170 if (bserrno != 0) { 6171 _spdk_delete_snapshot_cleanup_clone(ctx, bserrno); 6172 return; 6173 } 6174 6175 spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_cleanup_clone, ctx); 6176 return; 6177 } 6178 6179 /* Clear cluster map entries for snapshot */ 6180 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 6181 if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) { 6182 ctx->snapshot->active.clusters[i] = 0; 6183 } 6184 } 6185 6186 ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY; 6187 6188 if (ctx->parent_snapshot_entry != NULL) { 6189 ctx->snapshot->back_bs_dev = NULL; 6190 } 6191 6192 spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_cpl, ctx); 6193 } 6194 6195 static void 6196 _spdk_delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno) 6197 { 6198 struct delete_snapshot_ctx *ctx = cb_arg; 6199 uint64_t i; 6200 6201 /* Temporarily override md_ro flag for clone for MD modification */ 6202 ctx->clone_md_ro = ctx->clone->md_ro; 6203 ctx->clone->md_ro = false; 6204 6205 if (bserrno) { 6206 SPDK_ERRLOG("Failed to sync MD with xattr on blob\n"); 6207 ctx->bserrno = bserrno; 6208 
_spdk_delete_snapshot_cleanup_clone(ctx, 0); 6209 return; 6210 } 6211 6212 /* Copy snapshot map to clone map (only unallocated clusters in clone) */ 6213 for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) { 6214 if (ctx->clone->active.clusters[i] == 0) { 6215 ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i]; 6216 } 6217 } 6218 6219 /* Delete old backing bs_dev from clone (related to snapshot that will be removed) */ 6220 ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev); 6221 6222 /* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */ 6223 if (ctx->parent_snapshot_entry != NULL) { 6224 /* ...to parent snapshot */ 6225 ctx->clone->parent_id = ctx->parent_snapshot_entry->id; 6226 ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev; 6227 _spdk_blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id, 6228 sizeof(spdk_blob_id), 6229 true); 6230 } else { 6231 /* ...to blobid invalid and zeroes dev */ 6232 ctx->clone->parent_id = SPDK_BLOBID_INVALID; 6233 ctx->clone->back_bs_dev = spdk_bs_create_zeroes_dev(); 6234 _spdk_blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true); 6235 } 6236 6237 spdk_blob_sync_md(ctx->clone, _spdk_delete_snapshot_sync_clone_cpl, ctx); 6238 } 6239 6240 static void 6241 _spdk_delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno) 6242 { 6243 struct delete_snapshot_ctx *ctx = cb_arg; 6244 6245 if (bserrno) { 6246 SPDK_ERRLOG("Failed to freeze I/O on clone\n"); 6247 ctx->bserrno = bserrno; 6248 _spdk_delete_snapshot_cleanup_clone(ctx, 0); 6249 return; 6250 } 6251 6252 /* Temporarily override md_ro flag for snapshot for MD modification */ 6253 ctx->snapshot_md_ro = ctx->snapshot->md_ro; 6254 ctx->snapshot->md_ro = false; 6255 6256 /* Mark blob as pending for removal for power failure safety, use clone id for recovery */ 6257 ctx->bserrno = _spdk_blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id, 6258 sizeof(spdk_blob_id), true); 6259 if (ctx->bserrno != 0) { 6260 _spdk_delete_snapshot_cleanup_clone(ctx, 0); 6261 return; 6262 } 6263 6264 spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_xattr_cpl, ctx); 6265 } 6266 6267 static void 6268 _spdk_delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno) 6269 { 6270 struct delete_snapshot_ctx *ctx = cb_arg; 6271 6272 if (bserrno) { 6273 SPDK_ERRLOG("Failed to open clone\n"); 6274 ctx->bserrno = bserrno; 6275 _spdk_delete_snapshot_cleanup_snapshot(ctx, 0); 6276 return; 6277 } 6278 6279 ctx->clone = clone; 6280 6281 if (clone->locked_operation_in_progress) { 6282 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress on its clone\n"); 6283 ctx->bserrno = -EBUSY; 6284 spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx); 6285 return; 6286 } 6287 6288 clone->locked_operation_in_progress = true; 6289 6290 _spdk_blob_freeze_io(clone, _spdk_delete_snapshot_freeze_io_cb, ctx); 6291 } 6292 6293 static void 6294 _spdk_update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx) 6295 { 6296 struct spdk_blob_list *snapshot_entry = NULL; 6297 struct spdk_blob_list *clone_entry = NULL; 6298 struct spdk_blob_list *snapshot_clone_entry = NULL; 6299 6300 /* Get snapshot entry for the snapshot we want to remove */ 6301 snapshot_entry = _spdk_bs_get_snapshot_entry(snapshot->bs, snapshot->id); 6302 6303 assert(snapshot_entry != NULL); 6304 6305 /* Get clone of the snapshot (at this point 
there can be only one clone) */ 6306 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 6307 assert(snapshot_entry->clone_count == 1); 6308 assert(clone_entry != NULL); 6309 6310 /* Get snapshot entry for parent snapshot and clone entry within that snapshot for 6311 * snapshot that we are removing */ 6312 _spdk_blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry, 6313 &snapshot_clone_entry); 6314 6315 spdk_bs_open_blob(snapshot->bs, clone_entry->id, _spdk_delete_snapshot_open_clone_cb, ctx); 6316 } 6317 6318 static void 6319 _spdk_bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno) 6320 { 6321 spdk_bs_sequence_t *seq = cb_arg; 6322 struct spdk_blob_list *snapshot_entry = NULL; 6323 uint32_t page_num; 6324 6325 if (bserrno) { 6326 SPDK_ERRLOG("Failed to remove blob\n"); 6327 spdk_bs_sequence_finish(seq, bserrno); 6328 return; 6329 } 6330 6331 /* Remove snapshot from the list */ 6332 snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id); 6333 if (snapshot_entry != NULL) { 6334 TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link); 6335 free(snapshot_entry); 6336 } 6337 6338 page_num = _spdk_bs_blobid_to_page(blob->id); 6339 spdk_bit_array_clear(blob->bs->used_blobids, page_num); 6340 blob->state = SPDK_BLOB_STATE_DIRTY; 6341 blob->active.num_pages = 0; 6342 _spdk_blob_resize(blob, 0); 6343 6344 _spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob); 6345 } 6346 6347 static int 6348 _spdk_bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone) 6349 { 6350 struct spdk_blob_list *snapshot_entry = NULL; 6351 struct spdk_blob_list *clone_entry = NULL; 6352 struct spdk_blob *clone = NULL; 6353 bool has_one_clone = false; 6354 6355 /* Check if this is a snapshot with clones */ 6356 snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id); 6357 if (snapshot_entry != NULL) { 6358 if (snapshot_entry->clone_count > 1) { 6359 SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n"); 6360 return -EBUSY; 6361 } else if (snapshot_entry->clone_count == 1) { 6362 has_one_clone = true; 6363 } 6364 } 6365 6366 /* Check if someone has this blob open (besides this delete context): 6367 * - open_ref = 1 - only this context opened blob, so it is ok to remove it 6368 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot 6369 * and that is ok, because we will update it accordingly */ 6370 if (blob->open_ref <= 2 && has_one_clone) { 6371 clone_entry = TAILQ_FIRST(&snapshot_entry->clones); 6372 assert(clone_entry != NULL); 6373 clone = _spdk_blob_lookup(blob->bs, clone_entry->id); 6374 6375 if (blob->open_ref == 2 && clone == NULL) { 6376 /* Clone is closed and someone else opened this blob */ 6377 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 6378 return -EBUSY; 6379 } 6380 6381 *update_clone = true; 6382 return 0; 6383 } 6384 6385 if (blob->open_ref > 1) { 6386 SPDK_ERRLOG("Cannot remove snapshot because it is open\n"); 6387 return -EBUSY; 6388 } 6389 6390 assert(has_one_clone == false); 6391 *update_clone = false; 6392 return 0; 6393 } 6394 6395 static void 6396 _spdk_bs_delete_enomem_close_cpl(void *cb_arg, int bserrno) 6397 { 6398 spdk_bs_sequence_t *seq = cb_arg; 6399 6400 spdk_bs_sequence_finish(seq, -ENOMEM); 6401 } 6402 6403 static void 6404 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno) 6405 { 6406 spdk_bs_sequence_t *seq = cb_arg; 6407 struct delete_snapshot_ctx *ctx; 6408 bool update_clone = false; 6409 6410 if (bserrno != 0) { 6411 
spdk_bs_sequence_finish(seq, bserrno); 6412 return; 6413 } 6414 6415 _spdk_blob_verify_md_op(blob); 6416 6417 ctx = calloc(1, sizeof(*ctx)); 6418 if (ctx == NULL) { 6419 spdk_blob_close(blob, _spdk_bs_delete_enomem_close_cpl, seq); 6420 return; 6421 } 6422 6423 ctx->snapshot = blob; 6424 ctx->cb_fn = _spdk_bs_delete_blob_finish; 6425 ctx->cb_arg = seq; 6426 6427 /* Check if blob can be removed and if it is a snapshot with clone on top of it */ 6428 ctx->bserrno = _spdk_bs_is_blob_deletable(blob, &update_clone); 6429 if (ctx->bserrno) { 6430 spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx); 6431 return; 6432 } 6433 6434 if (blob->locked_operation_in_progress) { 6435 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress\n"); 6436 ctx->bserrno = -EBUSY; 6437 spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx); 6438 return; 6439 } 6440 6441 blob->locked_operation_in_progress = true; 6442 6443 /* 6444 * Remove the blob from the blob_store list now, to ensure it does not 6445 * get returned after this point by _spdk_blob_lookup(). 6446 */ 6447 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 6448 6449 if (update_clone) { 6450 /* This blob is a snapshot with active clone - update clone first */ 6451 _spdk_update_clone_on_snapshot_deletion(blob, ctx); 6452 } else { 6453 /* This blob does not have any clones - just remove it */ 6454 _spdk_bs_blob_list_remove(blob); 6455 _spdk_bs_delete_blob_finish(seq, blob, 0); 6456 free(ctx); 6457 } 6458 } 6459 6460 void 6461 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 6462 spdk_blob_op_complete cb_fn, void *cb_arg) 6463 { 6464 struct spdk_bs_cpl cpl; 6465 spdk_bs_sequence_t *seq; 6466 6467 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid); 6468 6469 assert(spdk_get_thread() == bs->md_thread); 6470 6471 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 6472 cpl.u.blob_basic.cb_fn = cb_fn; 6473 cpl.u.blob_basic.cb_arg = cb_arg; 6474 6475 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 6476 if (!seq) { 6477 cb_fn(cb_arg, -ENOMEM); 6478 return; 6479 } 6480 6481 spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq); 6482 } 6483 6484 /* END spdk_bs_delete_blob */ 6485 6486 /* START spdk_bs_open_blob */ 6487 6488 static void 6489 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6490 { 6491 struct spdk_blob *blob = cb_arg; 6492 6493 if (bserrno != 0) { 6494 _spdk_blob_free(blob); 6495 seq->cpl.u.blob_handle.blob = NULL; 6496 spdk_bs_sequence_finish(seq, bserrno); 6497 return; 6498 } 6499 6500 blob->open_ref++; 6501 6502 TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link); 6503 6504 spdk_bs_sequence_finish(seq, bserrno); 6505 } 6506 6507 static void _spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 6508 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 6509 { 6510 struct spdk_blob *blob; 6511 struct spdk_bs_cpl cpl; 6512 struct spdk_blob_open_opts opts_default; 6513 spdk_bs_sequence_t *seq; 6514 uint32_t page_num; 6515 6516 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid); 6517 assert(spdk_get_thread() == bs->md_thread); 6518 6519 page_num = _spdk_bs_blobid_to_page(blobid); 6520 if (spdk_bit_array_get(bs->used_blobids, page_num) == false) { 6521 /* Invalid blobid */ 6522 cb_fn(cb_arg, NULL, -ENOENT); 6523 return; 6524 } 6525 6526 blob = _spdk_blob_lookup(bs, blobid); 6527 if (blob) { 6528 blob->open_ref++; 6529 cb_fn(cb_arg, blob, 0); 6530 return; 6531 } 6532 6533 blob = _spdk_blob_alloc(bs, blobid); 6534 
if (!blob) { 6535 cb_fn(cb_arg, NULL, -ENOMEM); 6536 return; 6537 } 6538 6539 if (!opts) { 6540 spdk_blob_open_opts_init(&opts_default); 6541 opts = &opts_default; 6542 } 6543 6544 blob->clear_method = opts->clear_method; 6545 6546 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 6547 cpl.u.blob_handle.cb_fn = cb_fn; 6548 cpl.u.blob_handle.cb_arg = cb_arg; 6549 cpl.u.blob_handle.blob = blob; 6550 6551 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 6552 if (!seq) { 6553 _spdk_blob_free(blob); 6554 cb_fn(cb_arg, NULL, -ENOMEM); 6555 return; 6556 } 6557 6558 _spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob); 6559 } 6560 6561 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 6562 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 6563 { 6564 _spdk_bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg); 6565 } 6566 6567 void spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid, 6568 struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 6569 { 6570 _spdk_bs_open_blob(bs, blobid, opts, cb_fn, cb_arg); 6571 } 6572 6573 /* END spdk_bs_open_blob */ 6574 6575 /* START spdk_blob_set_read_only */ 6576 int spdk_blob_set_read_only(struct spdk_blob *blob) 6577 { 6578 _spdk_blob_verify_md_op(blob); 6579 6580 blob->data_ro_flags |= SPDK_BLOB_READ_ONLY; 6581 6582 blob->state = SPDK_BLOB_STATE_DIRTY; 6583 return 0; 6584 } 6585 /* END spdk_blob_set_read_only */ 6586 6587 /* START spdk_blob_sync_md */ 6588 6589 static void 6590 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6591 { 6592 struct spdk_blob *blob = cb_arg; 6593 6594 if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) { 6595 blob->data_ro = true; 6596 blob->md_ro = true; 6597 } 6598 6599 spdk_bs_sequence_finish(seq, bserrno); 6600 } 6601 6602 static void 6603 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 6604 { 6605 struct spdk_bs_cpl cpl; 6606 spdk_bs_sequence_t *seq; 6607 6608 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 6609 cpl.u.blob_basic.cb_fn = cb_fn; 6610 cpl.u.blob_basic.cb_arg = cb_arg; 6611 6612 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 6613 if (!seq) { 6614 cb_fn(cb_arg, -ENOMEM); 6615 return; 6616 } 6617 6618 _spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob); 6619 } 6620 6621 void 6622 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 6623 { 6624 _spdk_blob_verify_md_op(blob); 6625 6626 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id); 6627 6628 if (blob->md_ro) { 6629 assert(blob->state == SPDK_BLOB_STATE_CLEAN); 6630 cb_fn(cb_arg, 0); 6631 return; 6632 } 6633 6634 _spdk_blob_sync_md(blob, cb_fn, cb_arg); 6635 } 6636 6637 /* END spdk_blob_sync_md */ 6638 6639 struct spdk_blob_insert_cluster_ctx { 6640 struct spdk_thread *thread; 6641 struct spdk_blob *blob; 6642 uint32_t cluster_num; /* cluster index in blob */ 6643 uint32_t cluster; /* cluster on disk */ 6644 uint32_t extent_page; /* extent page on disk */ 6645 int rc; 6646 spdk_blob_op_complete cb_fn; 6647 void *cb_arg; 6648 }; 6649 6650 static void 6651 _spdk_blob_insert_cluster_msg_cpl(void *arg) 6652 { 6653 struct spdk_blob_insert_cluster_ctx *ctx = arg; 6654 6655 ctx->cb_fn(ctx->cb_arg, ctx->rc); 6656 free(ctx); 6657 } 6658 6659 static void 6660 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno) 6661 { 6662 struct spdk_blob_insert_cluster_ctx *ctx = arg; 6663 6664 ctx->rc = bserrno; 6665 spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, 
ctx); 6666 } 6667 6668 static void 6669 _spdk_blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6670 { 6671 struct spdk_blob_md_page *page = cb_arg; 6672 6673 spdk_bs_sequence_finish(seq, bserrno); 6674 spdk_free(page); 6675 } 6676 6677 static void 6678 _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num, 6679 spdk_blob_op_complete cb_fn, void *cb_arg) 6680 { 6681 spdk_bs_sequence_t *seq; 6682 struct spdk_bs_cpl cpl; 6683 struct spdk_blob_md_page *page = NULL; 6684 uint32_t page_count = 0; 6685 int rc; 6686 6687 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 6688 cpl.u.blob_basic.cb_fn = cb_fn; 6689 cpl.u.blob_basic.cb_arg = cb_arg; 6690 6691 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 6692 if (!seq) { 6693 cb_fn(cb_arg, -ENOMEM); 6694 return; 6695 } 6696 rc = _spdk_blob_serialize_add_page(blob, &page, &page_count, &page); 6697 if (rc < 0) { 6698 spdk_bs_sequence_finish(seq, rc); 6699 return; 6700 } 6701 6702 _spdk_blob_serialize_extent_page(blob, cluster_num, page); 6703 6704 page->crc = _spdk_blob_md_page_calc_crc(page); 6705 6706 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true); 6707 6708 spdk_bs_sequence_write_dev(seq, page, _spdk_bs_md_page_to_lba(blob->bs, extent), 6709 _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 6710 _spdk_blob_persist_extent_page_cpl, page); 6711 } 6712 6713 static void 6714 _spdk_blob_insert_cluster_msg(void *arg) 6715 { 6716 struct spdk_blob_insert_cluster_ctx *ctx = arg; 6717 uint32_t *extent_page; 6718 6719 ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster); 6720 if (ctx->rc != 0) { 6721 spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx); 6722 return; 6723 } 6724 6725 if (ctx->blob->use_extent_table == false) { 6726 /* Extent table is not used, proceed with sync of md that will only use extents_rle. */ 6727 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 6728 _spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx); 6729 return; 6730 } 6731 6732 extent_page = _spdk_bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num); 6733 if (*extent_page == 0) { 6734 /* Extent page requires allocation. 6735 * It was already claimed in the used_md_pages map and placed in ctx. 6736 * Blob persist will take care of writing out the new extent page on disk. */ 6737 assert(ctx->extent_page != 0); 6738 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 6739 *extent_page = ctx->extent_page; 6740 ctx->blob->state = SPDK_BLOB_STATE_DIRTY; 6741 _spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx); 6742 } else { 6743 /* It is possible for the original thread to allocate an extent page for a 6744 * different cluster in the same extent page. In such a case, proceed with 6745 * updating the existing extent page, but release the additional one. */ 6746 if (ctx->extent_page != 0) { 6747 assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true); 6748 _spdk_bs_release_md_page(ctx->blob->bs, ctx->extent_page); 6749 } 6750 /* Extent page already allocated. 6751 * Every cluster allocation requires just an update of a single extent page.
*/ 6752 _spdk_blob_insert_extent(ctx->blob, *extent_page, ctx->cluster_num, 6753 _spdk_blob_insert_cluster_msg_cb, ctx); 6754 } 6755 } 6756 6757 static void 6758 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, 6759 uint64_t cluster, uint32_t extent_page, spdk_blob_op_complete cb_fn, void *cb_arg) 6760 { 6761 struct spdk_blob_insert_cluster_ctx *ctx; 6762 6763 ctx = calloc(1, sizeof(*ctx)); 6764 if (ctx == NULL) { 6765 cb_fn(cb_arg, -ENOMEM); 6766 return; 6767 } 6768 6769 ctx->thread = spdk_get_thread(); 6770 ctx->blob = blob; 6771 ctx->cluster_num = cluster_num; 6772 ctx->cluster = cluster; 6773 ctx->extent_page = extent_page; 6774 ctx->cb_fn = cb_fn; 6775 ctx->cb_arg = cb_arg; 6776 6777 spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx); 6778 } 6779 6780 /* START spdk_blob_close */ 6781 6782 static void 6783 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 6784 { 6785 struct spdk_blob *blob = cb_arg; 6786 6787 if (bserrno == 0) { 6788 blob->open_ref--; 6789 if (blob->open_ref == 0) { 6790 /* 6791 * Blobs with active.num_pages == 0 are deleted blobs. 6792 * these blobs are removed from the blob_store list 6793 * when the deletion process starts - so don't try to 6794 * remove them again. 6795 */ 6796 if (blob->active.num_pages > 0) { 6797 TAILQ_REMOVE(&blob->bs->blobs, blob, link); 6798 } 6799 _spdk_blob_free(blob); 6800 } 6801 } 6802 6803 spdk_bs_sequence_finish(seq, bserrno); 6804 } 6805 6806 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg) 6807 { 6808 struct spdk_bs_cpl cpl; 6809 spdk_bs_sequence_t *seq; 6810 6811 _spdk_blob_verify_md_op(blob); 6812 6813 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id); 6814 6815 if (blob->open_ref == 0) { 6816 cb_fn(cb_arg, -EBADF); 6817 return; 6818 } 6819 6820 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 6821 cpl.u.blob_basic.cb_fn = cb_fn; 6822 cpl.u.blob_basic.cb_arg = cb_arg; 6823 6824 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 6825 if (!seq) { 6826 cb_fn(cb_arg, -ENOMEM); 6827 return; 6828 } 6829 6830 /* Sync metadata */ 6831 _spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob); 6832 } 6833 6834 /* END spdk_blob_close */ 6835 6836 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs) 6837 { 6838 return spdk_get_io_channel(bs); 6839 } 6840 6841 void spdk_bs_free_io_channel(struct spdk_io_channel *channel) 6842 { 6843 spdk_put_io_channel(channel); 6844 } 6845 6846 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel, 6847 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 6848 { 6849 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 6850 SPDK_BLOB_UNMAP); 6851 } 6852 6853 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel, 6854 uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg) 6855 { 6856 _spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg, 6857 SPDK_BLOB_WRITE_ZEROES); 6858 } 6859 6860 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel, 6861 void *payload, uint64_t offset, uint64_t length, 6862 spdk_blob_op_complete cb_fn, void *cb_arg) 6863 { 6864 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 6865 SPDK_BLOB_WRITE); 6866 } 6867 6868 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel, 6869 void *payload, 
uint64_t offset, uint64_t length, 6870 spdk_blob_op_complete cb_fn, void *cb_arg) 6871 { 6872 _spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg, 6873 SPDK_BLOB_READ); 6874 } 6875 6876 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel, 6877 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 6878 spdk_blob_op_complete cb_fn, void *cb_arg) 6879 { 6880 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false); 6881 } 6882 6883 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel, 6884 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 6885 spdk_blob_op_complete cb_fn, void *cb_arg) 6886 { 6887 _spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true); 6888 } 6889 6890 struct spdk_bs_iter_ctx { 6891 int64_t page_num; 6892 struct spdk_blob_store *bs; 6893 6894 spdk_blob_op_with_handle_complete cb_fn; 6895 void *cb_arg; 6896 }; 6897 6898 static void 6899 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 6900 { 6901 struct spdk_bs_iter_ctx *ctx = cb_arg; 6902 struct spdk_blob_store *bs = ctx->bs; 6903 spdk_blob_id id; 6904 6905 if (bserrno == 0) { 6906 ctx->cb_fn(ctx->cb_arg, _blob, bserrno); 6907 free(ctx); 6908 return; 6909 } 6910 6911 ctx->page_num++; 6912 ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num); 6913 if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) { 6914 ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT); 6915 free(ctx); 6916 return; 6917 } 6918 6919 id = _spdk_bs_page_to_blobid(ctx->page_num); 6920 6921 spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx); 6922 } 6923 6924 void 6925 spdk_bs_iter_first(struct spdk_blob_store *bs, 6926 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 6927 { 6928 struct spdk_bs_iter_ctx *ctx; 6929 6930 ctx = calloc(1, sizeof(*ctx)); 6931 if (!ctx) { 6932 cb_fn(cb_arg, NULL, -ENOMEM); 6933 return; 6934 } 6935 6936 ctx->page_num = -1; 6937 ctx->bs = bs; 6938 ctx->cb_fn = cb_fn; 6939 ctx->cb_arg = cb_arg; 6940 6941 _spdk_bs_iter_cpl(ctx, NULL, -1); 6942 } 6943 6944 static void 6945 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno) 6946 { 6947 struct spdk_bs_iter_ctx *ctx = cb_arg; 6948 6949 _spdk_bs_iter_cpl(ctx, NULL, -1); 6950 } 6951 6952 void 6953 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob, 6954 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 6955 { 6956 struct spdk_bs_iter_ctx *ctx; 6957 6958 assert(blob != NULL); 6959 6960 ctx = calloc(1, sizeof(*ctx)); 6961 if (!ctx) { 6962 cb_fn(cb_arg, NULL, -ENOMEM); 6963 return; 6964 } 6965 6966 ctx->page_num = _spdk_bs_blobid_to_page(blob->id); 6967 ctx->bs = bs; 6968 ctx->cb_fn = cb_fn; 6969 ctx->cb_arg = cb_arg; 6970 6971 /* Close the existing blob */ 6972 spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx); 6973 } 6974 6975 static int 6976 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 6977 uint16_t value_len, bool internal) 6978 { 6979 struct spdk_xattr_tailq *xattrs; 6980 struct spdk_xattr *xattr; 6981 size_t desc_size; 6982 6983 _spdk_blob_verify_md_op(blob); 6984 6985 if (blob->md_ro) { 6986 return -EPERM; 6987 } 6988 6989 desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len; 6990 if (desc_size > SPDK_BS_MAX_DESC_SIZE) { 6991 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Xattr '%s' of size %ld does not fit into a single page %ld\n", name, 6992 desc_size,
SPDK_BS_MAX_DESC_SIZE); 6993 return -ENOMEM; 6994 } 6995 6996 if (internal) { 6997 xattrs = &blob->xattrs_internal; 6998 blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR; 6999 } else { 7000 xattrs = &blob->xattrs; 7001 } 7002 7003 TAILQ_FOREACH(xattr, xattrs, link) { 7004 if (!strcmp(name, xattr->name)) { 7005 free(xattr->value); 7006 xattr->value_len = value_len; 7007 xattr->value = malloc(value_len); 7008 memcpy(xattr->value, value, value_len); 7009 7010 blob->state = SPDK_BLOB_STATE_DIRTY; 7011 7012 return 0; 7013 } 7014 } 7015 7016 xattr = calloc(1, sizeof(*xattr)); 7017 if (!xattr) { 7018 return -ENOMEM; 7019 } 7020 xattr->name = strdup(name); 7021 xattr->value_len = value_len; 7022 xattr->value = malloc(value_len); 7023 memcpy(xattr->value, value, value_len); 7024 TAILQ_INSERT_TAIL(xattrs, xattr, link); 7025 7026 blob->state = SPDK_BLOB_STATE_DIRTY; 7027 7028 return 0; 7029 } 7030 7031 int 7032 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value, 7033 uint16_t value_len) 7034 { 7035 return _spdk_blob_set_xattr(blob, name, value, value_len, false); 7036 } 7037 7038 static int 7039 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal) 7040 { 7041 struct spdk_xattr_tailq *xattrs; 7042 struct spdk_xattr *xattr; 7043 7044 _spdk_blob_verify_md_op(blob); 7045 7046 if (blob->md_ro) { 7047 return -EPERM; 7048 } 7049 xattrs = internal ? &blob->xattrs_internal : &blob->xattrs; 7050 7051 TAILQ_FOREACH(xattr, xattrs, link) { 7052 if (!strcmp(name, xattr->name)) { 7053 TAILQ_REMOVE(xattrs, xattr, link); 7054 free(xattr->value); 7055 free(xattr->name); 7056 free(xattr); 7057 7058 if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) { 7059 blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR; 7060 } 7061 blob->state = SPDK_BLOB_STATE_DIRTY; 7062 7063 return 0; 7064 } 7065 } 7066 7067 return -ENOENT; 7068 } 7069 7070 int 7071 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name) 7072 { 7073 return _spdk_blob_remove_xattr(blob, name, false); 7074 } 7075 7076 static int 7077 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 7078 const void **value, size_t *value_len, bool internal) 7079 { 7080 struct spdk_xattr *xattr; 7081 struct spdk_xattr_tailq *xattrs; 7082 7083 xattrs = internal ? 
&blob->xattrs_internal : &blob->xattrs; 7084 7085 TAILQ_FOREACH(xattr, xattrs, link) { 7086 if (!strcmp(name, xattr->name)) { 7087 *value = xattr->value; 7088 *value_len = xattr->value_len; 7089 return 0; 7090 } 7091 } 7092 return -ENOENT; 7093 } 7094 7095 int 7096 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name, 7097 const void **value, size_t *value_len) 7098 { 7099 _spdk_blob_verify_md_op(blob); 7100 7101 return _spdk_blob_get_xattr_value(blob, name, value, value_len, false); 7102 } 7103 7104 struct spdk_xattr_names { 7105 uint32_t count; 7106 const char *names[0]; 7107 }; 7108 7109 static int 7110 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names) 7111 { 7112 struct spdk_xattr *xattr; 7113 int count = 0; 7114 7115 TAILQ_FOREACH(xattr, xattrs, link) { 7116 count++; 7117 } 7118 7119 *names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *)); 7120 if (*names == NULL) { 7121 return -ENOMEM; 7122 } 7123 7124 TAILQ_FOREACH(xattr, xattrs, link) { 7125 (*names)->names[(*names)->count++] = xattr->name; 7126 } 7127 7128 return 0; 7129 } 7130 7131 int 7132 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names) 7133 { 7134 _spdk_blob_verify_md_op(blob); 7135 7136 return _spdk_blob_get_xattr_names(&blob->xattrs, names); 7137 } 7138 7139 uint32_t 7140 spdk_xattr_names_get_count(struct spdk_xattr_names *names) 7141 { 7142 assert(names != NULL); 7143 7144 return names->count; 7145 } 7146 7147 const char * 7148 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index) 7149 { 7150 if (index >= names->count) { 7151 return NULL; 7152 } 7153 7154 return names->names[index]; 7155 } 7156 7157 void 7158 spdk_xattr_names_free(struct spdk_xattr_names *names) 7159 { 7160 free(names); 7161 } 7162 7163 struct spdk_bs_type 7164 spdk_bs_get_bstype(struct spdk_blob_store *bs) 7165 { 7166 return bs->bstype; 7167 } 7168 7169 void 7170 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype) 7171 { 7172 memcpy(&bs->bstype, &bstype, sizeof(bstype)); 7173 } 7174 7175 bool 7176 spdk_blob_is_read_only(struct spdk_blob *blob) 7177 { 7178 assert(blob != NULL); 7179 return (blob->data_ro || blob->md_ro); 7180 } 7181 7182 bool 7183 spdk_blob_is_snapshot(struct spdk_blob *blob) 7184 { 7185 struct spdk_blob_list *snapshot_entry; 7186 7187 assert(blob != NULL); 7188 7189 snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id); 7190 if (snapshot_entry == NULL) { 7191 return false; 7192 } 7193 7194 return true; 7195 } 7196 7197 bool 7198 spdk_blob_is_clone(struct spdk_blob *blob) 7199 { 7200 assert(blob != NULL); 7201 7202 if (blob->parent_id != SPDK_BLOBID_INVALID) { 7203 assert(spdk_blob_is_thin_provisioned(blob)); 7204 return true; 7205 } 7206 7207 return false; 7208 } 7209 7210 bool 7211 spdk_blob_is_thin_provisioned(struct spdk_blob *blob) 7212 { 7213 assert(blob != NULL); 7214 return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 7215 } 7216 7217 static void 7218 _spdk_blob_update_clear_method(struct spdk_blob *blob) 7219 { 7220 enum blob_clear_method stored_cm; 7221 7222 assert(blob != NULL); 7223 7224 /* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored 7225 * in metadata previously. If something other than the default was 7226 * specified, ignore the stored value and use what was passed in.
7227 */ 7228 stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT); 7229 7230 if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) { 7231 blob->clear_method = stored_cm; 7232 } else if (blob->clear_method != stored_cm) { 7233 SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n", 7234 blob->clear_method, stored_cm); 7235 } 7236 } 7237 7238 spdk_blob_id 7239 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id) 7240 { 7241 struct spdk_blob_list *snapshot_entry = NULL; 7242 struct spdk_blob_list *clone_entry = NULL; 7243 7244 TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) { 7245 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 7246 if (clone_entry->id == blob_id) { 7247 return snapshot_entry->id; 7248 } 7249 } 7250 } 7251 7252 return SPDK_BLOBID_INVALID; 7253 } 7254 7255 int 7256 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids, 7257 size_t *count) 7258 { 7259 struct spdk_blob_list *snapshot_entry, *clone_entry; 7260 size_t n; 7261 7262 snapshot_entry = _spdk_bs_get_snapshot_entry(bs, blobid); 7263 if (snapshot_entry == NULL) { 7264 *count = 0; 7265 return 0; 7266 } 7267 7268 if (ids == NULL || *count < snapshot_entry->clone_count) { 7269 *count = snapshot_entry->clone_count; 7270 return -ENOMEM; 7271 } 7272 *count = snapshot_entry->clone_count; 7273 7274 n = 0; 7275 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 7276 ids[n++] = clone_entry->id; 7277 } 7278 7279 return 0; 7280 } 7281 7282 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB) 7283
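/*
 * Usage sketch (illustrative only, not part of the blobstore implementation):
 * chaining the asynchronous snapshot, clone and inflate APIs defined above.
 * Clones can only be created from read-only blobs, so the id returned by
 * spdk_bs_create_snapshot() is used as the clone's parent, and
 * spdk_bs_inflate_blob() later removes the dependency on that parent.
 * The example_* callbacks are hypothetical application helpers.
 */
static void
example_inflate_done(void *cb_arg, int bserrno)
{
	struct spdk_io_channel *channel = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Inflate failed: %d\n", bserrno);
	}
	spdk_bs_free_io_channel(channel);
}

static void
example_clone_created(void *cb_arg, spdk_blob_id clone_id, int bserrno)
{
	struct spdk_blob_store *bs = cb_arg;
	struct spdk_io_channel *channel;

	if (bserrno != 0) {
		SPDK_ERRLOG("Clone creation failed: %d\n", bserrno);
		return;
	}

	channel = spdk_bs_alloc_io_channel(bs);
	if (channel == NULL) {
		return;
	}

	/* Inflate allocates every cluster and drops thin provisioning.
	 * spdk_bs_blob_decouple_parent() would instead copy only the clusters
	 * owned by the immediate parent. */
	spdk_bs_inflate_blob(bs, channel, clone_id, example_inflate_done, channel);
}

static void
example_snapshot_created(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
{
	struct spdk_blob_store *bs = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot creation failed: %d\n", bserrno);
		return;
	}

	/* The snapshot is data_ro and md_ro, so it is a valid clone parent. */
	spdk_bs_create_clone(bs, snapshot_id, NULL, example_clone_created, bs);
}

static void
example_snapshot_clone_inflate(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	/* Must be called on the blobstore metadata thread. */
	spdk_bs_create_snapshot(bs, blobid, NULL, example_snapshot_created, bs);
}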
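/*
 * Usage sketch (illustrative only): a typical metadata sequence built from the
 * functions above - open a blob, grow it, tag it with an xattr, persist the
 * metadata and close it. All of these must run on the blobstore metadata
 * thread; the example_* helpers and the "name" xattr are hypothetical.
 */
static void
example_md_closed(void *cb_arg, int bserrno)
{
	if (bserrno != 0) {
		SPDK_ERRLOG("Close failed: %d\n", bserrno);
	}
}

static void
example_md_synced(void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Metadata sync failed: %d\n", bserrno);
	}
	spdk_blob_close(blob, example_md_closed, NULL);
}

static void
example_resized(void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	const char *value = "example";
	int rc;

	if (bserrno != 0) {
		SPDK_ERRLOG("Resize failed: %d\n", bserrno);
		spdk_blob_close(blob, example_md_closed, NULL);
		return;
	}

	/* Xattr changes only touch in-memory state until the next sync. */
	rc = spdk_blob_set_xattr(blob, "name", value, strlen(value) + 1);
	if (rc != 0) {
		SPDK_ERRLOG("Setting xattr failed: %d\n", rc);
	}

	spdk_blob_sync_md(blob, example_md_synced, blob);
}

static void
example_opened_for_resize(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	if (bserrno != 0) {
		SPDK_ERRLOG("Open failed: %d\n", bserrno);
		return;
	}

	/* Grow to 64 clusters; fails with -EPERM on md_ro blobs and with
	 * -EBUSY while another locked operation is in progress. */
	spdk_blob_resize(blob, 64, example_resized, blob);
}

static void
example_open_resize_sync_close(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	spdk_bs_open_blob(bs, blobid, example_opened_for_resize, NULL);
}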
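/*
 * Usage sketch (illustrative only): deleting a blob. Per
 * _spdk_bs_is_blob_deletable() above, a snapshot with more than one clone
 * cannot be removed (-EBUSY); with exactly one clone, the clone is reparented
 * onto the snapshot's own parent (or a zeroes device) before the snapshot's
 * metadata is released. example_delete_done is a hypothetical helper.
 */
static void
example_delete_done(void *cb_arg, int bserrno)
{
	if (bserrno == -EBUSY) {
		SPDK_ERRLOG("Blob is still open elsewhere or has multiple clones\n");
	} else if (bserrno != 0) {
		SPDK_ERRLOG("Delete failed: %d\n", bserrno);
	}
}

static void
example_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	/* Must be called on the blobstore metadata thread. */
	spdk_bs_delete_blob(bs, blobid, example_delete_done, NULL);
}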
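/*
 * Usage sketch (illustrative only): walking every blob with the iterator and
 * listing the clones of each snapshot. spdk_bs_iter_next() closes the blob it
 * is given before opening the next one, so the handle must not be reused by
 * the callback. spdk_blob_get_id() is assumed to be the public accessor from
 * spdk/blob.h (not shown in this file); the example_* helpers are hypothetical.
 */
static void
example_list_clones(struct spdk_blob_store *bs, spdk_blob_id snapshot_id)
{
	spdk_blob_id clone_ids[8];
	size_t count = SPDK_COUNTOF(clone_ids);
	size_t i;
	int rc;

	/* -ENOMEM means the array was too small; count then holds the required size. */
	rc = spdk_blob_get_clones(bs, snapshot_id, clone_ids, &count);
	if (rc != 0) {
		SPDK_ERRLOG("Listing clones failed: %d\n", rc);
		return;
	}

	for (i = 0; i < count; i++) {
		SPDK_NOTICELOG("Blob 0x%" PRIx64 " is a clone of snapshot 0x%" PRIx64 "\n",
			       clone_ids[i], snapshot_id);
	}
}

static void
example_iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	struct spdk_blob_store *bs = cb_arg;

	if (bserrno != 0) {
		/* -ENOENT simply means that there are no more blobs. */
		return;
	}

	if (spdk_blob_is_snapshot(blob)) {
		example_list_clones(bs, spdk_blob_get_id(blob));
	}

	spdk_bs_iter_next(bs, blob, example_iter_cb, bs);
}

static void
example_iterate_blobs(struct spdk_blob_store *bs)
{
	spdk_bs_iter_first(bs, example_iter_cb, bs);
}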