/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL	0xffffffffUL

static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, spdk_blob_op_complete cb_fn, void *cb_arg);

static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
				uint16_t value_len, bool internal);
static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				      const void **value, size_t *value_len, bool internal);
static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				     spdk_blob_op_complete cb_fn, void *cb_arg);

static void
_spdk_blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
_spdk_bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}
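
/*
 * Bitmap accounting helpers. Claiming sets a bit in bs->used_md_pages or
 * bs->used_clusters and releasing clears it; callers are responsible for any
 * locking (see _spdk_bs_allocate_cluster, which takes bs->used_clusters_mutex).
 */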
static void
_spdk_bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
_spdk_bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static void
_spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
	assert(bs->num_free_clusters > 0);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);

	spdk_bit_array_set(bs->used_clusters, cluster_num);
	bs->num_free_clusters--;
}

static int
_spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	_spdk_blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}
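
/*
 * Find and claim the next free cluster for cluster_num and, when the extent
 * table is in use, a metadata page for its extent page if none is allocated
 * yet. The search and the claim both happen under bs->used_clusters_mutex so
 * that concurrent allocations cannot race for the same bits.
 */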
static int
_spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
			  uint64_t *lowest_free_cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = NULL;

	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
			       *lowest_free_cluster);
	if (*lowest_free_cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request */
		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = _spdk_bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
				return -ENOSPC;
			}
			_spdk_bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);

	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);

	if (update_map) {
		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
_spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);

	pthread_mutex_lock(&bs->used_clusters_mutex);
	spdk_bit_array_clear(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
	pthread_mutex_unlock(&bs->used_clusters_mutex);
}

static void
_spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	opts->num_clusters = 0;
	opts->thin_provision = false;
	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
	_spdk_blob_xattrs_init(&opts->xattrs);
	opts->use_extent_table = true;
}

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts)
{
	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
}

static struct spdk_blob *
_spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);

	return blob;
}

static void
_spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr *xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
_spdk_blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	_spdk_xattrs_free(&blob->xattrs);
	_spdk_xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}
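
/*
 * I/O freeze machinery. While blob->frozen_refcnt > 0, incoming user I/O for
 * the blob is parked on each channel's queued_io list instead of being
 * submitted. The context below carries the user completion through
 * spdk_for_each_channel() while a freeze or unfreeze is propagated to every
 * I/O channel.
 */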
struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
_spdk_blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set *set;
	struct spdk_bs_user_op_args *args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			spdk_bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
_spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static void
_spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}
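
/*
 * Snapshot the "active" (in-memory) metadata arrays into the "clean" (last
 * persisted) state. The current active arrays become the clean copies and
 * freshly allocated duplicates take their place, so later modifications of
 * the active state cannot disturb the record of what is on disk.
 */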
static int
_spdk_blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 * we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
_spdk_blob_deserialize_xattr(struct spdk_blob *blob,
			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}
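
/*
 * Walk the descriptors packed into a single metadata page. Every descriptor
 * starts with a (type, length) header; a zero-length PADDING descriptor
 * terminates the page early. Unknown descriptor types are skipped, which
 * keeps metadata written by newer blobstore versions readable as long as no
 * incompatible feature flag is set.
 */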
static int
_spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags *desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i, j;
			unsigned int cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* Extent Table already present in the md;
				 * both descriptors should never be present at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_array_get(blob->bs->used_clusters,
									desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD;
				 * both should never be present at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match number
				 * from previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			blob->extent_table_found = true;

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.extent_pages = tmp;
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;
			unsigned int cluster_count = 0;
			size_t cluster_idx_length;

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in MD;
				 * both should never be present at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_array_get(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should
			 * match the current size of the blob.
			 * If changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type. Do not fail - just continue to the
			 * next descriptor. If this descriptor is associated with some feature
			 * defined in a newer version of blobstore, that version of blobstore
			 * should create and set an associated feature flag to specify if this
			 * blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
_spdk_blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (_spdk_bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return _spdk_blob_parse_page(extent_page, blob);
}

static int
_spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
		 struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD, this can
	 * happen for example if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = _spdk_blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
_spdk_blob_serialize_add_page(const struct spdk_blob *blob,
			      struct spdk_blob_md_page **pages,
			      uint32_t *page_count,
			      struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page;

	assert(pages != NULL);
	assert(page_count != NULL);

	if (*page_count == 0) {
		assert(*pages == NULL);
		*page_count = 1;
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	} else {
		assert(*pages != NULL);
		(*page_count)++;
		*pages = spdk_realloc(*pages,
				      SPDK_BS_PAGE_SIZE * (*page_count),
				      SPDK_BS_PAGE_SIZE);
	}

	if (*pages == NULL) {
		*page_count = 0;
		*last_page = NULL;
		return -ENOMEM;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}
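
/*
 * Serialization proper. Descriptors are appended through a moving
 * (buf, remaining_sz) cursor into the current metadata page; whenever a
 * descriptor does not fit, _spdk_blob_serialize_add_page() extends the page
 * chain and the cursor is reset to the new page's descriptor area.
 */
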
/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
_spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
			   uint8_t *buf, size_t buf_sz,
			   size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr *desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
_spdk_blob_serialize_extent_table_entry(const struct spdk_blob *blob,
					uint64_t start_ep, uint64_t *next_ep,
					uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}
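
/*
 * Emit EXTENT_TABLE descriptors covering the whole extent page array, adding
 * metadata pages to the chain whenever a single descriptor cannot hold the
 * remaining entries.
 */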
static int
_spdk_blob_serialize_extent_table(const struct spdk_blob *blob,
				  struct spdk_blob_md_page **pages,
				  struct spdk_blob_md_page *cur_page,
				  uint32_t *page_count, uint8_t **buf,
				  size_t *remaining_sz)
{
	uint64_t last_extent_page;
	int rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs with num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		_spdk_blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
							remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
_spdk_blob_serialize_extent_rle(const struct spdk_blob *blob,
				uint64_t start_cluster, uint64_t *next_cluster,
				uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
_spdk_blob_serialize_extents_rle(const struct spdk_blob *blob,
				 struct spdk_blob_md_page **pages,
				 struct spdk_blob_md_page *cur_page,
				 uint32_t *page_count, uint8_t **buf,
				 size_t *remaining_sz)
{
	uint64_t last_cluster;
	int rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		_spdk_blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}
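
/*
 * Fill a dedicated metadata page with a single EXTENT_PAGE descriptor
 * covering up to SPDK_EXTENTS_PER_EP clusters, starting at the extent-page
 * aligned cluster index.
 */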
static void
_spdk_blob_serialize_extent_page(const struct spdk_blob *blob,
				 uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
_spdk_blob_serialize_flags(const struct spdk_blob *blob,
			   uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 * descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
_spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
			    const struct spdk_xattr_tailq *xattrs, bool internal,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	const struct spdk_xattr *xattr;
	int rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = _spdk_blob_serialize_xattr(xattr,
						*buf, *remaining_sz,
						&required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
							   &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = _spdk_blob_serialize_xattr(xattr,
							*buf, *remaining_sz,
							&required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}
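
/*
 * Serialize the complete in-memory metadata of a blob into a chain of
 * metadata pages: flags first (guaranteed to fit in the root page), then
 * user and internal xattrs, and finally either an extent table or RLE
 * extents, depending on blob->use_extent_table.
 */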
static int
_spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
		     uint32_t *page_count)
{
	struct spdk_blob_md_page *cur_page;
	int rc;
	uint8_t *buf;
	size_t remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = _spdk_blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = _spdk_blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob *blob;

	struct spdk_blob_md_page *pages;
	uint32_t num_pages;
	uint32_t next_extent_page;
	spdk_bs_sequence_t *seq;

	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
};

static uint32_t
_spdk_blob_md_page_calc_crc(void *page)
{
	uint32_t crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
_spdk_blob_load_final(void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		_spdk_blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot fail\n");
	}

	_spdk_blob_load_final(ctx, bserrno);
}

static void _spdk_blob_update_clear_method(struct spdk_blob *blob);

static void
_spdk_blob_load_backing_dev(void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	const void *value;
	size_t len;
	int rc;

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				_spdk_blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  _spdk_blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = spdk_bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	_spdk_blob_load_final(ctx, 0);
}
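
/*
 * Completion for reading one extent page during load. Extent pages are read
 * one at a time into a single-page buffer: each completion validates the crc,
 * parses the page, then either issues the read for the next allocated extent
 * page or, for unallocated (thin provisioned) entries, grows the cluster map
 * with zeroes until the extent table is exhausted.
 */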
static void
_spdk_blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	uint64_t i;
	uint32_t crc;
	uint64_t lba;
	void *tmp;
	uint64_t sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		_spdk_blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE, NULL, SPDK_ENV_SOCKET_ID_ANY,
					  SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			_spdk_blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = _spdk_blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			_spdk_blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			_spdk_blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = _spdk_blob_parse_extent_page(page, blob);
		if (bserrno) {
			_spdk_blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = _spdk_bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
						  _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
						  _spdk_blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size should be increased by up to the amount
			 * left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				_spdk_blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset((uint64_t *)tmp + blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	_spdk_blob_load_backing_dev(ctx);
}

static void
_spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_md_page *page;
	int rc;
	uint32_t crc;

	if (bserrno) {
		SPDK_ERRLOG("Metadata page read failed: %d\n", bserrno);
		_spdk_blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = _spdk_blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
		_spdk_blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		uint32_t next_page = page->next;
		uint64_t next_lba = _spdk_bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		ctx->num_pages++;
		ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
					  sizeof(*page));
		if (ctx->pages == NULL) {
			_spdk_blob_load_final(ctx, -ENOMEM);
			return;
		}

		spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
					  next_lba,
					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
					  _spdk_blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		_spdk_blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support for
		 * the extent table. No extent_* descriptors means that the blob has a
		 * length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	_spdk_blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		_spdk_blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		_spdk_blob_load_backing_dev(ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
_spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	_spdk_blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = _spdk_bs_blobid_to_page(blob->id);
	lba = _spdk_bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
				  _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
				  _spdk_blob_load_cpl, ctx);
}
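
/*
 * Persist machinery. A persist request runs through a fixed chain of
 * callbacks: write out changed extent pages, serialize and write the new
 * metadata page chain (root page last), zero any metadata pages that are no
 * longer used, clear truncated clusters on the device, and release them in
 * the bitmaps. Concurrent persists of the same blob are ordered through
 * blob->pending_persists.
 */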
struct spdk_blob_persist_ctx {
	struct spdk_blob *blob;

	struct spdk_bs_super_block *super;

	struct spdk_blob_md_page *pages;
	uint32_t next_extent_page;
	struct spdk_blob_md_page *extent_page;

	spdk_bs_sequence_t *seq;
	spdk_bs_sequence_cpl cb_fn;
	void *cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

static void
spdk_bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
			uint32_t lba_count)
{
	switch (ctx->blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

static void _spdk_blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx);

static void
_spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob_persist_ctx *next_persist;
	struct spdk_blob *blob = ctx->blob;

	if (bserrno == 0) {
		_spdk_blob_mark_clean(blob);
	}

	assert(ctx == TAILQ_FIRST(&blob->pending_persists));
	TAILQ_REMOVE(&blob->pending_persists, ctx, link);

	next_persist = TAILQ_FIRST(&blob->pending_persists);

	/* Call user callback */
	ctx->cb_fn(seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);

	if (next_persist != NULL) {
		_spdk_blob_persist_check_dirty(next_persist);
	}
}
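
/*
 * After the truncated clusters have been cleared on the device, return them
 * to the blobstore: clear their bits in bs->used_clusters and shrink the
 * in-memory cluster and extent page arrays to the new size.
 */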
static void
_spdk_blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		_spdk_blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			_spdk_bs_release_cluster(bs, cluster_num);
		}
	}

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;

		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	/* TODO: Add path to persist clear extent pages. */
	_spdk_blob_persist_complete(seq, ctx, bserrno);
}

static void
_spdk_blob_persist_clear_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	spdk_bs_batch_t *batch;
	size_t i;
	uint64_t lba;
	uint32_t lba_count;

	if (bserrno != 0) {
		_spdk_blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_clear_clusters_cpl, ctx);

	/* Clear all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, clear them now */
		if (lba_count > 0) {
			spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, clear them now */
	if (lba_count > 0) {
		spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}
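
/*
 * Once the stale metadata pages have been zeroed on disk, release them in
 * the in-memory bitmap. The root page is released only on delete, i.e. when
 * the new active state has no pages at all.
 */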
static void
_spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	size_t i;

	if (bserrno != 0) {
		_spdk_blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		_spdk_bs_release_md_page(bs, blob->clean.pages[i]);
	}

	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		page_num = _spdk_bs_blobid_to_page(blob->id);
		_spdk_bs_release_md_page(bs, page_num);
	}

	/* Move on to clearing clusters */
	_spdk_blob_persist_clear_clusters(seq, ctx, 0);
}

static void
_spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	spdk_bs_batch_t *batch;
	size_t i;

	if (bserrno != 0) {
		_spdk_blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);

	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		lba = _spdk_bs_md_page_to_lba(bs, blob->clean.pages[i]);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	/* The first page will only be zeroed if this is a delete. */
	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		/* The first page in the metadata goes where the blobid indicates */
		page_num = _spdk_bs_blobid_to_page(blob->id);
		lba = _spdk_bs_md_page_to_lba(bs, page_num);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static void
_spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_blob_md_page *page;

	if (bserrno != 0) {
		_spdk_blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	if (blob->active.num_pages == 0) {
		/* Move on to the next step */
		_spdk_blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));

	page = &ctx->pages[0];
	/* The first page in the metadata goes where the blobid indicates */
	lba = _spdk_bs_md_page_to_lba(bs, _spdk_bs_blobid_to_page(blob->id));

	spdk_bs_sequence_write_dev(seq, page, lba, lba_count,
				   _spdk_blob_persist_zero_pages, ctx);
}

static void
_spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_blob_md_page *page;
	spdk_bs_batch_t *batch;
	size_t i;

	if (bserrno != 0) {
		_spdk_blob_persist_complete(seq, ctx, bserrno);
		return;
	}

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);

	/* This starts at 1. The root page is not written until
	 * all of the others are finished
	 */
	for (i = 1; i < blob->active.num_pages; i++) {
		page = &ctx->pages[i];
		assert(page->sequence_num == i);

		lba = _spdk_bs_md_page_to_lba(bs, blob->active.pages[i]);

		spdk_bs_batch_write_dev(batch, page, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}
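
/*
 * Resize the blob to sz clusters. For non thin provisioned blobs this makes
 * two passes over the allocation bitmaps: the first merely verifies that
 * enough free clusters (and extent table md pages) exist, the second
 * actually claims them, so the request either succeeds completely or
 * changes nothing.
 */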
static int
_spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
{
	uint64_t i;
	uint64_t *tmp;
	uint64_t lfc;	/* lowest free cluster */
	uint32_t lfmd;	/* lowest free md page */
	uint64_t num_clusters;
	uint32_t *ep_tmp;
	uint64_t new_num_ep = 0, current_num_ep = 0;
	struct spdk_blob_store *bs;

	bs = blob->bs;

	_spdk_blob_verify_md_op(blob);

	if (blob->active.num_clusters == sz) {
		return 0;
	}

	if (blob->active.num_clusters < blob->active.cluster_array_size) {
		/* If this blob was resized to be larger, then smaller, then
		 * larger without syncing, then the cluster array already
		 * contains spare assigned clusters we can use.
		 */
		num_clusters = spdk_min(blob->active.cluster_array_size,
					sz);
	} else {
		num_clusters = blob->active.num_clusters;
	}

	if (blob->use_extent_table) {
		/* Round up, since every cluster beyond the current Extent Table
		 * size requires a new extent page. */
		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and md pages, another to actually claim them.
	 */

	if (spdk_blob_is_thin_provisioned(blob) == false) {
		lfc = 0;
		for (i = num_clusters; i < sz; i++) {
			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
			if (lfc == UINT32_MAX) {
				/* No more free clusters. Cannot satisfy the request */
				return -ENOSPC;
			}
			lfc++;
		}
		lfmd = 0;
		for (i = current_num_ep; i < new_num_ep; i++) {
			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
			if (lfmd == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				return -ENOSPC;
			}
		}
	}
1895 */ 1896 tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz); 1897 if (sz > 0 && tmp == NULL) { 1898 return -ENOMEM; 1899 } 1900 memset(tmp + blob->active.cluster_array_size, 0, 1901 sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size)); 1902 blob->active.clusters = tmp; 1903 blob->active.cluster_array_size = sz; 1904 1905 /* Expand the extents table, only if enough clusters were added */ 1906 if (new_num_ep > current_num_ep && blob->use_extent_table) { 1907 ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep); 1908 if (new_num_ep > 0 && ep_tmp == NULL) { 1909 return -ENOMEM; 1910 } 1911 memset(ep_tmp + blob->active.extent_pages_array_size, 0, 1912 sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size)); 1913 blob->active.extent_pages = ep_tmp; 1914 blob->active.extent_pages_array_size = new_num_ep; 1915 } 1916 } 1917 1918 blob->state = SPDK_BLOB_STATE_DIRTY; 1919 1920 if (spdk_blob_is_thin_provisioned(blob) == false) { 1921 lfc = 0; 1922 lfmd = 0; 1923 for (i = num_clusters; i < sz; i++) { 1924 _spdk_bs_allocate_cluster(blob, i, &lfc, &lfmd, true); 1925 lfc++; 1926 lfmd++; 1927 } 1928 } 1929 1930 blob->active.num_clusters = sz; 1931 blob->active.num_extent_pages = new_num_ep; 1932 1933 return 0; 1934 } 1935 1936 static void 1937 _spdk_blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx) 1938 { 1939 spdk_bs_sequence_t *seq = ctx->seq; 1940 struct spdk_blob *blob = ctx->blob; 1941 struct spdk_blob_store *bs = blob->bs; 1942 uint64_t i; 1943 uint32_t page_num; 1944 void *tmp; 1945 int rc; 1946 1947 /* Generate the new metadata */ 1948 rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 1949 if (rc < 0) { 1950 _spdk_blob_persist_complete(seq, ctx, rc); 1951 return; 1952 } 1953 1954 assert(blob->active.num_pages >= 1); 1955 1956 /* Resize the cache of page indices */ 1957 tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages)); 1958 if (!tmp) { 1959 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1960 return; 1961 } 1962 blob->active.pages = tmp; 1963 1964 /* Assign this metadata to pages. This requires two passes - 1965 * one to verify that there are enough pages and a second 1966 * to actually claim them. */ 1967 page_num = 0; 1968 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 1969 for (i = 1; i < blob->active.num_pages; i++) { 1970 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1971 if (page_num == UINT32_MAX) { 1972 _spdk_blob_persist_complete(seq, ctx, -ENOMEM); 1973 return; 1974 } 1975 page_num++; 1976 } 1977 1978 page_num = 0; 1979 blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id); 1980 for (i = 1; i < blob->active.num_pages; i++) { 1981 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 1982 ctx->pages[i - 1].next = page_num; 1983 /* Now that previous metadata page is complete, calculate the crc for it. 
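 * The crc can only be computed once the page's next pointer has been
 * filled in, which is why each iteration seals page i - 1 rather than
 * page i.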
*/ 1984 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1985 blob->active.pages[i] = page_num; 1986 _spdk_bs_claim_md_page(bs, page_num); 1987 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id); 1988 page_num++; 1989 } 1990 ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]); 1991 /* Start writing the metadata from last page to first */ 1992 blob->state = SPDK_BLOB_STATE_CLEAN; 1993 _spdk_blob_persist_write_page_chain(seq, ctx, 0); 1994 } 1995 1996 static void 1997 _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1998 { 1999 struct spdk_blob_persist_ctx *ctx = cb_arg; 2000 struct spdk_blob *blob = ctx->blob; 2001 size_t i; 2002 uint32_t extent_page_id; 2003 uint32_t page_count = 0; 2004 int rc; 2005 2006 if (ctx->extent_page != NULL) { 2007 spdk_free(ctx->extent_page); 2008 ctx->extent_page = NULL; 2009 } 2010 2011 if (bserrno != 0) { 2012 _spdk_blob_persist_complete(seq, ctx, bserrno); 2013 return; 2014 } 2015 2016 /* Only write out changed extent pages */ 2017 for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) { 2018 extent_page_id = blob->active.extent_pages[i]; 2019 if (extent_page_id == 0) { 2020 /* No Extent Page to persist */ 2021 assert(spdk_blob_is_thin_provisioned(blob)); 2022 continue; 2023 } 2024 /* Writing out new extent page for the first time. Either active extent pages is larger 2025 * than clean extent pages or there was no extent page assigned due to thin provisioning. */ 2026 if (i >= blob->clean.extent_pages_array_size || blob->clean.extent_pages[i] == 0) { 2027 blob->state = SPDK_BLOB_STATE_DIRTY; 2028 assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id)); 2029 ctx->next_extent_page = i + 1; 2030 rc = _spdk_blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page); 2031 if (rc < 0) { 2032 _spdk_blob_persist_complete(seq, ctx, rc); 2033 return; 2034 } 2035 2036 _spdk_blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page); 2037 2038 ctx->extent_page->crc = _spdk_blob_md_page_calc_crc(ctx->extent_page); 2039 2040 spdk_bs_sequence_write_dev(seq, ctx->extent_page, _spdk_bs_md_page_to_lba(blob->bs, extent_page_id), 2041 _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE), 2042 _spdk_blob_persist_write_extent_pages, ctx); 2043 return; 2044 } 2045 assert(blob->clean.extent_pages[i] != 0); 2046 } 2047 2048 _spdk_blob_persist_generate_new_md(ctx); 2049 } 2050 2051 static void 2052 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx) 2053 { 2054 spdk_bs_sequence_t *seq = ctx->seq; 2055 struct spdk_blob *blob = ctx->blob; 2056 2057 if (blob->active.num_pages == 0) { 2058 /* This is the signal that the blob should be deleted. 2059 * Immediately jump to the clean up routine. 
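 * A deleted blob still has metadata pages on disk (hence the assert on
 * clean.num_pages below); zeroing those pages is what actually removes
 * the blob from the metadata region.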
*/ 2060 assert(blob->clean.num_pages > 0); 2061 blob->state = SPDK_BLOB_STATE_CLEAN; 2062 _spdk_blob_persist_zero_pages(seq, ctx, 0); 2063 return; 2064 2065 } 2066 2067 _spdk_blob_persist_write_extent_pages(seq, ctx, 0); 2068 } 2069 2070 static void 2071 _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2072 { 2073 struct spdk_blob_persist_ctx *ctx = cb_arg; 2074 2075 spdk_free(ctx->super); 2076 2077 if (bserrno != 0) { 2078 _spdk_blob_persist_complete(seq, ctx, bserrno); 2079 return; 2080 } 2081 2082 ctx->blob->bs->clean = 0; 2083 2084 _spdk_blob_persist_start(ctx); 2085 } 2086 2087 static void 2088 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 2089 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg); 2090 2091 2092 static void 2093 _spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2094 { 2095 struct spdk_blob_persist_ctx *ctx = cb_arg; 2096 2097 if (bserrno != 0) { 2098 spdk_free(ctx->super); 2099 _spdk_blob_persist_complete(seq, ctx, bserrno); 2100 return; 2101 } 2102 2103 ctx->super->clean = 0; 2104 if (ctx->super->size == 0) { 2105 ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen; 2106 } 2107 2108 _spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx); 2109 } 2110 2111 static void 2112 _spdk_blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx) 2113 { 2114 if (ctx->blob->bs->clean) { 2115 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 2116 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2117 if (!ctx->super) { 2118 _spdk_blob_persist_complete(ctx->seq, ctx, -ENOMEM); 2119 return; 2120 } 2121 2122 spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(ctx->blob->bs, 0), 2123 _spdk_bs_byte_to_lba(ctx->blob->bs, sizeof(*ctx->super)), 2124 _spdk_blob_persist_dirty, ctx); 2125 } else { 2126 _spdk_blob_persist_start(ctx); 2127 } 2128 } 2129 2130 /* Write a blob to disk */ 2131 static void 2132 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 2133 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 2134 { 2135 struct spdk_blob_persist_ctx *ctx; 2136 2137 _spdk_blob_verify_md_op(blob); 2138 2139 if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->pending_persists)) { 2140 cb_fn(seq, cb_arg, 0); 2141 return; 2142 } 2143 2144 ctx = calloc(1, sizeof(*ctx)); 2145 if (!ctx) { 2146 cb_fn(seq, cb_arg, -ENOMEM); 2147 return; 2148 } 2149 ctx->blob = blob; 2150 ctx->seq = seq; 2151 ctx->cb_fn = cb_fn; 2152 ctx->cb_arg = cb_arg; 2153 ctx->next_extent_page = 0; 2154 2155 /* Multiple blob persists can affect one another, via blob->state or 2156 * blob mutable data changes. To prevent it, queue up the persists. 
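 * Only the context at the head of pending_persists is in flight; the
 * completion path is expected to start the next queued persist.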
*/ 2157 if (!TAILQ_EMPTY(&blob->pending_persists)) { 2158 TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link); 2159 return; 2160 } 2161 TAILQ_INSERT_HEAD(&blob->pending_persists, ctx, link); 2162 2163 _spdk_blob_persist_check_dirty(ctx); 2164 } 2165 2166 struct spdk_blob_copy_cluster_ctx { 2167 struct spdk_blob *blob; 2168 uint8_t *buf; 2169 uint64_t page; 2170 uint64_t new_cluster; 2171 uint32_t new_extent_page; 2172 spdk_bs_sequence_t *seq; 2173 }; 2174 2175 static void 2176 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno) 2177 { 2178 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2179 struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq; 2180 TAILQ_HEAD(, spdk_bs_request_set) requests; 2181 spdk_bs_user_op_t *op; 2182 2183 TAILQ_INIT(&requests); 2184 TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link); 2185 2186 while (!TAILQ_EMPTY(&requests)) { 2187 op = TAILQ_FIRST(&requests); 2188 TAILQ_REMOVE(&requests, op, link); 2189 if (bserrno == 0) { 2190 spdk_bs_user_op_execute(op); 2191 } else { 2192 spdk_bs_user_op_abort(op); 2193 } 2194 } 2195 2196 spdk_free(ctx->buf); 2197 free(ctx); 2198 } 2199 2200 static void 2201 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno) 2202 { 2203 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2204 2205 if (bserrno) { 2206 if (bserrno == -EEXIST) { 2207 /* The metadata insert failed because another thread 2208 * allocated the cluster first. Free our cluster 2209 * but continue without error. */ 2210 bserrno = 0; 2211 } 2212 _spdk_bs_release_cluster(ctx->blob->bs, ctx->new_cluster); 2213 if (ctx->new_extent_page != 0) { 2214 _spdk_bs_release_md_page(ctx->blob->bs, ctx->new_extent_page); 2215 } 2216 } 2217 2218 spdk_bs_sequence_finish(ctx->seq, bserrno); 2219 } 2220 2221 static void 2222 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2223 { 2224 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2225 uint32_t cluster_number; 2226 2227 if (bserrno) { 2228 /* The write failed, so jump to the final completion handler */ 2229 spdk_bs_sequence_finish(seq, bserrno); 2230 return; 2231 } 2232 2233 cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page); 2234 2235 _spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2236 ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx); 2237 } 2238 2239 static void 2240 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2241 { 2242 struct spdk_blob_copy_cluster_ctx *ctx = cb_arg; 2243 2244 if (bserrno != 0) { 2245 /* The read failed, so jump to the final completion handler */ 2246 spdk_bs_sequence_finish(seq, bserrno); 2247 return; 2248 } 2249 2250 /* Write whole cluster */ 2251 spdk_bs_sequence_write_dev(seq, ctx->buf, 2252 _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster), 2253 _spdk_bs_cluster_to_lba(ctx->blob->bs, 1), 2254 _spdk_blob_write_copy_cpl, ctx); 2255 } 2256 2257 static void 2258 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob, 2259 struct spdk_io_channel *_ch, 2260 uint64_t io_unit, spdk_bs_user_op_t *op) 2261 { 2262 struct spdk_bs_cpl cpl; 2263 struct spdk_bs_channel *ch; 2264 struct spdk_blob_copy_cluster_ctx *ctx; 2265 uint32_t cluster_start_page; 2266 uint32_t cluster_number; 2267 int rc; 2268 2269 ch = spdk_io_channel_get_ctx(_ch); 2270 2271 if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) { 2272 /* There are already operations pending. 
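 * (Only one cluster-allocating copy is in flight per channel at a time.)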
Queue this user op 2273 * and return because it will be re-executed when the outstanding 2274 * cluster allocation completes. */ 2275 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2276 return; 2277 } 2278 2279 /* Round the io_unit offset down to the first page in the cluster */ 2280 cluster_start_page = _spdk_bs_io_unit_to_cluster_start(blob, io_unit); 2281 2282 /* Calculate which index in the metadata cluster array the corresponding 2283 * cluster is supposed to be at. */ 2284 cluster_number = _spdk_bs_io_unit_to_cluster_number(blob, io_unit); 2285 2286 ctx = calloc(1, sizeof(*ctx)); 2287 if (!ctx) { 2288 spdk_bs_user_op_abort(op); 2289 return; 2290 } 2291 2292 assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0); 2293 2294 ctx->blob = blob; 2295 ctx->page = cluster_start_page; 2296 2297 if (blob->parent_id != SPDK_BLOBID_INVALID) { 2298 ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, 2299 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 2300 if (!ctx->buf) { 2301 SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n", 2302 blob->bs->cluster_sz); 2303 free(ctx); 2304 spdk_bs_user_op_abort(op); 2305 return; 2306 } 2307 } 2308 2309 rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page, 2310 false); 2311 if (rc != 0) { 2312 spdk_free(ctx->buf); 2313 free(ctx); 2314 spdk_bs_user_op_abort(op); 2315 return; 2316 } 2317 2318 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2319 cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl; 2320 cpl.u.blob_basic.cb_arg = ctx; 2321 2322 ctx->seq = spdk_bs_sequence_start(_ch, &cpl); 2323 if (!ctx->seq) { 2324 _spdk_bs_release_cluster(blob->bs, ctx->new_cluster); 2325 spdk_free(ctx->buf); 2326 free(ctx); 2327 spdk_bs_user_op_abort(op); 2328 return; 2329 } 2330 2331 /* Queue the user op to block other incoming operations */ 2332 TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link); 2333 2334 if (blob->parent_id != SPDK_BLOBID_INVALID) { 2335 /* Read cluster from backing device */ 2336 spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf, 2337 _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page), 2338 _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz), 2339 _spdk_blob_write_copy, ctx); 2340 } else { 2341 _spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster, 2342 ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx); 2343 } 2344 } 2345 2346 static void 2347 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length, 2348 uint64_t *lba, uint32_t *lba_count) 2349 { 2350 *lba_count = length; 2351 2352 if (!_spdk_bs_io_unit_is_allocated(blob, io_unit)) { 2353 assert(blob->back_bs_dev != NULL); 2354 *lba = _spdk_bs_io_unit_to_back_dev_lba(blob, io_unit); 2355 *lba_count = _spdk_bs_io_unit_to_back_dev_lba(blob, *lba_count); 2356 } else { 2357 *lba = _spdk_bs_blob_io_unit_to_lba(blob, io_unit); 2358 } 2359 } 2360 2361 struct op_split_ctx { 2362 struct spdk_blob *blob; 2363 struct spdk_io_channel *channel; 2364 uint64_t io_unit_offset; 2365 uint64_t io_units_remaining; 2366 void *curr_payload; 2367 enum spdk_blob_op_type op_type; 2368 spdk_bs_sequence_t *seq; 2369 }; 2370 2371 static void 2372 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno) 2373 { 2374 struct op_split_ctx *ctx = cb_arg; 2375 struct spdk_blob *blob = ctx->blob; 2376 struct spdk_io_channel *ch = ctx->channel; 2377 enum spdk_blob_op_type op_type = ctx->op_type; 2378 
uint8_t *buf = ctx->curr_payload; 2379 uint64_t offset = ctx->io_unit_offset; 2380 uint64_t length = ctx->io_units_remaining; 2381 uint64_t op_length; 2382 2383 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2384 spdk_bs_sequence_finish(ctx->seq, bserrno); 2385 free(ctx); 2386 return; 2387 } 2388 2389 op_length = spdk_min(length, _spdk_bs_num_io_units_to_cluster_boundary(blob, 2390 offset)); 2391 2392 /* Update length and payload for next operation */ 2393 ctx->io_units_remaining -= op_length; 2394 ctx->io_unit_offset += op_length; 2395 if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) { 2396 ctx->curr_payload += op_length * blob->bs->io_unit_size; 2397 } 2398 2399 switch (op_type) { 2400 case SPDK_BLOB_READ: 2401 spdk_blob_io_read(blob, ch, buf, offset, op_length, 2402 _spdk_blob_request_submit_op_split_next, ctx); 2403 break; 2404 case SPDK_BLOB_WRITE: 2405 spdk_blob_io_write(blob, ch, buf, offset, op_length, 2406 _spdk_blob_request_submit_op_split_next, ctx); 2407 break; 2408 case SPDK_BLOB_UNMAP: 2409 spdk_blob_io_unmap(blob, ch, offset, op_length, 2410 _spdk_blob_request_submit_op_split_next, ctx); 2411 break; 2412 case SPDK_BLOB_WRITE_ZEROES: 2413 spdk_blob_io_write_zeroes(blob, ch, offset, op_length, 2414 _spdk_blob_request_submit_op_split_next, ctx); 2415 break; 2416 case SPDK_BLOB_READV: 2417 case SPDK_BLOB_WRITEV: 2418 SPDK_ERRLOG("readv/writev not valid\n"); 2419 spdk_bs_sequence_finish(ctx->seq, -EINVAL); 2420 free(ctx); 2421 break; 2422 } 2423 } 2424 2425 static void 2426 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob, 2427 void *payload, uint64_t offset, uint64_t length, 2428 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2429 { 2430 struct op_split_ctx *ctx; 2431 spdk_bs_sequence_t *seq; 2432 struct spdk_bs_cpl cpl; 2433 2434 assert(blob != NULL); 2435 2436 ctx = calloc(1, sizeof(struct op_split_ctx)); 2437 if (ctx == NULL) { 2438 cb_fn(cb_arg, -ENOMEM); 2439 return; 2440 } 2441 2442 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2443 cpl.u.blob_basic.cb_fn = cb_fn; 2444 cpl.u.blob_basic.cb_arg = cb_arg; 2445 2446 seq = spdk_bs_sequence_start(ch, &cpl); 2447 if (!seq) { 2448 free(ctx); 2449 cb_fn(cb_arg, -ENOMEM); 2450 return; 2451 } 2452 2453 ctx->blob = blob; 2454 ctx->channel = ch; 2455 ctx->curr_payload = payload; 2456 ctx->io_unit_offset = offset; 2457 ctx->io_units_remaining = length; 2458 ctx->op_type = op_type; 2459 ctx->seq = seq; 2460 2461 _spdk_blob_request_submit_op_split_next(ctx, 0); 2462 } 2463 2464 static void 2465 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob, 2466 void *payload, uint64_t offset, uint64_t length, 2467 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2468 { 2469 struct spdk_bs_cpl cpl; 2470 uint64_t lba; 2471 uint32_t lba_count; 2472 2473 assert(blob != NULL); 2474 2475 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2476 cpl.u.blob_basic.cb_fn = cb_fn; 2477 cpl.u.blob_basic.cb_arg = cb_arg; 2478 2479 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2480 2481 if (blob->frozen_refcnt) { 2482 /* This blob I/O is frozen */ 2483 spdk_bs_user_op_t *op; 2484 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch); 2485 2486 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2487 if (!op) { 2488 cb_fn(cb_arg, -ENOMEM); 2489 return; 2490 } 2491 2492 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2493 2494 return; 2495 } 2496 2497 switch 
(op_type) { 2498 case SPDK_BLOB_READ: { 2499 spdk_bs_batch_t *batch; 2500 2501 batch = spdk_bs_batch_open(_ch, &cpl); 2502 if (!batch) { 2503 cb_fn(cb_arg, -ENOMEM); 2504 return; 2505 } 2506 2507 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2508 /* Read from the blob */ 2509 spdk_bs_batch_read_dev(batch, payload, lba, lba_count); 2510 } else { 2511 /* Read from the backing block device */ 2512 spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count); 2513 } 2514 2515 spdk_bs_batch_close(batch); 2516 break; 2517 } 2518 case SPDK_BLOB_WRITE: 2519 case SPDK_BLOB_WRITE_ZEROES: { 2520 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2521 /* Write to the blob */ 2522 spdk_bs_batch_t *batch; 2523 2524 if (lba_count == 0) { 2525 cb_fn(cb_arg, 0); 2526 return; 2527 } 2528 2529 batch = spdk_bs_batch_open(_ch, &cpl); 2530 if (!batch) { 2531 cb_fn(cb_arg, -ENOMEM); 2532 return; 2533 } 2534 2535 if (op_type == SPDK_BLOB_WRITE) { 2536 spdk_bs_batch_write_dev(batch, payload, lba, lba_count); 2537 } else { 2538 spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count); 2539 } 2540 2541 spdk_bs_batch_close(batch); 2542 } else { 2543 /* Queue this operation and allocate the cluster */ 2544 spdk_bs_user_op_t *op; 2545 2546 op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length); 2547 if (!op) { 2548 cb_fn(cb_arg, -ENOMEM); 2549 return; 2550 } 2551 2552 _spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op); 2553 } 2554 break; 2555 } 2556 case SPDK_BLOB_UNMAP: { 2557 spdk_bs_batch_t *batch; 2558 2559 batch = spdk_bs_batch_open(_ch, &cpl); 2560 if (!batch) { 2561 cb_fn(cb_arg, -ENOMEM); 2562 return; 2563 } 2564 2565 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2566 spdk_bs_batch_unmap_dev(batch, lba, lba_count); 2567 } 2568 2569 spdk_bs_batch_close(batch); 2570 break; 2571 } 2572 case SPDK_BLOB_READV: 2573 case SPDK_BLOB_WRITEV: 2574 SPDK_ERRLOG("readv/writev not valid\n"); 2575 cb_fn(cb_arg, -EINVAL); 2576 break; 2577 } 2578 } 2579 2580 static void 2581 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2582 void *payload, uint64_t offset, uint64_t length, 2583 spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type) 2584 { 2585 assert(blob != NULL); 2586 2587 if (blob->data_ro && op_type != SPDK_BLOB_READ) { 2588 cb_fn(cb_arg, -EPERM); 2589 return; 2590 } 2591 2592 if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2593 cb_fn(cb_arg, -EINVAL); 2594 return; 2595 } 2596 if (length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset)) { 2597 _spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length, 2598 cb_fn, cb_arg, op_type); 2599 } else { 2600 _spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length, 2601 cb_fn, cb_arg, op_type); 2602 } 2603 } 2604 2605 struct rw_iov_ctx { 2606 struct spdk_blob *blob; 2607 struct spdk_io_channel *channel; 2608 spdk_blob_op_complete cb_fn; 2609 void *cb_arg; 2610 bool read; 2611 int iovcnt; 2612 struct iovec *orig_iov; 2613 uint64_t io_unit_offset; 2614 uint64_t io_units_remaining; 2615 uint64_t io_units_done; 2616 struct iovec iov[0]; 2617 }; 2618 2619 static void 2620 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2621 { 2622 assert(cb_arg == NULL); 2623 spdk_bs_sequence_finish(seq, bserrno); 2624 } 2625 2626 static void 2627 _spdk_rw_iov_split_next(void *cb_arg, int bserrno) 2628 { 2629 struct rw_iov_ctx *ctx = cb_arg; 2630 struct spdk_blob *blob = ctx->blob; 2631 
struct iovec *iov, *orig_iov; 2632 int iovcnt; 2633 size_t orig_iovoff; 2634 uint64_t io_units_count, io_units_to_boundary, io_unit_offset; 2635 uint64_t byte_count; 2636 2637 if (bserrno != 0 || ctx->io_units_remaining == 0) { 2638 ctx->cb_fn(ctx->cb_arg, bserrno); 2639 free(ctx); 2640 return; 2641 } 2642 2643 io_unit_offset = ctx->io_unit_offset; 2644 io_units_to_boundary = _spdk_bs_num_io_units_to_cluster_boundary(blob, io_unit_offset); 2645 io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary); 2646 /* 2647 * Get index and offset into the original iov array for our current position in the I/O sequence. 2648 * byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will 2649 * point to the current position in the I/O sequence. 2650 */ 2651 byte_count = ctx->io_units_done * blob->bs->io_unit_size; 2652 orig_iov = &ctx->orig_iov[0]; 2653 orig_iovoff = 0; 2654 while (byte_count > 0) { 2655 if (byte_count >= orig_iov->iov_len) { 2656 byte_count -= orig_iov->iov_len; 2657 orig_iov++; 2658 } else { 2659 orig_iovoff = byte_count; 2660 byte_count = 0; 2661 } 2662 } 2663 2664 /* 2665 * Build an iov array for the next I/O in the sequence. byte_count will keep track of how many 2666 * bytes of this next I/O remain to be accounted for in the new iov array. 2667 */ 2668 byte_count = io_units_count * blob->bs->io_unit_size; 2669 iov = &ctx->iov[0]; 2670 iovcnt = 0; 2671 while (byte_count > 0) { 2672 assert(iovcnt < ctx->iovcnt); 2673 iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff); 2674 iov->iov_base = orig_iov->iov_base + orig_iovoff; 2675 byte_count -= iov->iov_len; 2676 orig_iovoff = 0; 2677 orig_iov++; 2678 iov++; 2679 iovcnt++; 2680 } 2681 2682 ctx->io_unit_offset += io_units_count; 2683 ctx->io_units_remaining -= io_units_count; 2684 ctx->io_units_done += io_units_count; 2685 iov = &ctx->iov[0]; 2686 2687 if (ctx->read) { 2688 spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2689 io_units_count, _spdk_rw_iov_split_next, ctx); 2690 } else { 2691 spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset, 2692 io_units_count, _spdk_rw_iov_split_next, ctx); 2693 } 2694 } 2695 2696 static void 2697 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel, 2698 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length, 2699 spdk_blob_op_complete cb_fn, void *cb_arg, bool read) 2700 { 2701 struct spdk_bs_cpl cpl; 2702 2703 assert(blob != NULL); 2704 2705 if (!read && blob->data_ro) { 2706 cb_fn(cb_arg, -EPERM); 2707 return; 2708 } 2709 2710 if (length == 0) { 2711 cb_fn(cb_arg, 0); 2712 return; 2713 } 2714 2715 if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) { 2716 cb_fn(cb_arg, -EINVAL); 2717 return; 2718 } 2719 2720 /* 2721 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having 2722 * to split a request that spans a cluster boundary. For I/O that do not span a cluster boundary, 2723 * there will be no noticeable difference compared to using a batch. For I/O that do span a cluster 2724 * boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need 2725 * to allocate a separate iov array and split the I/O such that none of the resulting 2726 * smaller I/O cross a cluster boundary. These smaller I/O will be issued in sequence (not in parallel) 2727 * but since this case happens very infrequently, any performance impact will be negligible. 
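 *
 * Worked example (illustrative numbers): with 4 KiB io_units and a 1 MiB
 * cluster, a 12 io_unit writev that starts 8 io_units before a cluster
 * boundary is split into an 8-unit I/O and a 4-unit I/O, each with its
 * own sub-iov array built by _spdk_rw_iov_split_next().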
2728 * 2729 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs 2730 * for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them 2731 * in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called 2732 * when the batch was completed, to allow for freeing the memory for the iov arrays. 2733 */ 2734 if (spdk_likely(length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset))) { 2735 uint32_t lba_count; 2736 uint64_t lba; 2737 2738 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2739 cpl.u.blob_basic.cb_fn = cb_fn; 2740 cpl.u.blob_basic.cb_arg = cb_arg; 2741 2742 if (blob->frozen_refcnt) { 2743 /* This blob I/O is frozen */ 2744 enum spdk_blob_op_type op_type; 2745 spdk_bs_user_op_t *op; 2746 struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel); 2747 2748 op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV; 2749 op = spdk_bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length); 2750 if (!op) { 2751 cb_fn(cb_arg, -ENOMEM); 2752 return; 2753 } 2754 2755 TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link); 2756 2757 return; 2758 } 2759 2760 _spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count); 2761 2762 if (read) { 2763 spdk_bs_sequence_t *seq; 2764 2765 seq = spdk_bs_sequence_start(_channel, &cpl); 2766 if (!seq) { 2767 cb_fn(cb_arg, -ENOMEM); 2768 return; 2769 } 2770 2771 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2772 spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2773 } else { 2774 spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count, 2775 _spdk_rw_iov_done, NULL); 2776 } 2777 } else { 2778 if (_spdk_bs_io_unit_is_allocated(blob, offset)) { 2779 spdk_bs_sequence_t *seq; 2780 2781 seq = spdk_bs_sequence_start(_channel, &cpl); 2782 if (!seq) { 2783 cb_fn(cb_arg, -ENOMEM); 2784 return; 2785 } 2786 2787 spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL); 2788 } else { 2789 /* Queue this operation and allocate the cluster */ 2790 spdk_bs_user_op_t *op; 2791 2792 op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, 2793 length); 2794 if (!op) { 2795 cb_fn(cb_arg, -ENOMEM); 2796 return; 2797 } 2798 2799 _spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op); 2800 } 2801 } 2802 } else { 2803 struct rw_iov_ctx *ctx; 2804 2805 ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec)); 2806 if (ctx == NULL) { 2807 cb_fn(cb_arg, -ENOMEM); 2808 return; 2809 } 2810 2811 ctx->blob = blob; 2812 ctx->channel = _channel; 2813 ctx->cb_fn = cb_fn; 2814 ctx->cb_arg = cb_arg; 2815 ctx->read = read; 2816 ctx->orig_iov = iov; 2817 ctx->iovcnt = iovcnt; 2818 ctx->io_unit_offset = offset; 2819 ctx->io_units_remaining = length; 2820 ctx->io_units_done = 0; 2821 2822 _spdk_rw_iov_split_next(ctx, 0); 2823 } 2824 } 2825 2826 static struct spdk_blob * 2827 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 2828 { 2829 struct spdk_blob *blob; 2830 2831 TAILQ_FOREACH(blob, &bs->blobs, link) { 2832 if (blob->id == blobid) { 2833 return blob; 2834 } 2835 } 2836 2837 return NULL; 2838 } 2839 2840 static void 2841 _spdk_blob_get_snapshot_and_clone_entries(struct spdk_blob *blob, 2842 struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry) 2843 { 2844 assert(blob != NULL); 2845 *snapshot_entry = NULL; 2846 *clone_entry = NULL; 2847 
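/* A blob without a parent is not a clone, so it has no snapshot entry or
 * clone entry to look up. */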
2848 if (blob->parent_id == SPDK_BLOBID_INVALID) { 2849 return; 2850 } 2851 2852 TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) { 2853 if ((*snapshot_entry)->id == blob->parent_id) { 2854 break; 2855 } 2856 } 2857 2858 if (*snapshot_entry != NULL) { 2859 TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) { 2860 if ((*clone_entry)->id == blob->id) { 2861 break; 2862 } 2863 } 2864 2865 assert(*clone_entry != NULL); 2866 } 2867 } 2868 2869 static int 2870 _spdk_bs_channel_create(void *io_device, void *ctx_buf) 2871 { 2872 struct spdk_blob_store *bs = io_device; 2873 struct spdk_bs_channel *channel = ctx_buf; 2874 struct spdk_bs_dev *dev; 2875 uint32_t max_ops = bs->max_channel_ops; 2876 uint32_t i; 2877 2878 dev = bs->dev; 2879 2880 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 2881 if (!channel->req_mem) { 2882 return -1; 2883 } 2884 2885 TAILQ_INIT(&channel->reqs); 2886 2887 for (i = 0; i < max_ops; i++) { 2888 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 2889 } 2890 2891 channel->bs = bs; 2892 channel->dev = dev; 2893 channel->dev_channel = dev->create_channel(dev); 2894 2895 if (!channel->dev_channel) { 2896 SPDK_ERRLOG("Failed to create device channel.\n"); 2897 free(channel->req_mem); 2898 return -1; 2899 } 2900 2901 TAILQ_INIT(&channel->need_cluster_alloc); 2902 TAILQ_INIT(&channel->queued_io); 2903 2904 return 0; 2905 } 2906 2907 static void 2908 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf) 2909 { 2910 struct spdk_bs_channel *channel = ctx_buf; 2911 spdk_bs_user_op_t *op; 2912 2913 while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) { 2914 op = TAILQ_FIRST(&channel->need_cluster_alloc); 2915 TAILQ_REMOVE(&channel->need_cluster_alloc, op, link); 2916 spdk_bs_user_op_abort(op); 2917 } 2918 2919 while (!TAILQ_EMPTY(&channel->queued_io)) { 2920 op = TAILQ_FIRST(&channel->queued_io); 2921 TAILQ_REMOVE(&channel->queued_io, op, link); 2922 spdk_bs_user_op_abort(op); 2923 } 2924 2925 free(channel->req_mem); 2926 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 2927 } 2928 2929 static void 2930 _spdk_bs_dev_destroy(void *io_device) 2931 { 2932 struct spdk_blob_store *bs = io_device; 2933 struct spdk_blob *blob, *blob_tmp; 2934 2935 bs->dev->destroy(bs->dev); 2936 2937 TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) { 2938 TAILQ_REMOVE(&bs->blobs, blob, link); 2939 _spdk_blob_free(blob); 2940 } 2941 2942 pthread_mutex_destroy(&bs->used_clusters_mutex); 2943 2944 spdk_bit_array_free(&bs->used_blobids); 2945 spdk_bit_array_free(&bs->used_md_pages); 2946 spdk_bit_array_free(&bs->used_clusters); 2947 /* 2948 * If this function is called for any reason except a successful unload, 2949 * the unload_cpl type will be NONE and this will be a nop. 
2950 */ 2951 spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err); 2952 2953 free(bs); 2954 } 2955 2956 static int 2957 _spdk_bs_blob_list_add(struct spdk_blob *blob) 2958 { 2959 spdk_blob_id snapshot_id; 2960 struct spdk_blob_list *snapshot_entry = NULL; 2961 struct spdk_blob_list *clone_entry = NULL; 2962 2963 assert(blob != NULL); 2964 2965 snapshot_id = blob->parent_id; 2966 if (snapshot_id == SPDK_BLOBID_INVALID) { 2967 return 0; 2968 } 2969 2970 snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, snapshot_id); 2971 if (snapshot_entry == NULL) { 2972 /* Snapshot not found */ 2973 snapshot_entry = calloc(1, sizeof(struct spdk_blob_list)); 2974 if (snapshot_entry == NULL) { 2975 return -ENOMEM; 2976 } 2977 snapshot_entry->id = snapshot_id; 2978 TAILQ_INIT(&snapshot_entry->clones); 2979 TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link); 2980 } else { 2981 TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) { 2982 if (clone_entry->id == blob->id) { 2983 break; 2984 } 2985 } 2986 } 2987 2988 if (clone_entry == NULL) { 2989 /* Clone not found */ 2990 clone_entry = calloc(1, sizeof(struct spdk_blob_list)); 2991 if (clone_entry == NULL) { 2992 return -ENOMEM; 2993 } 2994 clone_entry->id = blob->id; 2995 TAILQ_INIT(&clone_entry->clones); 2996 TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link); 2997 snapshot_entry->clone_count++; 2998 } 2999 3000 return 0; 3001 } 3002 3003 static void 3004 _spdk_bs_blob_list_remove(struct spdk_blob *blob) 3005 { 3006 struct spdk_blob_list *snapshot_entry = NULL; 3007 struct spdk_blob_list *clone_entry = NULL; 3008 3009 _spdk_blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry); 3010 3011 if (snapshot_entry == NULL) { 3012 return; 3013 } 3014 3015 blob->parent_id = SPDK_BLOBID_INVALID; 3016 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3017 free(clone_entry); 3018 3019 snapshot_entry->clone_count--; 3020 } 3021 3022 static int 3023 _spdk_bs_blob_list_free(struct spdk_blob_store *bs) 3024 { 3025 struct spdk_blob_list *snapshot_entry; 3026 struct spdk_blob_list *snapshot_entry_tmp; 3027 struct spdk_blob_list *clone_entry; 3028 struct spdk_blob_list *clone_entry_tmp; 3029 3030 TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) { 3031 TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) { 3032 TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link); 3033 free(clone_entry); 3034 } 3035 TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link); 3036 free(snapshot_entry); 3037 } 3038 3039 return 0; 3040 } 3041 3042 static void 3043 _spdk_bs_free(struct spdk_blob_store *bs) 3044 { 3045 _spdk_bs_blob_list_free(bs); 3046 3047 spdk_bs_unregister_md_thread(bs); 3048 spdk_io_device_unregister(bs, _spdk_bs_dev_destroy); 3049 } 3050 3051 void 3052 spdk_bs_opts_init(struct spdk_bs_opts *opts) 3053 { 3054 opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ; 3055 opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES; 3056 opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS; 3057 opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS; 3058 opts->clear_method = BS_CLEAR_WITH_UNMAP; 3059 memset(&opts->bstype, 0, sizeof(opts->bstype)); 3060 opts->iter_cb_fn = NULL; 3061 opts->iter_cb_arg = NULL; 3062 } 3063 3064 static int 3065 _spdk_bs_opts_verify(struct spdk_bs_opts *opts) 3066 { 3067 if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 || 3068 opts->max_channel_ops == 0) { 3069 SPDK_ERRLOG("Blobstore options cannot be set to 0\n"); 3070 return -1; 3071 } 3072 3073 
return 0; 3074 } 3075 3076 static int 3077 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs) 3078 { 3079 struct spdk_blob_store *bs; 3080 uint64_t dev_size; 3081 int rc; 3082 3083 dev_size = dev->blocklen * dev->blockcnt; 3084 if (dev_size < opts->cluster_sz) { 3085 /* Device size cannot be smaller than cluster size of blobstore */ 3086 SPDK_INFOLOG(SPDK_LOG_BLOB, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n", 3087 dev_size, opts->cluster_sz); 3088 return -ENOSPC; 3089 } 3090 if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) { 3091 /* Cluster size cannot be smaller than page size */ 3092 SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n", 3093 opts->cluster_sz, SPDK_BS_PAGE_SIZE); 3094 return -EINVAL; 3095 } 3096 bs = calloc(1, sizeof(struct spdk_blob_store)); 3097 if (!bs) { 3098 return -ENOMEM; 3099 } 3100 3101 TAILQ_INIT(&bs->blobs); 3102 TAILQ_INIT(&bs->snapshots); 3103 bs->dev = dev; 3104 bs->md_thread = spdk_get_thread(); 3105 assert(bs->md_thread != NULL); 3106 3107 /* 3108 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an 3109 * even multiple of the cluster size. 3110 */ 3111 bs->cluster_sz = opts->cluster_sz; 3112 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 3113 bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE; 3114 bs->num_free_clusters = bs->total_clusters; 3115 bs->used_clusters = spdk_bit_array_create(bs->total_clusters); 3116 bs->io_unit_size = dev->blocklen; 3117 if (bs->used_clusters == NULL) { 3118 free(bs); 3119 return -ENOMEM; 3120 } 3121 3122 bs->max_channel_ops = opts->max_channel_ops; 3123 bs->super_blob = SPDK_BLOBID_INVALID; 3124 memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype)); 3125 3126 /* The metadata is assumed to be at least 1 page */ 3127 bs->used_md_pages = spdk_bit_array_create(1); 3128 bs->used_blobids = spdk_bit_array_create(0); 3129 3130 pthread_mutex_init(&bs->used_clusters_mutex, NULL); 3131 3132 spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy, 3133 sizeof(struct spdk_bs_channel), "blobstore"); 3134 rc = spdk_bs_register_md_thread(bs); 3135 if (rc == -1) { 3136 spdk_io_device_unregister(bs, NULL); 3137 pthread_mutex_destroy(&bs->used_clusters_mutex); 3138 spdk_bit_array_free(&bs->used_blobids); 3139 spdk_bit_array_free(&bs->used_md_pages); 3140 spdk_bit_array_free(&bs->used_clusters); 3141 free(bs); 3142 /* FIXME: this is a lie, but we don't know how to get a proper error code here */ 3143 return -ENOMEM; 3144 } 3145 3146 *_bs = bs; 3147 return 0; 3148 } 3149 3150 /* START spdk_bs_load - spdk_bs_load_ctx is used for both load and unload. 
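 * Load sequence sketch: read the super block, then the used_pages,
 * used_clusters and used_blobids masks (or replay the on-disk md chain
 * when the masks cannot be trusted), and finally iterate every blob via
 * _spdk_bs_load_iter() below.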
*/ 3151 3152 struct spdk_bs_load_ctx { 3153 struct spdk_blob_store *bs; 3154 struct spdk_bs_super_block *super; 3155 3156 struct spdk_bs_md_mask *mask; 3157 bool in_page_chain; 3158 uint32_t page_index; 3159 uint32_t cur_page; 3160 struct spdk_blob_md_page *page; 3161 3162 uint64_t num_extent_pages; 3163 uint32_t *extent_pages; 3164 3165 spdk_bs_sequence_t *seq; 3166 spdk_blob_op_with_handle_complete iter_cb_fn; 3167 void *iter_cb_arg; 3168 struct spdk_blob *blob; 3169 spdk_blob_id blobid; 3170 }; 3171 3172 static void 3173 _spdk_bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno) 3174 { 3175 assert(bserrno != 0); 3176 3177 spdk_free(ctx->super); 3178 spdk_bs_sequence_finish(ctx->seq, bserrno); 3179 _spdk_bs_free(ctx->bs); 3180 free(ctx); 3181 } 3182 3183 static void 3184 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask) 3185 { 3186 uint32_t i = 0; 3187 3188 while (true) { 3189 i = spdk_bit_array_find_first_set(array, i); 3190 if (i >= mask->length) { 3191 break; 3192 } 3193 mask->mask[i / 8] |= 1U << (i % 8); 3194 i++; 3195 } 3196 } 3197 3198 static int 3199 _spdk_bs_load_mask(struct spdk_bit_array **array_ptr, struct spdk_bs_md_mask *mask) 3200 { 3201 struct spdk_bit_array *array; 3202 uint32_t i; 3203 3204 if (spdk_bit_array_resize(array_ptr, mask->length) < 0) { 3205 return -ENOMEM; 3206 } 3207 3208 array = *array_ptr; 3209 for (i = 0; i < mask->length; i++) { 3210 if (mask->mask[i / 8] & (1U << (i % 8))) { 3211 spdk_bit_array_set(array, i); 3212 } 3213 } 3214 3215 return 0; 3216 } 3217 3218 static void 3219 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs, 3220 struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg) 3221 { 3222 /* Update the values in the super block */ 3223 super->super_blob = bs->super_blob; 3224 memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype)); 3225 super->crc = _spdk_blob_md_page_calc_crc(super); 3226 spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0), 3227 _spdk_bs_byte_to_lba(bs, sizeof(*super)), 3228 cb_fn, cb_arg); 3229 } 3230 3231 static void 3232 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3233 { 3234 struct spdk_bs_load_ctx *ctx = arg; 3235 uint64_t mask_size, lba, lba_count; 3236 3237 /* Write out the used clusters mask */ 3238 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3239 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3240 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3241 if (!ctx->mask) { 3242 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3243 return; 3244 } 3245 3246 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 3247 ctx->mask->length = ctx->bs->total_clusters; 3248 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters)); 3249 3250 _spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask); 3251 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3252 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3253 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3254 } 3255 3256 static void 3257 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3258 { 3259 struct spdk_bs_load_ctx *ctx = arg; 3260 uint64_t mask_size, lba, lba_count; 3261 3262 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3263 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3264 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3265 if (!ctx->mask) { 3266 _spdk_bs_load_ctx_fail(ctx, 
-ENOMEM); 3267 return; 3268 } 3269 3270 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 3271 ctx->mask->length = ctx->super->md_len; 3272 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 3273 3274 _spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask); 3275 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3276 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3277 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3278 } 3279 3280 static void 3281 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn) 3282 { 3283 struct spdk_bs_load_ctx *ctx = arg; 3284 uint64_t mask_size, lba, lba_count; 3285 3286 if (ctx->super->used_blobid_mask_len == 0) { 3287 /* 3288 * This is a pre-v3 on-disk format where the blobid mask does not get 3289 * written to disk. 3290 */ 3291 cb_fn(seq, arg, 0); 3292 return; 3293 } 3294 3295 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3296 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3297 SPDK_MALLOC_DMA); 3298 if (!ctx->mask) { 3299 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3300 return; 3301 } 3302 3303 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS; 3304 ctx->mask->length = ctx->super->md_len; 3305 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids)); 3306 3307 _spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask); 3308 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3309 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3310 spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg); 3311 } 3312 3313 static void 3314 _spdk_blob_set_thin_provision(struct spdk_blob *blob) 3315 { 3316 _spdk_blob_verify_md_op(blob); 3317 blob->invalid_flags |= SPDK_BLOB_THIN_PROV; 3318 blob->state = SPDK_BLOB_STATE_DIRTY; 3319 } 3320 3321 static void 3322 _spdk_blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method) 3323 { 3324 _spdk_blob_verify_md_op(blob); 3325 blob->clear_method = clear_method; 3326 blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT); 3327 blob->state = SPDK_BLOB_STATE_DIRTY; 3328 } 3329 3330 static void _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno); 3331 3332 static void 3333 _spdk_bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno) 3334 { 3335 struct spdk_bs_load_ctx *ctx = cb_arg; 3336 spdk_blob_id id; 3337 int64_t page_num; 3338 3339 /* Iterate to next blob (we can't use spdk_bs_iter_next function as our 3340 * last blob has been removed */ 3341 page_num = _spdk_bs_blobid_to_page(ctx->blobid); 3342 page_num++; 3343 page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num); 3344 if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) { 3345 _spdk_bs_load_iter(ctx, NULL, -ENOENT); 3346 return; 3347 } 3348 3349 id = _spdk_bs_page_to_blobid(page_num); 3350 3351 spdk_bs_open_blob(ctx->bs, id, _spdk_bs_load_iter, ctx); 3352 } 3353 3354 static void 3355 _spdk_bs_delete_corrupted_close_cb(void *cb_arg, int bserrno) 3356 { 3357 struct spdk_bs_load_ctx *ctx = cb_arg; 3358 3359 if (bserrno != 0) { 3360 SPDK_ERRLOG("Failed to close corrupted blob\n"); 3361 spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx); 3362 return; 3363 } 3364 3365 spdk_bs_delete_blob(ctx->bs, ctx->blobid, _spdk_bs_delete_corrupted_blob_cpl, ctx); 3366 } 3367 3368 static void 3369 _spdk_bs_delete_corrupted_blob(void *cb_arg, int bserrno) 3370 { 
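	/* Called once the clone of the corrupted snapshot has been closed;
	 * ctx->blob is the snapshot that is about to be removed. */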
3371 struct spdk_bs_load_ctx *ctx = cb_arg; 3372 uint64_t i; 3373 3374 if (bserrno != 0) { 3375 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 3376 spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx); 3377 return; 3378 } 3379 3380 /* Snapshot and clone have the same copy of cluster map and extent pages 3381 * at this point. Let's clear both for the snapshot now, 3382 * so that they won't be cleared for the clone later when we remove the snapshot. 3383 * Also set thin provision to pass the data corruption check */ 3384 for (i = 0; i < ctx->blob->active.num_clusters; i++) { 3385 ctx->blob->active.clusters[i] = 0; 3386 } 3387 for (i = 0; i < ctx->blob->active.num_extent_pages; i++) { 3388 ctx->blob->active.extent_pages[i] = 0; 3389 } 3390 3391 ctx->blob->md_ro = false; 3392 3393 _spdk_blob_set_thin_provision(ctx->blob); 3394 3395 ctx->blobid = ctx->blob->id; 3396 3397 spdk_blob_close(ctx->blob, _spdk_bs_delete_corrupted_close_cb, ctx); 3398 } 3399 3400 static void 3401 _spdk_bs_update_corrupted_blob(void *cb_arg, int bserrno) 3402 { 3403 struct spdk_bs_load_ctx *ctx = cb_arg; 3404 3405 if (bserrno != 0) { 3406 SPDK_ERRLOG("Failed to close clone of a corrupted blob\n"); 3407 spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx); 3408 return; 3409 } 3410 3411 ctx->blob->md_ro = false; 3412 _spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true); 3413 _spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true); 3414 spdk_blob_set_read_only(ctx->blob); 3415 3416 if (ctx->iter_cb_fn) { 3417 ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0); 3418 } 3419 _spdk_bs_blob_list_add(ctx->blob); 3420 3421 spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx); 3422 } 3423 3424 static void 3425 _spdk_bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno) 3426 { 3427 struct spdk_bs_load_ctx *ctx = cb_arg; 3428 3429 if (bserrno != 0) { 3430 SPDK_ERRLOG("Failed to open clone of a corrupted blob\n"); 3431 spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx); 3432 return; 3433 } 3434 3435 if (blob->parent_id == ctx->blob->id) { 3436 /* Power failure occurred before updating clone (snapshot delete case) 3437 * or after updating clone (creating snapshot case) - keep snapshot */ 3438 spdk_blob_close(blob, _spdk_bs_update_corrupted_blob, ctx); 3439 } else { 3440 /* Power failure occurred after updating clone (snapshot delete case) 3441 * or before updating clone (creating snapshot case) - remove snapshot */ 3442 spdk_blob_close(blob, _spdk_bs_delete_corrupted_blob, ctx); 3443 } 3444 } 3445 3446 static void 3447 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno) 3448 { 3449 struct spdk_bs_load_ctx *ctx = arg; 3450 const void *value; 3451 size_t len; 3452 int rc = 0; 3453 3454 if (bserrno == 0) { 3455 /* Examine the blob to check whether it was corrupted by a power failure. Fix 3456 * the ones that can be fixed and remove any other corrupted 3457 * ones. 
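 * Corruption is detected via the internal SNAPSHOT_PENDING_REMOVAL and
 * SNAPSHOT_IN_PROGRESS xattrs that an interrupted snapshot operation
 * leaves behind.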
If it is not corrupted just process it */ 3458 rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true); 3459 if (rc != 0) { 3460 rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true); 3461 if (rc != 0) { 3462 /* Not corrupted - process it and continue with iterating through blobs */ 3463 if (ctx->iter_cb_fn) { 3464 ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0); 3465 } 3466 _spdk_bs_blob_list_add(blob); 3467 spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx); 3468 return; 3469 } 3470 3471 } 3472 3473 assert(len == sizeof(spdk_blob_id)); 3474 3475 ctx->blob = blob; 3476 3477 /* Open clone to check if we are able to fix this blob or should we remove it */ 3478 spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, _spdk_bs_examine_clone, ctx); 3479 return; 3480 } else if (bserrno == -ENOENT) { 3481 bserrno = 0; 3482 } else { 3483 /* 3484 * This case needs to be looked at further. Same problem 3485 * exists with applications that rely on explicit blob 3486 * iteration. We should just skip the blob that failed 3487 * to load and continue on to the next one. 3488 */ 3489 SPDK_ERRLOG("Error in iterating blobs\n"); 3490 } 3491 3492 ctx->iter_cb_fn = NULL; 3493 3494 spdk_free(ctx->super); 3495 spdk_free(ctx->mask); 3496 spdk_bs_sequence_finish(ctx->seq, bserrno); 3497 free(ctx); 3498 } 3499 3500 static void 3501 _spdk_bs_load_complete(struct spdk_bs_load_ctx *ctx) 3502 { 3503 spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx); 3504 } 3505 3506 static void 3507 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3508 { 3509 struct spdk_bs_load_ctx *ctx = cb_arg; 3510 int rc; 3511 3512 /* The type must be correct */ 3513 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS); 3514 3515 /* The length of the mask (in bits) must not be greater than 3516 * the length of the buffer (converted to bits) */ 3517 assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8)); 3518 3519 /* The length of the mask must be exactly equal to the size 3520 * (in pages) of the metadata region */ 3521 assert(ctx->mask->length == ctx->super->md_len); 3522 3523 rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask); 3524 if (rc < 0) { 3525 spdk_free(ctx->mask); 3526 _spdk_bs_load_ctx_fail(ctx, rc); 3527 return; 3528 } 3529 3530 _spdk_bs_load_complete(ctx); 3531 } 3532 3533 static void 3534 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3535 { 3536 struct spdk_bs_load_ctx *ctx = cb_arg; 3537 uint64_t lba, lba_count, mask_size; 3538 int rc; 3539 3540 if (bserrno != 0) { 3541 _spdk_bs_load_ctx_fail(ctx, bserrno); 3542 return; 3543 } 3544 3545 /* The type must be correct */ 3546 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 3547 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 3548 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 3549 struct spdk_blob_md_page) * 8)); 3550 /* The length of the mask must be exactly equal to the total number of clusters */ 3551 assert(ctx->mask->length == ctx->bs->total_clusters); 3552 3553 rc = _spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask); 3554 if (rc < 0) { 3555 spdk_free(ctx->mask); 3556 _spdk_bs_load_ctx_fail(ctx, rc); 3557 return; 3558 } 3559 3560 ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters); 3561 assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters); 3562 3563 spdk_free(ctx->mask); 3564 
3565 /* Read the used blobids mask */ 3566 mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE; 3567 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3568 SPDK_MALLOC_DMA); 3569 if (!ctx->mask) { 3570 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3571 return; 3572 } 3573 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start); 3574 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len); 3575 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 3576 _spdk_bs_load_used_blobids_cpl, ctx); 3577 } 3578 3579 static void 3580 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3581 { 3582 struct spdk_bs_load_ctx *ctx = cb_arg; 3583 uint64_t lba, lba_count, mask_size; 3584 int rc; 3585 3586 if (bserrno != 0) { 3587 _spdk_bs_load_ctx_fail(ctx, bserrno); 3588 return; 3589 } 3590 3591 /* The type must be correct */ 3592 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 3593 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 3594 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE * 3595 8)); 3596 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 3597 assert(ctx->mask->length == ctx->super->md_len); 3598 3599 rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask); 3600 if (rc < 0) { 3601 spdk_free(ctx->mask); 3602 _spdk_bs_load_ctx_fail(ctx, rc); 3603 return; 3604 } 3605 3606 spdk_free(ctx->mask); 3607 3608 /* Read the used clusters mask */ 3609 mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE; 3610 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, 3611 SPDK_MALLOC_DMA); 3612 if (!ctx->mask) { 3613 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3614 return; 3615 } 3616 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 3617 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 3618 spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count, 3619 _spdk_bs_load_used_clusters_cpl, ctx); 3620 } 3621 3622 static void 3623 _spdk_bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx) 3624 { 3625 uint64_t lba, lba_count, mask_size; 3626 3627 /* Read the used pages mask */ 3628 mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE; 3629 ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, 3630 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 3631 if (!ctx->mask) { 3632 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 3633 return; 3634 } 3635 3636 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 3637 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 3638 spdk_bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count, 3639 _spdk_bs_load_used_pages_cpl, ctx); 3640 } 3641 3642 static int 3643 _spdk_bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page) 3644 { 3645 struct spdk_blob_store *bs = ctx->bs; 3646 struct spdk_blob_md_descriptor *desc; 3647 size_t cur_desc = 0; 3648 3649 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3650 while (cur_desc < sizeof(page->descriptors)) { 3651 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 3652 if (desc->length == 0) { 3653 /* If padding and length are 0, this terminates the page */ 3654 break; 3655 } 3656 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) { 3657 struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle; 3658 unsigned int i, j; 3659 unsigned int 
cluster_count = 0; 3660 uint32_t cluster_idx; 3661 3662 desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc; 3663 3664 for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) { 3665 for (j = 0; j < desc_extent_rle->extents[i].length; j++) { 3666 cluster_idx = desc_extent_rle->extents[i].cluster_idx; 3667 /* 3668 * cluster_idx = 0 means an unallocated cluster - don't mark that 3669 * in the used cluster map. 3670 */ 3671 if (cluster_idx != 0) { 3672 spdk_bit_array_set(bs->used_clusters, cluster_idx + j); 3673 if (bs->num_free_clusters == 0) { 3674 return -ENOSPC; 3675 } 3676 bs->num_free_clusters--; 3677 } 3678 cluster_count++; 3679 } 3680 } 3681 if (cluster_count == 0) { 3682 return -EINVAL; 3683 } 3684 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 3685 struct spdk_blob_md_descriptor_extent_page *desc_extent; 3686 uint32_t i; 3687 uint32_t cluster_count = 0; 3688 uint32_t cluster_idx; 3689 size_t cluster_idx_length; 3690 3691 desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc; 3692 cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx); 3693 3694 if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) || 3695 (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) { 3696 return -EINVAL; 3697 } 3698 3699 for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) { 3700 cluster_idx = desc_extent->cluster_idx[i]; 3701 /* 3702 * cluster_idx = 0 means an unallocated cluster - don't mark that 3703 * in the used cluster map. 3704 */ 3705 if (cluster_idx != 0) { 3706 if (cluster_idx < desc_extent->start_cluster_idx && 3707 cluster_idx >= desc_extent->start_cluster_idx + cluster_count) { 3708 return -EINVAL; 3709 } 3710 spdk_bit_array_set(bs->used_clusters, cluster_idx); 3711 if (bs->num_free_clusters == 0) { 3712 return -ENOSPC; 3713 } 3714 bs->num_free_clusters--; 3715 } 3716 cluster_count++; 3717 } 3718 3719 if (cluster_count == 0) { 3720 return -EINVAL; 3721 } 3722 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 3723 /* Skip this item */ 3724 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 3725 /* Skip this item */ 3726 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 3727 /* Skip this item */ 3728 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) { 3729 struct spdk_blob_md_descriptor_extent_table *desc_extent_table; 3730 uint32_t num_extent_pages = ctx->num_extent_pages; 3731 uint32_t i; 3732 size_t extent_pages_length; 3733 void *tmp; 3734 3735 desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc; 3736 extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters); 3737 3738 if (desc_extent_table->length == 0 || 3739 (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) { 3740 return -EINVAL; 3741 } 3742 3743 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 3744 if (desc_extent_table->extent_page[i].page_idx != 0) { 3745 if (desc_extent_table->extent_page[i].num_pages != 1) { 3746 return -EINVAL; 3747 } 3748 num_extent_pages += 1; 3749 } 3750 } 3751 3752 if (num_extent_pages > 0) { 3753 tmp = realloc(ctx->extent_pages, num_extent_pages * sizeof(uint32_t)); 3754 if (tmp == NULL) { 3755 return -ENOMEM; 3756 } 3757 ctx->extent_pages = tmp; 3758 3759 /* Extent table entries contain md page numbers for extent pages. 3760 * Zeroes represent unallocated extent pages, those are run-length-encoded. 
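 *
 * E.g. (hypothetical values) an entry { page_idx = 37, num_pages = 1 }
 * followed by { page_idx = 0, num_pages = 3 } describes one allocated
 * extent page and then three not-yet-allocated ones.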
3761 */ 3762 for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) { 3763 if (desc_extent_table->extent_page[i].page_idx != 0) { 3764 ctx->extent_pages[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx; 3765 ctx->num_extent_pages += 1; 3766 } 3767 } 3768 } 3769 } else { 3770 /* Error */ 3771 return -EINVAL; 3772 } 3773 /* Advance to the next descriptor */ 3774 cur_desc += sizeof(*desc) + desc->length; 3775 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 3776 break; 3777 } 3778 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 3779 } 3780 return 0; 3781 } 3782 3783 static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page) 3784 { 3785 uint32_t crc; 3786 struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors; 3787 size_t desc_len; 3788 3789 crc = _spdk_blob_md_page_calc_crc(page); 3790 if (crc != page->crc) { 3791 return false; 3792 } 3793 3794 /* Extent page should always be of sequence num 0. */ 3795 if (page->sequence_num != 0) { 3796 return false; 3797 } 3798 3799 /* Descriptor type must be EXTENT_PAGE. */ 3800 if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) { 3801 return false; 3802 } 3803 3804 /* Descriptor length cannot exceed the page. */ 3805 desc_len = sizeof(*desc) + desc->length; 3806 if (desc_len > sizeof(page->descriptors)) { 3807 return false; 3808 } 3809 3810 /* It has to be the only descriptor in the page. */ 3811 if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) { 3812 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len); 3813 if (desc->length != 0) { 3814 return false; 3815 } 3816 } 3817 3818 return true; 3819 } 3820 3821 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx) 3822 { 3823 uint32_t crc; 3824 struct spdk_blob_md_page *page = ctx->page; 3825 3826 crc = _spdk_blob_md_page_calc_crc(page); 3827 if (crc != page->crc) { 3828 return false; 3829 } 3830 3831 /* First page of a sequence should match the blobid. 
 */
	if (page->sequence_num == 0 &&
	    _spdk_bs_page_to_blobid(ctx->cur_page) != page->id) {
		return false;
	}
	return true;
}

static void
_spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);

static void
_spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		_spdk_bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	_spdk_bs_load_complete(ctx);
}

static void
_spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		_spdk_bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	_spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_load_write_used_clusters_cpl);
}

static void
_spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;

	spdk_free(ctx->mask);
	ctx->mask = NULL;

	if (bserrno != 0) {
		_spdk_bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	_spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_load_write_used_blobids_cpl);
}

static void
_spdk_bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
{
	_spdk_bs_write_used_md(ctx->seq, ctx, _spdk_bs_load_write_used_pages_cpl);
}

static void
_spdk_bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
{
	uint64_t num_md_clusters;
	uint64_t i;

	ctx->in_page_chain = false;

	do {
		ctx->page_index++;
	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);

	if (ctx->page_index < ctx->super->md_len) {
		ctx->cur_page = ctx->page_index;
		_spdk_bs_load_replay_cur_md_page(ctx);
	} else {
		/* Claim all of the clusters used by the metadata */
		num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
		for (i = 0; i < num_md_clusters; i++) {
			_spdk_bs_claim_cluster(ctx->bs, i);
		}
		spdk_free(ctx->page);
		_spdk_bs_load_write_used_md(ctx);
	}
}

static void _spdk_bs_load_replay_extent_page(spdk_bs_sequence_t *seq, uint32_t page, void *cb_arg);

static void
_spdk_bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint32_t page_num;

	if (bserrno != 0) {
		_spdk_bs_load_ctx_fail(ctx, bserrno);
		return;
	}

	/* Extent pages are only read when they are referenced from in-chain metadata;
	 * the metadata is considered corrupt if the page read here is not a valid extent page.
*/ 3934 if (_spdk_bs_load_cur_extent_page_valid(ctx->page) != true) { 3935 _spdk_bs_load_ctx_fail(ctx, -EILSEQ); 3936 return; 3937 } 3938 3939 page_num = ctx->extent_pages[ctx->num_extent_pages - 1]; 3940 spdk_bit_array_set(ctx->bs->used_md_pages, page_num); 3941 if (_spdk_bs_load_replay_md_parse_page(ctx, ctx->page)) { 3942 _spdk_bs_load_ctx_fail(ctx, -EILSEQ); 3943 return; 3944 } 3945 3946 ctx->num_extent_pages--; 3947 if (ctx->num_extent_pages > 0) { 3948 _spdk_bs_load_replay_extent_page(seq, ctx->extent_pages[ctx->num_extent_pages - 1], ctx); 3949 return; 3950 } 3951 3952 free(ctx->extent_pages); 3953 ctx->extent_pages = NULL; 3954 3955 _spdk_bs_load_replay_md_chain_cpl(ctx); 3956 } 3957 3958 static void 3959 _spdk_bs_load_replay_extent_page(spdk_bs_sequence_t *seq, uint32_t page, void *cb_arg) 3960 { 3961 struct spdk_bs_load_ctx *ctx = cb_arg; 3962 uint64_t lba; 3963 3964 assert(page < ctx->super->md_len); 3965 lba = _spdk_bs_md_page_to_lba(ctx->bs, page); 3966 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 3967 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 3968 _spdk_bs_load_replay_extent_page_cpl, ctx); 3969 } 3970 3971 static void 3972 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 3973 { 3974 struct spdk_bs_load_ctx *ctx = cb_arg; 3975 uint32_t page_num; 3976 struct spdk_blob_md_page *page; 3977 3978 if (bserrno != 0) { 3979 _spdk_bs_load_ctx_fail(ctx, bserrno); 3980 return; 3981 } 3982 3983 page_num = ctx->cur_page; 3984 page = ctx->page; 3985 if (_spdk_bs_load_cur_md_page_valid(ctx) == true) { 3986 if (page->sequence_num == 0 || ctx->in_page_chain == true) { 3987 _spdk_bs_claim_md_page(ctx->bs, page_num); 3988 if (page->sequence_num == 0) { 3989 spdk_bit_array_set(ctx->bs->used_blobids, page_num); 3990 } 3991 if (_spdk_bs_load_replay_md_parse_page(ctx, page)) { 3992 _spdk_bs_load_ctx_fail(ctx, -EILSEQ); 3993 return; 3994 } 3995 if (page->next != SPDK_INVALID_MD_PAGE) { 3996 ctx->in_page_chain = true; 3997 ctx->cur_page = page->next; 3998 _spdk_bs_load_replay_cur_md_page(ctx); 3999 return; 4000 } 4001 if (ctx->num_extent_pages != 0) { 4002 /* Extent pages are read from last to first, 4003 * decreasing the num_extent_pages as they are read. 
 */
			_spdk_bs_load_replay_extent_page(seq, ctx->extent_pages[ctx->num_extent_pages - 1], ctx);
			return;
		}
	}
	_spdk_bs_load_replay_md_chain_cpl(ctx);
}

static void
_spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
{
	uint64_t lba;

	assert(ctx->cur_page < ctx->super->md_len);
	lba = _spdk_bs_md_page_to_lba(ctx->bs, ctx->cur_page);
	spdk_bs_sequence_read_dev(ctx->seq, ctx->page, lba,
				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
				  _spdk_bs_load_replay_md_cpl, ctx);
}

static void
_spdk_bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
{
	ctx->page_index = 0;
	ctx->cur_page = 0;
	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->page) {
		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}
	_spdk_bs_load_replay_cur_md_page(ctx);
}

static void
_spdk_bs_recover(struct spdk_bs_load_ctx *ctx)
{
	int rc;

	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
	if (rc < 0) {
		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
	if (rc < 0) {
		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
	if (rc < 0) {
		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
		return;
	}

	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
	_spdk_bs_load_replay_md(ctx);
}

static void
_spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_bs_load_ctx *ctx = cb_arg;
	uint32_t crc;
	int rc;
	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];

	if (ctx->super->version > SPDK_BS_VERSION ||
	    ctx->super->version < SPDK_BS_INITIAL_VERSION) {
		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
		return;
	}

	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(ctx->super->signature)) != 0) {
		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
		return;
	}

	crc = _spdk_blob_md_page_calc_crc(ctx->super);
	if (crc != ctx->super->crc) {
		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
		return;
	}

	if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n");
		SPDK_LOGDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_LOGDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		_spdk_bs_load_ctx_fail(ctx, -ENXIO);
		return;
	}

	if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) {
		SPDK_NOTICELOG("Size mismatch, dev size: %lu, blobstore size: %lu\n",
			       ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size);
		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
		return;
	}

	if (ctx->super->size == 0) {
		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
	}

	if (ctx->super->io_unit_size
== 0) { 4115 ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE; 4116 } 4117 4118 /* Parse the super block */ 4119 ctx->bs->clean = 1; 4120 ctx->bs->cluster_sz = ctx->super->cluster_size; 4121 ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size; 4122 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE; 4123 ctx->bs->io_unit_size = ctx->super->io_unit_size; 4124 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 4125 if (rc < 0) { 4126 _spdk_bs_load_ctx_fail(ctx, -ENOMEM); 4127 return; 4128 } 4129 ctx->bs->md_start = ctx->super->md_start; 4130 ctx->bs->md_len = ctx->super->md_len; 4131 ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up( 4132 ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster); 4133 ctx->bs->super_blob = ctx->super->super_blob; 4134 memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype)); 4135 4136 if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) { 4137 _spdk_bs_recover(ctx); 4138 } else { 4139 _spdk_bs_load_read_used_pages(ctx); 4140 } 4141 } 4142 4143 void 4144 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4145 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4146 { 4147 struct spdk_blob_store *bs; 4148 struct spdk_bs_cpl cpl; 4149 struct spdk_bs_load_ctx *ctx; 4150 struct spdk_bs_opts opts = {}; 4151 int err; 4152 4153 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev); 4154 4155 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4156 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen); 4157 dev->destroy(dev); 4158 cb_fn(cb_arg, NULL, -EINVAL); 4159 return; 4160 } 4161 4162 if (o) { 4163 opts = *o; 4164 } else { 4165 spdk_bs_opts_init(&opts); 4166 } 4167 4168 if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) { 4169 dev->destroy(dev); 4170 cb_fn(cb_arg, NULL, -EINVAL); 4171 return; 4172 } 4173 4174 err = _spdk_bs_alloc(dev, &opts, &bs); 4175 if (err) { 4176 dev->destroy(dev); 4177 cb_fn(cb_arg, NULL, err); 4178 return; 4179 } 4180 4181 ctx = calloc(1, sizeof(*ctx)); 4182 if (!ctx) { 4183 _spdk_bs_free(bs); 4184 cb_fn(cb_arg, NULL, -ENOMEM); 4185 return; 4186 } 4187 4188 ctx->bs = bs; 4189 ctx->iter_cb_fn = opts.iter_cb_fn; 4190 ctx->iter_cb_arg = opts.iter_cb_arg; 4191 4192 /* Allocate memory for the super block */ 4193 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 4194 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4195 if (!ctx->super) { 4196 free(ctx); 4197 _spdk_bs_free(bs); 4198 cb_fn(cb_arg, NULL, -ENOMEM); 4199 return; 4200 } 4201 4202 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4203 cpl.u.bs_handle.cb_fn = cb_fn; 4204 cpl.u.bs_handle.cb_arg = cb_arg; 4205 cpl.u.bs_handle.bs = bs; 4206 4207 ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4208 if (!ctx->seq) { 4209 spdk_free(ctx->super); 4210 free(ctx); 4211 _spdk_bs_free(bs); 4212 cb_fn(cb_arg, NULL, -ENOMEM); 4213 return; 4214 } 4215 4216 /* Read the super block */ 4217 spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 4218 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 4219 _spdk_bs_load_super_cpl, ctx); 4220 } 4221 4222 /* END spdk_bs_load */ 4223 4224 /* START spdk_bs_dump */ 4225 4226 struct spdk_bs_dump_ctx { 4227 struct spdk_blob_store *bs; 4228 struct spdk_bs_super_block *super; 4229 uint32_t cur_page; 4230 struct spdk_blob_md_page *page; 4231 spdk_bs_sequence_t *seq; 4232 FILE *fp; 4233 spdk_bs_dump_print_xattr print_xattr_fn; 4234 char xattr_name[4096]; 4235 }; 
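/*
 * For illustration only - a minimal sketch of driving the dump path defined
 * below. The bs_dev ("dev") and both callbacks are hypothetical placeholders,
 * assuming the spdk_bs_dump_print_xattr and spdk_bs_op_complete prototypes
 * from spdk/blob.h:
 *
 *	static void
 *	example_print_xattr(FILE *fp, const char *bstype, const char *name,
 *			    const void *value, size_t value_len)
 *	{
 *		// Print the raw value bytes; a real callback would decode
 *		// bstype-specific xattrs here.
 *		fprintf(fp, "%.*s", (int)value_len, (const char *)value);
 *	}
 *
 *	static void
 *	example_dump_done(void *cb_arg, int bserrno)
 *	{
 *		printf("bs dump completed: %d\n", bserrno);
 *	}
 *
 *	spdk_bs_dump(dev, stdout, example_print_xattr, example_dump_done, NULL);
 */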

static void
_spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno)
{
	spdk_free(ctx->super);

	/*
	 * We need to defer calling spdk_bs_call_cpl() until after
	 * dev destruction, so tuck these away for later use.
	 */
	ctx->bs->unload_err = bserrno;
	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;

	spdk_bs_sequence_finish(seq, 0);
	_spdk_bs_free(ctx->bs);
	free(ctx);
}

static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);

static void
_spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx)
{
	uint32_t page_idx = ctx->cur_page;
	struct spdk_blob_md_page *page = ctx->page;
	struct spdk_blob_md_descriptor *desc;
	size_t cur_desc = 0;
	uint32_t crc;

	fprintf(ctx->fp, "=========\n");
	fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx);
	fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id);

	crc = _spdk_blob_md_page_calc_crc(page);
	fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch");

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
			unsigned int i;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				if (desc_extent_rle->extents[i].cluster_idx != 0) {
					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
						desc_extent_rle->extents[i].cluster_idx);
				} else {
					fprintf(ctx->fp, "Unallocated Extent -");
				}
				fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length);
				fprintf(ctx->fp, "\n");
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page *desc_extent;
			unsigned int i;

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;

			for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
						desc_extent->cluster_idx[i]);
				} else {
					fprintf(ctx->fp, "Unallocated Extent");
				}
				fprintf(ctx->fp, "\n");
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			struct spdk_blob_md_descriptor_xattr *desc_xattr;
			uint32_t i;

			desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;

			if (desc_xattr->length !=
			    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
			    desc_xattr->name_length + desc_xattr->value_length) {
				/* Note the inconsistency rather than silently ignoring it */
				fprintf(ctx->fp, "XATTR: invalid descriptor length\n");
			}

			memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
			ctx->xattr_name[desc_xattr->name_length] = '\0';
			fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name);
			fprintf(ctx->fp, "       value = \"");
			ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
					    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
4328 desc_xattr->value_length); 4329 fprintf(ctx->fp, "\"\n"); 4330 for (i = 0; i < desc_xattr->value_length; i++) { 4331 if (i % 16 == 0) { 4332 fprintf(ctx->fp, " "); 4333 } 4334 fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i)); 4335 if ((i + 1) % 16 == 0) { 4336 fprintf(ctx->fp, "\n"); 4337 } 4338 } 4339 if (i % 16 != 0) { 4340 fprintf(ctx->fp, "\n"); 4341 } 4342 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) { 4343 /* TODO */ 4344 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) { 4345 /* TODO */ 4346 } else { 4347 /* Error */ 4348 } 4349 /* Advance to the next descriptor */ 4350 cur_desc += sizeof(*desc) + desc->length; 4351 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 4352 break; 4353 } 4354 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 4355 } 4356 } 4357 4358 static void 4359 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4360 { 4361 struct spdk_bs_dump_ctx *ctx = cb_arg; 4362 4363 if (bserrno != 0) { 4364 _spdk_bs_dump_finish(seq, ctx, bserrno); 4365 return; 4366 } 4367 4368 if (ctx->page->id != 0) { 4369 _spdk_bs_dump_print_md_page(ctx); 4370 } 4371 4372 ctx->cur_page++; 4373 4374 if (ctx->cur_page < ctx->super->md_len) { 4375 _spdk_bs_dump_read_md_page(seq, ctx); 4376 } else { 4377 spdk_free(ctx->page); 4378 _spdk_bs_dump_finish(seq, ctx, 0); 4379 } 4380 } 4381 4382 static void 4383 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg) 4384 { 4385 struct spdk_bs_dump_ctx *ctx = cb_arg; 4386 uint64_t lba; 4387 4388 assert(ctx->cur_page < ctx->super->md_len); 4389 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page); 4390 spdk_bs_sequence_read_dev(seq, ctx->page, lba, 4391 _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE), 4392 _spdk_bs_dump_read_md_page_cpl, ctx); 4393 } 4394 4395 static void 4396 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4397 { 4398 struct spdk_bs_dump_ctx *ctx = cb_arg; 4399 4400 fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature); 4401 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 4402 sizeof(ctx->super->signature)) != 0) { 4403 fprintf(ctx->fp, "(Mismatch)\n"); 4404 _spdk_bs_dump_finish(seq, ctx, bserrno); 4405 return; 4406 } else { 4407 fprintf(ctx->fp, "(OK)\n"); 4408 } 4409 fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version); 4410 fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc, 4411 (ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? 
"OK" : "Mismatch"); 4412 fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype); 4413 fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size); 4414 fprintf(ctx->fp, "Super Blob ID: "); 4415 if (ctx->super->super_blob == SPDK_BLOBID_INVALID) { 4416 fprintf(ctx->fp, "(None)\n"); 4417 } else { 4418 fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob); 4419 } 4420 fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean); 4421 fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start); 4422 fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len); 4423 fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start); 4424 fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len); 4425 fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start); 4426 fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len); 4427 fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start); 4428 fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len); 4429 4430 ctx->cur_page = 0; 4431 ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE, 4432 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4433 if (!ctx->page) { 4434 _spdk_bs_dump_finish(seq, ctx, -ENOMEM); 4435 return; 4436 } 4437 _spdk_bs_dump_read_md_page(seq, ctx); 4438 } 4439 4440 void 4441 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn, 4442 spdk_bs_op_complete cb_fn, void *cb_arg) 4443 { 4444 struct spdk_blob_store *bs; 4445 struct spdk_bs_cpl cpl; 4446 spdk_bs_sequence_t *seq; 4447 struct spdk_bs_dump_ctx *ctx; 4448 struct spdk_bs_opts opts = {}; 4449 int err; 4450 4451 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev); 4452 4453 spdk_bs_opts_init(&opts); 4454 4455 err = _spdk_bs_alloc(dev, &opts, &bs); 4456 if (err) { 4457 dev->destroy(dev); 4458 cb_fn(cb_arg, err); 4459 return; 4460 } 4461 4462 ctx = calloc(1, sizeof(*ctx)); 4463 if (!ctx) { 4464 _spdk_bs_free(bs); 4465 cb_fn(cb_arg, -ENOMEM); 4466 return; 4467 } 4468 4469 ctx->bs = bs; 4470 ctx->fp = fp; 4471 ctx->print_xattr_fn = print_xattr_fn; 4472 4473 /* Allocate memory for the super block */ 4474 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 4475 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4476 if (!ctx->super) { 4477 free(ctx); 4478 _spdk_bs_free(bs); 4479 cb_fn(cb_arg, -ENOMEM); 4480 return; 4481 } 4482 4483 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 4484 cpl.u.bs_basic.cb_fn = cb_fn; 4485 cpl.u.bs_basic.cb_arg = cb_arg; 4486 4487 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4488 if (!seq) { 4489 spdk_free(ctx->super); 4490 free(ctx); 4491 _spdk_bs_free(bs); 4492 cb_fn(cb_arg, -ENOMEM); 4493 return; 4494 } 4495 4496 /* Read the super block */ 4497 spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 4498 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 4499 _spdk_bs_dump_super_cpl, ctx); 4500 } 4501 4502 /* END spdk_bs_dump */ 4503 4504 /* START spdk_bs_init */ 4505 4506 struct spdk_bs_init_ctx { 4507 struct spdk_blob_store *bs; 4508 struct spdk_bs_super_block *super; 4509 }; 4510 4511 static void 4512 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4513 { 4514 struct spdk_bs_init_ctx *ctx = cb_arg; 4515 4516 spdk_free(ctx->super); 4517 free(ctx); 
4518 4519 spdk_bs_sequence_finish(seq, bserrno); 4520 } 4521 4522 static void 4523 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4524 { 4525 struct spdk_bs_init_ctx *ctx = cb_arg; 4526 4527 /* Write super block */ 4528 spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0), 4529 _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 4530 _spdk_bs_init_persist_super_cpl, ctx); 4531 } 4532 4533 void 4534 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 4535 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 4536 { 4537 struct spdk_bs_init_ctx *ctx; 4538 struct spdk_blob_store *bs; 4539 struct spdk_bs_cpl cpl; 4540 spdk_bs_sequence_t *seq; 4541 spdk_bs_batch_t *batch; 4542 uint64_t num_md_lba; 4543 uint64_t num_md_pages; 4544 uint64_t num_md_clusters; 4545 uint32_t i; 4546 struct spdk_bs_opts opts = {}; 4547 int rc; 4548 4549 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev); 4550 4551 if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) { 4552 SPDK_ERRLOG("unsupported dev block length of %d\n", 4553 dev->blocklen); 4554 dev->destroy(dev); 4555 cb_fn(cb_arg, NULL, -EINVAL); 4556 return; 4557 } 4558 4559 if (o) { 4560 opts = *o; 4561 } else { 4562 spdk_bs_opts_init(&opts); 4563 } 4564 4565 if (_spdk_bs_opts_verify(&opts) != 0) { 4566 dev->destroy(dev); 4567 cb_fn(cb_arg, NULL, -EINVAL); 4568 return; 4569 } 4570 4571 rc = _spdk_bs_alloc(dev, &opts, &bs); 4572 if (rc) { 4573 dev->destroy(dev); 4574 cb_fn(cb_arg, NULL, rc); 4575 return; 4576 } 4577 4578 if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) { 4579 /* By default, allocate 1 page per cluster. 4580 * Technically, this over-allocates metadata 4581 * because more metadata will reduce the number 4582 * of usable clusters. This can be addressed with 4583 * more complex math in the future. 4584 */ 4585 bs->md_len = bs->total_clusters; 4586 } else { 4587 bs->md_len = opts.num_md_pages; 4588 } 4589 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 4590 if (rc < 0) { 4591 _spdk_bs_free(bs); 4592 cb_fn(cb_arg, NULL, -ENOMEM); 4593 return; 4594 } 4595 4596 rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len); 4597 if (rc < 0) { 4598 _spdk_bs_free(bs); 4599 cb_fn(cb_arg, NULL, -ENOMEM); 4600 return; 4601 } 4602 4603 ctx = calloc(1, sizeof(*ctx)); 4604 if (!ctx) { 4605 _spdk_bs_free(bs); 4606 cb_fn(cb_arg, NULL, -ENOMEM); 4607 return; 4608 } 4609 4610 ctx->bs = bs; 4611 4612 /* Allocate memory for the super block */ 4613 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 4614 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4615 if (!ctx->super) { 4616 free(ctx); 4617 _spdk_bs_free(bs); 4618 cb_fn(cb_arg, NULL, -ENOMEM); 4619 return; 4620 } 4621 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 4622 sizeof(ctx->super->signature)); 4623 ctx->super->version = SPDK_BS_VERSION; 4624 ctx->super->length = sizeof(*ctx->super); 4625 ctx->super->super_blob = bs->super_blob; 4626 ctx->super->clean = 0; 4627 ctx->super->cluster_size = bs->cluster_sz; 4628 ctx->super->io_unit_size = bs->io_unit_size; 4629 memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype)); 4630 4631 /* Calculate how many pages the metadata consumes at the front 4632 * of the disk. 4633 */ 4634 4635 /* The super block uses 1 page */ 4636 num_md_pages = 1; 4637 4638 /* The used_md_pages mask requires 1 bit per metadata page, rounded 4639 * up to the nearest page, plus a header. 
4640 */ 4641 ctx->super->used_page_mask_start = num_md_pages; 4642 ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 4643 spdk_divide_round_up(bs->md_len, 8), 4644 SPDK_BS_PAGE_SIZE); 4645 num_md_pages += ctx->super->used_page_mask_len; 4646 4647 /* The used_clusters mask requires 1 bit per cluster, rounded 4648 * up to the nearest page, plus a header. 4649 */ 4650 ctx->super->used_cluster_mask_start = num_md_pages; 4651 ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 4652 spdk_divide_round_up(bs->total_clusters, 8), 4653 SPDK_BS_PAGE_SIZE); 4654 num_md_pages += ctx->super->used_cluster_mask_len; 4655 4656 /* The used_blobids mask requires 1 bit per metadata page, rounded 4657 * up to the nearest page, plus a header. 4658 */ 4659 ctx->super->used_blobid_mask_start = num_md_pages; 4660 ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) + 4661 spdk_divide_round_up(bs->md_len, 8), 4662 SPDK_BS_PAGE_SIZE); 4663 num_md_pages += ctx->super->used_blobid_mask_len; 4664 4665 /* The metadata region size was chosen above */ 4666 ctx->super->md_start = bs->md_start = num_md_pages; 4667 ctx->super->md_len = bs->md_len; 4668 num_md_pages += bs->md_len; 4669 4670 num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages); 4671 4672 ctx->super->size = dev->blockcnt * dev->blocklen; 4673 4674 ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super); 4675 4676 num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster); 4677 if (num_md_clusters > bs->total_clusters) { 4678 SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, " 4679 "please decrease number of pages reserved for metadata " 4680 "or increase cluster size.\n"); 4681 spdk_free(ctx->super); 4682 free(ctx); 4683 _spdk_bs_free(bs); 4684 cb_fn(cb_arg, NULL, -ENOMEM); 4685 return; 4686 } 4687 /* Claim all of the clusters used by the metadata */ 4688 for (i = 0; i < num_md_clusters; i++) { 4689 _spdk_bs_claim_cluster(bs, i); 4690 } 4691 4692 bs->total_data_clusters = bs->num_free_clusters; 4693 4694 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 4695 cpl.u.bs_handle.cb_fn = cb_fn; 4696 cpl.u.bs_handle.cb_arg = cb_arg; 4697 cpl.u.bs_handle.bs = bs; 4698 4699 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4700 if (!seq) { 4701 spdk_free(ctx->super); 4702 free(ctx); 4703 _spdk_bs_free(bs); 4704 cb_fn(cb_arg, NULL, -ENOMEM); 4705 return; 4706 } 4707 4708 batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx); 4709 4710 /* Clear metadata space */ 4711 spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba); 4712 4713 switch (opts.clear_method) { 4714 case BS_CLEAR_WITH_UNMAP: 4715 /* Trim data clusters */ 4716 spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba); 4717 break; 4718 case BS_CLEAR_WITH_WRITE_ZEROES: 4719 /* Write_zeroes to data clusters */ 4720 spdk_bs_batch_write_zeroes_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba); 4721 break; 4722 case BS_CLEAR_WITH_NONE: 4723 default: 4724 break; 4725 } 4726 4727 spdk_bs_batch_close(batch); 4728 } 4729 4730 /* END spdk_bs_init */ 4731 4732 /* START spdk_bs_destroy */ 4733 4734 static void 4735 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4736 { 4737 struct spdk_bs_init_ctx *ctx = cb_arg; 4738 struct spdk_blob_store *bs = ctx->bs; 4739 4740 /* 4741 * We need to defer calling spdk_bs_call_cpl() until after 4742 * dev destruction, so tuck these away for later use. 
4743 */ 4744 bs->unload_err = bserrno; 4745 memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 4746 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 4747 4748 spdk_bs_sequence_finish(seq, bserrno); 4749 4750 _spdk_bs_free(bs); 4751 free(ctx); 4752 } 4753 4754 void 4755 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, 4756 void *cb_arg) 4757 { 4758 struct spdk_bs_cpl cpl; 4759 spdk_bs_sequence_t *seq; 4760 struct spdk_bs_init_ctx *ctx; 4761 4762 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n"); 4763 4764 if (!TAILQ_EMPTY(&bs->blobs)) { 4765 SPDK_ERRLOG("Blobstore still has open blobs\n"); 4766 cb_fn(cb_arg, -EBUSY); 4767 return; 4768 } 4769 4770 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 4771 cpl.u.bs_basic.cb_fn = cb_fn; 4772 cpl.u.bs_basic.cb_arg = cb_arg; 4773 4774 ctx = calloc(1, sizeof(*ctx)); 4775 if (!ctx) { 4776 cb_fn(cb_arg, -ENOMEM); 4777 return; 4778 } 4779 4780 ctx->bs = bs; 4781 4782 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4783 if (!seq) { 4784 free(ctx); 4785 cb_fn(cb_arg, -ENOMEM); 4786 return; 4787 } 4788 4789 /* Write zeroes to the super block */ 4790 spdk_bs_sequence_write_zeroes_dev(seq, 4791 _spdk_bs_page_to_lba(bs, 0), 4792 _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)), 4793 _spdk_bs_destroy_trim_cpl, ctx); 4794 } 4795 4796 /* END spdk_bs_destroy */ 4797 4798 /* START spdk_bs_unload */ 4799 4800 static void 4801 _spdk_bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno) 4802 { 4803 spdk_bs_sequence_t *seq = ctx->seq; 4804 4805 spdk_free(ctx->super); 4806 4807 /* 4808 * We need to defer calling spdk_bs_call_cpl() until after 4809 * dev destruction, so tuck these away for later use. 4810 */ 4811 ctx->bs->unload_err = bserrno; 4812 memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl)); 4813 seq->cpl.type = SPDK_BS_CPL_TYPE_NONE; 4814 4815 spdk_bs_sequence_finish(seq, bserrno); 4816 4817 _spdk_bs_free(ctx->bs); 4818 free(ctx); 4819 } 4820 4821 static void 4822 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4823 { 4824 struct spdk_bs_load_ctx *ctx = cb_arg; 4825 4826 _spdk_bs_unload_finish(ctx, bserrno); 4827 } 4828 4829 static void 4830 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4831 { 4832 struct spdk_bs_load_ctx *ctx = cb_arg; 4833 4834 spdk_free(ctx->mask); 4835 4836 if (bserrno != 0) { 4837 _spdk_bs_unload_finish(ctx, bserrno); 4838 return; 4839 } 4840 4841 ctx->super->clean = 1; 4842 4843 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx); 4844 } 4845 4846 static void 4847 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4848 { 4849 struct spdk_bs_load_ctx *ctx = cb_arg; 4850 4851 spdk_free(ctx->mask); 4852 ctx->mask = NULL; 4853 4854 if (bserrno != 0) { 4855 _spdk_bs_unload_finish(ctx, bserrno); 4856 return; 4857 } 4858 4859 _spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_unload_write_used_clusters_cpl); 4860 } 4861 4862 static void 4863 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4864 { 4865 struct spdk_bs_load_ctx *ctx = cb_arg; 4866 4867 spdk_free(ctx->mask); 4868 ctx->mask = NULL; 4869 4870 if (bserrno != 0) { 4871 _spdk_bs_unload_finish(ctx, bserrno); 4872 return; 4873 } 4874 4875 _spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_unload_write_used_blobids_cpl); 4876 } 4877 4878 static void 4879 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4880 { 4881 
struct spdk_bs_load_ctx *ctx = cb_arg; 4882 4883 if (bserrno != 0) { 4884 _spdk_bs_unload_finish(ctx, bserrno); 4885 return; 4886 } 4887 4888 _spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl); 4889 } 4890 4891 void 4892 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 4893 { 4894 struct spdk_bs_cpl cpl; 4895 struct spdk_bs_load_ctx *ctx; 4896 4897 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n"); 4898 4899 if (!TAILQ_EMPTY(&bs->blobs)) { 4900 SPDK_ERRLOG("Blobstore still has open blobs\n"); 4901 cb_fn(cb_arg, -EBUSY); 4902 return; 4903 } 4904 4905 ctx = calloc(1, sizeof(*ctx)); 4906 if (!ctx) { 4907 cb_fn(cb_arg, -ENOMEM); 4908 return; 4909 } 4910 4911 ctx->bs = bs; 4912 4913 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 4914 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 4915 if (!ctx->super) { 4916 free(ctx); 4917 cb_fn(cb_arg, -ENOMEM); 4918 return; 4919 } 4920 4921 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 4922 cpl.u.bs_basic.cb_fn = cb_fn; 4923 cpl.u.bs_basic.cb_arg = cb_arg; 4924 4925 ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 4926 if (!ctx->seq) { 4927 spdk_free(ctx->super); 4928 free(ctx); 4929 cb_fn(cb_arg, -ENOMEM); 4930 return; 4931 } 4932 4933 /* Read super block */ 4934 spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 4935 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 4936 _spdk_bs_unload_read_super_cpl, ctx); 4937 } 4938 4939 /* END spdk_bs_unload */ 4940 4941 /* START spdk_bs_set_super */ 4942 4943 struct spdk_bs_set_super_ctx { 4944 struct spdk_blob_store *bs; 4945 struct spdk_bs_super_block *super; 4946 }; 4947 4948 static void 4949 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4950 { 4951 struct spdk_bs_set_super_ctx *ctx = cb_arg; 4952 4953 if (bserrno != 0) { 4954 SPDK_ERRLOG("Unable to write to super block of blobstore\n"); 4955 } 4956 4957 spdk_free(ctx->super); 4958 4959 spdk_bs_sequence_finish(seq, bserrno); 4960 4961 free(ctx); 4962 } 4963 4964 static void 4965 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 4966 { 4967 struct spdk_bs_set_super_ctx *ctx = cb_arg; 4968 4969 if (bserrno != 0) { 4970 SPDK_ERRLOG("Unable to read super block of blobstore\n"); 4971 spdk_free(ctx->super); 4972 spdk_bs_sequence_finish(seq, bserrno); 4973 free(ctx); 4974 return; 4975 } 4976 4977 _spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx); 4978 } 4979 4980 void 4981 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 4982 spdk_bs_op_complete cb_fn, void *cb_arg) 4983 { 4984 struct spdk_bs_cpl cpl; 4985 spdk_bs_sequence_t *seq; 4986 struct spdk_bs_set_super_ctx *ctx; 4987 4988 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n"); 4989 4990 ctx = calloc(1, sizeof(*ctx)); 4991 if (!ctx) { 4992 cb_fn(cb_arg, -ENOMEM); 4993 return; 4994 } 4995 4996 ctx->bs = bs; 4997 4998 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL, 4999 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA); 5000 if (!ctx->super) { 5001 free(ctx); 5002 cb_fn(cb_arg, -ENOMEM); 5003 return; 5004 } 5005 5006 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 5007 cpl.u.bs_basic.cb_fn = cb_fn; 5008 cpl.u.bs_basic.cb_arg = cb_arg; 5009 5010 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 5011 if (!seq) { 5012 spdk_free(ctx->super); 5013 free(ctx); 5014 cb_fn(cb_arg, -ENOMEM); 5015 return; 5016 } 5017 5018 bs->super_blob = blobid; 5019 5020 /* Read super block */ 5021 spdk_bs_sequence_read_dev(seq, 
ctx->super, _spdk_bs_page_to_lba(bs, 0), 5022 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 5023 _spdk_bs_set_super_read_cpl, ctx); 5024 } 5025 5026 /* END spdk_bs_set_super */ 5027 5028 void 5029 spdk_bs_get_super(struct spdk_blob_store *bs, 5030 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5031 { 5032 if (bs->super_blob == SPDK_BLOBID_INVALID) { 5033 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 5034 } else { 5035 cb_fn(cb_arg, bs->super_blob, 0); 5036 } 5037 } 5038 5039 uint64_t 5040 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 5041 { 5042 return bs->cluster_sz; 5043 } 5044 5045 uint64_t 5046 spdk_bs_get_page_size(struct spdk_blob_store *bs) 5047 { 5048 return SPDK_BS_PAGE_SIZE; 5049 } 5050 5051 uint64_t 5052 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs) 5053 { 5054 return bs->io_unit_size; 5055 } 5056 5057 uint64_t 5058 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 5059 { 5060 return bs->num_free_clusters; 5061 } 5062 5063 uint64_t 5064 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs) 5065 { 5066 return bs->total_data_clusters; 5067 } 5068 5069 static int 5070 spdk_bs_register_md_thread(struct spdk_blob_store *bs) 5071 { 5072 bs->md_channel = spdk_get_io_channel(bs); 5073 if (!bs->md_channel) { 5074 SPDK_ERRLOG("Failed to get IO channel.\n"); 5075 return -1; 5076 } 5077 5078 return 0; 5079 } 5080 5081 static int 5082 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs) 5083 { 5084 spdk_put_io_channel(bs->md_channel); 5085 5086 return 0; 5087 } 5088 5089 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 5090 { 5091 assert(blob != NULL); 5092 5093 return blob->id; 5094 } 5095 5096 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 5097 { 5098 assert(blob != NULL); 5099 5100 return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters); 5101 } 5102 5103 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob) 5104 { 5105 assert(blob != NULL); 5106 5107 return spdk_blob_get_num_pages(blob) * _spdk_bs_io_unit_per_page(blob->bs); 5108 } 5109 5110 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 5111 { 5112 assert(blob != NULL); 5113 5114 return blob->active.num_clusters; 5115 } 5116 5117 /* START spdk_bs_create_blob */ 5118 5119 static void 5120 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 5121 { 5122 struct spdk_blob *blob = cb_arg; 5123 5124 _spdk_blob_free(blob); 5125 5126 spdk_bs_sequence_finish(seq, bserrno); 5127 } 5128 5129 static int 5130 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs, 5131 bool internal) 5132 { 5133 uint64_t i; 5134 size_t value_len = 0; 5135 int rc; 5136 const void *value = NULL; 5137 if (xattrs->count > 0 && xattrs->get_value == NULL) { 5138 return -EINVAL; 5139 } 5140 for (i = 0; i < xattrs->count; i++) { 5141 xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len); 5142 if (value == NULL || value_len == 0) { 5143 return -EINVAL; 5144 } 5145 rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal); 5146 if (rc < 0) { 5147 return rc; 5148 } 5149 } 5150 return 0; 5151 } 5152 5153 static void 5154 _spdk_bs_create_blob(struct spdk_blob_store *bs, 5155 const struct spdk_blob_opts *opts, 5156 const struct spdk_blob_xattr_opts *internal_xattrs, 5157 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5158 { 5159 struct spdk_blob *blob; 5160 uint32_t page_idx; 5161 struct spdk_bs_cpl cpl; 5162 struct spdk_blob_opts opts_default; 5163 struct spdk_blob_xattr_opts internal_xattrs_default; 5164 
spdk_bs_sequence_t *seq; 5165 spdk_blob_id id; 5166 int rc; 5167 5168 assert(spdk_get_thread() == bs->md_thread); 5169 5170 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 5171 if (page_idx == UINT32_MAX) { 5172 cb_fn(cb_arg, 0, -ENOMEM); 5173 return; 5174 } 5175 spdk_bit_array_set(bs->used_blobids, page_idx); 5176 _spdk_bs_claim_md_page(bs, page_idx); 5177 5178 id = _spdk_bs_page_to_blobid(page_idx); 5179 5180 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx); 5181 5182 blob = _spdk_blob_alloc(bs, id); 5183 if (!blob) { 5184 cb_fn(cb_arg, 0, -ENOMEM); 5185 return; 5186 } 5187 5188 if (!opts) { 5189 spdk_blob_opts_init(&opts_default); 5190 opts = &opts_default; 5191 } 5192 5193 blob->use_extent_table = opts->use_extent_table; 5194 if (blob->use_extent_table) { 5195 blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE; 5196 } 5197 5198 if (!internal_xattrs) { 5199 _spdk_blob_xattrs_init(&internal_xattrs_default); 5200 internal_xattrs = &internal_xattrs_default; 5201 } 5202 5203 rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false); 5204 if (rc < 0) { 5205 _spdk_blob_free(blob); 5206 cb_fn(cb_arg, 0, rc); 5207 return; 5208 } 5209 5210 rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true); 5211 if (rc < 0) { 5212 _spdk_blob_free(blob); 5213 cb_fn(cb_arg, 0, rc); 5214 return; 5215 } 5216 5217 if (opts->thin_provision) { 5218 _spdk_blob_set_thin_provision(blob); 5219 } 5220 5221 _spdk_blob_set_clear_method(blob, opts->clear_method); 5222 5223 rc = _spdk_blob_resize(blob, opts->num_clusters); 5224 if (rc < 0) { 5225 _spdk_blob_free(blob); 5226 cb_fn(cb_arg, 0, rc); 5227 return; 5228 } 5229 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 5230 cpl.u.blobid.cb_fn = cb_fn; 5231 cpl.u.blobid.cb_arg = cb_arg; 5232 cpl.u.blobid.blobid = blob->id; 5233 5234 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 5235 if (!seq) { 5236 _spdk_blob_free(blob); 5237 cb_fn(cb_arg, 0, -ENOMEM); 5238 return; 5239 } 5240 5241 _spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob); 5242 } 5243 5244 void spdk_bs_create_blob(struct spdk_blob_store *bs, 5245 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5246 { 5247 _spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg); 5248 } 5249 5250 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts, 5251 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5252 { 5253 _spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg); 5254 } 5255 5256 /* END spdk_bs_create_blob */ 5257 5258 /* START blob_cleanup */ 5259 5260 struct spdk_clone_snapshot_ctx { 5261 struct spdk_bs_cpl cpl; 5262 int bserrno; 5263 bool frozen; 5264 5265 struct spdk_io_channel *channel; 5266 5267 /* Current cluster for inflate operation */ 5268 uint64_t cluster; 5269 5270 /* For inflation force allocation of all unallocated clusters and remove 5271 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */ 5272 bool allocate_all; 5273 5274 struct { 5275 spdk_blob_id id; 5276 struct spdk_blob *blob; 5277 } original; 5278 struct { 5279 spdk_blob_id id; 5280 struct spdk_blob *blob; 5281 } new; 5282 5283 /* xattrs specified for snapshot/clones only. They have no impact on 5284 * the original blobs xattrs. 
*/ 5285 const struct spdk_blob_xattr_opts *xattrs; 5286 }; 5287 5288 static void 5289 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno) 5290 { 5291 struct spdk_clone_snapshot_ctx *ctx = cb_arg; 5292 struct spdk_bs_cpl *cpl = &ctx->cpl; 5293 5294 if (bserrno != 0) { 5295 if (ctx->bserrno != 0) { 5296 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 5297 } else { 5298 ctx->bserrno = bserrno; 5299 } 5300 } 5301 5302 switch (cpl->type) { 5303 case SPDK_BS_CPL_TYPE_BLOBID: 5304 cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno); 5305 break; 5306 case SPDK_BS_CPL_TYPE_BLOB_BASIC: 5307 cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno); 5308 break; 5309 default: 5310 SPDK_UNREACHABLE(); 5311 break; 5312 } 5313 5314 free(ctx); 5315 } 5316 5317 static void 5318 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno) 5319 { 5320 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5321 struct spdk_blob *origblob = ctx->original.blob; 5322 5323 if (bserrno != 0) { 5324 if (ctx->bserrno != 0) { 5325 SPDK_ERRLOG("Unfreeze error %d\n", bserrno); 5326 } else { 5327 ctx->bserrno = bserrno; 5328 } 5329 } 5330 5331 ctx->original.id = origblob->id; 5332 origblob->locked_operation_in_progress = false; 5333 5334 spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 5335 } 5336 5337 static void 5338 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno) 5339 { 5340 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5341 struct spdk_blob *origblob = ctx->original.blob; 5342 5343 if (bserrno != 0) { 5344 if (ctx->bserrno != 0) { 5345 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 5346 } else { 5347 ctx->bserrno = bserrno; 5348 } 5349 } 5350 5351 if (ctx->frozen) { 5352 /* Unfreeze any outstanding I/O */ 5353 _spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx); 5354 } else { 5355 _spdk_bs_snapshot_unfreeze_cpl(ctx, 0); 5356 } 5357 5358 } 5359 5360 static void 5361 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno) 5362 { 5363 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5364 struct spdk_blob *newblob = ctx->new.blob; 5365 5366 if (bserrno != 0) { 5367 if (ctx->bserrno != 0) { 5368 SPDK_ERRLOG("Cleanup error %d\n", bserrno); 5369 } else { 5370 ctx->bserrno = bserrno; 5371 } 5372 } 5373 5374 ctx->new.id = newblob->id; 5375 spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 5376 } 5377 5378 /* END blob_cleanup */ 5379 5380 /* START spdk_bs_create_snapshot */ 5381 5382 static void 5383 _spdk_bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2) 5384 { 5385 uint64_t *cluster_temp; 5386 uint32_t *extent_page_temp; 5387 5388 cluster_temp = blob1->active.clusters; 5389 blob1->active.clusters = blob2->active.clusters; 5390 blob2->active.clusters = cluster_temp; 5391 5392 extent_page_temp = blob1->active.extent_pages; 5393 blob1->active.extent_pages = blob2->active.extent_pages; 5394 blob2->active.extent_pages = extent_page_temp; 5395 } 5396 5397 static void 5398 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno) 5399 { 5400 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5401 struct spdk_blob *origblob = ctx->original.blob; 5402 struct spdk_blob *newblob = ctx->new.blob; 5403 5404 if (bserrno != 0) { 5405 _spdk_bs_snapshot_swap_cluster_maps(newblob, origblob); 5406 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5407 
return; 5408 } 5409 5410 /* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */ 5411 bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true); 5412 if (bserrno != 0) { 5413 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5414 return; 5415 } 5416 5417 _spdk_bs_blob_list_add(ctx->original.blob); 5418 5419 spdk_blob_set_read_only(newblob); 5420 5421 /* sync snapshot metadata */ 5422 spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 5423 } 5424 5425 static void 5426 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno) 5427 { 5428 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5429 struct spdk_blob *origblob = ctx->original.blob; 5430 struct spdk_blob *newblob = ctx->new.blob; 5431 5432 if (bserrno != 0) { 5433 /* return cluster map back to original */ 5434 _spdk_bs_snapshot_swap_cluster_maps(newblob, origblob); 5435 5436 /* Newblob md sync failed. Valid clusters are only present in origblob. 5437 * Since I/O is frozen on origblob, not changes to zeroed out cluster map should have occured. 5438 * Newblob needs to be reverted to thin_provisioned state at creation to properly close. */ 5439 _spdk_blob_set_thin_provision(newblob); 5440 assert(spdk_mem_all_zero(newblob->active.clusters, 5441 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 5442 assert(spdk_mem_all_zero(newblob->active.extent_pages, 5443 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 5444 5445 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 5446 return; 5447 } 5448 5449 /* Set internal xattr for snapshot id */ 5450 bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true); 5451 if (bserrno != 0) { 5452 /* return cluster map back to original */ 5453 _spdk_bs_snapshot_swap_cluster_maps(newblob, origblob); 5454 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 5455 return; 5456 } 5457 5458 _spdk_bs_blob_list_remove(origblob); 5459 origblob->parent_id = newblob->id; 5460 5461 /* Create new back_bs_dev for snapshot */ 5462 origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob); 5463 if (origblob->back_bs_dev == NULL) { 5464 /* return cluster map back to original */ 5465 _spdk_bs_snapshot_swap_cluster_maps(newblob, origblob); 5466 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL); 5467 return; 5468 } 5469 5470 /* set clone blob as thin provisioned */ 5471 _spdk_blob_set_thin_provision(origblob); 5472 5473 _spdk_bs_blob_list_add(newblob); 5474 5475 /* sync clone metadata */ 5476 spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx); 5477 } 5478 5479 static void 5480 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc) 5481 { 5482 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5483 struct spdk_blob *origblob = ctx->original.blob; 5484 struct spdk_blob *newblob = ctx->new.blob; 5485 int bserrno; 5486 5487 if (rc != 0) { 5488 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc); 5489 return; 5490 } 5491 5492 ctx->frozen = true; 5493 5494 /* set new back_bs_dev for snapshot */ 5495 newblob->back_bs_dev = origblob->back_bs_dev; 5496 /* Set invalid flags from origblob */ 5497 newblob->invalid_flags = origblob->invalid_flags; 5498 5499 /* inherit parent from original blob if set */ 5500 newblob->parent_id = origblob->parent_id; 5501 if (origblob->parent_id != SPDK_BLOBID_INVALID) { 5502 /* Set internal xattr for snapshot id */ 5503 bserrno = _spdk_blob_set_xattr(newblob, BLOB_SNAPSHOT, 5504 
&origblob->parent_id, sizeof(spdk_blob_id), true); 5505 if (bserrno != 0) { 5506 _spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno); 5507 return; 5508 } 5509 } 5510 5511 /* swap cluster maps */ 5512 _spdk_bs_snapshot_swap_cluster_maps(newblob, origblob); 5513 5514 /* Set the clear method on the new blob to match the original. */ 5515 _spdk_blob_set_clear_method(newblob, origblob->clear_method); 5516 5517 /* sync snapshot metadata */ 5518 spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx); 5519 } 5520 5521 static void 5522 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5523 { 5524 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5525 struct spdk_blob *origblob = ctx->original.blob; 5526 struct spdk_blob *newblob = _blob; 5527 5528 if (bserrno != 0) { 5529 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5530 return; 5531 } 5532 5533 ctx->new.blob = newblob; 5534 assert(spdk_blob_is_thin_provisioned(newblob)); 5535 assert(spdk_mem_all_zero(newblob->active.clusters, 5536 newblob->active.num_clusters * sizeof(*newblob->active.clusters))); 5537 assert(spdk_mem_all_zero(newblob->active.extent_pages, 5538 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages))); 5539 5540 _spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx); 5541 } 5542 5543 static void 5544 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 5545 { 5546 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5547 struct spdk_blob *origblob = ctx->original.blob; 5548 5549 if (bserrno != 0) { 5550 _spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno); 5551 return; 5552 } 5553 5554 ctx->new.id = blobid; 5555 ctx->cpl.u.blobid.blobid = blobid; 5556 5557 spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx); 5558 } 5559 5560 5561 static void 5562 _spdk_bs_xattr_snapshot(void *arg, const char *name, 5563 const void **value, size_t *value_len) 5564 { 5565 assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0); 5566 5567 struct spdk_blob *blob = (struct spdk_blob *)arg; 5568 *value = &blob->id; 5569 *value_len = sizeof(blob->id); 5570 } 5571 5572 static void 5573 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5574 { 5575 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5576 struct spdk_blob_opts opts; 5577 struct spdk_blob_xattr_opts internal_xattrs; 5578 char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS }; 5579 5580 if (bserrno != 0) { 5581 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 5582 return; 5583 } 5584 5585 ctx->original.blob = _blob; 5586 5587 if (_blob->data_ro || _blob->md_ro) { 5588 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read only blob with id %lu\n", 5589 _blob->id); 5590 ctx->bserrno = -EINVAL; 5591 spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 5592 return; 5593 } 5594 5595 if (_blob->locked_operation_in_progress) { 5596 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot - another operation in progress\n"); 5597 ctx->bserrno = -EBUSY; 5598 spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 5599 return; 5600 } 5601 5602 _blob->locked_operation_in_progress = true; 5603 5604 spdk_blob_opts_init(&opts); 5605 _spdk_blob_xattrs_init(&internal_xattrs); 5606 5607 /* Change the size of new blob to the same as in original blob, 5608 * but do not allocate 
clusters */ 5609 opts.thin_provision = true; 5610 opts.num_clusters = spdk_blob_get_num_clusters(_blob); 5611 opts.use_extent_table = _blob->use_extent_table; 5612 5613 /* If there are any xattrs specified for snapshot, set them now */ 5614 if (ctx->xattrs) { 5615 memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs)); 5616 } 5617 /* Set internal xattr SNAPSHOT_IN_PROGRESS */ 5618 internal_xattrs.count = 1; 5619 internal_xattrs.ctx = _blob; 5620 internal_xattrs.names = xattrs_names; 5621 internal_xattrs.get_value = _spdk_bs_xattr_snapshot; 5622 5623 _spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs, 5624 _spdk_bs_snapshot_newblob_create_cpl, ctx); 5625 } 5626 5627 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid, 5628 const struct spdk_blob_xattr_opts *snapshot_xattrs, 5629 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 5630 { 5631 struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx)); 5632 5633 if (!ctx) { 5634 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM); 5635 return; 5636 } 5637 ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 5638 ctx->cpl.u.blobid.cb_fn = cb_fn; 5639 ctx->cpl.u.blobid.cb_arg = cb_arg; 5640 ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID; 5641 ctx->bserrno = 0; 5642 ctx->frozen = false; 5643 ctx->original.id = blobid; 5644 ctx->xattrs = snapshot_xattrs; 5645 5646 spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx); 5647 } 5648 /* END spdk_bs_create_snapshot */ 5649 5650 /* START spdk_bs_create_clone */ 5651 5652 static void 5653 _spdk_bs_xattr_clone(void *arg, const char *name, 5654 const void **value, size_t *value_len) 5655 { 5656 assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0); 5657 5658 struct spdk_blob *blob = (struct spdk_blob *)arg; 5659 *value = &blob->id; 5660 *value_len = sizeof(blob->id); 5661 } 5662 5663 static void 5664 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5665 { 5666 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5667 struct spdk_blob *clone = _blob; 5668 5669 ctx->new.blob = clone; 5670 _spdk_bs_blob_list_add(clone); 5671 5672 spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx); 5673 } 5674 5675 static void 5676 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno) 5677 { 5678 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5679 5680 ctx->cpl.u.blobid.blobid = blobid; 5681 spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx); 5682 } 5683 5684 static void 5685 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno) 5686 { 5687 struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg; 5688 struct spdk_blob_opts opts; 5689 struct spdk_blob_xattr_opts internal_xattrs; 5690 char *xattr_names[] = { BLOB_SNAPSHOT }; 5691 5692 if (bserrno != 0) { 5693 _spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno); 5694 return; 5695 } 5696 5697 ctx->original.blob = _blob; 5698 5699 if (!_blob->data_ro || !_blob->md_ro) { 5700 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Clone not from read-only blob\n"); 5701 ctx->bserrno = -EINVAL; 5702 spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx); 5703 return; 5704 } 5705 5706 if (_blob->locked_operation_in_progress) { 5707 SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone - another operation in progress\n"); 5708 ctx->bserrno = -EBUSY; 5709 spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, 
/* START spdk_bs_create_clone */

static void
_spdk_bs_xattr_clone(void *arg, const char *name,
		     const void **value, size_t *value_len)
{
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
}

static void
_spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *clone = _blob;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.blob = clone;
	_spdk_bs_blob_list_add(clone);

	spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
_spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->cpl.u.blobid.blobid = blobid;
	spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx);
}

static void
_spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	char *xattr_names[] = { BLOB_SNAPSHOT };

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;

	if (!_blob->data_ro || !_blob->md_ro) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone - blob is not read-only\n");
		ctx->bserrno = -EINVAL;
		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	spdk_blob_opts_init(&opts);
	_spdk_blob_xattrs_init(&internal_xattrs);

	opts.thin_provision = true;
	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
	opts.use_extent_table = _blob->use_extent_table;
	if (ctx->xattrs) {
		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
	}

	/* Set internal xattr BLOB_SNAPSHOT */
	internal_xattrs.count = 1;
	internal_xattrs.ctx = _blob;
	internal_xattrs.names = xattr_names;
	internal_xattrs.get_value = _spdk_bs_xattr_clone;

	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
			     _spdk_bs_clone_newblob_create_cpl, ctx);
}

void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
			  const struct spdk_blob_xattr_opts *clone_xattrs,
			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
	ctx->cpl.u.blobid.cb_fn = cb_fn;
	ctx->cpl.u.blobid.cb_arg = cb_arg;
	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
	ctx->bserrno = 0;
	ctx->xattrs = clone_xattrs;
	ctx->original.id = blobid;

	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx);
}

/* END spdk_bs_create_clone */
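/*
 * Example usage (an illustrative sketch, not part of the library): create a
 * writable clone of a snapshot. The source blob must already be read-only
 * (e.g. a snapshot created above); 'clone_done' is a hypothetical callback.
 *
 *	static void
 *	clone_done(void *cb_arg, spdk_blob_id clone_id, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			// 'clone_id' is a thin-provisioned blob backed by the snapshot.
 *		}
 *	}
 *
 *	spdk_bs_create_clone(bs, snapshot_id, NULL, clone_done, NULL);
 */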
/* START spdk_bs_inflate_blob */

static void
_spdk_bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	assert(_parent != NULL);

	_spdk_bs_blob_list_remove(_blob);
	_blob->parent_id = _parent->id;
	_spdk_blob_set_xattr(_blob, BLOB_SNAPSHOT, &_blob->parent_id,
			     sizeof(spdk_blob_id), true);

	_blob->back_bs_dev->destroy(_blob->back_bs_dev);
	_blob->back_bs_dev = spdk_bs_create_blob_bs_dev(_parent);
	_spdk_bs_blob_list_add(_blob);

	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
}

static void
_spdk_bs_inflate_blob_done(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;
	struct spdk_blob *_parent;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	if (ctx->allocate_all) {
		/* remove thin provisioning */
		_spdk_bs_blob_list_remove(_blob);
		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
		_blob->back_bs_dev = NULL;
		_blob->parent_id = SPDK_BLOBID_INVALID;
	} else {
		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
			/* We must change the parent of the inflated blob */
			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
					  _spdk_bs_inflate_blob_set_parent_cpl, ctx);
			return;
		}

		_spdk_bs_blob_list_remove(_blob);
		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
		_blob->parent_id = SPDK_BLOBID_INVALID;
		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
		_blob->back_bs_dev = spdk_bs_create_zeroes_dev();
	}

	_blob->state = SPDK_BLOB_STATE_DIRTY;
	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
}

/* Check if a cluster needs allocation */
static inline bool
_spdk_bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
{
	struct spdk_blob_bs_dev *b;

	assert(blob != NULL);

	if (blob->active.clusters[cluster] != 0) {
		/* Cluster is already allocated */
		return false;
	}

	if (blob->parent_id == SPDK_BLOBID_INVALID) {
		/* Blob has no parent blob */
		return allocate_all;
	}

	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	return (allocate_all || b->blob->active.clusters[cluster] != 0);
}

static void
_spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *_blob = ctx->original.blob;
	uint64_t offset;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
		if (_spdk_bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
			break;
		}
	}

	if (ctx->cluster < _blob->active.num_clusters) {
		offset = _spdk_bs_cluster_to_lba(_blob->bs, ctx->cluster);

		/* We may safely advance ctx->cluster before issuing the write -
		 * it is not examined again until the write completes. */
		ctx->cluster++;

		/* Use a zero-length write to touch the cluster and trigger its allocation */
		spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0,
				   _spdk_bs_inflate_blob_touch_next, ctx);
	} else {
		_spdk_bs_inflate_blob_done(cb_arg, bserrno);
	}
}

static void
_spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	uint64_t lfc; /* lowest free cluster */
	uint64_t i;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
		return;
	}

	ctx->original.blob = _blob;

	if (_blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot inflate blob - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
		return;
	}

	_blob->locked_operation_in_progress = true;

	if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) {
		/* This blob has no parent, so we cannot decouple it. */
		SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
		return;
	}

	if (spdk_blob_is_thin_provisioned(_blob) == false) {
		/* This blob is not thin provisioned, so there is nothing to inflate. */
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0);
		return;
	}

	/* Do two passes - one here to verify that enough free clusters exist,
	 * and a second (the zero-length writes issued from
	 * _spdk_bs_inflate_blob_touch_next) to actually claim them.
	 */
	lfc = 0;
	for (i = 0; i < _blob->active.num_clusters; i++) {
		if (_spdk_bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
			lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc);
			if (lfc == UINT32_MAX) {
				/* No more free clusters. Cannot satisfy the request. */
				_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
				return;
			}
			lfc++;
		}
	}

	ctx->cluster = 0;
	_spdk_bs_inflate_blob_touch_next(ctx, 0);
}

static void
_spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
		      spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
	ctx->bserrno = 0;
	ctx->original.id = blobid;
	ctx->channel = channel;
	ctx->allocate_all = allocate_all;

	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx);
}

void
spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
}

void
spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
}
/* END spdk_bs_inflate_blob */
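/*
 * Example usage (an illustrative sketch, not part of the library): inflate a
 * thin-provisioned clone so it no longer depends on its parent. 'channel' is
 * an I/O channel from spdk_bs_alloc_io_channel(); 'op_done' is a hypothetical
 * spdk_blob_op_complete callback.
 *
 *	// Allocate every cluster and drop the parent entirely:
 *	spdk_bs_inflate_blob(bs, channel, clone_id, op_done, NULL);
 *
 *	// Or only allocate the clusters backed by the immediate parent, and
 *	// re-parent the blob to its grandparent (if any):
 *	spdk_bs_blob_decouple_parent(bs, channel, clone_id, op_done, NULL);
 */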
/* START spdk_blob_resize */
struct spdk_bs_resize_ctx {
	spdk_blob_op_complete cb_fn;
	void *cb_arg;
	struct spdk_blob *blob;
	uint64_t sz;
	int rc;
};

static void
_spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc)
{
	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;

	if (rc != 0) {
		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
	}

	if (ctx->rc != 0) {
		SPDK_ERRLOG("Resize failed, ctx->rc=%d\n", ctx->rc);
		rc = ctx->rc;
	}

	ctx->blob->locked_operation_in_progress = false;

	ctx->cb_fn(ctx->cb_arg, rc);
	free(ctx);
}

static void
_spdk_bs_resize_freeze_cpl(void *cb_arg, int rc)
{
	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;

	if (rc != 0) {
		ctx->blob->locked_operation_in_progress = false;
		ctx->cb_fn(ctx->cb_arg, rc);
		free(ctx);
		return;
	}

	ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz);

	_spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx);
}

void
spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_resize_ctx *ctx;

	_spdk_blob_verify_md_op(blob);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz);

	if (blob->md_ro) {
		cb_fn(cb_arg, -EPERM);
		return;
	}

	if (sz == blob->active.num_clusters) {
		cb_fn(cb_arg, 0);
		return;
	}

	if (blob->locked_operation_in_progress) {
		cb_fn(cb_arg, -EBUSY);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	blob->locked_operation_in_progress = true;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->blob = blob;
	ctx->sz = sz;
	_spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx);
}

/* END spdk_blob_resize */
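/*
 * Example usage (an illustrative sketch, not part of the library): grow a blob
 * to 'new_sz' clusters and persist the new size. Note that spdk_blob_resize()
 * only changes in-memory state; a subsequent spdk_blob_sync_md() is required
 * to make the new size durable. 'resize_done' and 'sync_done' are hypothetical
 * callbacks.
 *
 *	static void
 *	resize_done(void *cb_arg, int bserrno)
 *	{
 *		struct spdk_blob *blob = cb_arg;
 *
 *		if (bserrno == 0) {
 *			spdk_blob_sync_md(blob, sync_done, NULL);
 *		}
 *	}
 *
 *	spdk_blob_resize(blob, new_sz, resize_done, blob);
 */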
/* START spdk_bs_delete_blob */

static void
_spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;

	spdk_bs_sequence_finish(seq, bserrno);
}

static void
_spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno != 0) {
		/*
		 * We already removed this blob from the blobstore tailq, so
		 * we need to free it here since this is the last reference
		 * to it.
		 */
		_spdk_blob_free(blob);
		_spdk_bs_delete_close_cpl(seq, bserrno);
		return;
	}

	/*
	 * This will immediately decrement the ref_count and call
	 * the completion routine since the metadata state is clean.
	 * By calling spdk_blob_close, we reduce the number of call
	 * points into code that touches the blob->open_ref count
	 * and the blobstore's blob list.
	 */
	spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq);
}
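/*
 * Deleting a snapshot that still has a clone is more involved than deleting a
 * regular blob. In rough outline (see the callbacks below for the details),
 * the sequence is:
 *   1. Open the clone and freeze its I/O.
 *   2. Mark the snapshot with the SNAPSHOT_PENDING_REMOVAL xattr and sync its
 *      metadata, so that a power failure mid-way can be recovered from.
 *   3. Copy the snapshot's cluster map entries into the clone's unallocated
 *      entries, re-parent the clone, and sync the clone's metadata.
 *   4. Remove the now-shared entries from the snapshot's cluster map and sync
 *      the snapshot's metadata.
 *   5. Unfreeze the clone's I/O, close both blobs, and finish the deletion.
 * Each step has a corresponding cleanup path that restores the previous state
 * if an error occurs.
 */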
struct delete_snapshot_ctx {
	struct spdk_blob_list *parent_snapshot_entry;
	struct spdk_blob *snapshot;
	bool snapshot_md_ro;
	struct spdk_blob *clone;
	bool clone_md_ro;
	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
	int bserrno;
};

static void
_spdk_delete_blob_cleanup_finish(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
	}

	assert(ctx != NULL);

	if (bserrno != 0 && ctx->bserrno == 0) {
		ctx->bserrno = bserrno;
	}

	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
	free(ctx);
}

static void
_spdk_delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		ctx->bserrno = bserrno;
		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
	}

	if (ctx->bserrno != 0) {
		/* Deletion failed - put the snapshot back on the blobstore's blob list */
		assert(_spdk_blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL);
		TAILQ_INSERT_HEAD(&ctx->snapshot->bs->blobs, ctx->snapshot, link);
	}

	ctx->snapshot->locked_operation_in_progress = false;
	ctx->snapshot->md_ro = ctx->snapshot_md_ro;

	spdk_blob_close(ctx->snapshot, _spdk_delete_blob_cleanup_finish, ctx);
}

static void
_spdk_delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	ctx->clone->locked_operation_in_progress = false;
	ctx->clone->md_ro = ctx->clone_md_ro;

	spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
}

static void
_spdk_delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno) {
		ctx->bserrno = bserrno;
		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	ctx->clone->locked_operation_in_progress = false;
	spdk_blob_close(ctx->clone, _spdk_delete_blob_cleanup_finish, ctx);
}

static void
_spdk_delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	struct spdk_blob_list *parent_snapshot_entry = NULL;
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;
	struct spdk_blob_list *snapshot_clone_entry = NULL;

	if (bserrno) {
		SPDK_ERRLOG("Failed to sync MD on blob\n");
		ctx->bserrno = bserrno;
		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	/* Get the snapshot entry for the snapshot we want to remove */
	snapshot_entry = _spdk_bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);

	assert(snapshot_entry != NULL);

	/* Remove the clone entry in this snapshot (at this point there can be only one clone) */
	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
	assert(clone_entry != NULL);
	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
	snapshot_entry->clone_count--;
	assert(TAILQ_EMPTY(&snapshot_entry->clones));

	if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) {
		/* This snapshot is at the same time a clone of another snapshot - we need to
		 * update the parent snapshot (remove the current clone, add the new one
		 * inherited from the snapshot that is being removed) */

		/* Get the snapshot entry for the parent snapshot and the clone entry within
		 * that snapshot for the snapshot we are removing */
		_spdk_blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
				&snapshot_clone_entry);

		/* Switch the clone entry in the parent snapshot */
		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
		free(snapshot_clone_entry);
	} else {
		/* No parent snapshot - just remove the clone entry */
		free(clone_entry);
	}

	/* Restore md_ro flags */
	ctx->clone->md_ro = ctx->clone_md_ro;
	ctx->snapshot->md_ro = ctx->snapshot_md_ro;

	_spdk_blob_unfreeze_io(ctx->clone, _spdk_delete_snapshot_unfreeze_cpl, ctx);
}

static void
_spdk_delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	uint64_t i;

	ctx->snapshot->md_ro = false;

	if (bserrno) {
		SPDK_ERRLOG("Failed to sync MD on clone\n");
		ctx->bserrno = bserrno;

		/* Restore snapshot to previous state */
		bserrno = _spdk_blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
		if (bserrno != 0) {
			_spdk_delete_snapshot_cleanup_clone(ctx, bserrno);
			return;
		}

		spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_cleanup_clone, ctx);
		return;
	}

	/* Clear cluster map entries for the snapshot */
	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
			ctx->snapshot->active.clusters[i] = 0;
		}
	}
	for (i = 0; i < ctx->snapshot->active.num_extent_pages &&
	     i < ctx->clone->active.num_extent_pages; i++) {
		if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) {
			ctx->snapshot->active.extent_pages[i] = 0;
		}
	}

	_spdk_blob_set_thin_provision(ctx->snapshot);
	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;

	if (ctx->parent_snapshot_entry != NULL) {
		ctx->snapshot->back_bs_dev = NULL;
	}

	spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_cpl, ctx);
}
static void
_spdk_delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;
	uint64_t i;

	/* Temporarily override md_ro flag for clone for MD modification */
	ctx->clone_md_ro = ctx->clone->md_ro;
	ctx->clone->md_ro = false;

	if (bserrno) {
		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
		ctx->bserrno = bserrno;
		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	/* Copy snapshot map to clone map (only for clusters not yet allocated in the clone) */
	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
		if (ctx->clone->active.clusters[i] == 0) {
			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
		}
	}
	for (i = 0; i < ctx->snapshot->active.num_extent_pages &&
	     i < ctx->clone->active.num_extent_pages; i++) {
		if (ctx->clone->active.extent_pages[i] == 0) {
			ctx->clone->active.extent_pages[i] = ctx->snapshot->active.extent_pages[i];
		}
	}

	/* Delete the old backing bs_dev from the clone (it refers to the snapshot being removed) */
	ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev);

	/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
	if (ctx->parent_snapshot_entry != NULL) {
		/* ...to the parent snapshot */
		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
		_spdk_blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
				     sizeof(spdk_blob_id), true);
	} else {
		/* ...to an invalid blob ID and a zeroes dev */
		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
		ctx->clone->back_bs_dev = spdk_bs_create_zeroes_dev();
		_spdk_blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
	}

	spdk_blob_sync_md(ctx->clone, _spdk_delete_snapshot_sync_clone_cpl, ctx);
}

static void
_spdk_delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno) {
		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
		ctx->bserrno = bserrno;
		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	/* Temporarily override md_ro flag for snapshot for MD modification */
	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
	ctx->snapshot->md_ro = false;

	/* Mark blob as pending removal for power-failure safety; use the clone id for recovery */
	ctx->bserrno = _spdk_blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
					    sizeof(spdk_blob_id), true);
	if (ctx->bserrno != 0) {
		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
		return;
	}

	spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_xattr_cpl, ctx);
}

static void
_spdk_delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
{
	struct delete_snapshot_ctx *ctx = cb_arg;

	if (bserrno) {
		SPDK_ERRLOG("Failed to open clone\n");
		ctx->bserrno = bserrno;
		_spdk_delete_snapshot_cleanup_snapshot(ctx, 0);
		return;
	}

	ctx->clone = clone;

	if (clone->locked_operation_in_progress) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress on its clone\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
		return;
	}

	clone->locked_operation_in_progress = true;

	_spdk_blob_freeze_io(clone, _spdk_delete_snapshot_freeze_io_cb, ctx);
}
static void
_spdk_update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;
	struct spdk_blob_list *snapshot_clone_entry = NULL;

	/* Get the snapshot entry for the snapshot we want to remove */
	snapshot_entry = _spdk_bs_get_snapshot_entry(snapshot->bs, snapshot->id);

	assert(snapshot_entry != NULL);

	/* Get the clone of the snapshot (at this point there can be only one clone) */
	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
	assert(snapshot_entry->clone_count == 1);
	assert(clone_entry != NULL);

	/* Get the snapshot entry for the parent snapshot and the clone entry within that
	 * snapshot for the snapshot we are removing */
	_spdk_blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
			&snapshot_clone_entry);

	spdk_bs_open_blob(snapshot->bs, clone_entry->id, _spdk_delete_snapshot_open_clone_cb, ctx);
}

static void
_spdk_bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;
	struct spdk_blob_list *snapshot_entry = NULL;
	uint32_t page_num;

	if (bserrno) {
		SPDK_ERRLOG("Failed to remove blob\n");
		spdk_bs_sequence_finish(seq, bserrno);
		return;
	}

	/* Remove snapshot from the list */
	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry != NULL) {
		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
		free(snapshot_entry);
	}

	page_num = _spdk_bs_blobid_to_page(blob->id);
	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->active.num_pages = 0;
	_spdk_blob_resize(blob, 0);

	_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob);
}

static int
_spdk_bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;
	struct spdk_blob *clone = NULL;
	bool has_one_clone = false;

	/* Check if this is a snapshot with clones */
	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry != NULL) {
		if (snapshot_entry->clone_count > 1) {
			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
			return -EBUSY;
		} else if (snapshot_entry->clone_count == 1) {
			has_one_clone = true;
		}
	}

	/* Check if someone has this blob open (besides this delete context):
	 * - open_ref == 1 - only this context opened the blob, so it is ok to remove it
	 * - open_ref <= 2 && has_one_clone == true - a clone is holding the snapshot open
	 *   and that is ok, because we will update it accordingly */
	if (blob->open_ref <= 2 && has_one_clone) {
		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
		assert(clone_entry != NULL);
		clone = _spdk_blob_lookup(blob->bs, clone_entry->id);

		if (blob->open_ref == 2 && clone == NULL) {
			/* Clone is closed and someone else opened this blob */
			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
			return -EBUSY;
		}

		*update_clone = true;
		return 0;
	}

	if (blob->open_ref > 1) {
		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
		return -EBUSY;
	}

	assert(has_one_clone == false);
	*update_clone = false;
	return 0;
}
static void
_spdk_bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;

	spdk_bs_sequence_finish(seq, -ENOMEM);
}

static void
_spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	spdk_bs_sequence_t *seq = cb_arg;
	struct delete_snapshot_ctx *ctx;
	bool update_clone = false;

	if (bserrno != 0) {
		spdk_bs_sequence_finish(seq, bserrno);
		return;
	}

	_spdk_blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		spdk_blob_close(blob, _spdk_bs_delete_enomem_close_cpl, seq);
		return;
	}

	ctx->snapshot = blob;
	ctx->cb_fn = _spdk_bs_delete_blob_finish;
	ctx->cb_arg = seq;

	/* Check if the blob can be removed and if it is a snapshot with a clone on top of it */
	ctx->bserrno = _spdk_bs_is_blob_deletable(blob, &update_clone);
	if (ctx->bserrno) {
		spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
		return;
	}

	if (blob->locked_operation_in_progress) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress\n");
		ctx->bserrno = -EBUSY;
		spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
		return;
	}

	blob->locked_operation_in_progress = true;

	/*
	 * Remove the blob from the blob_store list now, to ensure it does not
	 * get returned after this point by _spdk_blob_lookup().
	 */
	TAILQ_REMOVE(&blob->bs->blobs, blob, link);

	if (update_clone) {
		/* This blob is a snapshot with an active clone - update the clone first */
		_spdk_update_clone_on_snapshot_deletion(blob, ctx);
	} else {
		/* This blob does not have any clones - just remove it */
		_spdk_bs_blob_list_remove(blob);
		_spdk_bs_delete_blob_finish(seq, blob, 0);
		free(ctx);
	}
}
void
spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
		    spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid);

	assert(spdk_get_thread() == bs->md_thread);

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq);
}

/* END spdk_bs_delete_blob */
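/*
 * Example usage (an illustrative sketch, not part of the library): delete a
 * blob by ID from the metadata thread. 'delete_done' is a hypothetical
 * spdk_blob_op_complete callback; any other open references (besides a single
 * clone of a snapshot) must be closed first, or the call fails with -EBUSY.
 *
 *	static void
 *	delete_done(void *cb_arg, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("Blob deletion failed: %d\n", bserrno);
 *		}
 *	}
 *
 *	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);
 */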
/* START spdk_bs_open_blob */

static void
_spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno != 0) {
		_spdk_blob_free(blob);
		seq->cpl.u.blob_handle.blob = NULL;
		spdk_bs_sequence_finish(seq, bserrno);
		return;
	}

	blob->open_ref++;

	TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link);

	spdk_bs_sequence_finish(seq, bserrno);
}

static void _spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
			       struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_blob *blob;
	struct spdk_bs_cpl cpl;
	struct spdk_blob_open_opts opts_default;
	spdk_bs_sequence_t *seq;
	uint32_t page_num;

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid);
	assert(spdk_get_thread() == bs->md_thread);

	page_num = _spdk_bs_blobid_to_page(blobid);
	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
		/* Invalid blobid */
		cb_fn(cb_arg, NULL, -ENOENT);
		return;
	}

	blob = _spdk_blob_lookup(bs, blobid);
	if (blob) {
		blob->open_ref++;
		cb_fn(cb_arg, blob, 0);
		return;
	}

	blob = _spdk_blob_alloc(bs, blobid);
	if (!blob) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	if (!opts) {
		spdk_blob_open_opts_init(&opts_default);
		opts = &opts_default;
	}

	blob->clear_method = opts->clear_method;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
	cpl.u.blob_handle.cb_fn = cb_fn;
	cpl.u.blob_handle.cb_arg = cb_arg;
	cpl.u.blob_handle.blob = blob;

	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
	if (!seq) {
		_spdk_blob_free(blob);
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	_spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob);
}

void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
		       spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	_spdk_bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
}

void spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
			   struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	_spdk_bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
}

/* END spdk_bs_open_blob */
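/*
 * Example usage (an illustrative sketch, not part of the library): open a blob
 * with non-default open options. 'open_done' is a hypothetical
 * spdk_blob_op_with_handle_complete callback.
 *
 *	static void
 *	open_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			// 'blob' stays valid until spdk_blob_close() is called.
 *		}
 *	}
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts);
 *	opts.clear_method = BLOB_CLEAR_WITH_UNMAP;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, open_done, NULL);
 */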
/* START spdk_blob_set_read_only */
int spdk_blob_set_read_only(struct spdk_blob *blob)
{
	_spdk_blob_verify_md_op(blob);

	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	return 0;
}
/* END spdk_blob_set_read_only */

/* START spdk_blob_sync_md */

static void
_spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
		blob->data_ro = true;
		blob->md_ro = true;
	}

	spdk_bs_sequence_finish(seq, bserrno);
}

static void
_spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	_spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob);
}

void
spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_verify_md_op(blob);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id);

	if (blob->md_ro) {
		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
		cb_fn(cb_arg, 0);
		return;
	}

	_spdk_blob_sync_md(blob, cb_fn, cb_arg);
}

/* END spdk_blob_sync_md */
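/*
 * Example usage (an illustrative sketch, not part of the library): make a blob
 * read-only. spdk_blob_set_read_only() only marks the in-memory metadata
 * dirty; the flags take effect (data_ro/md_ro become true) once the metadata
 * is synced. 'sync_done' is a hypothetical callback.
 *
 *	spdk_blob_set_read_only(blob);
 *	spdk_blob_sync_md(blob, sync_done, NULL);
 */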
struct spdk_blob_insert_cluster_ctx {
	struct spdk_thread *thread;
	struct spdk_blob *blob;
	uint32_t cluster_num;	/* cluster index in blob */
	uint64_t cluster;	/* cluster on disk */
	uint32_t extent_page;	/* extent page on disk */
	int rc;
	spdk_blob_op_complete cb_fn;
	void *cb_arg;
};

static void
_spdk_blob_insert_cluster_msg_cpl(void *arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;

	ctx->cb_fn(ctx->cb_arg, ctx->rc);
	free(ctx);
}

static void
_spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;

	ctx->rc = bserrno;
	spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
}

static void
_spdk_blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_md_page *page = cb_arg;

	spdk_bs_sequence_finish(seq, bserrno);
	spdk_free(page);
}

static void
_spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
			 spdk_blob_op_complete cb_fn, void *cb_arg)
{
	spdk_bs_sequence_t *seq;
	struct spdk_bs_cpl cpl;
	struct spdk_blob_md_page *page = NULL;
	uint32_t page_count = 0;
	int rc;

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
	rc = _spdk_blob_serialize_add_page(blob, &page, &page_count, &page);
	if (rc < 0) {
		spdk_bs_sequence_finish(seq, rc);
		return;
	}

	_spdk_blob_serialize_extent_page(blob, cluster_num, page);

	page->crc = _spdk_blob_md_page_calc_crc(page);

	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);

	spdk_bs_sequence_write_dev(seq, page, _spdk_bs_md_page_to_lba(blob->bs, extent),
				   _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
				   _spdk_blob_persist_extent_page_cpl, page);
}

static void
_spdk_blob_insert_cluster_msg(void *arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx = arg;
	uint32_t *extent_page;

	ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
	if (ctx->rc != 0) {
		spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
		return;
	}

	if (ctx->blob->use_extent_table == false) {
		/* The extent table is not used; proceed with a sync of md that will only use extents_rle. */
		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
		_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
		return;
	}

	extent_page = _spdk_bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
	if (*extent_page == 0) {
		/* The extent page requires allocation.
		 * It was already claimed in the used_md_pages map and placed in ctx.
		 * Blob persist will take care of writing out the new extent page on disk. */
		assert(ctx->extent_page != 0);
		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
		*extent_page = ctx->extent_page;
		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
		_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
	} else {
		/* It is possible for the original thread to have already allocated an extent
		 * page for a different cluster in the same extent page. In that case proceed
		 * with updating the existing extent page, but release the additional one. */
		if (ctx->extent_page != 0) {
			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
			_spdk_bs_release_md_page(ctx->blob->bs, ctx->extent_page);
		}
		/* The extent page is already allocated.
		 * Every cluster allocation then requires just an update of a single extent page. */
		_spdk_blob_insert_extent(ctx->blob, *extent_page, ctx->cluster_num,
					 _spdk_blob_insert_cluster_msg_cb, ctx);
	}
}

static void
_spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
				       uint64_t cluster, uint32_t extent_page, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_insert_cluster_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->thread = spdk_get_thread();
	ctx->blob = blob;
	ctx->cluster_num = cluster_num;
	ctx->cluster = cluster;
	ctx->extent_page = extent_page;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx);
}

/* START spdk_blob_close */

static void
_spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;

	if (bserrno == 0) {
		blob->open_ref--;
		if (blob->open_ref == 0) {
			/*
			 * Blobs with active.num_pages == 0 are deleted blobs.
			 * These blobs are removed from the blob_store list
			 * when the deletion process starts - so don't try to
			 * remove them again.
			 */
			if (blob->active.num_pages > 0) {
				TAILQ_REMOVE(&blob->bs->blobs, blob, link);
			}
			_spdk_blob_free(blob);
		}
	}

	spdk_bs_sequence_finish(seq, bserrno);
}

void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_cpl cpl;
	spdk_bs_sequence_t *seq;

	_spdk_blob_verify_md_op(blob);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id);

	if (blob->open_ref == 0) {
		cb_fn(cb_arg, -EBADF);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	/* Sync metadata */
	_spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob);
}

/* END spdk_blob_close */

struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
{
	return spdk_get_io_channel(bs);
}

void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
{
	spdk_put_io_channel(channel);
}

void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
				     SPDK_BLOB_UNMAP);
}

void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
			       uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
				     SPDK_BLOB_WRITE_ZEROES);
}

void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
			void *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
				     SPDK_BLOB_WRITE);
}

void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       void *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
				     SPDK_BLOB_READ);
}

void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
			 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
			 spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
}

void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
}
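/*
 * Example usage (an illustrative sketch, not part of the library): write one
 * unit at the start of a blob and read it back. 'channel' comes from
 * spdk_bs_alloc_io_channel(); offsets and lengths are expressed in the
 * blobstore's I/O units (pages by default). 'write_done' and 'read_done' are
 * hypothetical callbacks, and 'payload' is assumed to be allocated with
 * spdk_malloc() so the device can DMA to/from it.
 *
 *	spdk_blob_io_write(blob, channel, payload, 0, 1, write_done, NULL);
 *	// ...later, e.g. from write_done():
 *	spdk_blob_io_read(blob, channel, payload, 0, 1, read_done, NULL);
 */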
struct spdk_bs_iter_ctx {
	int64_t page_num;
	struct spdk_blob_store *bs;

	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
};

static void
_spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	spdk_blob_id id;

	if (bserrno == 0) {
		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
		free(ctx);
		return;
	}

	ctx->page_num++;
	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
		free(ctx);
		return;
	}

	id = _spdk_bs_page_to_blobid(ctx->page_num);

	spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
}

void
spdk_bs_iter_first(struct spdk_blob_store *bs,
		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = -1;
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	_spdk_bs_iter_cpl(ctx, NULL, -1);
}

static void
_spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;

	_spdk_bs_iter_cpl(ctx, NULL, -1);
}

void
spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Close the existing blob */
	spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx);
}
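/*
 * Example usage (an illustrative sketch, not part of the library): iterate
 * over every blob in the blobstore. Each call hands an open blob to the
 * callback; spdk_bs_iter_next() closes it before opening the next one.
 * 'iter_cb' is a hypothetical callback.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *
 *		if (bserrno != 0) {
 *			// -ENOENT means the iteration completed.
 *			return;
 *		}
 *		// ... inspect 'blob' ...
 *		spdk_bs_iter_next(bs, blob, iter_cb, bs);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, bs);
 */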
static int
_spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		     uint16_t value_len, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;
	size_t desc_size;
	void *tmp;

	_spdk_blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}

	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Xattr '%s' of size %zu does not fit into a single page (max %zu)\n",
			      name, desc_size, (size_t)SPDK_BS_MAX_DESC_SIZE);
		return -ENOMEM;
	}

	if (internal) {
		xattrs = &blob->xattrs_internal;
		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
	} else {
		xattrs = &blob->xattrs;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}

			free(xattr->value);
			xattr->value_len = value_len;
			xattr->value = tmp;
			memcpy(xattr->value, value, value_len);

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}

	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(xattrs, xattr, link);

	blob->state = SPDK_BLOB_STATE_DIRTY;

	return 0;
}

int
spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		    uint16_t value_len)
{
	return _spdk_blob_set_xattr(blob, name, value, value_len, false);
}

static int
_spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr *xattr;

	_spdk_blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}
	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			TAILQ_REMOVE(xattrs, xattr, link);
			free(xattr->value);
			free(xattr->name);
			free(xattr);

			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
			}
			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	return -ENOENT;
}

int
spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
{
	return _spdk_blob_remove_xattr(blob, name, false);
}

static int
_spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
			   const void **value, size_t *value_len, bool internal)
{
	struct spdk_xattr *xattr;
	struct spdk_xattr_tailq *xattrs;

	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			*value = xattr->value;
			*value_len = xattr->value_len;
			return 0;
		}
	}
	return -ENOENT;
}

int
spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
			  const void **value, size_t *value_len)
{
	_spdk_blob_verify_md_op(blob);

	return _spdk_blob_get_xattr_value(blob, name, value, value_len, false);
}

struct spdk_xattr_names {
	uint32_t count;
	const char *names[0];
};

static int
_spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
{
	struct spdk_xattr *xattr;
	int count = 0;

	TAILQ_FOREACH(xattr, xattrs, link) {
		count++;
	}

	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
	if (*names == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		(*names)->names[(*names)->count++] = xattr->name;
	}

	return 0;
}

int
spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
{
	_spdk_blob_verify_md_op(blob);

	return _spdk_blob_get_xattr_names(&blob->xattrs, names);
}

uint32_t
spdk_xattr_names_get_count(struct spdk_xattr_names *names)
{
	assert(names != NULL);

	return names->count;
}

const char *
spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
{
	if (index >= names->count) {
		return NULL;
	}

	return names->names[index];
}

void
spdk_xattr_names_free(struct spdk_xattr_names *names)
{
	free(names);
}
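/*
 * Example usage (an illustrative sketch, not part of the library): tag a blob
 * with a name, read it back, and list all xattr keys. These calls must run on
 * the md thread, and spdk_blob_sync_md() is still needed to persist changes.
 *
 *	const void *value;
 *	size_t value_len;
 *	struct spdk_xattr_names *names;
 *	uint32_t i;
 *
 *	spdk_blob_set_xattr(blob, "name", "lvol1", sizeof("lvol1"));
 *	spdk_blob_get_xattr_value(blob, "name", &value, &value_len);
 *
 *	spdk_blob_get_xattr_names(blob, &names);
 *	for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *		printf("%s\n", spdk_xattr_names_get_name(names, i));
 *	}
 *	spdk_xattr_names_free(names);
 */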
struct spdk_bs_type
spdk_bs_get_bstype(struct spdk_blob_store *bs)
{
	return bs->bstype;
}

void
spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
{
	memcpy(&bs->bstype, &bstype, sizeof(bstype));
}

bool
spdk_blob_is_read_only(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return (blob->data_ro || blob->md_ro);
}

bool
spdk_blob_is_snapshot(struct spdk_blob *blob)
{
	struct spdk_blob_list *snapshot_entry;

	assert(blob != NULL);

	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry == NULL) {
		return false;
	}

	return true;
}

bool
spdk_blob_is_clone(struct spdk_blob *blob)
{
	assert(blob != NULL);

	if (blob->parent_id != SPDK_BLOBID_INVALID) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return true;
	}

	return false;
}

bool
spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
}

static void
_spdk_blob_update_clear_method(struct spdk_blob *blob)
{
	enum blob_clear_method stored_cm;

	assert(blob != NULL);

	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
	 * in the metadata previously. If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
	 */
	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);

	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
		blob->clear_method = stored_cm;
	} else if (blob->clear_method != stored_cm) {
		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
			     blob->clear_method, stored_cm);
	}
}

spdk_blob_id
spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob_id) {
				return snapshot_entry->id;
			}
		}
	}

	return SPDK_BLOBID_INVALID;
}

int
spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
		     size_t *count)
{
	struct spdk_blob_list *snapshot_entry, *clone_entry;
	size_t n;

	snapshot_entry = _spdk_bs_get_snapshot_entry(bs, blobid);
	if (snapshot_entry == NULL) {
		*count = 0;
		return 0;
	}

	if (ids == NULL || *count < snapshot_entry->clone_count) {
		*count = snapshot_entry->clone_count;
		return -ENOMEM;
	}
	*count = snapshot_entry->clone_count;

	n = 0;
	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
		ids[n++] = clone_entry->id;
	}

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB)