1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk/blob.h" 37 #include "spdk/env.h" 38 #include "spdk/queue.h" 39 #include "spdk/io_channel.h" 40 #include "spdk/bit_array.h" 41 42 #include "spdk_internal/log.h" 43 44 #include "blobstore.h" 45 #include "request.h" 46 47 static inline size_t 48 divide_round_up(size_t num, size_t divisor) 49 { 50 return (num + divisor - 1) / divisor; 51 } 52 53 static void 54 _spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num) 55 { 56 assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters)); 57 assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false); 58 assert(bs->num_free_clusters > 0); 59 60 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Claiming cluster %u\n", cluster_num); 61 62 spdk_bit_array_set(bs->used_clusters, cluster_num); 63 bs->num_free_clusters--; 64 } 65 66 static void 67 _spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num) 68 { 69 assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters)); 70 assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true); 71 assert(bs->num_free_clusters < bs->total_clusters); 72 73 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Releasing cluster %u\n", cluster_num); 74 75 spdk_bit_array_clear(bs->used_clusters, cluster_num); 76 bs->num_free_clusters++; 77 } 78 79 static struct spdk_blob * 80 _spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id) 81 { 82 struct spdk_blob *blob; 83 84 blob = calloc(1, sizeof(*blob)); 85 if (!blob) { 86 return NULL; 87 } 88 89 blob->id = id; 90 blob->bs = bs; 91 92 blob->state = SPDK_BLOB_STATE_DIRTY; 93 blob->active.num_pages = 1; 94 blob->active.pages = calloc(1, sizeof(*blob->active.pages)); 95 if (!blob->active.pages) { 96 free(blob); 97 return NULL; 98 } 99 100 blob->active.pages[0] = _spdk_bs_blobid_to_page(id); 101 102 TAILQ_INIT(&blob->xattrs); 103 104 return blob; 105 } 106 107 static void 108 _spdk_blob_free(struct 
spdk_blob *blob) 109 { 110 struct spdk_xattr *xattr, *xattr_tmp; 111 112 assert(blob != NULL); 113 114 free(blob->active.clusters); 115 free(blob->clean.clusters); 116 free(blob->active.pages); 117 free(blob->clean.pages); 118 119 TAILQ_FOREACH_SAFE(xattr, &blob->xattrs, link, xattr_tmp) { 120 TAILQ_REMOVE(&blob->xattrs, xattr, link); 121 free(xattr->name); 122 free(xattr->value); 123 free(xattr); 124 } 125 126 free(blob); 127 } 128 129 static int 130 _spdk_blob_mark_clean(struct spdk_blob *blob) 131 { 132 uint64_t *clusters = NULL; 133 uint32_t *pages = NULL; 134 135 assert(blob != NULL); 136 assert(blob->state == SPDK_BLOB_STATE_LOADING || 137 blob->state == SPDK_BLOB_STATE_SYNCING); 138 139 if (blob->active.num_clusters) { 140 assert(blob->active.clusters); 141 clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters)); 142 if (!clusters) { 143 return -1; 144 } 145 memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*clusters)); 146 } 147 148 if (blob->active.num_pages) { 149 assert(blob->active.pages); 150 pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages)); 151 if (!pages) { 152 free(clusters); 153 return -1; 154 } 155 memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*pages)); 156 } 157 158 free(blob->clean.clusters); 159 free(blob->clean.pages); 160 161 blob->clean.num_clusters = blob->active.num_clusters; 162 blob->clean.clusters = blob->active.clusters; 163 blob->clean.num_pages = blob->active.num_pages; 164 blob->clean.pages = blob->active.pages; 165 166 blob->active.clusters = clusters; 167 blob->active.pages = pages; 168 169 blob->state = SPDK_BLOB_STATE_CLEAN; 170 171 return 0; 172 } 173 174 static void 175 _spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob) 176 { 177 struct spdk_blob_md_descriptor *desc; 178 size_t cur_desc = 0; 179 void *tmp; 180 181 desc = (struct spdk_blob_md_descriptor *)page->descriptors; 182 while (cur_desc < sizeof(page->descriptors)) { 183 if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) { 184 if (desc->length == 0) { 185 /* If padding and length are 0, this terminates the page */ 186 break; 187 } 188 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) { 189 struct spdk_blob_md_descriptor_extent *desc_extent; 190 unsigned int i, j; 191 unsigned int cluster_count = blob->active.num_clusters; 192 193 desc_extent = (struct spdk_blob_md_descriptor_extent *)desc; 194 195 assert(desc_extent->length > 0); 196 assert(desc_extent->length % sizeof(desc_extent->extents[0]) == 0); 197 198 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 199 for (j = 0; j < desc_extent->extents[i].length; j++) { 200 assert(spdk_bit_array_get(blob->bs->used_clusters, desc_extent->extents[i].cluster_idx + j)); 201 cluster_count++; 202 } 203 } 204 205 assert(cluster_count > 0); 206 tmp = realloc(blob->active.clusters, cluster_count * sizeof(uint64_t)); 207 assert(tmp != NULL); 208 blob->active.clusters = tmp; 209 blob->active.cluster_array_size = cluster_count; 210 211 for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) { 212 for (j = 0; j < desc_extent->extents[i].length; j++) { 213 blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs, 214 desc_extent->extents[i].cluster_idx + j); 215 } 216 } 217 218 } else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) { 219 struct spdk_blob_md_descriptor_xattr *desc_xattr; 220 struct spdk_xattr *xattr; 221 222 desc_xattr = (struct 
spdk_blob_md_descriptor_xattr *)desc; 223 224 assert(desc_xattr->length == sizeof(desc_xattr->name_length) + 225 sizeof(desc_xattr->value_length) + 226 desc_xattr->name_length + desc_xattr->value_length); 227 228 xattr = calloc(1, sizeof(*xattr)); 229 assert(xattr != NULL); 230 231 xattr->name = malloc(desc_xattr->name_length + 1); 232 assert(xattr->name); 233 strncpy(xattr->name, desc_xattr->name, desc_xattr->name_length); 234 xattr->name[desc_xattr->name_length] = '\0'; 235 236 xattr->value = malloc(desc_xattr->value_length); 237 assert(xattr->value != NULL); 238 xattr->value_len = desc_xattr->value_length; 239 memcpy(xattr->value, 240 (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length), 241 desc_xattr->value_length); 242 243 TAILQ_INSERT_TAIL(&blob->xattrs, xattr, link); 244 } else { 245 /* Error */ 246 break; 247 } 248 249 /* Advance to the next descriptor */ 250 cur_desc += sizeof(*desc) + desc->length; 251 if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) { 252 break; 253 } 254 desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc); 255 } 256 } 257 258 static int 259 _spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count, 260 struct spdk_blob *blob) 261 { 262 const struct spdk_blob_md_page *page; 263 uint32_t i; 264 265 assert(page_count > 0); 266 assert(pages[0].sequence_num == 0); 267 assert(blob != NULL); 268 assert(blob->state == SPDK_BLOB_STATE_LOADING); 269 assert(blob->active.clusters == NULL); 270 assert(blob->id == pages[0].id); 271 assert(blob->state == SPDK_BLOB_STATE_LOADING); 272 273 for (i = 0; i < page_count; i++) { 274 page = &pages[i]; 275 276 assert(page->id == blob->id); 277 assert(page->sequence_num == i); 278 279 _spdk_blob_parse_page(page, blob); 280 } 281 282 return 0; 283 } 284 285 static int 286 _spdk_blob_serialize_add_page(const struct spdk_blob *blob, 287 struct spdk_blob_md_page **pages, 288 uint32_t *page_count, 289 struct spdk_blob_md_page **last_page) 290 { 291 struct spdk_blob_md_page *page; 292 293 assert(pages != NULL); 294 assert(page_count != NULL); 295 296 if (*page_count == 0) { 297 assert(*pages == NULL); 298 *page_count = 1; 299 *pages = spdk_malloc(sizeof(struct spdk_blob_md_page), 300 sizeof(struct spdk_blob_md_page), 301 NULL); 302 } else { 303 assert(*pages != NULL); 304 (*page_count)++; 305 *pages = spdk_realloc(*pages, 306 sizeof(struct spdk_blob_md_page) * (*page_count), 307 sizeof(struct spdk_blob_md_page), 308 NULL); 309 } 310 311 if (*pages == NULL) { 312 *page_count = 0; 313 *last_page = NULL; 314 return -ENOMEM; 315 } 316 317 page = &(*pages)[*page_count - 1]; 318 memset(page, 0, sizeof(*page)); 319 page->id = blob->id; 320 page->sequence_num = *page_count - 1; 321 page->next = SPDK_INVALID_MD_PAGE; 322 *last_page = page; 323 324 return 0; 325 } 326 327 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor. 328 * Update required_sz on both success and failure. 
329 * 330 */ 331 static int 332 _spdk_blob_serialize_xattr(const struct spdk_xattr *xattr, 333 uint8_t *buf, size_t buf_sz, 334 size_t *required_sz) 335 { 336 struct spdk_blob_md_descriptor_xattr *desc; 337 338 *required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) + 339 strlen(xattr->name) + 340 xattr->value_len; 341 342 if (buf_sz < *required_sz) { 343 return -1; 344 } 345 346 desc = (struct spdk_blob_md_descriptor_xattr *)buf; 347 348 desc->type = SPDK_MD_DESCRIPTOR_TYPE_XATTR; 349 desc->length = sizeof(desc->name_length) + 350 sizeof(desc->value_length) + 351 strlen(xattr->name) + 352 xattr->value_len; 353 desc->name_length = strlen(xattr->name); 354 desc->value_length = xattr->value_len; 355 356 memcpy(desc->name, xattr->name, desc->name_length); 357 memcpy((void *)((uintptr_t)desc->name + desc->name_length), 358 xattr->value, 359 desc->value_length); 360 361 return 0; 362 } 363 364 static void 365 _spdk_blob_serialize_extent(const struct spdk_blob *blob, 366 uint64_t start_cluster, uint64_t *next_cluster, 367 uint8_t *buf, size_t buf_sz) 368 { 369 struct spdk_blob_md_descriptor_extent *desc; 370 size_t cur_sz; 371 uint64_t i, extent_idx; 372 uint32_t lba, lba_per_cluster, lba_count; 373 374 /* The buffer must have room for at least one extent */ 375 cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]); 376 if (buf_sz < cur_sz) { 377 *next_cluster = start_cluster; 378 return; 379 } 380 381 desc = (struct spdk_blob_md_descriptor_extent *)buf; 382 desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT; 383 384 lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1); 385 386 lba = blob->active.clusters[start_cluster]; 387 lba_count = lba_per_cluster; 388 extent_idx = 0; 389 for (i = start_cluster + 1; i < blob->active.num_clusters; i++) { 390 if ((lba + lba_count) == blob->active.clusters[i]) { 391 lba_count += lba_per_cluster; 392 continue; 393 } 394 desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 395 desc->extents[extent_idx].length = lba_count / lba_per_cluster; 396 extent_idx++; 397 398 cur_sz += sizeof(desc->extents[extent_idx]); 399 400 if (buf_sz < cur_sz) { 401 /* If we ran out of buffer space, return */ 402 desc->length = sizeof(desc->extents[0]) * extent_idx; 403 *next_cluster = i; 404 return; 405 } 406 407 lba = blob->active.clusters[i]; 408 lba_count = lba_per_cluster; 409 } 410 411 desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster; 412 desc->extents[extent_idx].length = lba_count / lba_per_cluster; 413 extent_idx++; 414 415 desc->length = sizeof(desc->extents[0]) * extent_idx; 416 *next_cluster = blob->active.num_clusters; 417 418 return; 419 } 420 421 static int 422 _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages, 423 uint32_t *page_count) 424 { 425 struct spdk_blob_md_page *cur_page; 426 const struct spdk_xattr *xattr; 427 int rc; 428 uint8_t *buf; 429 size_t remaining_sz; 430 431 assert(pages != NULL); 432 assert(page_count != NULL); 433 assert(blob != NULL); 434 assert(blob->state == SPDK_BLOB_STATE_SYNCING); 435 436 *pages = NULL; 437 *page_count = 0; 438 439 /* A blob always has at least 1 page, even if it has no descriptors */ 440 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page); 441 if (rc < 0) { 442 return rc; 443 } 444 445 buf = (uint8_t *)cur_page->descriptors; 446 remaining_sz = sizeof(cur_page->descriptors); 447 448 /* Serialize xattrs */ 449 TAILQ_FOREACH(xattr, &blob->xattrs, link) { 450 size_t required_sz = 0; 451 rc = _spdk_blob_serialize_xattr(xattr, 452 buf, 
remaining_sz, 453 &required_sz); 454 if (rc < 0) { 455 /* Need to add a new page to the chain */ 456 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, 457 &cur_page); 458 if (rc < 0) { 459 spdk_free(*pages); 460 *pages = NULL; 461 *page_count = 0; 462 return rc; 463 } 464 465 buf = (uint8_t *)cur_page->descriptors; 466 remaining_sz = sizeof(cur_page->descriptors); 467 468 /* Try again */ 469 required_sz = 0; 470 rc = _spdk_blob_serialize_xattr(xattr, 471 buf, remaining_sz, 472 &required_sz); 473 474 if (rc < 0) { 475 spdk_free(*pages); 476 *pages = NULL; 477 *page_count = 0; 478 return -1; 479 } 480 } 481 482 remaining_sz -= required_sz; 483 buf += required_sz; 484 } 485 486 /* Serialize extents */ 487 uint64_t last_cluster = 0; 488 while (last_cluster < blob->active.num_clusters) { 489 _spdk_blob_serialize_extent(blob, last_cluster, &last_cluster, 490 buf, remaining_sz); 491 492 if (last_cluster == blob->active.num_clusters) { 493 break; 494 } 495 496 rc = _spdk_blob_serialize_add_page(blob, pages, page_count, 497 &cur_page); 498 if (rc < 0) { 499 return rc; 500 } 501 502 buf = (uint8_t *)cur_page->descriptors; 503 remaining_sz = sizeof(cur_page->descriptors); 504 } 505 506 return 0; 507 } 508 509 struct spdk_blob_load_ctx { 510 struct spdk_blob *blob; 511 512 struct spdk_blob_md_page *pages; 513 uint32_t num_pages; 514 515 spdk_bs_sequence_cpl cb_fn; 516 void *cb_arg; 517 }; 518 519 static void 520 _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 521 { 522 struct spdk_blob_load_ctx *ctx = cb_arg; 523 struct spdk_blob *blob = ctx->blob; 524 struct spdk_blob_md_page *page; 525 int rc; 526 527 page = &ctx->pages[ctx->num_pages - 1]; 528 529 if (page->next != SPDK_INVALID_MD_PAGE) { 530 uint32_t next_page = page->next; 531 uint64_t next_lba = _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + next_page); 532 533 534 assert(next_lba < (blob->bs->md_start + blob->bs->md_len)); 535 536 /* Read the next page */ 537 ctx->num_pages++; 538 ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages), 539 sizeof(*page), NULL); 540 if (ctx->pages == NULL) { 541 ctx->cb_fn(seq, ctx->cb_arg, -ENOMEM); 542 free(ctx); 543 return; 544 } 545 546 spdk_bs_sequence_read(seq, &ctx->pages[ctx->num_pages - 1], 547 next_lba, 548 _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)), 549 _spdk_blob_load_cpl, ctx); 550 return; 551 } 552 553 /* Parse the pages */ 554 rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob); 555 556 _spdk_blob_mark_clean(blob); 557 558 ctx->cb_fn(seq, ctx->cb_arg, rc); 559 560 /* Free the memory */ 561 spdk_free(ctx->pages); 562 free(ctx); 563 } 564 565 /* Load a blob from disk given a blobid */ 566 static void 567 _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 568 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 569 { 570 struct spdk_blob_load_ctx *ctx; 571 struct spdk_blob_store *bs; 572 uint32_t page_num; 573 uint64_t lba; 574 575 assert(blob != NULL); 576 assert(blob->state == SPDK_BLOB_STATE_CLEAN || 577 blob->state == SPDK_BLOB_STATE_DIRTY); 578 579 bs = blob->bs; 580 581 ctx = calloc(1, sizeof(*ctx)); 582 if (!ctx) { 583 cb_fn(seq, cb_arg, -ENOMEM); 584 return; 585 } 586 587 ctx->blob = blob; 588 ctx->pages = spdk_realloc(ctx->pages, sizeof(struct spdk_blob_md_page), 589 sizeof(struct spdk_blob_md_page), NULL); 590 if (!ctx->pages) { 591 free(ctx); 592 cb_fn(seq, cb_arg, -ENOMEM); 593 return; 594 } 595 ctx->num_pages = 1; 596 ctx->cb_fn = cb_fn; 597 ctx->cb_arg = cb_arg; 598 599 page_num = _spdk_bs_blobid_to_page(blob->id); 600 lba 
= _spdk_bs_page_to_lba(blob->bs, bs->md_start + page_num); 601 602 blob->state = SPDK_BLOB_STATE_LOADING; 603 604 spdk_bs_sequence_read(seq, &ctx->pages[0], lba, 605 _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_blob_md_page)), 606 _spdk_blob_load_cpl, ctx); 607 } 608 609 struct spdk_blob_persist_ctx { 610 struct spdk_blob *blob; 611 612 struct spdk_blob_md_page *pages; 613 614 uint64_t idx; 615 616 spdk_bs_sequence_cpl cb_fn; 617 void *cb_arg; 618 }; 619 620 static void 621 _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 622 { 623 struct spdk_blob_persist_ctx *ctx = cb_arg; 624 struct spdk_blob *blob = ctx->blob; 625 626 if (bserrno == 0) { 627 _spdk_blob_mark_clean(blob); 628 } 629 630 /* Call user callback */ 631 ctx->cb_fn(seq, ctx->cb_arg, bserrno); 632 633 /* Free the memory */ 634 spdk_free(ctx->pages); 635 free(ctx); 636 } 637 638 static void 639 _spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 640 { 641 struct spdk_blob_persist_ctx *ctx = cb_arg; 642 struct spdk_blob *blob = ctx->blob; 643 struct spdk_blob_store *bs = blob->bs; 644 void *tmp; 645 size_t i; 646 647 /* Release all clusters that were truncated */ 648 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 649 uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]); 650 651 _spdk_bs_release_cluster(bs, cluster_num); 652 } 653 654 if (blob->active.num_clusters == 0) { 655 free(blob->active.clusters); 656 blob->active.clusters = NULL; 657 blob->active.cluster_array_size = 0; 658 } else { 659 tmp = realloc(blob->active.clusters, sizeof(uint64_t) * blob->active.num_clusters); 660 assert(tmp != NULL); 661 blob->active.clusters = tmp; 662 blob->active.cluster_array_size = blob->active.num_clusters; 663 } 664 665 _spdk_blob_persist_complete(seq, ctx, bserrno); 666 } 667 668 static void 669 _spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 670 { 671 struct spdk_blob_persist_ctx *ctx = cb_arg; 672 struct spdk_blob *blob = ctx->blob; 673 struct spdk_blob_store *bs = blob->bs; 674 spdk_bs_batch_t *batch; 675 size_t i; 676 677 /* Clusters don't move around in blobs. The list shrinks or grows 678 * at the end, but no changes ever occur in the middle of the list. 679 */ 680 681 batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_clusters_cpl, ctx); 682 683 /* Unmap all clusters that were truncated */ 684 for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) { 685 uint64_t lba = blob->active.clusters[i]; 686 uint32_t lba_count = _spdk_bs_cluster_to_lba(bs, 1); 687 688 spdk_bs_batch_unmap(batch, lba, lba_count); 689 } 690 691 spdk_bs_batch_close(batch); 692 } 693 694 static void 695 _spdk_blob_persist_unmap_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 696 { 697 struct spdk_blob_persist_ctx *ctx = cb_arg; 698 struct spdk_blob *blob = ctx->blob; 699 struct spdk_blob_store *bs = blob->bs; 700 size_t i; 701 702 /* This loop starts at 1 because the first page is special and handled 703 * below. The pages (except the first) are never written in place, 704 * so any pages in the clean list must be unmapped. 
705 */ 706 for (i = 1; i < blob->clean.num_pages; i++) { 707 spdk_bit_array_clear(bs->used_md_pages, blob->clean.pages[i]); 708 } 709 710 if (blob->active.num_pages == 0) { 711 uint32_t page_num; 712 713 page_num = _spdk_bs_blobid_to_page(blob->id); 714 spdk_bit_array_clear(bs->used_md_pages, page_num); 715 } 716 717 /* Move on to unmapping clusters */ 718 _spdk_blob_persist_unmap_clusters(seq, ctx, 0); 719 } 720 721 static void 722 _spdk_blob_persist_unmap_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 723 { 724 struct spdk_blob_persist_ctx *ctx = cb_arg; 725 struct spdk_blob *blob = ctx->blob; 726 struct spdk_blob_store *bs = blob->bs; 727 uint64_t lba; 728 uint32_t lba_count; 729 spdk_bs_batch_t *batch; 730 size_t i; 731 732 batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_pages_cpl, ctx); 733 734 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_blob_md_page)); 735 736 /* This loop starts at 1 because the first page is special and handled 737 * below. The pages (except the first) are never written in place, 738 * so any pages in the clean list must be unmapped. 739 */ 740 for (i = 1; i < blob->clean.num_pages; i++) { 741 lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->clean.pages[i]); 742 743 spdk_bs_batch_unmap(batch, lba, lba_count); 744 } 745 746 /* The first page will only be unmapped if this is a delete. */ 747 if (blob->active.num_pages == 0) { 748 uint32_t page_num; 749 750 /* The first page in the metadata goes where the blobid indicates */ 751 page_num = _spdk_bs_blobid_to_page(blob->id); 752 lba = _spdk_bs_page_to_lba(bs, bs->md_start + page_num); 753 754 spdk_bs_batch_unmap(batch, lba, lba_count); 755 } 756 757 spdk_bs_batch_close(batch); 758 } 759 760 static void 761 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 762 { 763 struct spdk_blob_persist_ctx *ctx = cb_arg; 764 struct spdk_blob *blob = ctx->blob; 765 struct spdk_blob_store *bs = blob->bs; 766 uint64_t lba; 767 uint32_t lba_count; 768 struct spdk_blob_md_page *page; 769 770 if (blob->active.num_pages == 0) { 771 /* Move on to the next step */ 772 _spdk_blob_persist_unmap_pages(seq, ctx, 0); 773 return; 774 } 775 776 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page)); 777 778 page = &ctx->pages[0]; 779 /* The first page in the metadata goes where the blobid indicates */ 780 lba = _spdk_bs_page_to_lba(bs, bs->md_start + _spdk_bs_blobid_to_page(blob->id)); 781 782 spdk_bs_sequence_write(seq, page, lba, lba_count, 783 _spdk_blob_persist_unmap_pages, ctx); 784 } 785 786 static void 787 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 788 { 789 struct spdk_blob_persist_ctx *ctx = cb_arg; 790 struct spdk_blob *blob = ctx->blob; 791 struct spdk_blob_store *bs = blob->bs; 792 uint64_t lba; 793 uint32_t lba_count; 794 struct spdk_blob_md_page *page; 795 spdk_bs_batch_t *batch; 796 size_t i; 797 798 /* Clusters don't move around in blobs. The list shrinks or grows 799 * at the end, but no changes ever occur in the middle of the list. 800 */ 801 802 lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page)); 803 804 batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx); 805 806 /* This starts at 1. 
The root page is not written until 807 * all of the others are finished 808 */ 809 for (i = 1; i < blob->active.num_pages; i++) { 810 page = &ctx->pages[i]; 811 assert(page->sequence_num == i); 812 813 lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->active.pages[i]); 814 815 spdk_bs_batch_write(batch, page, lba, lba_count); 816 } 817 818 spdk_bs_batch_close(batch); 819 } 820 821 static int 822 _spdk_resize_blob(struct spdk_blob *blob, uint64_t sz) 823 { 824 uint64_t i; 825 uint64_t *tmp; 826 uint64_t lfc; /* lowest free cluster */ 827 struct spdk_blob_store *bs; 828 829 bs = blob->bs; 830 831 assert(blob->state != SPDK_BLOB_STATE_LOADING && 832 blob->state != SPDK_BLOB_STATE_SYNCING); 833 834 if (blob->active.num_clusters == sz) { 835 return 0; 836 } 837 838 if (blob->active.num_clusters < blob->active.cluster_array_size) { 839 /* If this blob was resized to be larger, then smaller, then 840 * larger without syncing, then the cluster array already 841 * contains spare assigned clusters we can use. 842 */ 843 blob->active.num_clusters = spdk_min(blob->active.cluster_array_size, 844 sz); 845 } 846 847 blob->state = SPDK_BLOB_STATE_DIRTY; 848 849 /* Do two passes - one to verify that we can obtain enough clusters 850 * and another to actually claim them. 851 */ 852 853 lfc = 0; 854 for (i = blob->active.num_clusters; i < sz; i++) { 855 lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc); 856 if (lfc >= bs->total_clusters) { 857 /* No more free clusters. Cannot satisfy the request */ 858 assert(false); 859 return -1; 860 } 861 lfc++; 862 } 863 864 if (sz > blob->active.num_clusters) { 865 /* Expand the cluster array if necessary. 866 * We only shrink the array when persisting. 867 */ 868 tmp = realloc(blob->active.clusters, sizeof(uint64_t) * sz); 869 if (sz > 0 && tmp == NULL) { 870 assert(false); 871 return -1; 872 } 873 blob->active.clusters = tmp; 874 blob->active.cluster_array_size = sz; 875 } 876 877 lfc = 0; 878 for (i = blob->active.num_clusters; i < sz; i++) { 879 lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc); 880 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Claiming cluster %lu for blob %lu\n", lfc, blob->id); 881 _spdk_bs_claim_cluster(bs, lfc); 882 blob->active.clusters[i] = _spdk_bs_cluster_to_lba(bs, lfc); 883 lfc++; 884 } 885 886 blob->active.num_clusters = sz; 887 888 return 0; 889 } 890 891 /* Write a blob to disk */ 892 static void 893 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob, 894 spdk_bs_sequence_cpl cb_fn, void *cb_arg) 895 { 896 struct spdk_blob_persist_ctx *ctx; 897 int rc; 898 uint64_t i; 899 uint32_t page_num; 900 struct spdk_blob_store *bs; 901 902 assert(blob != NULL); 903 assert(blob->state == SPDK_BLOB_STATE_CLEAN || 904 blob->state == SPDK_BLOB_STATE_DIRTY); 905 906 if (blob->state == SPDK_BLOB_STATE_CLEAN) { 907 cb_fn(seq, cb_arg, 0); 908 return; 909 } 910 911 bs = blob->bs; 912 913 ctx = calloc(1, sizeof(*ctx)); 914 if (!ctx) { 915 cb_fn(seq, cb_arg, -ENOMEM); 916 return; 917 } 918 ctx->blob = blob; 919 ctx->cb_fn = cb_fn; 920 ctx->cb_arg = cb_arg; 921 922 blob->state = SPDK_BLOB_STATE_SYNCING; 923 924 if (blob->active.num_pages == 0) { 925 /* This is the signal that the blob should be deleted. 926 * Immediately jump to the clean up routine. 
*/ 927 assert(blob->clean.num_pages > 0); 928 ctx->idx = blob->clean.num_pages - 1; 929 _spdk_blob_persist_unmap_pages(seq, ctx, 0); 930 return; 931 932 } 933 934 /* Generate the new metadata */ 935 rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages); 936 if (rc < 0) { 937 free(ctx); 938 cb_fn(seq, cb_arg, rc); 939 return; 940 } 941 942 assert(blob->active.num_pages >= 1); 943 944 /* Resize the cache of page indices */ 945 blob->active.pages = realloc(blob->active.pages, 946 blob->active.num_pages * sizeof(*blob->active.pages)); 947 if (!blob->active.pages) { 948 free(ctx); 949 cb_fn(seq, cb_arg, -ENOMEM); 950 return; 951 } 952 953 /* Assign this metadata to pages. This requires two passes - 954 * one to verify that there are enough pages and a second 955 * to actually claim them. */ 956 page_num = 0; 957 /* Note that this loop starts at one. The first page location is fixed by the blobid. */ 958 for (i = 1; i < blob->active.num_pages; i++) { 959 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 960 if (page_num >= spdk_bit_array_capacity(bs->used_md_pages)) { 961 spdk_free(ctx->pages); 962 free(ctx); 963 blob->state = SPDK_BLOB_STATE_DIRTY; 964 cb_fn(seq, cb_arg, -ENOMEM); 965 return; 966 } 967 page_num++; 968 } 969 970 page_num = 0; 971 blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id); 972 for (i = 1; i < blob->active.num_pages; i++) { 973 page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num); 974 ctx->pages[i - 1].next = page_num; 975 blob->active.pages[i] = page_num; 976 spdk_bit_array_set(bs->used_md_pages, page_num); 977 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id); 978 page_num++; 979 } 980 981 /* Start writing the metadata from last page to first */ 982 ctx->idx = blob->active.num_pages - 1; 983 _spdk_blob_persist_write_page_chain(seq, ctx, 0); 984 } 985 986 static void 987 _spdk_blob_request_submit_rw(struct spdk_blob *blob, struct spdk_io_channel *_channel, 988 void *payload, uint64_t offset, uint64_t length, 989 spdk_blob_op_complete cb_fn, void *cb_arg, bool read) 990 { 991 spdk_bs_batch_t *batch; 992 struct spdk_bs_cpl cpl; 993 uint64_t lba; 994 uint32_t lba_count; 995 uint8_t *buf; 996 uint64_t page; 997 998 assert(blob != NULL); 999 1000 if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) { 1001 cb_fn(cb_arg, -EINVAL); 1002 return; 1003 } 1004 1005 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1006 cpl.u.blob_basic.cb_fn = cb_fn; 1007 cpl.u.blob_basic.cb_arg = cb_arg; 1008 1009 batch = spdk_bs_batch_open(_channel, &cpl); 1010 if (!batch) { 1011 cb_fn(cb_arg, -ENOMEM); 1012 return; 1013 } 1014 1015 length = _spdk_bs_page_to_lba(blob->bs, length); 1016 page = offset; 1017 buf = payload; 1018 while (length > 0) { 1019 lba = _spdk_bs_blob_page_to_lba(blob, page); 1020 lba_count = spdk_min(length, 1021 _spdk_bs_page_to_lba(blob->bs, 1022 _spdk_bs_num_pages_to_cluster_boundary(blob, page))); 1023 1024 if (read) { 1025 spdk_bs_batch_read(batch, buf, lba, lba_count); 1026 } else { 1027 spdk_bs_batch_write(batch, buf, lba, lba_count); 1028 } 1029 1030 length -= lba_count; 1031 buf += _spdk_bs_lba_to_byte(blob->bs, lba_count); 1032 page += _spdk_bs_lba_to_page(blob->bs, lba_count); 1033 } 1034 1035 spdk_bs_batch_close(batch); 1036 } 1037 1038 static struct spdk_blob * 1039 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid) 1040 { 1041 struct spdk_blob *blob; 1042 1043 TAILQ_FOREACH(blob, &bs->blobs, link) { 1044 if (blob->id == blobid) { 
1045 return blob; 1046 } 1047 } 1048 1049 return NULL; 1050 } 1051 1052 static int 1053 _spdk_bs_channel_create(void *io_device, uint32_t priority, void *ctx_buf, void *unique_ctx) 1054 { 1055 struct spdk_blob_store *bs = io_device; 1056 struct spdk_bs_dev *dev = bs->dev; 1057 struct spdk_bs_channel *channel = ctx_buf; 1058 uint32_t max_ops = *(uint32_t *)unique_ctx; 1059 uint32_t i; 1060 1061 channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set)); 1062 if (!channel->req_mem) { 1063 free(channel); 1064 return -1; 1065 } 1066 1067 TAILQ_INIT(&channel->reqs); 1068 1069 for (i = 0; i < max_ops; i++) { 1070 TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link); 1071 } 1072 1073 channel->bs = bs; 1074 channel->dev = dev; 1075 channel->dev_channel = dev->create_channel(dev); 1076 1077 return 0; 1078 } 1079 1080 static void 1081 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf) 1082 { 1083 struct spdk_bs_channel *channel = ctx_buf; 1084 1085 free(channel->req_mem); 1086 channel->dev->destroy_channel(channel->dev, channel->dev_channel); 1087 } 1088 1089 static void 1090 _spdk_bs_free(struct spdk_blob_store *bs) 1091 { 1092 struct spdk_blob *blob, *blob_tmp; 1093 1094 spdk_bs_unregister_md_thread(bs); 1095 spdk_io_device_unregister(bs); 1096 1097 TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) { 1098 TAILQ_REMOVE(&bs->blobs, blob, link); 1099 _spdk_blob_free(blob); 1100 } 1101 1102 spdk_bit_array_free(&bs->used_md_pages); 1103 spdk_bit_array_free(&bs->used_clusters); 1104 1105 bs->dev->destroy(bs->dev); 1106 free(bs); 1107 } 1108 1109 void 1110 spdk_bs_opts_init(struct spdk_bs_opts *opts) 1111 { 1112 opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ; 1113 opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES; 1114 opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS; 1115 } 1116 1117 static struct spdk_blob_store * 1118 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts) 1119 { 1120 struct spdk_blob_store *bs; 1121 1122 bs = calloc(1, sizeof(struct spdk_blob_store)); 1123 if (!bs) { 1124 return NULL; 1125 } 1126 1127 TAILQ_INIT(&bs->blobs); 1128 bs->dev = dev; 1129 1130 /* 1131 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an 1132 * even multiple of the cluster size. 
1133 */ 1134 bs->cluster_sz = opts->cluster_sz; 1135 bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen); 1136 bs->pages_per_cluster = bs->cluster_sz / sizeof(struct spdk_blob_md_page); 1137 bs->num_free_clusters = bs->total_clusters; 1138 bs->used_clusters = spdk_bit_array_create(bs->total_clusters); 1139 if (bs->used_clusters == NULL) { 1140 _spdk_bs_free(bs); 1141 return NULL; 1142 } 1143 1144 bs->max_md_ops = opts->max_md_ops; 1145 bs->super_blob = SPDK_BLOBID_INVALID; 1146 1147 /* The metadata is assumed to be at least 1 page */ 1148 bs->used_md_pages = spdk_bit_array_create(1); 1149 1150 spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy, 1151 sizeof(struct spdk_bs_channel)); 1152 spdk_bs_register_md_thread(bs); 1153 1154 return bs; 1155 } 1156 1157 /* START spdk_bs_load */ 1158 1159 struct spdk_bs_load_ctx { 1160 struct spdk_blob_store *bs; 1161 struct spdk_bs_super_block *super; 1162 1163 struct spdk_bs_md_mask *mask; 1164 }; 1165 1166 static void 1167 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1168 { 1169 struct spdk_bs_load_ctx *ctx = cb_arg; 1170 uint32_t i, j; 1171 int rc; 1172 1173 /* The type must be correct */ 1174 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS); 1175 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 1176 assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof( 1177 struct spdk_blob_md_page) * 8)); 1178 /* The length of the mask must be exactly equal to the total number of clusters */ 1179 assert(ctx->mask->length == ctx->bs->total_clusters); 1180 1181 rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters); 1182 if (rc < 0) { 1183 spdk_free(ctx->super); 1184 spdk_free(ctx->mask); 1185 _spdk_bs_free(ctx->bs); 1186 free(ctx); 1187 spdk_bs_sequence_finish(seq, -ENOMEM); 1188 return; 1189 } 1190 1191 ctx->bs->num_free_clusters = ctx->bs->total_clusters; 1192 for (i = 0; i < ctx->mask->length / 8; i++) { 1193 uint8_t segment = ctx->mask->mask[i]; 1194 for (j = 0; segment && (j < 8); j++) { 1195 if (segment & 1U) { 1196 spdk_bit_array_set(ctx->bs->used_clusters, (i * 8) + j); 1197 assert(ctx->bs->num_free_clusters > 0); 1198 ctx->bs->num_free_clusters--; 1199 } 1200 segment >>= 1U; 1201 } 1202 } 1203 1204 spdk_free(ctx->super); 1205 spdk_free(ctx->mask); 1206 free(ctx); 1207 1208 spdk_bs_sequence_finish(seq, bserrno); 1209 } 1210 1211 static void 1212 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1213 { 1214 struct spdk_bs_load_ctx *ctx = cb_arg; 1215 uint64_t lba, lba_count; 1216 uint32_t i, j; 1217 int rc; 1218 1219 /* The type must be correct */ 1220 assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES); 1221 /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */ 1222 assert(ctx->mask->length <= (ctx->super->used_page_mask_len * sizeof(struct spdk_blob_md_page) * 1223 8)); 1224 /* The length of the mask must be exactly equal to the size (in pages) of the metadata region */ 1225 assert(ctx->mask->length == ctx->super->md_len); 1226 1227 rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length); 1228 if (rc < 0) { 1229 spdk_free(ctx->super); 1230 spdk_free(ctx->mask); 1231 _spdk_bs_free(ctx->bs); 1232 free(ctx); 1233 spdk_bs_sequence_finish(seq, -ENOMEM); 1234 return; 1235 } 1236 1237 for (i = 0; i < ctx->mask->length / 8; i++) { 1238 uint8_t segment = 
ctx->mask->mask[i]; 1239 for (j = 0; segment && (j < 8); j++) { 1240 if (segment & 1U) { 1241 spdk_bit_array_set(ctx->bs->used_md_pages, (i * 8) + j); 1242 } 1243 segment >>= 1U; 1244 } 1245 } 1246 spdk_free(ctx->mask); 1247 1248 /* Read the used clusters mask */ 1249 ctx->mask = spdk_zmalloc(ctx->super->used_cluster_mask_len * sizeof(struct spdk_blob_md_page), 1250 0x1000, NULL); 1251 if (!ctx->mask) { 1252 spdk_free(ctx->super); 1253 _spdk_bs_free(ctx->bs); 1254 free(ctx); 1255 spdk_bs_sequence_finish(seq, -ENOMEM); 1256 return; 1257 } 1258 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 1259 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 1260 spdk_bs_sequence_read(seq, ctx->mask, lba, lba_count, 1261 _spdk_bs_load_used_clusters_cpl, ctx); 1262 } 1263 1264 static void 1265 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1266 { 1267 struct spdk_bs_load_ctx *ctx = cb_arg; 1268 uint64_t lba, lba_count; 1269 1270 if (ctx->super->version != SPDK_BS_VERSION) { 1271 spdk_free(ctx->super); 1272 _spdk_bs_free(ctx->bs); 1273 free(ctx); 1274 spdk_bs_sequence_finish(seq, -EILSEQ); 1275 return; 1276 } 1277 1278 if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 1279 sizeof(ctx->super->signature)) != 0) { 1280 spdk_free(ctx->super); 1281 _spdk_bs_free(ctx->bs); 1282 free(ctx); 1283 spdk_bs_sequence_finish(seq, -EILSEQ); 1284 return; 1285 } 1286 1287 if (ctx->super->clean != 1) { 1288 /* TODO: ONLY CLEAN SHUTDOWN IS CURRENTLY SUPPORTED. 1289 * All of the necessary data to recover is available 1290 * on disk - the code just has not been written yet. 1291 */ 1292 assert(false); 1293 spdk_free(ctx->super); 1294 _spdk_bs_free(ctx->bs); 1295 free(ctx); 1296 spdk_bs_sequence_finish(seq, -EILSEQ); 1297 return; 1298 } 1299 ctx->super->clean = 0; 1300 1301 /* Parse the super block */ 1302 ctx->bs->cluster_sz = ctx->super->cluster_size; 1303 ctx->bs->total_clusters = ctx->bs->dev->blockcnt / (ctx->bs->cluster_sz / ctx->bs->dev->blocklen); 1304 ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / sizeof(struct spdk_blob_md_page); 1305 ctx->bs->md_start = ctx->super->md_start; 1306 ctx->bs->md_len = ctx->super->md_len; 1307 1308 /* Read the used pages mask */ 1309 ctx->mask = spdk_zmalloc(ctx->super->used_page_mask_len * sizeof(struct spdk_blob_md_page), 0x1000, 1310 NULL); 1311 if (!ctx->mask) { 1312 spdk_free(ctx->super); 1313 _spdk_bs_free(ctx->bs); 1314 free(ctx); 1315 spdk_bs_sequence_finish(seq, -ENOMEM); 1316 return; 1317 } 1318 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 1319 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 1320 spdk_bs_sequence_read(seq, ctx->mask, lba, lba_count, 1321 _spdk_bs_load_used_pages_cpl, ctx); 1322 } 1323 1324 void 1325 spdk_bs_load(struct spdk_bs_dev *dev, 1326 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 1327 { 1328 struct spdk_blob_store *bs; 1329 struct spdk_bs_cpl cpl; 1330 spdk_bs_sequence_t *seq; 1331 struct spdk_bs_load_ctx *ctx; 1332 struct spdk_bs_opts opts = {}; 1333 1334 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Loading blobstore from dev %p\n", dev); 1335 1336 spdk_bs_opts_init(&opts); 1337 1338 bs = _spdk_bs_alloc(dev, &opts); 1339 if (!bs) { 1340 cb_fn(cb_arg, NULL, -ENOMEM); 1341 return; 1342 } 1343 1344 ctx = calloc(1, sizeof(*ctx)); 1345 if (!ctx) { 1346 _spdk_bs_free(bs); 1347 cb_fn(cb_arg, NULL, -ENOMEM); 1348 return; 1349 } 1350 1351 ctx->bs = bs; 1352 1353 /* Allocate memory for the super block */ 1354 ctx->super = 
spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 1355 if (!ctx->super) { 1356 free(ctx); 1357 _spdk_bs_free(bs); 1358 return; 1359 } 1360 1361 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 1362 cpl.u.bs_handle.cb_fn = cb_fn; 1363 cpl.u.bs_handle.cb_arg = cb_arg; 1364 cpl.u.bs_handle.bs = bs; 1365 1366 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 1367 if (!seq) { 1368 spdk_free(ctx->super); 1369 free(ctx); 1370 _spdk_bs_free(bs); 1371 cb_fn(cb_arg, NULL, -ENOMEM); 1372 return; 1373 } 1374 1375 /* Read the super block */ 1376 spdk_bs_sequence_read(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 1377 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 1378 _spdk_bs_load_super_cpl, ctx); 1379 } 1380 1381 /* END spdk_bs_load */ 1382 1383 /* START spdk_bs_init */ 1384 1385 struct spdk_bs_init_ctx { 1386 struct spdk_blob_store *bs; 1387 struct spdk_bs_super_block *super; 1388 }; 1389 1390 static void 1391 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1392 { 1393 struct spdk_bs_init_ctx *ctx = cb_arg; 1394 1395 spdk_free(ctx->super); 1396 free(ctx); 1397 1398 spdk_bs_sequence_finish(seq, bserrno); 1399 } 1400 1401 static void 1402 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1403 { 1404 struct spdk_bs_init_ctx *ctx = cb_arg; 1405 1406 /* Write super block */ 1407 spdk_bs_sequence_write(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0), 1408 _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 1409 _spdk_bs_init_persist_super_cpl, ctx); 1410 } 1411 1412 void 1413 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o, 1414 spdk_bs_op_with_handle_complete cb_fn, void *cb_arg) 1415 { 1416 struct spdk_bs_init_ctx *ctx; 1417 struct spdk_blob_store *bs; 1418 struct spdk_bs_cpl cpl; 1419 spdk_bs_sequence_t *seq; 1420 uint64_t num_md_pages; 1421 uint32_t i; 1422 struct spdk_bs_opts opts = {}; 1423 int rc; 1424 1425 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Initializing blobstore on dev %p\n", dev); 1426 1427 if (o) { 1428 opts = *o; 1429 } else { 1430 spdk_bs_opts_init(&opts); 1431 } 1432 1433 bs = _spdk_bs_alloc(dev, &opts); 1434 if (!bs) { 1435 cb_fn(cb_arg, NULL, -ENOMEM); 1436 return; 1437 } 1438 1439 if (opts.num_md_pages == UINT32_MAX) { 1440 /* By default, allocate 1 page per cluster. 1441 * Technically, this over-allocates metadata 1442 * because more metadata will reduce the number 1443 * of usable clusters. This can be addressed with 1444 * more complex math in the future. 1445 */ 1446 bs->md_len = bs->total_clusters; 1447 } else { 1448 bs->md_len = opts.num_md_pages; 1449 } 1450 1451 rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len); 1452 if (rc < 0) { 1453 _spdk_bs_free(bs); 1454 cb_fn(cb_arg, NULL, -ENOMEM); 1455 return; 1456 } 1457 1458 ctx = calloc(1, sizeof(*ctx)); 1459 if (!ctx) { 1460 _spdk_bs_free(bs); 1461 cb_fn(cb_arg, NULL, -ENOMEM); 1462 return; 1463 } 1464 1465 ctx->bs = bs; 1466 1467 /* Allocate memory for the super block */ 1468 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 1469 if (!ctx->super) { 1470 free(ctx); 1471 _spdk_bs_free(bs); 1472 return; 1473 } 1474 memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG, 1475 sizeof(ctx->super->signature)); 1476 ctx->super->version = SPDK_BS_VERSION; 1477 ctx->super->length = sizeof(*ctx->super); 1478 ctx->super->super_blob = bs->super_blob; 1479 ctx->super->clean = 0; 1480 ctx->super->cluster_size = bs->cluster_sz; 1481 1482 /* Calculate how many pages the metadata consumes at the front 1483 * of the disk. 
1484 */ 1485 1486 /* The super block uses 1 page */ 1487 num_md_pages = 1; 1488 1489 /* The used_md_pages mask requires 1 bit per metadata page, rounded 1490 * up to the nearest page, plus a header. 1491 */ 1492 ctx->super->used_page_mask_start = num_md_pages; 1493 ctx->super->used_page_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) + 1494 divide_round_up(bs->md_len, 8), 1495 sizeof(struct spdk_blob_md_page)); 1496 num_md_pages += ctx->super->used_page_mask_len; 1497 1498 /* The used_clusters mask requires 1 bit per cluster, rounded 1499 * up to the nearest page, plus a header. 1500 */ 1501 ctx->super->used_cluster_mask_start = num_md_pages; 1502 ctx->super->used_cluster_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) + 1503 divide_round_up(bs->total_clusters, 8), 1504 sizeof(struct spdk_blob_md_page)); 1505 num_md_pages += ctx->super->used_cluster_mask_len; 1506 1507 /* The metadata region size was chosen above */ 1508 ctx->super->md_start = bs->md_start = num_md_pages; 1509 ctx->super->md_len = bs->md_len; 1510 num_md_pages += bs->md_len; 1511 1512 /* Claim all of the clusters used by the metadata */ 1513 for (i = 0; i < divide_round_up(num_md_pages, bs->pages_per_cluster); i++) { 1514 _spdk_bs_claim_cluster(bs, i); 1515 } 1516 1517 cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE; 1518 cpl.u.bs_handle.cb_fn = cb_fn; 1519 cpl.u.bs_handle.cb_arg = cb_arg; 1520 cpl.u.bs_handle.bs = bs; 1521 1522 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 1523 if (!seq) { 1524 spdk_free(ctx->super); 1525 free(ctx); 1526 _spdk_bs_free(bs); 1527 cb_fn(cb_arg, NULL, -ENOMEM); 1528 return; 1529 } 1530 1531 /* TRIM the entire device */ 1532 spdk_bs_sequence_unmap(seq, 0, bs->dev->blockcnt, _spdk_bs_init_trim_cpl, ctx); 1533 } 1534 1535 /* END spdk_bs_init */ 1536 1537 /* START spdk_bs_unload */ 1538 1539 struct spdk_bs_unload_ctx { 1540 struct spdk_blob_store *bs; 1541 struct spdk_bs_super_block *super; 1542 1543 struct spdk_bs_md_mask *mask; 1544 }; 1545 1546 static void 1547 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1548 { 1549 struct spdk_bs_unload_ctx *ctx = cb_arg; 1550 1551 spdk_free(ctx->super); 1552 1553 spdk_bs_sequence_finish(seq, bserrno); 1554 1555 _spdk_bs_free(ctx->bs); 1556 free(ctx); 1557 } 1558 1559 static void 1560 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1561 { 1562 struct spdk_bs_unload_ctx *ctx = cb_arg; 1563 1564 spdk_free(ctx->mask); 1565 1566 /* Update the values in the super block */ 1567 ctx->super->super_blob = ctx->bs->super_blob; 1568 ctx->super->clean = 1; 1569 1570 spdk_bs_sequence_write(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0), 1571 _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)), 1572 _spdk_bs_unload_write_super_cpl, ctx); 1573 } 1574 1575 static void 1576 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1577 { 1578 struct spdk_bs_unload_ctx *ctx = cb_arg; 1579 uint32_t i; 1580 uint64_t lba, lba_count; 1581 1582 spdk_free(ctx->mask); 1583 1584 /* Write out the used clusters mask */ 1585 ctx->mask = spdk_zmalloc(ctx->super->used_cluster_mask_len * sizeof(struct spdk_blob_md_page), 1586 0x1000, NULL); 1587 if (!ctx->mask) { 1588 spdk_free(ctx->super); 1589 free(ctx); 1590 spdk_bs_sequence_finish(seq, -ENOMEM); 1591 return; 1592 } 1593 1594 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS; 1595 ctx->mask->length = ctx->bs->total_clusters; 1596 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters)); 
1597 1598 i = 0; 1599 while (true) { 1600 i = spdk_bit_array_find_first_set(ctx->bs->used_clusters, i); 1601 if (i > ctx->mask->length) { 1602 break; 1603 } 1604 ctx->mask->mask[i / 8] |= 1U << (i % 8); 1605 i++; 1606 } 1607 1608 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start); 1609 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len); 1610 spdk_bs_sequence_write(seq, ctx->mask, lba, lba_count, 1611 _spdk_bs_unload_write_used_clusters_cpl, ctx); 1612 } 1613 1614 static void 1615 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1616 { 1617 struct spdk_bs_unload_ctx *ctx = cb_arg; 1618 uint32_t i; 1619 uint64_t lba, lba_count; 1620 1621 /* Write out the used page mask */ 1622 ctx->mask = spdk_zmalloc(ctx->super->used_page_mask_len * sizeof(struct spdk_blob_md_page), 1623 0x1000, NULL); 1624 if (!ctx->mask) { 1625 spdk_free(ctx->super); 1626 free(ctx); 1627 spdk_bs_sequence_finish(seq, -ENOMEM); 1628 return; 1629 } 1630 1631 ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES; 1632 ctx->mask->length = ctx->super->md_len; 1633 assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages)); 1634 1635 i = 0; 1636 while (true) { 1637 i = spdk_bit_array_find_first_set(ctx->bs->used_md_pages, i); 1638 if (i > ctx->mask->length) { 1639 break; 1640 } 1641 ctx->mask->mask[i / 8] |= 1U << (i % 8); 1642 i++; 1643 } 1644 1645 lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start); 1646 lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len); 1647 spdk_bs_sequence_write(seq, ctx->mask, lba, lba_count, 1648 _spdk_bs_unload_write_used_pages_cpl, ctx); 1649 } 1650 1651 void 1652 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg) 1653 { 1654 struct spdk_bs_cpl cpl; 1655 spdk_bs_sequence_t *seq; 1656 struct spdk_bs_unload_ctx *ctx; 1657 1658 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Syncing blobstore\n"); 1659 1660 ctx = calloc(1, sizeof(*ctx)); 1661 if (!ctx) { 1662 cb_fn(cb_arg, -ENOMEM); 1663 return; 1664 } 1665 1666 ctx->bs = bs; 1667 1668 ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL); 1669 if (!ctx->super) { 1670 free(ctx); 1671 cb_fn(cb_arg, -ENOMEM); 1672 return; 1673 } 1674 1675 cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC; 1676 cpl.u.bs_basic.cb_fn = cb_fn; 1677 cpl.u.bs_basic.cb_arg = cb_arg; 1678 1679 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 1680 if (!seq) { 1681 spdk_free(ctx->super); 1682 free(ctx); 1683 cb_fn(cb_arg, -ENOMEM); 1684 return; 1685 } 1686 1687 assert(TAILQ_EMPTY(&bs->blobs)); 1688 1689 /* Read super block */ 1690 spdk_bs_sequence_read(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0), 1691 _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)), 1692 _spdk_bs_unload_read_super_cpl, ctx); 1693 } 1694 1695 /* END spdk_bs_unload */ 1696 1697 void 1698 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid, 1699 spdk_bs_op_complete cb_fn, void *cb_arg) 1700 { 1701 bs->super_blob = blobid; 1702 cb_fn(cb_arg, 0); 1703 } 1704 1705 void 1706 spdk_bs_get_super(struct spdk_blob_store *bs, 1707 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 1708 { 1709 if (bs->super_blob == SPDK_BLOBID_INVALID) { 1710 cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT); 1711 } else { 1712 cb_fn(cb_arg, bs->super_blob, 0); 1713 } 1714 } 1715 1716 uint64_t 1717 spdk_bs_get_cluster_size(struct spdk_blob_store *bs) 1718 { 1719 return bs->cluster_sz; 1720 } 1721 1722 uint64_t 1723 spdk_bs_get_page_size(struct spdk_blob_store *bs) 1724 { 1725 return 
sizeof(struct spdk_blob_md_page); 1726 } 1727 1728 uint64_t 1729 spdk_bs_free_cluster_count(struct spdk_blob_store *bs) 1730 { 1731 return bs->num_free_clusters; 1732 } 1733 1734 int spdk_bs_register_md_thread(struct spdk_blob_store *bs) 1735 { 1736 bs->md_channel = spdk_get_io_channel(bs, SPDK_IO_PRIORITY_DEFAULT, true, 1737 (void *)&bs->max_md_ops); 1738 1739 return 0; 1740 } 1741 1742 int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs) 1743 { 1744 spdk_put_io_channel(bs->md_channel); 1745 1746 return 0; 1747 } 1748 1749 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob) 1750 { 1751 assert(blob != NULL); 1752 1753 return blob->id; 1754 } 1755 1756 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob) 1757 { 1758 assert(blob != NULL); 1759 1760 return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters); 1761 } 1762 1763 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob) 1764 { 1765 assert(blob != NULL); 1766 1767 return blob->active.num_clusters; 1768 } 1769 1770 /* START spdk_bs_md_create_blob */ 1771 1772 static void 1773 _spdk_bs_md_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1774 { 1775 struct spdk_blob *blob = cb_arg; 1776 1777 _spdk_blob_free(blob); 1778 1779 spdk_bs_sequence_finish(seq, bserrno); 1780 } 1781 1782 void spdk_bs_md_create_blob(struct spdk_blob_store *bs, 1783 spdk_blob_op_with_id_complete cb_fn, void *cb_arg) 1784 { 1785 struct spdk_blob *blob; 1786 uint32_t page_idx; 1787 struct spdk_bs_cpl cpl; 1788 spdk_bs_sequence_t *seq; 1789 spdk_blob_id id; 1790 1791 page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 1792 if (page_idx >= spdk_bit_array_capacity(bs->used_md_pages)) { 1793 cb_fn(cb_arg, 0, -ENOMEM); 1794 return; 1795 } 1796 spdk_bit_array_set(bs->used_md_pages, page_idx); 1797 1798 /* The blob id is a 64 bit number. The lower 32 bits are the page_idx. The upper 1799 * 32 bits are not currently used. Stick a 1 there just to catch bugs where the 1800 * code assumes blob id == page_idx. 
1801 */ 1802 id = (1ULL << 32) | page_idx; 1803 1804 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx); 1805 1806 blob = _spdk_blob_alloc(bs, id); 1807 if (!blob) { 1808 cb_fn(cb_arg, 0, -ENOMEM); 1809 return; 1810 } 1811 1812 cpl.type = SPDK_BS_CPL_TYPE_BLOBID; 1813 cpl.u.blobid.cb_fn = cb_fn; 1814 cpl.u.blobid.cb_arg = cb_arg; 1815 cpl.u.blobid.blobid = blob->id; 1816 1817 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 1818 if (!seq) { 1819 _spdk_blob_free(blob); 1820 cb_fn(cb_arg, 0, -ENOMEM); 1821 return; 1822 } 1823 1824 _spdk_blob_persist(seq, blob, _spdk_bs_md_create_blob_cpl, blob); 1825 } 1826 1827 /* END spdk_bs_md_create_blob */ 1828 1829 /* START spdk_bs_md_resize_blob */ 1830 int 1831 spdk_bs_md_resize_blob(struct spdk_blob *blob, uint64_t sz) 1832 { 1833 int rc; 1834 1835 assert(blob != NULL); 1836 1837 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz); 1838 1839 if (sz == blob->active.num_clusters) { 1840 return 0; 1841 } 1842 1843 rc = _spdk_resize_blob(blob, sz); 1844 if (rc < 0) { 1845 return rc; 1846 } 1847 1848 return 0; 1849 } 1850 1851 /* END spdk_bs_md_resize_blob */ 1852 1853 1854 /* START spdk_bs_md_delete_blob */ 1855 1856 static void 1857 _spdk_bs_md_delete_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1858 { 1859 struct spdk_blob *blob = cb_arg; 1860 1861 _spdk_blob_free(blob); 1862 1863 spdk_bs_sequence_finish(seq, bserrno); 1864 } 1865 1866 static void 1867 _spdk_bs_md_delete_open_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1868 { 1869 struct spdk_blob *blob = cb_arg; 1870 1871 blob->state = SPDK_BLOB_STATE_DIRTY; 1872 blob->active.num_pages = 0; 1873 _spdk_resize_blob(blob, 0); 1874 1875 _spdk_blob_persist(seq, blob, _spdk_bs_md_delete_blob_cpl, blob); 1876 } 1877 1878 void 1879 spdk_bs_md_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 1880 spdk_blob_op_complete cb_fn, void *cb_arg) 1881 { 1882 struct spdk_blob *blob; 1883 struct spdk_bs_cpl cpl; 1884 spdk_bs_sequence_t *seq; 1885 1886 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Deleting blob %lu\n", blobid); 1887 1888 blob = _spdk_blob_lookup(bs, blobid); 1889 if (blob) { 1890 assert(blob->open_ref > 0); 1891 cb_fn(cb_arg, -EINVAL); 1892 return; 1893 } 1894 1895 blob = _spdk_blob_alloc(bs, blobid); 1896 if (!blob) { 1897 cb_fn(cb_arg, -ENOMEM); 1898 return; 1899 } 1900 1901 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 1902 cpl.u.blob_basic.cb_fn = cb_fn; 1903 cpl.u.blob_basic.cb_arg = cb_arg; 1904 1905 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 1906 if (!seq) { 1907 _spdk_blob_free(blob); 1908 cb_fn(cb_arg, -ENOMEM); 1909 return; 1910 } 1911 1912 _spdk_blob_load(seq, blob, _spdk_bs_md_delete_open_cpl, blob); 1913 } 1914 1915 /* END spdk_bs_md_delete_blob */ 1916 1917 /* START spdk_bs_md_open_blob */ 1918 1919 static void 1920 _spdk_bs_md_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1921 { 1922 struct spdk_blob *blob = cb_arg; 1923 1924 blob->open_ref++; 1925 1926 TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link); 1927 1928 spdk_bs_sequence_finish(seq, bserrno); 1929 } 1930 1931 void spdk_bs_md_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid, 1932 spdk_blob_op_with_handle_complete cb_fn, void *cb_arg) 1933 { 1934 struct spdk_blob *blob; 1935 struct spdk_bs_cpl cpl; 1936 spdk_bs_sequence_t *seq; 1937 uint32_t page_num; 1938 1939 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Opening blob %lu\n", blobid); 1940 1941 blob = _spdk_blob_lookup(bs, blobid); 1942 if (blob) { 1943 
blob->open_ref++; 1944 cb_fn(cb_arg, blob, 0); 1945 return; 1946 } 1947 1948 page_num = _spdk_bs_blobid_to_page(blobid); 1949 if (spdk_bit_array_get(bs->used_md_pages, page_num) == false) { 1950 /* Invalid blobid */ 1951 cb_fn(cb_arg, NULL, -ENOENT); 1952 return; 1953 } 1954 1955 blob = _spdk_blob_alloc(bs, blobid); 1956 if (!blob) { 1957 cb_fn(cb_arg, NULL, -ENOMEM); 1958 return; 1959 } 1960 1961 cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE; 1962 cpl.u.blob_handle.cb_fn = cb_fn; 1963 cpl.u.blob_handle.cb_arg = cb_arg; 1964 cpl.u.blob_handle.blob = blob; 1965 1966 seq = spdk_bs_sequence_start(bs->md_channel, &cpl); 1967 if (!seq) { 1968 _spdk_blob_free(blob); 1969 cb_fn(cb_arg, NULL, -ENOMEM); 1970 return; 1971 } 1972 1973 _spdk_blob_load(seq, blob, _spdk_bs_md_open_blob_cpl, blob); 1974 } 1975 1976 /* START spdk_bs_md_sync_blob */ 1977 static void 1978 _spdk_blob_sync_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 1979 { 1980 spdk_bs_sequence_finish(seq, bserrno); 1981 } 1982 1983 void spdk_bs_md_sync_blob(struct spdk_blob *blob, 1984 spdk_blob_op_complete cb_fn, void *cb_arg) 1985 { 1986 struct spdk_bs_cpl cpl; 1987 spdk_bs_sequence_t *seq; 1988 1989 assert(blob != NULL); 1990 1991 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Syncing blob %lu\n", blob->id); 1992 1993 assert(blob->state != SPDK_BLOB_STATE_LOADING && 1994 blob->state != SPDK_BLOB_STATE_SYNCING); 1995 1996 if (blob->state == SPDK_BLOB_STATE_CLEAN) { 1997 cb_fn(cb_arg, 0); 1998 return; 1999 } 2000 2001 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2002 cpl.u.blob_basic.cb_fn = cb_fn; 2003 cpl.u.blob_basic.cb_arg = cb_arg; 2004 2005 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 2006 if (!seq) { 2007 cb_fn(cb_arg, -ENOMEM); 2008 return; 2009 } 2010 2011 _spdk_blob_persist(seq, blob, _spdk_blob_sync_cpl, blob); 2012 } 2013 2014 /* END spdk_bs_md_sync_blob */ 2015 2016 /* START spdk_bs_md_close_blob */ 2017 2018 static void 2019 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno) 2020 { 2021 struct spdk_blob **blob = cb_arg; 2022 2023 if ((*blob)->open_ref == 0) { 2024 TAILQ_REMOVE(&(*blob)->bs->blobs, (*blob), link); 2025 _spdk_blob_free((*blob)); 2026 } 2027 2028 *blob = NULL; 2029 2030 spdk_bs_sequence_finish(seq, bserrno); 2031 } 2032 2033 void spdk_bs_md_close_blob(struct spdk_blob **b, 2034 spdk_blob_op_complete cb_fn, void *cb_arg) 2035 { 2036 struct spdk_bs_cpl cpl; 2037 struct spdk_blob *blob; 2038 spdk_bs_sequence_t *seq; 2039 2040 assert(b != NULL); 2041 blob = *b; 2042 assert(blob != NULL); 2043 2044 SPDK_TRACELOG(SPDK_TRACE_BLOB, "Closing blob %lu\n", blob->id); 2045 2046 assert(blob->state != SPDK_BLOB_STATE_LOADING && 2047 blob->state != SPDK_BLOB_STATE_SYNCING); 2048 2049 if (blob->open_ref == 0) { 2050 cb_fn(cb_arg, -EBADF); 2051 return; 2052 } 2053 2054 blob->open_ref--; 2055 2056 cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC; 2057 cpl.u.blob_basic.cb_fn = cb_fn; 2058 cpl.u.blob_basic.cb_arg = cb_arg; 2059 2060 seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl); 2061 if (!seq) { 2062 cb_fn(cb_arg, -ENOMEM); 2063 return; 2064 } 2065 2066 if (blob->state == SPDK_BLOB_STATE_CLEAN) { 2067 _spdk_blob_close_cpl(seq, b, 0); 2068 return; 2069 } 2070 2071 /* Sync metadata */ 2072 _spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, b); 2073 } 2074 2075 /* END spdk_bs_md_close_blob */ 2076 2077 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs, 2078 uint32_t priority, uint32_t max_ops) 2079 { 2080 return spdk_get_io_channel(bs, priority, true, (void *)&max_ops); 2081 } 2082 2083 
void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
{
	spdk_put_io_channel(channel);
}

void spdk_bs_io_flush_channel(struct spdk_io_channel *channel,
			      spdk_blob_op_complete cb_fn, void *cb_arg)
{
	/* Flush is synchronous right now */
	cb_fn(cb_arg, 0);
}

void spdk_bs_io_write_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
			   void *payload, uint64_t offset, uint64_t length,
			   spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_rw(blob, channel, payload, offset, length, cb_fn, cb_arg, false);
}

void spdk_bs_io_read_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
			  void *payload, uint64_t offset, uint64_t length,
			  spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_rw(blob, channel, payload, offset, length, cb_fn, cb_arg, true);
}

struct spdk_bs_iter_ctx {
	int64_t page_num;
	struct spdk_blob_store *bs;

	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
};

/*
 * Blob iteration walks the used_md_pages bit array; the id of each blob is
 * reconstructed from its first metadata page as (1ULL << 32) | page_num.
 */
static void
_spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	spdk_blob_id id;

	if (bserrno == 0) {
		ctx->cb_fn(ctx->cb_arg, blob, bserrno);
		free(ctx);
		return;
	}

	ctx->page_num++;
	ctx->page_num = spdk_bit_array_find_first_set(bs->used_md_pages, ctx->page_num);
	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_md_pages)) {
		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
		free(ctx);
		return;
	}

	id = (1ULL << 32) | ctx->page_num;

	blob = _spdk_blob_lookup(bs, id);
	if (blob) {
		blob->open_ref++;
		ctx->cb_fn(ctx->cb_arg, blob, 0);
		free(ctx);
		return;
	}

	spdk_bs_md_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
}

void
spdk_bs_md_iter_first(struct spdk_blob_store *bs,
		      spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = -1;
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	_spdk_bs_iter_cpl(ctx, NULL, -1);
}

static void
_spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;

	_spdk_bs_iter_cpl(ctx, NULL, -1);
}

void
spdk_bs_md_iter_next(struct spdk_blob_store *bs, struct spdk_blob **b,
		     spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;
	struct spdk_blob *blob;

	assert(b != NULL);
	blob = *b;
	assert(blob != NULL);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Close the existing blob */
	spdk_bs_md_close_blob(b, _spdk_bs_iter_close_cpl, ctx);
}
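/*
 * Usage sketch for the iterator above (illustrative only): walk every blob in
 * the blobstore, letting spdk_bs_md_iter_next() close the previous handle
 * before the next one is opened.  example_iter_cb and g_iter_blob are
 * hypothetical names introduced for this comment.
 *
 *	static struct spdk_blob *g_iter_blob;
 *
 *	static void
 *	example_iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *
 *		if (bserrno == -ENOENT) {
 *			// Every used metadata page has been visited.
 *			return;
 *		}
 *		if (bserrno != 0) {
 *			// Handle open/load failure.
 *			return;
 *		}
 *		g_iter_blob = blob;
 *		// ... inspect blob->id, xattrs, etc. ...
 *		spdk_bs_md_iter_next(bs, &g_iter_blob, example_iter_cb, bs);
 *	}
 *
 * The walk is started with spdk_bs_md_iter_first(bs, example_iter_cb, bs).
 */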
int
spdk_blob_md_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		       uint16_t value_len)
{
	struct spdk_xattr *xattr;
	void *new_value;

	assert(blob != NULL);

	assert(blob->state != SPDK_BLOB_STATE_LOADING &&
	       blob->state != SPDK_BLOB_STATE_SYNCING);

	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			/* Replace the value of an existing xattr.  Allocate the new
			 * buffer first so the old value is preserved on failure. */
			new_value = malloc(value_len);
			if (!new_value) {
				return -ENOMEM;
			}
			memcpy(new_value, value, value_len);
			free(xattr->value);
			xattr->value = new_value;
			xattr->value_len = value_len;

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}
	xattr->name = strdup(name);
	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->name || !xattr->value) {
		free(xattr->value);
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(&blob->xattrs, xattr, link);

	blob->state = SPDK_BLOB_STATE_DIRTY;

	return 0;
}

int
spdk_blob_md_remove_xattr(struct spdk_blob *blob, const char *name)
{
	struct spdk_xattr *xattr;

	assert(blob != NULL);

	assert(blob->state != SPDK_BLOB_STATE_LOADING &&
	       blob->state != SPDK_BLOB_STATE_SYNCING);

	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			TAILQ_REMOVE(&blob->xattrs, xattr, link);
			free(xattr->value);
			free(xattr->name);
			free(xattr);

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	return -ENOENT;
}

int
spdk_bs_md_get_xattr_value(struct spdk_blob *blob, const char *name,
			   const void **value, size_t *value_len)
{
	struct spdk_xattr *xattr;

	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			*value = xattr->value;
			*value_len = xattr->value_len;
			return 0;
		}
	}

	return -ENOENT;
}

struct spdk_xattr_names {
	uint32_t count;
	const char *names[0];
};

int
spdk_bs_md_get_xattr_names(struct spdk_blob *blob,
			   struct spdk_xattr_names **names)
{
	struct spdk_xattr *xattr;
	int count = 0;

	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
		count++;
	}

	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
	if (*names == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
		(*names)->names[(*names)->count++] = xattr->name;
	}

	return 0;
}

uint32_t
spdk_xattr_names_get_count(struct spdk_xattr_names *names)
{
	assert(names != NULL);

	return names->count;
}

const char *
spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
{
	if (index >= names->count) {
		return NULL;
	}

	return names->names[index];
}

void
spdk_xattr_names_free(struct spdk_xattr_names *names)
{
	free(names);
}

SPDK_LOG_REGISTER_TRACE_FLAG("blob", SPDK_TRACE_BLOB);
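/*
 * Usage sketch for the xattr helpers above (illustrative only; the "name" key
 * and the local variables are hypothetical).  Setting or removing an xattr
 * only marks the blob dirty in memory; a subsequent spdk_bs_md_sync_blob()
 * call is required to persist the change to the blobstore.
 *
 *	const void *value;
 *	size_t value_len;
 *	struct spdk_xattr_names *names;
 *	uint32_t i;
 *
 *	spdk_blob_md_set_xattr(blob, "name", "my_blob", sizeof("my_blob"));
 *
 *	if (spdk_bs_md_get_xattr_value(blob, "name", &value, &value_len) == 0) {
 *		// value points at the blob's in-memory copy; do not free it.
 *	}
 *
 *	if (spdk_bs_md_get_xattr_names(blob, &names) == 0) {
 *		for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *			printf("%s\n", spdk_xattr_names_get_name(names, i));
 *		}
 *		spdk_xattr_names_free(names);
 *	}
 */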