/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk/blob.h"
#include "spdk/string.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "../bs_dev_common.c"
#include "blob/blobstore.c"
#include "blob/request.c"
#include "blob/zeroes.c"
#include "blob/blob_bs_dev.c"

struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob, *g_blob2;
int g_bserrno, g_bserrno2;
struct spdk_xattr_names *g_names;
int g_done;
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;
bool g_use_extent_table = false;
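
/*
 * Copy of the version 1 on-disk super block layout. The reserved padding
 * keeps the structure at exactly one 4KiB metadata page, which the static
 * assert below verifies.
 */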
struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t	version;
	uint32_t	length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	uint8_t		reserved[4036];
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");

static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
		struct spdk_blob_opts *blob_opts);
static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
static void suite_blob_setup(void);
static void suite_blob_cleanup(void);

static void
_get_xattr_value(void *arg, const char *name,
		 const void **value, size_t *value_len)
{
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == &g_ctx);

	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
		if (!strcmp(name, g_xattr_names[i])) {
			*value_len = strlen(g_xattr_values[i]);
			*value = g_xattr_values[i];
			break;
		}
	}
}

static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value_len = 0;
	*value = NULL;
}

static int
_get_snapshots_count(struct spdk_blob_store *bs)
{
	struct spdk_blob_list *snapshot = NULL;
	int count = 0;

	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
		count += 1;
	}

	return count;
}

static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts);
	opts->use_extent_table = g_use_extent_table;
}

static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}

static void
blob_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	if (g_blob == NULL) {
		g_blob = blob;
		g_bserrno = bserrno;
	} else {
		g_blob2 = blob;
		g_bserrno2 = bserrno;
	}
}
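
/*
 * The tests below drive the asynchronous blobstore API from the unit test
 * framework's threads: issue a call with one of the completion callbacks
 * above, pump the reactor with poll_threads(), then check the globals the
 * callback filled in.  Roughly:
 *
 *	g_bserrno = -1;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
 *	poll_threads();
 *	CU_ASSERT(g_bserrno == 0);
 */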

static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}

static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open the blob again. It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the blob a second time, releasing the second reference. This
	 * should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open the blob again. It should succeed. This tests the case
	 * where the blob is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob twice in succession. This should return the same
	 * blob object.
	 */
	g_blob = NULL;
	g_blob2 = NULL;
	g_bserrno = -1;
	g_bserrno2 = -1;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bserrno2 == 0);
	CU_ASSERT(g_blob != NULL);
	CU_ASSERT(g_blob2 != NULL);
	CU_ASSERT(g_blob == g_blob2);

	g_bserrno = -1;
	spdk_blob_close(g_blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, g_blob);
}

static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}

/*
 * Create and delete one blob in a loop over and over again. This helps ensure
 * that the internal bit masks tracking used clusters and md_pages are being
 * tracked correctly.
 */
static void
blob_create_loop(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	uint32_t i, loop_count;

	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
				  spdk_bit_pool_capacity(bs->used_clusters));

	for (i = 0; i < loop_count; i++) {
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 1;
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
}

static void
blob_create_fail(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);

	/* NULL callback */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = NULL;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = &g_ctx;

	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	SPDK_CU_ASSERT_FATAL(g_blob == NULL);

	ut_bs_reload(&bs, NULL);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts);
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly. This makes sure that when we load again
	 * and try to recover a valid used_cluster map, the blobstore will
	 * ignore clusters with index 0 since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob *snapshot, *snapshot2;
	struct spdk_blob_bs_dev *blob_bs_dev;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts xattrs;
	spdk_blob_id blobid;
	spdk_blob_id snapshotid;
	spdk_blob_id snapshotid2;
	const void *value;
	size_t value_len;
	int rc;
	spdk_blob_id ids[2];
	size_t count;

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot from blob */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));

	/* Try to create snapshot from clone with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;
	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
	snapshotid2 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);

	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
	CU_ASSERT(snapshot->back_bs_dev == NULL);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);

	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot2);

	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshotid2);

	/* Try to create snapshot from snapshot */
	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);

	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
	ut_blob_close_and_delete(bs, blob);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
	ut_blob_close_and_delete(bs, snapshot2);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
}

static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that.
	 */
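	/* Poll only a limited number of times so the snapshot operation is
	 * caught while I/O is still frozen; a full poll_threads() would run
	 * it to completion.
	 */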
	poll_thread_times(0, 3);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk */
	CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0]*SPDK_BS_PAGE_SIZE],
			 SPDK_BS_PAGE_SIZE) == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create a clone from a blob that is not read-only */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}

static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflating a thin blob with no parent should make it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
	}

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_inflate(void)
{
	_blob_inflate(false);
	_blob_inflate(true);
}

static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	dev = init_dev();
	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
channel_ops(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
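
/*
 * Offsets and lengths passed to the blob I/O calls below are given in
 * pages, not bytes, so the tests compute pages_per_cluster from the
 * blobstore's cluster and page sizes to exercise cluster-boundary cases.
 */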
static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster. This allows
	 * us to make sure that the readv/writev code correctly accounts for I/O
	 * that crosses cluster boundaries. Start by asserting that the allocated
	 * clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	buf = calloc(1, 256 * 4096);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static uint32_t
bs_channel_get_req_count(struct spdk_io_channel *_channel)
{
	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
	struct spdk_bs_request_set *set;
	uint32_t count = 0;

	TAILQ_FOREACH(set, &channel->reqs, link) {
		count++;
	}

	return count;
}

static void
blob_rw_verify_iov_nomem(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_write[10 * 4096];
	struct iovec iov_write[3];
	uint32_t req_count;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
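	/* With calloc mocked to return NULL, the request set for the writev
	 * below cannot be allocated: the call should fail with -ENOMEM and the
	 * channel's request count should be unchanged (nothing leaked).
	 */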
	MOCK_SET(calloc, NULL);
	req_count = bs_channel_get_req_count(channel);
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
	MOCK_CLEAR(calloc);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[4096];
	uint8_t payload_write[4096];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev fails if the read-only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if the data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       uint8_t *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, read one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint8_t *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, write one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}
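
/*
 * The two tests below cross-check the normal I/O path, which may split a
 * request at cluster boundaries, against the page-at-a-time helpers above:
 * data written through one path must read back identically through the
 * other.
 */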

static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_operation_split_rw_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	struct iovec iov_read[2];
	struct iovec iov_write[2];

	uint64_t i, j;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	for (i = 0; i < pages_per_payload; i++) {
		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
			uint64_t *tmp;

			tmp = (uint64_t *)payload_pattern;
			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
			*tmp = i + 1;
		}
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 3;
	iov_read[1].iov_base = payload_read + cluster_size * 3;
	iov_read[1].iov_len = cluster_size * 2;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* The first iov fills the whole blob except the last page; the second iov
	 * writes the last page with a pattern.
	 */
*/ 1803 iov_write[0].iov_base = payload_pattern; 1804 iov_write[0].iov_len = payload_size - page_size; 1805 iov_write[1].iov_base = payload_pattern; 1806 iov_write[1].iov_len = page_size; 1807 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1808 poll_threads(); 1809 CU_ASSERT(g_bserrno == 0); 1810 1811 /* Read whole blob and check consistency */ 1812 memset(payload_read, 0xFF, payload_size); 1813 iov_read[0].iov_base = payload_read; 1814 iov_read[0].iov_len = cluster_size * 2; 1815 iov_read[1].iov_base = payload_read + cluster_size * 2; 1816 iov_read[1].iov_len = cluster_size * 3; 1817 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1818 poll_threads(); 1819 CU_ASSERT(g_bserrno == 0); 1820 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0); 1821 CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0); 1822 1823 /* First of iovs fills only first page and second of iovs writes whole blob except 1824 * first page with a pattern. */ 1825 iov_write[0].iov_base = payload_pattern; 1826 iov_write[0].iov_len = page_size; 1827 iov_write[1].iov_base = payload_pattern; 1828 iov_write[1].iov_len = payload_size - page_size; 1829 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1830 poll_threads(); 1831 CU_ASSERT(g_bserrno == 0); 1832 1833 /* Read whole blob and check consistency */ 1834 memset(payload_read, 0xFF, payload_size); 1835 iov_read[0].iov_base = payload_read; 1836 iov_read[0].iov_len = cluster_size * 4; 1837 iov_read[1].iov_base = payload_read + cluster_size * 4; 1838 iov_read[1].iov_len = cluster_size; 1839 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1840 poll_threads(); 1841 CU_ASSERT(g_bserrno == 0); 1842 CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0); 1843 CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0); 1844 1845 1846 /* Fill whole blob with a pattern (5 clusters) */ 1847 1848 /* 1. Read test. */ 1849 _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload, 1850 blob_op_complete, NULL); 1851 poll_threads(); 1852 CU_ASSERT(g_bserrno == 0); 1853 1854 memset(payload_read, 0xFF, payload_size); 1855 iov_read[0].iov_base = payload_read; 1856 iov_read[0].iov_len = cluster_size; 1857 iov_read[1].iov_base = payload_read + cluster_size; 1858 iov_read[1].iov_len = cluster_size * 4; 1859 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1860 poll_threads(); 1861 CU_ASSERT(g_bserrno == 0); 1862 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1863 1864 /* 2. Write test. 
*/ 1865 iov_write[0].iov_base = payload_read; 1866 iov_write[0].iov_len = cluster_size * 2; 1867 iov_write[1].iov_base = payload_read + cluster_size * 2; 1868 iov_write[1].iov_len = cluster_size * 3; 1869 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1870 poll_threads(); 1871 CU_ASSERT(g_bserrno == 0); 1872 1873 memset(payload_read, 0xFF, payload_size); 1874 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1875 poll_threads(); 1876 CU_ASSERT(g_bserrno == 0); 1877 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1878 1879 spdk_bs_free_io_channel(channel); 1880 poll_threads(); 1881 1882 g_blob = NULL; 1883 g_blobid = 0; 1884 1885 free(payload_read); 1886 free(payload_write); 1887 free(payload_pattern); 1888 1889 ut_blob_close_and_delete(bs, blob); 1890 } 1891 1892 static void 1893 blob_unmap(void) 1894 { 1895 struct spdk_blob_store *bs = g_bs; 1896 struct spdk_blob *blob; 1897 struct spdk_io_channel *channel; 1898 struct spdk_blob_opts opts; 1899 uint8_t payload[4096]; 1900 int i; 1901 1902 channel = spdk_bs_alloc_io_channel(bs); 1903 CU_ASSERT(channel != NULL); 1904 1905 ut_spdk_blob_opts_init(&opts); 1906 opts.num_clusters = 10; 1907 1908 blob = ut_blob_create_and_open(bs, &opts); 1909 1910 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1911 poll_threads(); 1912 CU_ASSERT(g_bserrno == 0); 1913 1914 memset(payload, 0, sizeof(payload)); 1915 payload[0] = 0xFF; 1916 1917 /* 1918 * Set first byte of every cluster to 0xFF. 1919 * First cluster on device is reserved so let's start from cluster number 1 1920 */ 1921 for (i = 1; i < 11; i++) { 1922 g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF; 1923 } 1924 1925 /* Confirm writes */ 1926 for (i = 0; i < 10; i++) { 1927 payload[0] = 0; 1928 spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1, 1929 blob_op_complete, NULL); 1930 poll_threads(); 1931 CU_ASSERT(g_bserrno == 0); 1932 CU_ASSERT(payload[0] == 0xFF); 1933 } 1934 1935 /* Mark some clusters as unallocated */ 1936 blob->active.clusters[1] = 0; 1937 blob->active.clusters[2] = 0; 1938 blob->active.clusters[3] = 0; 1939 blob->active.clusters[6] = 0; 1940 blob->active.clusters[8] = 0; 1941 1942 /* Unmap clusters by resizing to 0 */ 1943 spdk_blob_resize(blob, 0, blob_op_complete, NULL); 1944 poll_threads(); 1945 CU_ASSERT(g_bserrno == 0); 1946 1947 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1948 poll_threads(); 1949 CU_ASSERT(g_bserrno == 0); 1950 1951 /* Confirm that only 'allocated' clusters were unmapped */ 1952 for (i = 1; i < 11; i++) { 1953 switch (i) { 1954 case 2: 1955 case 3: 1956 case 4: 1957 case 7: 1958 case 9: 1959 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF); 1960 break; 1961 default: 1962 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0); 1963 break; 1964 } 1965 } 1966 1967 spdk_bs_free_io_channel(channel); 1968 poll_threads(); 1969 1970 ut_blob_close_and_delete(bs, blob); 1971 } 1972 1973 static void 1974 blob_iter(void) 1975 { 1976 struct spdk_blob_store *bs = g_bs; 1977 struct spdk_blob *blob; 1978 spdk_blob_id blobid; 1979 struct spdk_blob_opts blob_opts; 1980 1981 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1982 poll_threads(); 1983 CU_ASSERT(g_blob == NULL); 1984 CU_ASSERT(g_bserrno == -ENOENT); 1985 1986 ut_spdk_blob_opts_init(&blob_opts); 1987 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 1988 poll_threads(); 1989 CU_ASSERT(g_bserrno == 0); 
1990 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1991 blobid = g_blobid; 1992 1993 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1994 poll_threads(); 1995 CU_ASSERT(g_blob != NULL); 1996 CU_ASSERT(g_bserrno == 0); 1997 blob = g_blob; 1998 CU_ASSERT(spdk_blob_get_id(blob) == blobid); 1999 2000 spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL); 2001 poll_threads(); 2002 CU_ASSERT(g_blob == NULL); 2003 CU_ASSERT(g_bserrno == -ENOENT); 2004 } 2005 2006 static void 2007 blob_xattr(void) 2008 { 2009 struct spdk_blob_store *bs = g_bs; 2010 struct spdk_blob *blob = g_blob; 2011 spdk_blob_id blobid = spdk_blob_get_id(blob); 2012 uint64_t length; 2013 int rc; 2014 const char *name1, *name2; 2015 const void *value; 2016 size_t value_len; 2017 struct spdk_xattr_names *names; 2018 2019 /* Test that set_xattr fails if md_ro flag is set. */ 2020 blob->md_ro = true; 2021 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2022 CU_ASSERT(rc == -EPERM); 2023 2024 blob->md_ro = false; 2025 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2026 CU_ASSERT(rc == 0); 2027 2028 length = 2345; 2029 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2030 CU_ASSERT(rc == 0); 2031 2032 /* Overwrite "length" xattr. */ 2033 length = 3456; 2034 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2035 CU_ASSERT(rc == 0); 2036 2037 /* get_xattr should still work even if md_ro flag is set. */ 2038 value = NULL; 2039 blob->md_ro = true; 2040 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2041 CU_ASSERT(rc == 0); 2042 SPDK_CU_ASSERT_FATAL(value != NULL); 2043 CU_ASSERT(*(uint64_t *)value == length); 2044 CU_ASSERT(value_len == 8); 2045 blob->md_ro = false; 2046 2047 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2048 CU_ASSERT(rc == -ENOENT); 2049 2050 names = NULL; 2051 rc = spdk_blob_get_xattr_names(blob, &names); 2052 CU_ASSERT(rc == 0); 2053 SPDK_CU_ASSERT_FATAL(names != NULL); 2054 CU_ASSERT(spdk_xattr_names_get_count(names) == 2); 2055 name1 = spdk_xattr_names_get_name(names, 0); 2056 SPDK_CU_ASSERT_FATAL(name1 != NULL); 2057 CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length")); 2058 name2 = spdk_xattr_names_get_name(names, 1); 2059 SPDK_CU_ASSERT_FATAL(name2 != NULL); 2060 CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length")); 2061 CU_ASSERT(strcmp(name1, name2)); 2062 spdk_xattr_names_free(names); 2063 2064 /* Confirm that remove_xattr fails if md_ro is set to true. 
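 * As with set_xattr above, metadata mutations (set_xattr, remove_xattr) are
 * rejected with -EPERM while md_ro is set, whereas read-only accessors such as
 * spdk_blob_get_xattr_value() keep working.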
*/ 2065 blob->md_ro = true; 2066 rc = spdk_blob_remove_xattr(blob, "name"); 2067 CU_ASSERT(rc == -EPERM); 2068 2069 blob->md_ro = false; 2070 rc = spdk_blob_remove_xattr(blob, "name"); 2071 CU_ASSERT(rc == 0); 2072 2073 rc = spdk_blob_remove_xattr(blob, "foobar"); 2074 CU_ASSERT(rc == -ENOENT); 2075 2076 /* Set internal xattr */ 2077 length = 7898; 2078 rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true); 2079 CU_ASSERT(rc == 0); 2080 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2081 CU_ASSERT(rc == 0); 2082 CU_ASSERT(*(uint64_t *)value == length); 2083 /* Try to get a public xattr with the same name */ 2084 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2085 CU_ASSERT(rc != 0); 2086 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false); 2087 CU_ASSERT(rc != 0); 2088 /* Check if SPDK_BLOB_INTERNAL_XATTR is set */ 2089 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 2090 SPDK_BLOB_INTERNAL_XATTR); 2091 2092 spdk_blob_close(blob, blob_op_complete, NULL); 2093 poll_threads(); 2094 2095 /* Check if xattrs are persisted */ 2096 ut_bs_reload(&bs, NULL); 2097 2098 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2099 poll_threads(); 2100 CU_ASSERT(g_bserrno == 0); 2101 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2102 blob = g_blob; 2103 2104 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2105 CU_ASSERT(rc == 0); 2106 CU_ASSERT(*(uint64_t *)value == length); 2107 2108 /* Try to get the internal xattr through the public call */ 2109 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2110 CU_ASSERT(rc != 0); 2111 2112 rc = blob_remove_xattr(blob, "internal", true); 2113 CU_ASSERT(rc == 0); 2114 2115 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0); 2116 } 2117 2118 static void 2119 blob_parse_md(void) 2120 { 2121 struct spdk_blob_store *bs = g_bs; 2122 struct spdk_blob *blob; 2123 int rc; 2124 uint32_t used_pages; 2125 size_t xattr_length; 2126 char *xattr; 2127 2128 used_pages = spdk_bit_array_count_set(bs->used_md_pages); 2129 blob = ut_blob_create_and_open(bs, NULL); 2130 2131 /* Create a large xattr to force more than 1 page of metadata. */ 2132 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 2133 strlen("large_xattr"); 2134 xattr = calloc(xattr_length, sizeof(char)); 2135 SPDK_CU_ASSERT_FATAL(xattr != NULL); 2136 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 2137 free(xattr); 2138 SPDK_CU_ASSERT_FATAL(rc == 0); 2139 2140 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2141 poll_threads(); 2142 2143 /* Delete the blob and verify that the number of used metadata pages returns to what it was before its creation.
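 * (The large xattr above was sized via SPDK_BS_MAX_DESC_SIZE to force more than
 * one metadata page, so this check verifies that those extra pages are released
 * when the blob is deleted.)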
*/ 2144 SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages)); 2145 ut_blob_close_and_delete(bs, blob); 2146 SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages)); 2147 } 2148 2149 static void 2150 bs_load(void) 2151 { 2152 struct spdk_blob_store *bs; 2153 struct spdk_bs_dev *dev; 2154 spdk_blob_id blobid; 2155 struct spdk_blob *blob; 2156 struct spdk_bs_super_block *super_block; 2157 uint64_t length; 2158 int rc; 2159 const void *value; 2160 size_t value_len; 2161 struct spdk_bs_opts opts; 2162 struct spdk_blob_opts blob_opts; 2163 2164 dev = init_dev(); 2165 spdk_bs_opts_init(&opts); 2166 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2167 2168 /* Initialize a new blob store */ 2169 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2170 poll_threads(); 2171 CU_ASSERT(g_bserrno == 0); 2172 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2173 bs = g_bs; 2174 2175 /* Try to open a blobid that does not exist */ 2176 spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL); 2177 poll_threads(); 2178 CU_ASSERT(g_bserrno == -ENOENT); 2179 CU_ASSERT(g_blob == NULL); 2180 2181 /* Create a blob */ 2182 blob = ut_blob_create_and_open(bs, NULL); 2183 blobid = spdk_blob_get_id(blob); 2184 2185 /* Try again to open valid blob but without the upper bit set */ 2186 spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL); 2187 poll_threads(); 2188 CU_ASSERT(g_bserrno == -ENOENT); 2189 CU_ASSERT(g_blob == NULL); 2190 2191 /* Set some xattrs */ 2192 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2193 CU_ASSERT(rc == 0); 2194 2195 length = 2345; 2196 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2197 CU_ASSERT(rc == 0); 2198 2199 /* Resize the blob */ 2200 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2201 poll_threads(); 2202 CU_ASSERT(g_bserrno == 0); 2203 2204 spdk_blob_close(blob, blob_op_complete, NULL); 2205 poll_threads(); 2206 CU_ASSERT(g_bserrno == 0); 2207 blob = NULL; 2208 g_blob = NULL; 2209 g_blobid = SPDK_BLOBID_INVALID; 2210 2211 /* Unload the blob store */ 2212 spdk_bs_unload(bs, bs_op_complete, NULL); 2213 poll_threads(); 2214 CU_ASSERT(g_bserrno == 0); 2215 g_bs = NULL; 2216 g_blob = NULL; 2217 g_blobid = 0; 2218 2219 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2220 CU_ASSERT(super_block->clean == 1); 2221 2222 /* Load should fail for device with an unsupported blocklen */ 2223 dev = init_dev(); 2224 dev->blocklen = SPDK_BS_PAGE_SIZE * 2; 2225 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2226 poll_threads(); 2227 CU_ASSERT(g_bserrno == -EINVAL); 2228 2229 /* Load should fail when max_md_ops is set to zero */ 2230 dev = init_dev(); 2231 spdk_bs_opts_init(&opts); 2232 opts.max_md_ops = 0; 2233 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2234 poll_threads(); 2235 CU_ASSERT(g_bserrno == -EINVAL); 2236 2237 /* Load should fail when max_channel_ops is set to zero */ 2238 dev = init_dev(); 2239 spdk_bs_opts_init(&opts); 2240 opts.max_channel_ops = 0; 2241 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2242 poll_threads(); 2243 CU_ASSERT(g_bserrno == -EINVAL); 2244 2245 /* Load an existing blob store */ 2246 dev = init_dev(); 2247 spdk_bs_opts_init(&opts); 2248 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2249 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2250 poll_threads(); 2251 CU_ASSERT(g_bserrno == 0); 2252 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2253 bs =
g_bs; 2254 2255 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2256 CU_ASSERT(super_block->clean == 1); 2257 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2258 2259 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2260 poll_threads(); 2261 CU_ASSERT(g_bserrno == 0); 2262 CU_ASSERT(g_blob != NULL); 2263 blob = g_blob; 2264 2265 /* Verify that blobstore is marked dirty after first metadata sync */ 2266 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2267 CU_ASSERT(super_block->clean == 1); 2268 2269 /* Get the xattrs */ 2270 value = NULL; 2271 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2272 CU_ASSERT(rc == 0); 2273 SPDK_CU_ASSERT_FATAL(value != NULL); 2274 CU_ASSERT(*(uint64_t *)value == length); 2275 CU_ASSERT(value_len == 8); 2276 2277 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2278 CU_ASSERT(rc == -ENOENT); 2279 2280 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 2281 2282 spdk_blob_close(blob, blob_op_complete, NULL); 2283 poll_threads(); 2284 CU_ASSERT(g_bserrno == 0); 2285 blob = NULL; 2286 g_blob = NULL; 2287 2288 spdk_bs_unload(bs, bs_op_complete, NULL); 2289 poll_threads(); 2290 CU_ASSERT(g_bserrno == 0); 2291 g_bs = NULL; 2292 2293 /* Load should fail: bdev size < saved size */ 2294 dev = init_dev(); 2295 dev->blockcnt /= 2; 2296 2297 spdk_bs_opts_init(&opts); 2298 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2299 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2300 poll_threads(); 2301 2302 CU_ASSERT(g_bserrno == -EILSEQ); 2303 2304 /* Load should succeed: bdev size > saved size */ 2305 dev = init_dev(); 2306 dev->blockcnt *= 4; 2307 2308 spdk_bs_opts_init(&opts); 2309 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2310 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2311 poll_threads(); 2312 CU_ASSERT(g_bserrno == 0); 2313 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2314 bs = g_bs; 2315 2316 CU_ASSERT(g_bserrno == 0); 2317 spdk_bs_unload(bs, bs_op_complete, NULL); 2318 poll_threads(); 2319 2320 2321 /* Test compatibility mode */ 2322 2323 dev = init_dev(); 2324 super_block->size = 0; 2325 super_block->crc = blob_md_page_calc_crc(super_block); 2326 2327 spdk_bs_opts_init(&opts); 2328 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2329 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2330 poll_threads(); 2331 CU_ASSERT(g_bserrno == 0); 2332 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2333 bs = g_bs; 2334 2335 /* Create a blob */ 2336 ut_spdk_blob_opts_init(&blob_opts); 2337 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2338 poll_threads(); 2339 CU_ASSERT(g_bserrno == 0); 2340 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2341 2342 /* Blobstore should update number of blocks in super_block */ 2343 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2344 CU_ASSERT(super_block->clean == 0); 2345 2346 spdk_bs_unload(bs, bs_op_complete, NULL); 2347 poll_threads(); 2348 CU_ASSERT(g_bserrno == 0); 2349 CU_ASSERT(super_block->clean == 1); 2350 g_bs = NULL; 2351 2352 } 2353 2354 static void 2355 bs_load_pending_removal(void) 2356 { 2357 struct spdk_blob_store *bs = g_bs; 2358 struct spdk_blob_opts opts; 2359 struct spdk_blob *blob, *snapshot; 2360 spdk_blob_id blobid, snapshotid; 2361 const void *value; 2362 size_t value_len; 2363 int rc; 2364 2365 /* Create blob */ 2366 ut_spdk_blob_opts_init(&opts); 2367 opts.num_clusters = 10; 2368 2369 blob = 
ut_blob_create_and_open(bs, &opts); 2370 blobid = spdk_blob_get_id(blob); 2371 2372 /* Create snapshot */ 2373 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 2374 poll_threads(); 2375 CU_ASSERT(g_bserrno == 0); 2376 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2377 snapshotid = g_blobid; 2378 2379 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2380 poll_threads(); 2381 CU_ASSERT(g_bserrno == 0); 2382 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2383 snapshot = g_blob; 2384 2385 /* Set SNAPSHOT_PENDING_REMOVAL xattr */ 2386 snapshot->md_ro = false; 2387 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2388 CU_ASSERT(rc == 0); 2389 snapshot->md_ro = true; 2390 2391 spdk_blob_close(snapshot, blob_op_complete, NULL); 2392 poll_threads(); 2393 CU_ASSERT(g_bserrno == 0); 2394 2395 spdk_blob_close(blob, blob_op_complete, NULL); 2396 poll_threads(); 2397 CU_ASSERT(g_bserrno == 0); 2398 2399 /* Reload blobstore */ 2400 ut_bs_reload(&bs, NULL); 2401 2402 /* Snapshot should not be removed as blob is still pointing to it */ 2403 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2404 poll_threads(); 2405 CU_ASSERT(g_bserrno == 0); 2406 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2407 snapshot = g_blob; 2408 2409 /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ 2410 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 2411 CU_ASSERT(rc != 0); 2412 2413 /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ 2414 snapshot->md_ro = false; 2415 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2416 CU_ASSERT(rc == 0); 2417 snapshot->md_ro = true; 2418 2419 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2420 poll_threads(); 2421 CU_ASSERT(g_bserrno == 0); 2422 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2423 blob = g_blob; 2424 2425 /* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ 2426 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 2427 2428 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2429 poll_threads(); 2430 CU_ASSERT(g_bserrno == 0); 2431 2432 spdk_blob_close(snapshot, blob_op_complete, NULL); 2433 poll_threads(); 2434 CU_ASSERT(g_bserrno == 0); 2435 2436 spdk_blob_close(blob, blob_op_complete, NULL); 2437 poll_threads(); 2438 CU_ASSERT(g_bserrno == 0); 2439 2440 /* Reload blobstore */ 2441 ut_bs_reload(&bs, NULL); 2442 2443 /* Snapshot should be removed as blob is not pointing to it anymore */ 2444 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2445 poll_threads(); 2446 CU_ASSERT(g_bserrno != 0); 2447 } 2448 2449 static void 2450 bs_load_custom_cluster_size(void) 2451 { 2452 struct spdk_blob_store *bs; 2453 struct spdk_bs_dev *dev; 2454 struct spdk_bs_super_block *super_block; 2455 struct spdk_bs_opts opts; 2456 uint32_t custom_cluster_size = 4194304; /* 4MiB */ 2457 uint32_t cluster_sz; 2458 uint64_t total_clusters; 2459 2460 dev = init_dev(); 2461 spdk_bs_opts_init(&opts); 2462 opts.cluster_sz = custom_cluster_size; 2463 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2464 2465 /* Initialize a new blob store */ 2466 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2467 poll_threads(); 2468 CU_ASSERT(g_bserrno == 0); 2469 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2470 bs = g_bs; 2471 cluster_sz = bs->cluster_sz; 2472 total_clusters = bs->total_clusters; 2473 2474 /* Unload the blob store */ 2475 spdk_bs_unload(bs, bs_op_complete, 
NULL); 2476 poll_threads(); 2477 CU_ASSERT(g_bserrno == 0); 2478 g_bs = NULL; 2479 g_blob = NULL; 2480 g_blobid = 0; 2481 2482 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2483 CU_ASSERT(super_block->clean == 1); 2484 2485 /* Load an existing blob store */ 2486 dev = init_dev(); 2487 spdk_bs_opts_init(&opts); 2488 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2489 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2490 poll_threads(); 2491 CU_ASSERT(g_bserrno == 0); 2492 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2493 bs = g_bs; 2494 /* Compare cluster size and number to one after initialization */ 2495 CU_ASSERT(cluster_sz == bs->cluster_sz); 2496 CU_ASSERT(total_clusters == bs->total_clusters); 2497 2498 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2499 CU_ASSERT(super_block->clean == 1); 2500 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2501 2502 spdk_bs_unload(bs, bs_op_complete, NULL); 2503 poll_threads(); 2504 CU_ASSERT(g_bserrno == 0); 2505 CU_ASSERT(super_block->clean == 1); 2506 g_bs = NULL; 2507 } 2508 2509 static void 2510 bs_type(void) 2511 { 2512 struct spdk_blob_store *bs; 2513 struct spdk_bs_dev *dev; 2514 struct spdk_bs_opts opts; 2515 2516 dev = init_dev(); 2517 spdk_bs_opts_init(&opts); 2518 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2519 2520 /* Initialize a new blob store */ 2521 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2522 poll_threads(); 2523 CU_ASSERT(g_bserrno == 0); 2524 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2525 bs = g_bs; 2526 2527 /* Unload the blob store */ 2528 spdk_bs_unload(bs, bs_op_complete, NULL); 2529 poll_threads(); 2530 CU_ASSERT(g_bserrno == 0); 2531 g_bs = NULL; 2532 g_blob = NULL; 2533 g_blobid = 0; 2534 2535 /* Load non existing blobstore type */ 2536 dev = init_dev(); 2537 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2538 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2539 poll_threads(); 2540 CU_ASSERT(g_bserrno != 0); 2541 2542 /* Load with empty blobstore type */ 2543 dev = init_dev(); 2544 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2545 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2546 poll_threads(); 2547 CU_ASSERT(g_bserrno == 0); 2548 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2549 bs = g_bs; 2550 2551 spdk_bs_unload(bs, bs_op_complete, NULL); 2552 poll_threads(); 2553 CU_ASSERT(g_bserrno == 0); 2554 g_bs = NULL; 2555 2556 /* Initialize a new blob store with empty bstype */ 2557 dev = init_dev(); 2558 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2559 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2560 poll_threads(); 2561 CU_ASSERT(g_bserrno == 0); 2562 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2563 bs = g_bs; 2564 2565 spdk_bs_unload(bs, bs_op_complete, NULL); 2566 poll_threads(); 2567 CU_ASSERT(g_bserrno == 0); 2568 g_bs = NULL; 2569 2570 /* Load non existing blobstore type */ 2571 dev = init_dev(); 2572 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2573 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2574 poll_threads(); 2575 CU_ASSERT(g_bserrno != 0); 2576 2577 /* Load with empty blobstore type */ 2578 dev = init_dev(); 2579 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2580 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2581 poll_threads(); 2582 CU_ASSERT(g_bserrno == 0); 2583 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2584 bs = g_bs; 2585 2586 spdk_bs_unload(bs, bs_op_complete, 
NULL); 2587 poll_threads(); 2588 CU_ASSERT(g_bserrno == 0); 2589 g_bs = NULL; 2590 } 2591 2592 static void 2593 bs_super_block(void) 2594 { 2595 struct spdk_blob_store *bs; 2596 struct spdk_bs_dev *dev; 2597 struct spdk_bs_super_block *super_block; 2598 struct spdk_bs_opts opts; 2599 struct spdk_bs_super_block_ver1 super_block_v1; 2600 2601 dev = init_dev(); 2602 spdk_bs_opts_init(&opts); 2603 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2604 2605 /* Initialize a new blob store */ 2606 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2607 poll_threads(); 2608 CU_ASSERT(g_bserrno == 0); 2609 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2610 bs = g_bs; 2611 2612 /* Unload the blob store */ 2613 spdk_bs_unload(bs, bs_op_complete, NULL); 2614 poll_threads(); 2615 CU_ASSERT(g_bserrno == 0); 2616 g_bs = NULL; 2617 g_blob = NULL; 2618 g_blobid = 0; 2619 2620 /* Load an existing blob store with version newer than supported */ 2621 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2622 super_block->version++; 2623 2624 dev = init_dev(); 2625 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2626 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2627 poll_threads(); 2628 CU_ASSERT(g_bserrno != 0); 2629 2630 /* Create a new blob store with super block version 1 */ 2631 dev = init_dev(); 2632 super_block_v1.version = 1; 2633 memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); 2634 super_block_v1.length = 0x1000; 2635 super_block_v1.clean = 1; 2636 super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; 2637 super_block_v1.cluster_size = 0x100000; 2638 super_block_v1.used_page_mask_start = 0x01; 2639 super_block_v1.used_page_mask_len = 0x01; 2640 super_block_v1.used_cluster_mask_start = 0x02; 2641 super_block_v1.used_cluster_mask_len = 0x01; 2642 super_block_v1.md_start = 0x03; 2643 super_block_v1.md_len = 0x40; 2644 memset(super_block_v1.reserved, 0, 4036); 2645 super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1); 2646 memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); 2647 2648 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2649 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2650 poll_threads(); 2651 CU_ASSERT(g_bserrno == 0); 2652 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2653 bs = g_bs; 2654 2655 spdk_bs_unload(bs, bs_op_complete, NULL); 2656 poll_threads(); 2657 CU_ASSERT(g_bserrno == 0); 2658 g_bs = NULL; 2659 } 2660 2661 /* 2662 * Create a blobstore and then unload it. 2663 */ 2664 static void 2665 bs_unload(void) 2666 { 2667 struct spdk_blob_store *bs = g_bs; 2668 struct spdk_blob *blob; 2669 2670 /* Create a blob and open it. */ 2671 blob = ut_blob_create_and_open(bs, NULL); 2672 2673 /* Try to unload blobstore, should fail with open blob */ 2674 g_bserrno = -1; 2675 spdk_bs_unload(bs, bs_op_complete, NULL); 2676 poll_threads(); 2677 CU_ASSERT(g_bserrno == -EBUSY); 2678 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2679 2680 /* Close the blob, then successfully unload blobstore */ 2681 g_bserrno = -1; 2682 spdk_blob_close(blob, blob_op_complete, NULL); 2683 poll_threads(); 2684 CU_ASSERT(g_bserrno == 0); 2685 } 2686 2687 /* 2688 * Create a blobstore with a cluster size different than the default, and ensure it is 2689 * persisted. 
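 * The configuration pattern exercised below is essentially (a sketch; cb_fn and
 * cb_arg stand in for the completion callback and context used throughout this file):
 *     spdk_bs_opts_init(&opts);
 *     opts.cluster_sz *= 2;                    (twice the default cluster size)
 *     spdk_bs_init(dev, &opts, cb_fn, cb_arg);
 * A cluster_sz of 0, one page, or less than one page is rejected below with
 * -EINVAL or -ENOMEM.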
2690 */ 2691 static void 2692 bs_cluster_sz(void) 2693 { 2694 struct spdk_blob_store *bs; 2695 struct spdk_bs_dev *dev; 2696 struct spdk_bs_opts opts; 2697 uint32_t cluster_sz; 2698 2699 /* Set cluster size to zero */ 2700 dev = init_dev(); 2701 spdk_bs_opts_init(&opts); 2702 opts.cluster_sz = 0; 2703 2704 /* Initialize a new blob store */ 2705 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2706 poll_threads(); 2707 CU_ASSERT(g_bserrno == -EINVAL); 2708 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2709 2710 /* 2711 * Set cluster size to blobstore page size, 2712 * to work it is required to be at least twice the blobstore page size. 2713 */ 2714 dev = init_dev(); 2715 spdk_bs_opts_init(&opts); 2716 opts.cluster_sz = SPDK_BS_PAGE_SIZE; 2717 2718 /* Initialize a new blob store */ 2719 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2720 poll_threads(); 2721 CU_ASSERT(g_bserrno == -ENOMEM); 2722 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2723 2724 /* 2725 * Set cluster size to lower than page size, 2726 * to work it is required to be at least twice the blobstore page size. 2727 */ 2728 dev = init_dev(); 2729 spdk_bs_opts_init(&opts); 2730 opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; 2731 2732 /* Initialize a new blob store */ 2733 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2734 poll_threads(); 2735 CU_ASSERT(g_bserrno == -EINVAL); 2736 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2737 2738 /* Set cluster size to twice the default */ 2739 dev = init_dev(); 2740 spdk_bs_opts_init(&opts); 2741 opts.cluster_sz *= 2; 2742 cluster_sz = opts.cluster_sz; 2743 2744 /* Initialize a new blob store */ 2745 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2746 poll_threads(); 2747 CU_ASSERT(g_bserrno == 0); 2748 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2749 bs = g_bs; 2750 2751 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2752 2753 ut_bs_reload(&bs, &opts); 2754 2755 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2756 2757 spdk_bs_unload(bs, bs_op_complete, NULL); 2758 poll_threads(); 2759 CU_ASSERT(g_bserrno == 0); 2760 g_bs = NULL; 2761 } 2762 2763 /* 2764 * Create a blobstore, reload it and ensure total usable cluster count 2765 * stays the same. 2766 */ 2767 static void 2768 bs_usable_clusters(void) 2769 { 2770 struct spdk_blob_store *bs = g_bs; 2771 struct spdk_blob *blob; 2772 uint32_t clusters; 2773 int i; 2774 2775 2776 clusters = spdk_bs_total_data_cluster_count(bs); 2777 2778 ut_bs_reload(&bs, NULL); 2779 2780 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2781 2782 /* Create and resize blobs to make sure that useable cluster count won't change */ 2783 for (i = 0; i < 4; i++) { 2784 g_bserrno = -1; 2785 g_blobid = SPDK_BLOBID_INVALID; 2786 blob = ut_blob_create_and_open(bs, NULL); 2787 2788 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2789 poll_threads(); 2790 CU_ASSERT(g_bserrno == 0); 2791 2792 g_bserrno = -1; 2793 spdk_blob_close(blob, blob_op_complete, NULL); 2794 poll_threads(); 2795 CU_ASSERT(g_bserrno == 0); 2796 2797 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2798 } 2799 2800 /* Reload the blob store to make sure that nothing changed */ 2801 ut_bs_reload(&bs, NULL); 2802 2803 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2804 } 2805 2806 /* 2807 * Test resizing of the metadata blob. This requires creating enough blobs 2808 * so that one cluster is not enough to fit the metadata for those blobs. 
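 * (Each blob's metadata occupies at least one 4KB metadata page, so once the blob
 * count exceeds the pages available in the initially allocated metadata cluster,
 * the metadata region itself has to grow.)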
2809 * To induce this condition to happen more quickly, we reduce the cluster 2810 * size to 16KB, which means only 4 4KB blob metadata pages can fit. 2811 */ 2812 static void 2813 bs_resize_md(void) 2814 { 2815 struct spdk_blob_store *bs; 2816 const int CLUSTER_PAGE_COUNT = 4; 2817 const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; 2818 struct spdk_bs_dev *dev; 2819 struct spdk_bs_opts opts; 2820 struct spdk_blob *blob; 2821 struct spdk_blob_opts blob_opts; 2822 uint32_t cluster_sz; 2823 spdk_blob_id blobids[NUM_BLOBS]; 2824 int i; 2825 2826 2827 dev = init_dev(); 2828 spdk_bs_opts_init(&opts); 2829 opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; 2830 cluster_sz = opts.cluster_sz; 2831 2832 /* Initialize a new blob store */ 2833 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2834 poll_threads(); 2835 CU_ASSERT(g_bserrno == 0); 2836 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2837 bs = g_bs; 2838 2839 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2840 2841 ut_spdk_blob_opts_init(&blob_opts); 2842 2843 for (i = 0; i < NUM_BLOBS; i++) { 2844 g_bserrno = -1; 2845 g_blobid = SPDK_BLOBID_INVALID; 2846 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2847 poll_threads(); 2848 CU_ASSERT(g_bserrno == 0); 2849 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2850 blobids[i] = g_blobid; 2851 } 2852 2853 ut_bs_reload(&bs, &opts); 2854 2855 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2856 2857 for (i = 0; i < NUM_BLOBS; i++) { 2858 g_bserrno = -1; 2859 g_blob = NULL; 2860 spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); 2861 poll_threads(); 2862 CU_ASSERT(g_bserrno == 0); 2863 CU_ASSERT(g_blob != NULL); 2864 blob = g_blob; 2865 g_bserrno = -1; 2866 spdk_blob_close(blob, blob_op_complete, NULL); 2867 poll_threads(); 2868 CU_ASSERT(g_bserrno == 0); 2869 } 2870 2871 spdk_bs_unload(bs, bs_op_complete, NULL); 2872 poll_threads(); 2873 CU_ASSERT(g_bserrno == 0); 2874 g_bs = NULL; 2875 } 2876 2877 static void 2878 bs_destroy(void) 2879 { 2880 struct spdk_blob_store *bs; 2881 struct spdk_bs_dev *dev; 2882 2883 /* Initialize a new blob store */ 2884 dev = init_dev(); 2885 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2886 poll_threads(); 2887 CU_ASSERT(g_bserrno == 0); 2888 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2889 bs = g_bs; 2890 2891 /* Destroy the blob store */ 2892 g_bserrno = -1; 2893 spdk_bs_destroy(bs, bs_op_complete, NULL); 2894 poll_threads(); 2895 CU_ASSERT(g_bserrno == 0); 2896 2897 /* Loading an non-existent blob store should fail. 
*/ 2898 g_bs = NULL; 2899 dev = init_dev(); 2900 2901 g_bserrno = 0; 2902 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2903 poll_threads(); 2904 CU_ASSERT(g_bserrno != 0); 2905 } 2906 2907 /* Try to hit all of the corner cases associated with serializing 2908 * a blob to disk 2909 */ 2910 static void 2911 blob_serialize_test(void) 2912 { 2913 struct spdk_bs_dev *dev; 2914 struct spdk_bs_opts opts; 2915 struct spdk_blob_store *bs; 2916 spdk_blob_id blobid[2]; 2917 struct spdk_blob *blob[2]; 2918 uint64_t i; 2919 char *value; 2920 int rc; 2921 2922 dev = init_dev(); 2923 2924 /* Initialize a new blobstore with very small clusters */ 2925 spdk_bs_opts_init(&opts); 2926 opts.cluster_sz = dev->blocklen * 8; 2927 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2928 poll_threads(); 2929 CU_ASSERT(g_bserrno == 0); 2930 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2931 bs = g_bs; 2932 2933 /* Create and open two blobs */ 2934 for (i = 0; i < 2; i++) { 2935 blob[i] = ut_blob_create_and_open(bs, NULL); 2936 blobid[i] = spdk_blob_get_id(blob[i]); 2937 2938 /* Set a fairly large xattr on both blobs to eat up 2939 * metadata space 2940 */ 2941 value = calloc(dev->blocklen - 64, sizeof(char)); 2942 SPDK_CU_ASSERT_FATAL(value != NULL); 2943 memset(value, i, dev->blocklen / 2); 2944 rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); 2945 CU_ASSERT(rc == 0); 2946 free(value); 2947 } 2948 2949 /* Resize the blobs, alternating 1 cluster at a time. 2950 * This thwarts run length encoding and will cause spill 2951 * over of the extents. 2952 */ 2953 for (i = 0; i < 6; i++) { 2954 spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); 2955 poll_threads(); 2956 CU_ASSERT(g_bserrno == 0); 2957 } 2958 2959 for (i = 0; i < 2; i++) { 2960 spdk_blob_sync_md(blob[i], blob_op_complete, NULL); 2961 poll_threads(); 2962 CU_ASSERT(g_bserrno == 0); 2963 } 2964 2965 /* Close the blobs */ 2966 for (i = 0; i < 2; i++) { 2967 spdk_blob_close(blob[i], blob_op_complete, NULL); 2968 poll_threads(); 2969 CU_ASSERT(g_bserrno == 0); 2970 } 2971 2972 ut_bs_reload(&bs, &opts); 2973 2974 for (i = 0; i < 2; i++) { 2975 blob[i] = NULL; 2976 2977 spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); 2978 poll_threads(); 2979 CU_ASSERT(g_bserrno == 0); 2980 CU_ASSERT(g_blob != NULL); 2981 blob[i] = g_blob; 2982 2983 CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); 2984 2985 spdk_blob_close(blob[i], blob_op_complete, NULL); 2986 poll_threads(); 2987 CU_ASSERT(g_bserrno == 0); 2988 } 2989 2990 spdk_bs_unload(bs, bs_op_complete, NULL); 2991 poll_threads(); 2992 CU_ASSERT(g_bserrno == 0); 2993 g_bs = NULL; 2994 } 2995 2996 static void 2997 blob_crc(void) 2998 { 2999 struct spdk_blob_store *bs = g_bs; 3000 struct spdk_blob *blob; 3001 spdk_blob_id blobid; 3002 uint32_t page_num; 3003 int index; 3004 struct spdk_blob_md_page *page; 3005 3006 blob = ut_blob_create_and_open(bs, NULL); 3007 blobid = spdk_blob_get_id(blob); 3008 3009 spdk_blob_close(blob, blob_op_complete, NULL); 3010 poll_threads(); 3011 CU_ASSERT(g_bserrno == 0); 3012 3013 page_num = bs_blobid_to_page(blobid); 3014 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3015 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3016 page->crc = 0; 3017 3018 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3019 poll_threads(); 3020 CU_ASSERT(g_bserrno == -EINVAL); 3021 CU_ASSERT(g_blob == NULL); 3022 g_bserrno = 0; 3023 3024 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 3025 
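/* The delete must fail as well: the blob's metadata page no longer passes the
 * CRC check, so the operation below is expected to complete with -EINVAL. */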
poll_threads(); 3026 CU_ASSERT(g_bserrno == -EINVAL); 3027 } 3028 3029 static void 3030 super_block_crc(void) 3031 { 3032 struct spdk_blob_store *bs; 3033 struct spdk_bs_dev *dev; 3034 struct spdk_bs_super_block *super_block; 3035 3036 dev = init_dev(); 3037 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 3038 poll_threads(); 3039 CU_ASSERT(g_bserrno == 0); 3040 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3041 bs = g_bs; 3042 3043 spdk_bs_unload(bs, bs_op_complete, NULL); 3044 poll_threads(); 3045 CU_ASSERT(g_bserrno == 0); 3046 g_bs = NULL; 3047 3048 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 3049 super_block->crc = 0; 3050 dev = init_dev(); 3051 3052 /* Load an existing blob store */ 3053 g_bserrno = 0; 3054 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3055 poll_threads(); 3056 CU_ASSERT(g_bserrno == -EILSEQ); 3057 } 3058 3059 /* For the blob dirty shutdown test case we run the following sub-tests: 3060 * 1 Initialize a new blob store and create 1 super blob with some xattrs, then do a 3061 * dirty shutdown, reload the blob store and verify the xattrs. 3062 * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown, 3063 * reload the blob store and verify the cluster count. 3064 * 3 Create the second blob and then dirty shutdown, reload the blob store 3065 * and verify the second blob. 3066 * 4 Delete the second blob and then dirty shutdown, reload the blob store 3067 * and verify the second blob is invalid. 3068 * 5 Create the second blob again and also create the third blob, modify the 3069 * md of the second blob so that its md becomes invalid, and then dirty shutdown; 3070 * reload the blob store and verify the second blob, which should be invalid, and 3071 * verify the third blob, which should be correct. 3072 */ 3073 static void 3074 blob_dirty_shutdown(void) 3075 { 3076 int rc; 3077 int index; 3078 struct spdk_blob_store *bs = g_bs; 3079 spdk_blob_id blobid1, blobid2, blobid3; 3080 struct spdk_blob *blob = g_blob; 3081 uint64_t length; 3082 uint64_t free_clusters; 3083 const void *value; 3084 size_t value_len; 3085 uint32_t page_num; 3086 struct spdk_blob_md_page *page; 3087 struct spdk_blob_opts blob_opts; 3088 3089 /* Create first blob */ 3090 blobid1 = spdk_blob_get_id(blob); 3091 3092 /* Set some xattrs */ 3093 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 3094 CU_ASSERT(rc == 0); 3095 3096 length = 2345; 3097 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3098 CU_ASSERT(rc == 0); 3099 3100 /* Put an xattr that fits exactly in a single page. 3101 * This results in adding additional pages to the MD. 3102 * The first page holds the flags and the smaller xattrs, the second the large xattr, 3103 * and the third just the extents.
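 * (The 4072 below appears to correspond to the descriptor area of a 4096-byte
 * metadata page, i.e. the page minus its header and trailing next/crc fields, so
 * the computed xattr_length makes this xattr descriptor fill one page exactly.)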
3104 */ 3105 size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - 3106 strlen("large_xattr"); 3107 char *xattr = calloc(xattr_length, sizeof(char)); 3108 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3109 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3110 free(xattr); 3111 SPDK_CU_ASSERT_FATAL(rc == 0); 3112 3113 /* Resize the blob */ 3114 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3115 poll_threads(); 3116 CU_ASSERT(g_bserrno == 0); 3117 3118 /* Set the blob as the super blob */ 3119 spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); 3120 poll_threads(); 3121 CU_ASSERT(g_bserrno == 0); 3122 3123 free_clusters = spdk_bs_free_cluster_count(bs); 3124 3125 spdk_blob_close(blob, blob_op_complete, NULL); 3126 poll_threads(); 3127 CU_ASSERT(g_bserrno == 0); 3128 blob = NULL; 3129 g_blob = NULL; 3130 g_blobid = SPDK_BLOBID_INVALID; 3131 3132 ut_bs_dirty_load(&bs, NULL); 3133 3134 /* Get the super blob */ 3135 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 3136 poll_threads(); 3137 CU_ASSERT(g_bserrno == 0); 3138 CU_ASSERT(blobid1 == g_blobid); 3139 3140 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3141 poll_threads(); 3142 CU_ASSERT(g_bserrno == 0); 3143 CU_ASSERT(g_blob != NULL); 3144 blob = g_blob; 3145 3146 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3147 3148 /* Get the xattrs */ 3149 value = NULL; 3150 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3151 CU_ASSERT(rc == 0); 3152 SPDK_CU_ASSERT_FATAL(value != NULL); 3153 CU_ASSERT(*(uint64_t *)value == length); 3154 CU_ASSERT(value_len == 8); 3155 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3156 3157 /* Resize the blob */ 3158 spdk_blob_resize(blob, 20, blob_op_complete, NULL); 3159 poll_threads(); 3160 CU_ASSERT(g_bserrno == 0); 3161 3162 free_clusters = spdk_bs_free_cluster_count(bs); 3163 3164 spdk_blob_close(blob, blob_op_complete, NULL); 3165 poll_threads(); 3166 CU_ASSERT(g_bserrno == 0); 3167 blob = NULL; 3168 g_blob = NULL; 3169 g_blobid = SPDK_BLOBID_INVALID; 3170 3171 ut_bs_dirty_load(&bs, NULL); 3172 3173 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3174 poll_threads(); 3175 CU_ASSERT(g_bserrno == 0); 3176 CU_ASSERT(g_blob != NULL); 3177 blob = g_blob; 3178 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); 3179 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3180 3181 spdk_blob_close(blob, blob_op_complete, NULL); 3182 poll_threads(); 3183 CU_ASSERT(g_bserrno == 0); 3184 blob = NULL; 3185 g_blob = NULL; 3186 g_blobid = SPDK_BLOBID_INVALID; 3187 3188 /* Create second blob */ 3189 blob = ut_blob_create_and_open(bs, NULL); 3190 blobid2 = spdk_blob_get_id(blob); 3191 3192 /* Set some xattrs */ 3193 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3194 CU_ASSERT(rc == 0); 3195 3196 length = 5432; 3197 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3198 CU_ASSERT(rc == 0); 3199 3200 /* Resize the blob */ 3201 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3202 poll_threads(); 3203 CU_ASSERT(g_bserrno == 0); 3204 3205 free_clusters = spdk_bs_free_cluster_count(bs); 3206 3207 spdk_blob_close(blob, blob_op_complete, NULL); 3208 poll_threads(); 3209 CU_ASSERT(g_bserrno == 0); 3210 blob = NULL; 3211 g_blob = NULL; 3212 g_blobid = SPDK_BLOBID_INVALID; 3213 3214 ut_bs_dirty_load(&bs, NULL); 3215 3216 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3217 poll_threads(); 3218 CU_ASSERT(g_bserrno == 0); 3219 
CU_ASSERT(g_blob != NULL); 3220 blob = g_blob; 3221 3222 /* Get the xattrs */ 3223 value = NULL; 3224 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3225 CU_ASSERT(rc == 0); 3226 SPDK_CU_ASSERT_FATAL(value != NULL); 3227 CU_ASSERT(*(uint64_t *)value == length); 3228 CU_ASSERT(value_len == 8); 3229 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3230 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3231 3232 ut_blob_close_and_delete(bs, blob); 3233 3234 free_clusters = spdk_bs_free_cluster_count(bs); 3235 3236 ut_bs_dirty_load(&bs, NULL); 3237 3238 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3239 poll_threads(); 3240 CU_ASSERT(g_bserrno != 0); 3241 CU_ASSERT(g_blob == NULL); 3242 3243 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3244 poll_threads(); 3245 CU_ASSERT(g_bserrno == 0); 3246 CU_ASSERT(g_blob != NULL); 3247 blob = g_blob; 3248 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3249 spdk_blob_close(blob, blob_op_complete, NULL); 3250 poll_threads(); 3251 CU_ASSERT(g_bserrno == 0); 3252 3253 ut_bs_reload(&bs, NULL); 3254 3255 /* Create second blob */ 3256 ut_spdk_blob_opts_init(&blob_opts); 3257 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3258 poll_threads(); 3259 CU_ASSERT(g_bserrno == 0); 3260 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3261 blobid2 = g_blobid; 3262 3263 /* Create third blob */ 3264 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3265 poll_threads(); 3266 CU_ASSERT(g_bserrno == 0); 3267 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3268 blobid3 = g_blobid; 3269 3270 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3271 poll_threads(); 3272 CU_ASSERT(g_bserrno == 0); 3273 CU_ASSERT(g_blob != NULL); 3274 blob = g_blob; 3275 3276 /* Set some xattrs for second blob */ 3277 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3278 CU_ASSERT(rc == 0); 3279 3280 length = 5432; 3281 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3282 CU_ASSERT(rc == 0); 3283 3284 spdk_blob_close(blob, blob_op_complete, NULL); 3285 poll_threads(); 3286 CU_ASSERT(g_bserrno == 0); 3287 blob = NULL; 3288 g_blob = NULL; 3289 g_blobid = SPDK_BLOBID_INVALID; 3290 3291 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3292 poll_threads(); 3293 CU_ASSERT(g_bserrno == 0); 3294 CU_ASSERT(g_blob != NULL); 3295 blob = g_blob; 3296 3297 /* Set some xattrs for third blob */ 3298 rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); 3299 CU_ASSERT(rc == 0); 3300 3301 length = 5432; 3302 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3303 CU_ASSERT(rc == 0); 3304 3305 spdk_blob_close(blob, blob_op_complete, NULL); 3306 poll_threads(); 3307 CU_ASSERT(g_bserrno == 0); 3308 blob = NULL; 3309 g_blob = NULL; 3310 g_blobid = SPDK_BLOBID_INVALID; 3311 3312 /* Mark second blob as invalid */ 3313 page_num = bs_blobid_to_page(blobid2); 3314 3315 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3316 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3317 page->sequence_num = 1; 3318 page->crc = blob_md_page_calc_crc(page); 3319 3320 free_clusters = spdk_bs_free_cluster_count(bs); 3321 3322 ut_bs_dirty_load(&bs, NULL); 3323 3324 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3325 poll_threads(); 3326 CU_ASSERT(g_bserrno != 0); 3327 CU_ASSERT(g_blob == NULL); 3328 3329 spdk_bs_open_blob(bs, blobid3, 
blob_op_with_handle_complete, NULL); 3330 poll_threads(); 3331 CU_ASSERT(g_bserrno == 0); 3332 CU_ASSERT(g_blob != NULL); 3333 blob = g_blob; 3334 3335 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3336 } 3337 3338 static void 3339 blob_flags(void) 3340 { 3341 struct spdk_blob_store *bs = g_bs; 3342 spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; 3343 struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; 3344 struct spdk_blob_opts blob_opts; 3345 int rc; 3346 3347 /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */ 3348 blob_invalid = ut_blob_create_and_open(bs, NULL); 3349 blobid_invalid = spdk_blob_get_id(blob_invalid); 3350 3351 blob_data_ro = ut_blob_create_and_open(bs, NULL); 3352 blobid_data_ro = spdk_blob_get_id(blob_data_ro); 3353 3354 ut_spdk_blob_opts_init(&blob_opts); 3355 blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; 3356 blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); 3357 blobid_md_ro = spdk_blob_get_id(blob_md_ro); 3358 CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); 3359 3360 /* Change the size of blob_data_ro to check if flags are serialized 3361 * when blob has non zero number of extents */ 3362 spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); 3363 poll_threads(); 3364 CU_ASSERT(g_bserrno == 0); 3365 3366 /* Set the xattr to check if flags are serialized 3367 * when blob has non zero number of xattrs */ 3368 rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); 3369 CU_ASSERT(rc == 0); 3370 3371 blob_invalid->invalid_flags = (1ULL << 63); 3372 blob_invalid->state = SPDK_BLOB_STATE_DIRTY; 3373 blob_data_ro->data_ro_flags = (1ULL << 62); 3374 blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; 3375 blob_md_ro->md_ro_flags = (1ULL << 61); 3376 blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; 3377 3378 g_bserrno = -1; 3379 spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); 3380 poll_threads(); 3381 CU_ASSERT(g_bserrno == 0); 3382 g_bserrno = -1; 3383 spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); 3384 poll_threads(); 3385 CU_ASSERT(g_bserrno == 0); 3386 g_bserrno = -1; 3387 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3388 poll_threads(); 3389 CU_ASSERT(g_bserrno == 0); 3390 3391 g_bserrno = -1; 3392 spdk_blob_close(blob_invalid, blob_op_complete, NULL); 3393 poll_threads(); 3394 CU_ASSERT(g_bserrno == 0); 3395 blob_invalid = NULL; 3396 g_bserrno = -1; 3397 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3398 poll_threads(); 3399 CU_ASSERT(g_bserrno == 0); 3400 blob_data_ro = NULL; 3401 g_bserrno = -1; 3402 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3403 poll_threads(); 3404 CU_ASSERT(g_bserrno == 0); 3405 blob_md_ro = NULL; 3406 3407 g_blob = NULL; 3408 g_blobid = SPDK_BLOBID_INVALID; 3409 3410 ut_bs_reload(&bs, NULL); 3411 3412 g_blob = NULL; 3413 g_bserrno = 0; 3414 spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); 3415 poll_threads(); 3416 CU_ASSERT(g_bserrno != 0); 3417 CU_ASSERT(g_blob == NULL); 3418 3419 g_blob = NULL; 3420 g_bserrno = -1; 3421 spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL); 3422 poll_threads(); 3423 CU_ASSERT(g_bserrno == 0); 3424 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3425 blob_data_ro = g_blob; 3426 /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. 
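 * (The three flag classes behave differently on load, as exercised here: an unknown
 * invalid_flags bit makes the open fail outright, an unknown data_ro flag forces both
 * data_ro and md_ro, and an unknown md_ro flag forces only md_ro.)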
*/ 3427 CU_ASSERT(blob_data_ro->data_ro == true); 3428 CU_ASSERT(blob_data_ro->md_ro == true); 3429 CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); 3430 3431 g_blob = NULL; 3432 g_bserrno = -1; 3433 spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); 3434 poll_threads(); 3435 CU_ASSERT(g_bserrno == 0); 3436 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3437 blob_md_ro = g_blob; 3438 CU_ASSERT(blob_md_ro->data_ro == false); 3439 CU_ASSERT(blob_md_ro->md_ro == true); 3440 3441 g_bserrno = -1; 3442 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3443 poll_threads(); 3444 CU_ASSERT(g_bserrno == 0); 3445 3446 ut_blob_close_and_delete(bs, blob_data_ro); 3447 ut_blob_close_and_delete(bs, blob_md_ro); 3448 } 3449 3450 static void 3451 bs_version(void) 3452 { 3453 struct spdk_bs_super_block *super; 3454 struct spdk_blob_store *bs = g_bs; 3455 struct spdk_bs_dev *dev; 3456 struct spdk_blob *blob; 3457 struct spdk_blob_opts blob_opts; 3458 spdk_blob_id blobid; 3459 3460 /* Unload the blob store */ 3461 spdk_bs_unload(bs, bs_op_complete, NULL); 3462 poll_threads(); 3463 CU_ASSERT(g_bserrno == 0); 3464 g_bs = NULL; 3465 3466 /* 3467 * Change the bs version on disk. This will allow us to 3468 * test that the version does not get modified automatically 3469 * when loading and unloading the blobstore. 3470 */ 3471 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 3472 CU_ASSERT(super->version == SPDK_BS_VERSION); 3473 CU_ASSERT(super->clean == 1); 3474 super->version = 2; 3475 /* 3476 * Version 2 metadata does not have a used blobid mask, so clear 3477 * those fields in the super block and zero the corresponding 3478 * region on "disk". We will use this to ensure blob IDs are 3479 * correctly reconstructed. 3480 */ 3481 memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, 3482 super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); 3483 super->used_blobid_mask_start = 0; 3484 super->used_blobid_mask_len = 0; 3485 super->crc = blob_md_page_calc_crc(super); 3486 3487 /* Load an existing blob store */ 3488 dev = init_dev(); 3489 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3490 poll_threads(); 3491 CU_ASSERT(g_bserrno == 0); 3492 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3493 CU_ASSERT(super->clean == 1); 3494 bs = g_bs; 3495 3496 /* 3497 * Create a blob - just to make sure that unloading it 3498 * results in writing the super block (since metadata pages 3499 * were allocated).
3500 */ 3501 ut_spdk_blob_opts_init(&blob_opts); 3502 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3503 poll_threads(); 3504 CU_ASSERT(g_bserrno == 0); 3505 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3506 blobid = g_blobid; 3507 3508 /* Unload the blob store */ 3509 spdk_bs_unload(bs, bs_op_complete, NULL); 3510 poll_threads(); 3511 CU_ASSERT(g_bserrno == 0); 3512 g_bs = NULL; 3513 CU_ASSERT(super->version == 2); 3514 CU_ASSERT(super->used_blobid_mask_start == 0); 3515 CU_ASSERT(super->used_blobid_mask_len == 0); 3516 3517 dev = init_dev(); 3518 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3519 poll_threads(); 3520 CU_ASSERT(g_bserrno == 0); 3521 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3522 bs = g_bs; 3523 3524 g_blob = NULL; 3525 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3526 poll_threads(); 3527 CU_ASSERT(g_bserrno == 0); 3528 CU_ASSERT(g_blob != NULL); 3529 blob = g_blob; 3530 3531 ut_blob_close_and_delete(bs, blob); 3532 3533 CU_ASSERT(super->version == 2); 3534 CU_ASSERT(super->used_blobid_mask_start == 0); 3535 CU_ASSERT(super->used_blobid_mask_len == 0); 3536 } 3537 3538 static void 3539 blob_set_xattrs_test(void) 3540 { 3541 struct spdk_blob_store *bs = g_bs; 3542 struct spdk_blob *blob; 3543 struct spdk_blob_opts opts; 3544 const void *value; 3545 size_t value_len; 3546 char *xattr; 3547 size_t xattr_length; 3548 int rc; 3549 3550 /* Create blob with extra attributes */ 3551 ut_spdk_blob_opts_init(&opts); 3552 3553 opts.xattrs.names = g_xattr_names; 3554 opts.xattrs.get_value = _get_xattr_value; 3555 opts.xattrs.count = 3; 3556 opts.xattrs.ctx = &g_ctx; 3557 3558 blob = ut_blob_create_and_open(bs, &opts); 3559 3560 /* Get the xattrs */ 3561 value = NULL; 3562 3563 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 3564 CU_ASSERT(rc == 0); 3565 SPDK_CU_ASSERT_FATAL(value != NULL); 3566 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 3567 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 3568 3569 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 3570 CU_ASSERT(rc == 0); 3571 SPDK_CU_ASSERT_FATAL(value != NULL); 3572 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 3573 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 3574 3575 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 3576 CU_ASSERT(rc == 0); 3577 SPDK_CU_ASSERT_FATAL(value != NULL); 3578 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 3579 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 3580 3581 /* Try to get non existing attribute */ 3582 3583 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 3584 CU_ASSERT(rc == -ENOENT); 3585 3586 /* Try xattr exceeding maximum length of descriptor in single page */ 3587 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 3588 strlen("large_xattr") + 1; 3589 xattr = calloc(xattr_length, sizeof(char)); 3590 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3591 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3592 free(xattr); 3593 SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); 3594 3595 spdk_blob_close(blob, blob_op_complete, NULL); 3596 poll_threads(); 3597 CU_ASSERT(g_bserrno == 0); 3598 blob = NULL; 3599 g_blob = NULL; 3600 g_blobid = SPDK_BLOBID_INVALID; 3601 3602 /* NULL callback */ 3603 ut_spdk_blob_opts_init(&opts); 3604 opts.xattrs.names = g_xattr_names; 3605 opts.xattrs.get_value = NULL; 3606 opts.xattrs.count = 1; 3607 
opts.xattrs.ctx = &g_ctx; 3608 3609 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3610 poll_threads(); 3611 CU_ASSERT(g_bserrno == -EINVAL); 3612 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3613 3614 /* NULL values */ 3615 ut_spdk_blob_opts_init(&opts); 3616 opts.xattrs.names = g_xattr_names; 3617 opts.xattrs.get_value = _get_xattr_value_null; 3618 opts.xattrs.count = 1; 3619 opts.xattrs.ctx = NULL; 3620 3621 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3622 poll_threads(); 3623 CU_ASSERT(g_bserrno == -EINVAL); 3624 } 3625 3626 static void 3627 blob_thin_prov_alloc(void) 3628 { 3629 struct spdk_blob_store *bs = g_bs; 3630 struct spdk_blob *blob; 3631 struct spdk_blob_opts opts; 3632 spdk_blob_id blobid; 3633 uint64_t free_clusters; 3634 3635 free_clusters = spdk_bs_free_cluster_count(bs); 3636 3637 /* Set blob as thin provisioned */ 3638 ut_spdk_blob_opts_init(&opts); 3639 opts.thin_provision = true; 3640 3641 blob = ut_blob_create_and_open(bs, &opts); 3642 blobid = spdk_blob_get_id(blob); 3643 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3644 3645 CU_ASSERT(blob->active.num_clusters == 0); 3646 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 3647 3648 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3649 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3650 poll_threads(); 3651 CU_ASSERT(g_bserrno == 0); 3652 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3653 CU_ASSERT(blob->active.num_clusters == 5); 3654 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 3655 3656 /* Grow it to 1TB - still unallocated */ 3657 spdk_blob_resize(blob, 262144, blob_op_complete, NULL); 3658 poll_threads(); 3659 CU_ASSERT(g_bserrno == 0); 3660 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3661 CU_ASSERT(blob->active.num_clusters == 262144); 3662 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3663 3664 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3665 poll_threads(); 3666 CU_ASSERT(g_bserrno == 0); 3667 /* Sync must not change anything */ 3668 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3669 CU_ASSERT(blob->active.num_clusters == 262144); 3670 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3671 /* Since clusters are not allocated, 3672 * number of metadata pages is expected to be minimal. 
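 * The assert below checks that a single page still describes the whole
 * blob even after the resize to 262144 clusters, since nothing has been
 * allocated yet.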
3673 */ 3674 CU_ASSERT(blob->active.num_pages == 1); 3675 3676 /* Shrink the blob to 3 clusters - still unallocated */ 3677 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 3678 poll_threads(); 3679 CU_ASSERT(g_bserrno == 0); 3680 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3681 CU_ASSERT(blob->active.num_clusters == 3); 3682 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3683 3684 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3685 poll_threads(); 3686 CU_ASSERT(g_bserrno == 0); 3687 /* Sync must not change anything */ 3688 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3689 CU_ASSERT(blob->active.num_clusters == 3); 3690 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3691 3692 spdk_blob_close(blob, blob_op_complete, NULL); 3693 poll_threads(); 3694 CU_ASSERT(g_bserrno == 0); 3695 3696 ut_bs_reload(&bs, NULL); 3697 3698 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3699 poll_threads(); 3700 CU_ASSERT(g_bserrno == 0); 3701 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3702 blob = g_blob; 3703 3704 /* Check that clusters allocation and size is still the same */ 3705 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3706 CU_ASSERT(blob->active.num_clusters == 3); 3707 3708 ut_blob_close_and_delete(bs, blob); 3709 } 3710 3711 static void 3712 blob_insert_cluster_msg_test(void) 3713 { 3714 struct spdk_blob_store *bs = g_bs; 3715 struct spdk_blob *blob; 3716 struct spdk_blob_opts opts; 3717 spdk_blob_id blobid; 3718 uint64_t free_clusters; 3719 uint64_t new_cluster = 0; 3720 uint32_t cluster_num = 3; 3721 uint32_t extent_page = 0; 3722 3723 free_clusters = spdk_bs_free_cluster_count(bs); 3724 3725 /* Set blob as thin provisioned */ 3726 ut_spdk_blob_opts_init(&opts); 3727 opts.thin_provision = true; 3728 opts.num_clusters = 4; 3729 3730 blob = ut_blob_create_and_open(bs, &opts); 3731 blobid = spdk_blob_get_id(blob); 3732 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3733 3734 CU_ASSERT(blob->active.num_clusters == 4); 3735 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); 3736 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3737 3738 /* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread. 3739 * This is to simulate behaviour when cluster is allocated after blob creation. 3740 * Such as _spdk_bs_allocate_and_copy_cluster(). 
*/ 3741 bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); 3742 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3743 3744 blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, 3745 blob_op_complete, NULL); 3746 poll_threads(); 3747 3748 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3749 3750 spdk_blob_close(blob, blob_op_complete, NULL); 3751 poll_threads(); 3752 CU_ASSERT(g_bserrno == 0); 3753 3754 ut_bs_reload(&bs, NULL); 3755 3756 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3757 poll_threads(); 3758 CU_ASSERT(g_bserrno == 0); 3759 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3760 blob = g_blob; 3761 3762 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3763 3764 ut_blob_close_and_delete(bs, blob); 3765 } 3766 3767 static void 3768 blob_thin_prov_rw(void) 3769 { 3770 static const uint8_t zero[10 * 4096] = { 0 }; 3771 struct spdk_blob_store *bs = g_bs; 3772 struct spdk_blob *blob, *blob_id0; 3773 struct spdk_io_channel *channel, *channel_thread1; 3774 struct spdk_blob_opts opts; 3775 uint64_t free_clusters; 3776 uint64_t page_size; 3777 uint8_t payload_read[10 * 4096]; 3778 uint8_t payload_write[10 * 4096]; 3779 uint64_t write_bytes; 3780 uint64_t read_bytes; 3781 3782 free_clusters = spdk_bs_free_cluster_count(bs); 3783 page_size = spdk_bs_get_page_size(bs); 3784 3785 channel = spdk_bs_alloc_io_channel(bs); 3786 CU_ASSERT(channel != NULL); 3787 3788 ut_spdk_blob_opts_init(&opts); 3789 opts.thin_provision = true; 3790 3791 /* Create and delete blob at md page 0, so that next md page allocation 3792 * for extent will use that. */ 3793 blob_id0 = ut_blob_create_and_open(bs, &opts); 3794 blob = ut_blob_create_and_open(bs, &opts); 3795 ut_blob_close_and_delete(bs, blob_id0); 3796 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3797 3798 CU_ASSERT(blob->active.num_clusters == 0); 3799 3800 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3801 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3802 poll_threads(); 3803 CU_ASSERT(g_bserrno == 0); 3804 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3805 CU_ASSERT(blob->active.num_clusters == 5); 3806 3807 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3808 poll_threads(); 3809 CU_ASSERT(g_bserrno == 0); 3810 /* Sync must not change anything */ 3811 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3812 CU_ASSERT(blob->active.num_clusters == 5); 3813 3814 /* Payload should be all zeros from unallocated clusters */ 3815 memset(payload_read, 0xFF, sizeof(payload_read)); 3816 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3817 poll_threads(); 3818 CU_ASSERT(g_bserrno == 0); 3819 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3820 3821 write_bytes = g_dev_write_bytes; 3822 read_bytes = g_dev_read_bytes; 3823 3824 /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ 3825 set_thread(1); 3826 channel_thread1 = spdk_bs_alloc_io_channel(bs); 3827 CU_ASSERT(channel_thread1 != NULL); 3828 memset(payload_write, 0xE5, sizeof(payload_write)); 3829 spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); 3830 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3831 /* Perform write on thread 0. That will try to allocate cluster, 3832 * but fail due to another thread issuing the cluster allocation first. 
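 * The redundant cluster is released once the race is detected, which is why
 * the free count drops by two while both writes are in flight but ends up
 * only one lower after they complete.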
*/ 3833 set_thread(0); 3834 memset(payload_write, 0xE5, sizeof(payload_write)); 3835 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 3836 CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); 3837 poll_threads(); 3838 CU_ASSERT(g_bserrno == 0); 3839 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3840 /* For thin-provisioned blob we need to write 20 pages plus one page metadata and 3841 * read 0 bytes */ 3842 if (g_use_extent_table) { 3843 /* Add one more page for EXTENT_PAGE write */ 3844 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); 3845 } else { 3846 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); 3847 } 3848 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3849 3850 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3851 poll_threads(); 3852 CU_ASSERT(g_bserrno == 0); 3853 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3854 3855 ut_blob_close_and_delete(bs, blob); 3856 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3857 3858 set_thread(1); 3859 spdk_bs_free_io_channel(channel_thread1); 3860 set_thread(0); 3861 spdk_bs_free_io_channel(channel); 3862 poll_threads(); 3863 g_blob = NULL; 3864 g_blobid = 0; 3865 } 3866 3867 static void 3868 blob_thin_prov_rle(void) 3869 { 3870 static const uint8_t zero[10 * 4096] = { 0 }; 3871 struct spdk_blob_store *bs = g_bs; 3872 struct spdk_blob *blob; 3873 struct spdk_io_channel *channel; 3874 struct spdk_blob_opts opts; 3875 spdk_blob_id blobid; 3876 uint64_t free_clusters; 3877 uint64_t page_size; 3878 uint8_t payload_read[10 * 4096]; 3879 uint8_t payload_write[10 * 4096]; 3880 uint64_t write_bytes; 3881 uint64_t read_bytes; 3882 uint64_t io_unit; 3883 3884 free_clusters = spdk_bs_free_cluster_count(bs); 3885 page_size = spdk_bs_get_page_size(bs); 3886 3887 ut_spdk_blob_opts_init(&opts); 3888 opts.thin_provision = true; 3889 opts.num_clusters = 5; 3890 3891 blob = ut_blob_create_and_open(bs, &opts); 3892 blobid = spdk_blob_get_id(blob); 3893 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3894 3895 channel = spdk_bs_alloc_io_channel(bs); 3896 CU_ASSERT(channel != NULL); 3897 3898 /* Target specifically second cluster in a blob as first allocation */ 3899 io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs); 3900 3901 /* Payload should be all zeros from unallocated clusters */ 3902 memset(payload_read, 0xFF, sizeof(payload_read)); 3903 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3904 poll_threads(); 3905 CU_ASSERT(g_bserrno == 0); 3906 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3907 3908 write_bytes = g_dev_write_bytes; 3909 read_bytes = g_dev_read_bytes; 3910 3911 /* Issue write to second cluster in a blob */ 3912 memset(payload_write, 0xE5, sizeof(payload_write)); 3913 spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); 3914 poll_threads(); 3915 CU_ASSERT(g_bserrno == 0); 3916 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3917 /* For thin-provisioned blob we need to write 10 pages plus one page metadata and 3918 * read 0 bytes */ 3919 if (g_use_extent_table) { 3920 /* Add one more page for EXTENT_PAGE write */ 3921 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); 3922 } else { 3923 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); 3924 } 3925 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3926 3927 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, 
blob_op_complete, NULL); 3928 poll_threads(); 3929 CU_ASSERT(g_bserrno == 0); 3930 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3931 3932 spdk_bs_free_io_channel(channel); 3933 poll_threads(); 3934 3935 spdk_blob_close(blob, blob_op_complete, NULL); 3936 poll_threads(); 3937 CU_ASSERT(g_bserrno == 0); 3938 3939 ut_bs_reload(&bs, NULL); 3940 3941 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3942 poll_threads(); 3943 CU_ASSERT(g_bserrno == 0); 3944 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3945 blob = g_blob; 3946 3947 channel = spdk_bs_alloc_io_channel(bs); 3948 CU_ASSERT(channel != NULL); 3949 3950 /* Read second cluster after blob reload to confirm data written */ 3951 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3952 poll_threads(); 3953 CU_ASSERT(g_bserrno == 0); 3954 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3955 3956 spdk_bs_free_io_channel(channel); 3957 poll_threads(); 3958 3959 ut_blob_close_and_delete(bs, blob); 3960 } 3961 3962 static void 3963 blob_thin_prov_rw_iov(void) 3964 { 3965 static const uint8_t zero[10 * 4096] = { 0 }; 3966 struct spdk_blob_store *bs = g_bs; 3967 struct spdk_blob *blob; 3968 struct spdk_io_channel *channel; 3969 struct spdk_blob_opts opts; 3970 uint64_t free_clusters; 3971 uint8_t payload_read[10 * 4096]; 3972 uint8_t payload_write[10 * 4096]; 3973 struct iovec iov_read[3]; 3974 struct iovec iov_write[3]; 3975 3976 free_clusters = spdk_bs_free_cluster_count(bs); 3977 3978 channel = spdk_bs_alloc_io_channel(bs); 3979 CU_ASSERT(channel != NULL); 3980 3981 ut_spdk_blob_opts_init(&opts); 3982 opts.thin_provision = true; 3983 3984 blob = ut_blob_create_and_open(bs, &opts); 3985 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3986 3987 CU_ASSERT(blob->active.num_clusters == 0); 3988 3989 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. 
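 * As in the non-iov variant above, the resize itself must not consume any
 * free clusters.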
*/ 3990 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3991 poll_threads(); 3992 CU_ASSERT(g_bserrno == 0); 3993 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3994 CU_ASSERT(blob->active.num_clusters == 5); 3995 3996 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3997 poll_threads(); 3998 CU_ASSERT(g_bserrno == 0); 3999 /* Sync must not change anything */ 4000 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4001 CU_ASSERT(blob->active.num_clusters == 5); 4002 4003 /* Payload should be all zeros from unallocated clusters */ 4004 memset(payload_read, 0xAA, sizeof(payload_read)); 4005 iov_read[0].iov_base = payload_read; 4006 iov_read[0].iov_len = 3 * 4096; 4007 iov_read[1].iov_base = payload_read + 3 * 4096; 4008 iov_read[1].iov_len = 4 * 4096; 4009 iov_read[2].iov_base = payload_read + 7 * 4096; 4010 iov_read[2].iov_len = 3 * 4096; 4011 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4012 poll_threads(); 4013 CU_ASSERT(g_bserrno == 0); 4014 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4015 4016 memset(payload_write, 0xE5, sizeof(payload_write)); 4017 iov_write[0].iov_base = payload_write; 4018 iov_write[0].iov_len = 1 * 4096; 4019 iov_write[1].iov_base = payload_write + 1 * 4096; 4020 iov_write[1].iov_len = 5 * 4096; 4021 iov_write[2].iov_base = payload_write + 6 * 4096; 4022 iov_write[2].iov_len = 4 * 4096; 4023 4024 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4025 poll_threads(); 4026 CU_ASSERT(g_bserrno == 0); 4027 4028 memset(payload_read, 0xAA, sizeof(payload_read)); 4029 iov_read[0].iov_base = payload_read; 4030 iov_read[0].iov_len = 3 * 4096; 4031 iov_read[1].iov_base = payload_read + 3 * 4096; 4032 iov_read[1].iov_len = 4 * 4096; 4033 iov_read[2].iov_base = payload_read + 7 * 4096; 4034 iov_read[2].iov_len = 3 * 4096; 4035 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4036 poll_threads(); 4037 CU_ASSERT(g_bserrno == 0); 4038 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4039 4040 spdk_bs_free_io_channel(channel); 4041 poll_threads(); 4042 4043 ut_blob_close_and_delete(bs, blob); 4044 } 4045 4046 struct iter_ctx { 4047 int current_iter; 4048 spdk_blob_id blobid[4]; 4049 }; 4050 4051 static void 4052 test_iter(void *arg, struct spdk_blob *blob, int bserrno) 4053 { 4054 struct iter_ctx *iter_ctx = arg; 4055 spdk_blob_id blobid; 4056 4057 CU_ASSERT(bserrno == 0); 4058 blobid = spdk_blob_get_id(blob); 4059 CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); 4060 } 4061 4062 static void 4063 bs_load_iter_test(void) 4064 { 4065 struct spdk_blob_store *bs; 4066 struct spdk_bs_dev *dev; 4067 struct iter_ctx iter_ctx = { 0 }; 4068 struct spdk_blob *blob; 4069 int i, rc; 4070 struct spdk_bs_opts opts; 4071 4072 dev = init_dev(); 4073 spdk_bs_opts_init(&opts); 4074 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4075 4076 /* Initialize a new blob store */ 4077 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 4078 poll_threads(); 4079 CU_ASSERT(g_bserrno == 0); 4080 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4081 bs = g_bs; 4082 4083 for (i = 0; i < 4; i++) { 4084 blob = ut_blob_create_and_open(bs, NULL); 4085 iter_ctx.blobid[i] = spdk_blob_get_id(blob); 4086 4087 /* Just save the blobid as an xattr for testing purposes. 
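 * The iteration callback only verifies blob IDs and their order; the xattr
 * value itself is never read back in this test.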
*/ 4088 rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); 4089 CU_ASSERT(rc == 0); 4090 4091 /* Resize the blob */ 4092 spdk_blob_resize(blob, i, blob_op_complete, NULL); 4093 poll_threads(); 4094 CU_ASSERT(g_bserrno == 0); 4095 4096 spdk_blob_close(blob, blob_op_complete, NULL); 4097 poll_threads(); 4098 CU_ASSERT(g_bserrno == 0); 4099 } 4100 4101 g_bserrno = -1; 4102 spdk_bs_unload(bs, bs_op_complete, NULL); 4103 poll_threads(); 4104 CU_ASSERT(g_bserrno == 0); 4105 4106 dev = init_dev(); 4107 spdk_bs_opts_init(&opts); 4108 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4109 opts.iter_cb_fn = test_iter; 4110 opts.iter_cb_arg = &iter_ctx; 4111 4112 /* Test blob iteration during load after a clean shutdown. */ 4113 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4114 poll_threads(); 4115 CU_ASSERT(g_bserrno == 0); 4116 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4117 bs = g_bs; 4118 4119 /* Dirty shutdown */ 4120 bs_free(bs); 4121 4122 dev = init_dev(); 4123 spdk_bs_opts_init(&opts); 4124 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4125 opts.iter_cb_fn = test_iter; 4126 iter_ctx.current_iter = 0; 4127 opts.iter_cb_arg = &iter_ctx; 4128 4129 /* Test blob iteration during load after a dirty shutdown. */ 4130 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4131 poll_threads(); 4132 CU_ASSERT(g_bserrno == 0); 4133 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4134 bs = g_bs; 4135 4136 spdk_bs_unload(bs, bs_op_complete, NULL); 4137 poll_threads(); 4138 CU_ASSERT(g_bserrno == 0); 4139 g_bs = NULL; 4140 } 4141 4142 static void 4143 blob_snapshot_rw(void) 4144 { 4145 static const uint8_t zero[10 * 4096] = { 0 }; 4146 struct spdk_blob_store *bs = g_bs; 4147 struct spdk_blob *blob, *snapshot; 4148 struct spdk_io_channel *channel; 4149 struct spdk_blob_opts opts; 4150 spdk_blob_id blobid, snapshotid; 4151 uint64_t free_clusters; 4152 uint64_t cluster_size; 4153 uint64_t page_size; 4154 uint8_t payload_read[10 * 4096]; 4155 uint8_t payload_write[10 * 4096]; 4156 uint64_t write_bytes; 4157 uint64_t read_bytes; 4158 4159 free_clusters = spdk_bs_free_cluster_count(bs); 4160 cluster_size = spdk_bs_get_cluster_size(bs); 4161 page_size = spdk_bs_get_page_size(bs); 4162 4163 channel = spdk_bs_alloc_io_channel(bs); 4164 CU_ASSERT(channel != NULL); 4165 4166 ut_spdk_blob_opts_init(&opts); 4167 opts.thin_provision = true; 4168 opts.num_clusters = 5; 4169 4170 blob = ut_blob_create_and_open(bs, &opts); 4171 blobid = spdk_blob_get_id(blob); 4172 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4173 4174 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4175 4176 memset(payload_read, 0xFF, sizeof(payload_read)); 4177 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4178 poll_threads(); 4179 CU_ASSERT(g_bserrno == 0); 4180 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4181 4182 memset(payload_write, 0xE5, sizeof(payload_write)); 4183 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4184 poll_threads(); 4185 CU_ASSERT(g_bserrno == 0); 4186 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4187 4188 /* Create snapshot from blob */ 4189 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4190 poll_threads(); 4191 CU_ASSERT(g_bserrno == 0); 4192 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4193 snapshotid = g_blobid; 4194 4195 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4196 poll_threads(); 
4197 CU_ASSERT(g_bserrno == 0); 4198 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4199 snapshot = g_blob; 4200 CU_ASSERT(snapshot->data_ro == true); 4201 CU_ASSERT(snapshot->md_ro == true); 4202 4203 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4204 4205 write_bytes = g_dev_write_bytes; 4206 read_bytes = g_dev_read_bytes; 4207 4208 memset(payload_write, 0xAA, sizeof(payload_write)); 4209 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4210 poll_threads(); 4211 CU_ASSERT(g_bserrno == 0); 4212 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4213 4214 /* For a clone we need to allocate and copy one cluster, update one page of metadata 4215 * and then write 10 pages of payload. 4216 */ 4217 if (g_use_extent_table) { 4218 /* Add one more page for EXTENT_PAGE write */ 4219 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); 4220 } else { 4221 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); 4222 } 4223 CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); 4224 4225 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4226 poll_threads(); 4227 CU_ASSERT(g_bserrno == 0); 4228 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4229 4230 /* Data on snapshot should not change after write to clone */ 4231 memset(payload_write, 0xE5, sizeof(payload_write)); 4232 spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); 4233 poll_threads(); 4234 CU_ASSERT(g_bserrno == 0); 4235 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4236 4237 ut_blob_close_and_delete(bs, blob); 4238 ut_blob_close_and_delete(bs, snapshot); 4239 4240 spdk_bs_free_io_channel(channel); 4241 poll_threads(); 4242 g_blob = NULL; 4243 g_blobid = 0; 4244 } 4245 4246 static void 4247 blob_snapshot_rw_iov(void) 4248 { 4249 static const uint8_t zero[10 * 4096] = { 0 }; 4250 struct spdk_blob_store *bs = g_bs; 4251 struct spdk_blob *blob, *snapshot; 4252 struct spdk_io_channel *channel; 4253 struct spdk_blob_opts opts; 4254 spdk_blob_id blobid, snapshotid; 4255 uint64_t free_clusters; 4256 uint8_t payload_read[10 * 4096]; 4257 uint8_t payload_write[10 * 4096]; 4258 struct iovec iov_read[3]; 4259 struct iovec iov_write[3]; 4260 4261 free_clusters = spdk_bs_free_cluster_count(bs); 4262 4263 channel = spdk_bs_alloc_io_channel(bs); 4264 CU_ASSERT(channel != NULL); 4265 4266 ut_spdk_blob_opts_init(&opts); 4267 opts.thin_provision = true; 4268 opts.num_clusters = 5; 4269 4270 blob = ut_blob_create_and_open(bs, &opts); 4271 blobid = spdk_blob_get_id(blob); 4272 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4273 4274 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4275 4276 /* Create snapshot from blob */ 4277 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4278 poll_threads(); 4279 CU_ASSERT(g_bserrno == 0); 4280 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4281 snapshotid = g_blobid; 4282 4283 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4284 poll_threads(); 4285 CU_ASSERT(g_bserrno == 0); 4286 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4287 snapshot = g_blob; 4288 CU_ASSERT(snapshot->data_ro == true); 4289 CU_ASSERT(snapshot->md_ro == true); 4290 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4291 4292 /* Payload should be all zeros from unallocated clusters */ 4293 memset(payload_read, 0xAA, sizeof(payload_read)); 4294 iov_read[0].iov_base = payload_read; 4295 iov_read[0].iov_len = 3 * 4096; 4296 
iov_read[1].iov_base = payload_read + 3 * 4096; 4297 iov_read[1].iov_len = 4 * 4096; 4298 iov_read[2].iov_base = payload_read + 7 * 4096; 4299 iov_read[2].iov_len = 3 * 4096; 4300 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4301 poll_threads(); 4302 CU_ASSERT(g_bserrno == 0); 4303 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4304 4305 memset(payload_write, 0xE5, sizeof(payload_write)); 4306 iov_write[0].iov_base = payload_write; 4307 iov_write[0].iov_len = 1 * 4096; 4308 iov_write[1].iov_base = payload_write + 1 * 4096; 4309 iov_write[1].iov_len = 5 * 4096; 4310 iov_write[2].iov_base = payload_write + 6 * 4096; 4311 iov_write[2].iov_len = 4 * 4096; 4312 4313 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4314 poll_threads(); 4315 CU_ASSERT(g_bserrno == 0); 4316 4317 memset(payload_read, 0xAA, sizeof(payload_read)); 4318 iov_read[0].iov_base = payload_read; 4319 iov_read[0].iov_len = 3 * 4096; 4320 iov_read[1].iov_base = payload_read + 3 * 4096; 4321 iov_read[1].iov_len = 4 * 4096; 4322 iov_read[2].iov_base = payload_read + 7 * 4096; 4323 iov_read[2].iov_len = 3 * 4096; 4324 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4325 poll_threads(); 4326 CU_ASSERT(g_bserrno == 0); 4327 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4328 4329 spdk_bs_free_io_channel(channel); 4330 poll_threads(); 4331 4332 ut_blob_close_and_delete(bs, blob); 4333 ut_blob_close_and_delete(bs, snapshot); 4334 } 4335 4336 /** 4337 * Inflate / decouple parent rw unit tests. 4338 * 4339 * -------------- 4340 * original blob: 0 1 2 3 4 4341 * ,---------+---------+---------+---------+---------. 4342 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4343 * +---------+---------+---------+---------+---------+ 4344 * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | 4345 * +---------+---------+---------+---------+---------+ 4346 * blob | - |zzzzzzzzz| - | - | - | 4347 * '---------+---------+---------+---------+---------' 4348 * . . . . . . 4349 * -------- . . . . . . 4350 * inflate: . . . . . . 4351 * ,---------+---------+---------+---------+---------. 4352 * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| 4353 * '---------+---------+---------+---------+---------' 4354 * 4355 * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency 4356 * on snapshot2 and snapshot removed . . . 4357 * . . . . . . 4358 * ---------------- . . . . . . 4359 * decouple parent: . . . . . . 4360 * ,---------+---------+---------+---------+---------. 4361 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4362 * +---------+---------+---------+---------+---------+ 4363 * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | 4364 * '---------+---------+---------+---------+---------' 4365 * 4366 * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency 4367 * on snapshot2 removed and on snapshot still exists. Snapshot2 4368 * should remain a clone of snapshot. 
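 *
 * Both variants are exercised below through _blob_inflate_rw(decouple_parent).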
4369 */ 4370 static void 4371 _blob_inflate_rw(bool decouple_parent) 4372 { 4373 struct spdk_blob_store *bs = g_bs; 4374 struct spdk_blob *blob, *snapshot, *snapshot2; 4375 struct spdk_io_channel *channel; 4376 struct spdk_blob_opts opts; 4377 spdk_blob_id blobid, snapshotid, snapshot2id; 4378 uint64_t free_clusters; 4379 uint64_t cluster_size; 4380 4381 uint64_t payload_size; 4382 uint8_t *payload_read; 4383 uint8_t *payload_write; 4384 uint8_t *payload_clone; 4385 4386 uint64_t pages_per_cluster; 4387 uint64_t pages_per_payload; 4388 4389 int i; 4390 spdk_blob_id ids[2]; 4391 size_t count; 4392 4393 free_clusters = spdk_bs_free_cluster_count(bs); 4394 cluster_size = spdk_bs_get_cluster_size(bs); 4395 pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); 4396 pages_per_payload = pages_per_cluster * 5; 4397 4398 payload_size = cluster_size * 5; 4399 4400 payload_read = malloc(payload_size); 4401 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 4402 4403 payload_write = malloc(payload_size); 4404 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 4405 4406 payload_clone = malloc(payload_size); 4407 SPDK_CU_ASSERT_FATAL(payload_clone != NULL); 4408 4409 channel = spdk_bs_alloc_io_channel(bs); 4410 SPDK_CU_ASSERT_FATAL(channel != NULL); 4411 4412 /* Create blob */ 4413 ut_spdk_blob_opts_init(&opts); 4414 opts.thin_provision = true; 4415 opts.num_clusters = 5; 4416 4417 blob = ut_blob_create_and_open(bs, &opts); 4418 blobid = spdk_blob_get_id(blob); 4419 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4420 4421 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4422 4423 /* 1) Initial read should return zeroed payload */ 4424 memset(payload_read, 0xFF, payload_size); 4425 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4426 blob_op_complete, NULL); 4427 poll_threads(); 4428 CU_ASSERT(g_bserrno == 0); 4429 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 4430 4431 /* Fill whole blob with a pattern, except last cluster (to be sure it 4432 * isn't allocated) */ 4433 memset(payload_write, 0xE5, payload_size - cluster_size); 4434 spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - 4435 pages_per_cluster, blob_op_complete, NULL); 4436 poll_threads(); 4437 CU_ASSERT(g_bserrno == 0); 4438 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4439 4440 /* 2) Create snapshot from blob (first level) */ 4441 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4442 poll_threads(); 4443 CU_ASSERT(g_bserrno == 0); 4444 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4445 snapshotid = g_blobid; 4446 4447 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4448 poll_threads(); 4449 CU_ASSERT(g_bserrno == 0); 4450 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4451 snapshot = g_blob; 4452 CU_ASSERT(snapshot->data_ro == true); 4453 CU_ASSERT(snapshot->md_ro == true); 4454 4455 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4456 4457 /* Write every second cluster with a pattern. 4458 * 4459 * Last cluster shouldn't be written, to be sure that snapshot nor clone 4460 * doesn't allocate it. 4461 * 4462 * payload_clone stores expected result on "blob" read at the time and 4463 * is used only to check data consistency on clone before and after 4464 * inflation. Initially we fill it with a backing snapshots pattern 4465 * used before. 
4466 */ 4467 memset(payload_clone, 0xE5, payload_size - cluster_size); 4468 memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); 4469 memset(payload_write, 0xAA, payload_size); 4470 for (i = 1; i < 5; i += 2) { 4471 spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, 4472 pages_per_cluster, blob_op_complete, NULL); 4473 poll_threads(); 4474 CU_ASSERT(g_bserrno == 0); 4475 4476 /* Update expected result */ 4477 memcpy(payload_clone + (cluster_size * i), payload_write, 4478 cluster_size); 4479 } 4480 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4481 4482 /* Check data consistency on clone */ 4483 memset(payload_read, 0xFF, payload_size); 4484 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4485 blob_op_complete, NULL); 4486 poll_threads(); 4487 CU_ASSERT(g_bserrno == 0); 4488 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4489 4490 /* 3) Create second levels snapshot from blob */ 4491 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4492 poll_threads(); 4493 CU_ASSERT(g_bserrno == 0); 4494 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4495 snapshot2id = g_blobid; 4496 4497 spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); 4498 poll_threads(); 4499 CU_ASSERT(g_bserrno == 0); 4500 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4501 snapshot2 = g_blob; 4502 CU_ASSERT(snapshot2->data_ro == true); 4503 CU_ASSERT(snapshot2->md_ro == true); 4504 4505 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); 4506 4507 CU_ASSERT(snapshot2->parent_id == snapshotid); 4508 4509 /* Write one cluster on the top level blob. This cluster (1) covers 4510 * already allocated cluster in the snapshot2, so shouldn't be inflated 4511 * at all */ 4512 spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, 4513 pages_per_cluster, blob_op_complete, NULL); 4514 poll_threads(); 4515 CU_ASSERT(g_bserrno == 0); 4516 4517 /* Update expected result */ 4518 memcpy(payload_clone + cluster_size, payload_write, cluster_size); 4519 4520 /* Check data consistency on clone */ 4521 memset(payload_read, 0xFF, payload_size); 4522 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4523 blob_op_complete, NULL); 4524 poll_threads(); 4525 CU_ASSERT(g_bserrno == 0); 4526 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4527 4528 4529 /* Close all blobs */ 4530 spdk_blob_close(blob, blob_op_complete, NULL); 4531 poll_threads(); 4532 CU_ASSERT(g_bserrno == 0); 4533 4534 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4535 poll_threads(); 4536 CU_ASSERT(g_bserrno == 0); 4537 4538 spdk_blob_close(snapshot, blob_op_complete, NULL); 4539 poll_threads(); 4540 CU_ASSERT(g_bserrno == 0); 4541 4542 /* Check snapshot-clone relations */ 4543 count = 2; 4544 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4545 CU_ASSERT(count == 1); 4546 CU_ASSERT(ids[0] == snapshot2id); 4547 4548 count = 2; 4549 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4550 CU_ASSERT(count == 1); 4551 CU_ASSERT(ids[0] == blobid); 4552 4553 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); 4554 4555 free_clusters = spdk_bs_free_cluster_count(bs); 4556 if (!decouple_parent) { 4557 /* Do full blob inflation */ 4558 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 4559 poll_threads(); 4560 CU_ASSERT(g_bserrno == 0); 4561 4562 /* All clusters should be inflated (except one already allocated 4563 * in a top level blob) */ 4564 
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid has one clone */
		CU_ASSERT(count == 1);
		CU_ASSERT(ids[0] == snapshot2id);

		/* snapshot2id has no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Only one cluster from a parent should be inflated (second one
		 * is covered by a cluster written on a top level blob, and
		 * already allocated) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid has two clones now */
		CU_ASSERT(count == 2);
		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);

		/* snapshot2id has no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
	}

	/* Try to delete snapshot2 (should pass) */
	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to delete base snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reopen blob after snapshot deletion */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Check data consistency on inflated blob */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	free(payload_read);
	free(payload_write);
	free(payload_clone);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_inflate_rw(void)
{
	_blob_inflate_rw(false);
	_blob_inflate_rw(true);
}

/**
 * Snapshot-clones relation test
 *
 *         snapshot
 *            |
 *      +-----+-----+
 *      |           |
 *   blob(ro)   snapshot2
 *      |           |
 *   clone2      clone
 */
static void
blob_relations(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
	int rc;
	size_t count;
	spdk_blob_id ids[10] = {};

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts);
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype),
"TESTTYPE"); 4679 4680 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4681 poll_threads(); 4682 CU_ASSERT(g_bserrno == 0); 4683 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4684 bs = g_bs; 4685 4686 /* 1. Create blob with 10 clusters */ 4687 4688 ut_spdk_blob_opts_init(&opts); 4689 opts.num_clusters = 10; 4690 4691 blob = ut_blob_create_and_open(bs, &opts); 4692 blobid = spdk_blob_get_id(blob); 4693 4694 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4695 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4696 CU_ASSERT(!spdk_blob_is_clone(blob)); 4697 CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); 4698 4699 /* blob should not have underlying snapshot nor clones */ 4700 CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); 4701 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4702 count = SPDK_COUNTOF(ids); 4703 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4704 CU_ASSERT(rc == 0); 4705 CU_ASSERT(count == 0); 4706 4707 4708 /* 2. Create snapshot */ 4709 4710 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4711 poll_threads(); 4712 CU_ASSERT(g_bserrno == 0); 4713 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4714 snapshotid = g_blobid; 4715 4716 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4717 poll_threads(); 4718 CU_ASSERT(g_bserrno == 0); 4719 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4720 snapshot = g_blob; 4721 4722 CU_ASSERT(spdk_blob_is_read_only(snapshot)); 4723 CU_ASSERT(spdk_blob_is_snapshot(snapshot)); 4724 CU_ASSERT(!spdk_blob_is_clone(snapshot)); 4725 CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); 4726 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4727 4728 /* Check if original blob is converted to the clone of snapshot */ 4729 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4730 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4731 CU_ASSERT(spdk_blob_is_clone(blob)); 4732 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4733 CU_ASSERT(blob->parent_id == snapshotid); 4734 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4735 4736 count = SPDK_COUNTOF(ids); 4737 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4738 CU_ASSERT(rc == 0); 4739 CU_ASSERT(count == 1); 4740 CU_ASSERT(ids[0] == blobid); 4741 4742 4743 /* 3. Create clone from snapshot */ 4744 4745 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 4746 poll_threads(); 4747 CU_ASSERT(g_bserrno == 0); 4748 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4749 cloneid = g_blobid; 4750 4751 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4752 poll_threads(); 4753 CU_ASSERT(g_bserrno == 0); 4754 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4755 clone = g_blob; 4756 4757 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4758 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4759 CU_ASSERT(spdk_blob_is_clone(clone)); 4760 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4761 CU_ASSERT(clone->parent_id == snapshotid); 4762 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); 4763 4764 count = SPDK_COUNTOF(ids); 4765 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4766 CU_ASSERT(rc == 0); 4767 CU_ASSERT(count == 0); 4768 4769 /* Check if clone is on the snapshot's list */ 4770 count = SPDK_COUNTOF(ids); 4771 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4772 CU_ASSERT(rc == 0); 4773 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4774 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 4775 4776 4777 /* 4. 
Create snapshot of the clone */ 4778 4779 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 4780 poll_threads(); 4781 CU_ASSERT(g_bserrno == 0); 4782 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4783 snapshotid2 = g_blobid; 4784 4785 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4786 poll_threads(); 4787 CU_ASSERT(g_bserrno == 0); 4788 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4789 snapshot2 = g_blob; 4790 4791 CU_ASSERT(spdk_blob_is_read_only(snapshot2)); 4792 CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); 4793 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 4794 CU_ASSERT(snapshot2->parent_id == snapshotid); 4795 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4796 4797 /* Check if clone is converted to the clone of snapshot2 and snapshot2 4798 * is a child of snapshot */ 4799 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4800 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4801 CU_ASSERT(spdk_blob_is_clone(clone)); 4802 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4803 CU_ASSERT(clone->parent_id == snapshotid2); 4804 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4805 4806 count = SPDK_COUNTOF(ids); 4807 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4808 CU_ASSERT(rc == 0); 4809 CU_ASSERT(count == 1); 4810 CU_ASSERT(ids[0] == cloneid); 4811 4812 4813 /* 5. Try to create clone from read only blob */ 4814 4815 /* Mark blob as read only */ 4816 spdk_blob_set_read_only(blob); 4817 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4818 poll_threads(); 4819 CU_ASSERT(g_bserrno == 0); 4820 4821 /* Check if previously created blob is read only clone */ 4822 CU_ASSERT(spdk_blob_is_read_only(blob)); 4823 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4824 CU_ASSERT(spdk_blob_is_clone(blob)); 4825 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4826 4827 /* Create clone from read only blob */ 4828 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4829 poll_threads(); 4830 CU_ASSERT(g_bserrno == 0); 4831 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4832 cloneid2 = g_blobid; 4833 4834 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 4835 poll_threads(); 4836 CU_ASSERT(g_bserrno == 0); 4837 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4838 clone2 = g_blob; 4839 4840 CU_ASSERT(!spdk_blob_is_read_only(clone2)); 4841 CU_ASSERT(!spdk_blob_is_snapshot(clone2)); 4842 CU_ASSERT(spdk_blob_is_clone(clone2)); 4843 CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); 4844 4845 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4846 4847 count = SPDK_COUNTOF(ids); 4848 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4849 CU_ASSERT(rc == 0); 4850 4851 CU_ASSERT(count == 1); 4852 CU_ASSERT(ids[0] == cloneid2); 4853 4854 /* Close blobs */ 4855 4856 spdk_blob_close(clone2, blob_op_complete, NULL); 4857 poll_threads(); 4858 CU_ASSERT(g_bserrno == 0); 4859 4860 spdk_blob_close(blob, blob_op_complete, NULL); 4861 poll_threads(); 4862 CU_ASSERT(g_bserrno == 0); 4863 4864 spdk_blob_close(clone, blob_op_complete, NULL); 4865 poll_threads(); 4866 CU_ASSERT(g_bserrno == 0); 4867 4868 spdk_blob_close(snapshot, blob_op_complete, NULL); 4869 poll_threads(); 4870 CU_ASSERT(g_bserrno == 0); 4871 4872 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4873 poll_threads(); 4874 CU_ASSERT(g_bserrno == 0); 4875 4876 /* Try to delete snapshot with more than 1 clone */ 4877 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4878 poll_threads(); 4879 CU_ASSERT(g_bserrno != 0); 4880 4881 
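	/* Reload the blob store and verify that the whole snapshot-clone
	 * structure was persisted to disk. */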
ut_bs_reload(&bs, &bs_opts); 4882 4883 /* NULL ids array should return number of clones in count */ 4884 count = SPDK_COUNTOF(ids); 4885 rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); 4886 CU_ASSERT(rc == -ENOMEM); 4887 CU_ASSERT(count == 2); 4888 4889 /* incorrect array size */ 4890 count = 1; 4891 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4892 CU_ASSERT(rc == -ENOMEM); 4893 CU_ASSERT(count == 2); 4894 4895 4896 /* Verify structure of loaded blob store */ 4897 4898 /* snapshot */ 4899 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4900 4901 count = SPDK_COUNTOF(ids); 4902 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4903 CU_ASSERT(rc == 0); 4904 CU_ASSERT(count == 2); 4905 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4906 CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); 4907 4908 /* blob */ 4909 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4910 count = SPDK_COUNTOF(ids); 4911 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4912 CU_ASSERT(rc == 0); 4913 CU_ASSERT(count == 1); 4914 CU_ASSERT(ids[0] == cloneid2); 4915 4916 /* clone */ 4917 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4918 count = SPDK_COUNTOF(ids); 4919 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4920 CU_ASSERT(rc == 0); 4921 CU_ASSERT(count == 0); 4922 4923 /* snapshot2 */ 4924 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4925 count = SPDK_COUNTOF(ids); 4926 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4927 CU_ASSERT(rc == 0); 4928 CU_ASSERT(count == 1); 4929 CU_ASSERT(ids[0] == cloneid); 4930 4931 /* clone2 */ 4932 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4933 count = SPDK_COUNTOF(ids); 4934 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 4935 CU_ASSERT(rc == 0); 4936 CU_ASSERT(count == 0); 4937 4938 /* Try to delete blob that user should not be able to remove */ 4939 4940 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4941 poll_threads(); 4942 CU_ASSERT(g_bserrno != 0); 4943 4944 /* Remove all blobs */ 4945 4946 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 4947 poll_threads(); 4948 CU_ASSERT(g_bserrno == 0); 4949 4950 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 4951 poll_threads(); 4952 CU_ASSERT(g_bserrno == 0); 4953 4954 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 4955 poll_threads(); 4956 CU_ASSERT(g_bserrno == 0); 4957 4958 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 4959 poll_threads(); 4960 CU_ASSERT(g_bserrno == 0); 4961 4962 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4963 poll_threads(); 4964 CU_ASSERT(g_bserrno == 0); 4965 4966 spdk_bs_unload(bs, bs_op_complete, NULL); 4967 poll_threads(); 4968 CU_ASSERT(g_bserrno == 0); 4969 4970 g_bs = NULL; 4971 } 4972 4973 /** 4974 * Snapshot-clones relation test 2 4975 * 4976 * snapshot1 4977 * | 4978 * snapshot2 4979 * | 4980 * +-----+-----+ 4981 * | | 4982 * blob(ro) snapshot3 4983 * | | 4984 * | snapshot4 4985 * | | | 4986 * clone2 clone clone3 4987 */ 4988 static void 4989 blob_relations2(void) 4990 { 4991 struct spdk_blob_store *bs; 4992 struct spdk_bs_dev *dev; 4993 struct spdk_bs_opts bs_opts; 4994 struct spdk_blob_opts opts; 4995 struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; 4996 spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, 4997 cloneid3; 4998 int rc; 4999 size_t count; 5000 
spdk_blob_id ids[10] = {}; 5001 5002 dev = init_dev(); 5003 spdk_bs_opts_init(&bs_opts); 5004 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 5005 5006 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 5007 poll_threads(); 5008 CU_ASSERT(g_bserrno == 0); 5009 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5010 bs = g_bs; 5011 5012 /* 1. Create blob with 10 clusters */ 5013 5014 ut_spdk_blob_opts_init(&opts); 5015 opts.num_clusters = 10; 5016 5017 blob = ut_blob_create_and_open(bs, &opts); 5018 blobid = spdk_blob_get_id(blob); 5019 5020 /* 2. Create snapshot1 */ 5021 5022 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5023 poll_threads(); 5024 CU_ASSERT(g_bserrno == 0); 5025 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5026 snapshotid1 = g_blobid; 5027 5028 spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); 5029 poll_threads(); 5030 CU_ASSERT(g_bserrno == 0); 5031 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5032 snapshot1 = g_blob; 5033 5034 CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); 5035 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); 5036 5037 CU_ASSERT(blob->parent_id == snapshotid1); 5038 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5039 5040 /* Check if blob is the clone of snapshot1 */ 5041 CU_ASSERT(blob->parent_id == snapshotid1); 5042 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5043 5044 count = SPDK_COUNTOF(ids); 5045 rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); 5046 CU_ASSERT(rc == 0); 5047 CU_ASSERT(count == 1); 5048 CU_ASSERT(ids[0] == blobid); 5049 5050 /* 3. Create another snapshot */ 5051 5052 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5053 poll_threads(); 5054 CU_ASSERT(g_bserrno == 0); 5055 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5056 snapshotid2 = g_blobid; 5057 5058 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 5059 poll_threads(); 5060 CU_ASSERT(g_bserrno == 0); 5061 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5062 snapshot2 = g_blob; 5063 5064 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 5065 CU_ASSERT(snapshot2->parent_id == snapshotid1); 5066 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); 5067 5068 /* Check if snapshot2 is the clone of snapshot1 and blob 5069 * is a child of snapshot2 */ 5070 CU_ASSERT(blob->parent_id == snapshotid2); 5071 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5072 5073 count = SPDK_COUNTOF(ids); 5074 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5075 CU_ASSERT(rc == 0); 5076 CU_ASSERT(count == 1); 5077 CU_ASSERT(ids[0] == blobid); 5078 5079 /* 4. 
Create clone from snapshot */ 5080 5081 spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); 5082 poll_threads(); 5083 CU_ASSERT(g_bserrno == 0); 5084 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5085 cloneid = g_blobid; 5086 5087 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 5088 poll_threads(); 5089 CU_ASSERT(g_bserrno == 0); 5090 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5091 clone = g_blob; 5092 5093 CU_ASSERT(clone->parent_id == snapshotid2); 5094 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5095 5096 /* Check if clone is on the snapshot's list */ 5097 count = SPDK_COUNTOF(ids); 5098 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5099 CU_ASSERT(rc == 0); 5100 CU_ASSERT(count == 2); 5101 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5102 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5103 5104 /* 5. Create snapshot of the clone */ 5105 5106 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5107 poll_threads(); 5108 CU_ASSERT(g_bserrno == 0); 5109 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5110 snapshotid3 = g_blobid; 5111 5112 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5113 poll_threads(); 5114 CU_ASSERT(g_bserrno == 0); 5115 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5116 snapshot3 = g_blob; 5117 5118 CU_ASSERT(snapshot3->parent_id == snapshotid2); 5119 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5120 5121 /* Check if clone is converted to the clone of snapshot3 and snapshot3 5122 * is a child of snapshot2 */ 5123 CU_ASSERT(clone->parent_id == snapshotid3); 5124 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5125 5126 count = SPDK_COUNTOF(ids); 5127 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5128 CU_ASSERT(rc == 0); 5129 CU_ASSERT(count == 1); 5130 CU_ASSERT(ids[0] == cloneid); 5131 5132 /* 6. Create another snapshot of the clone */ 5133 5134 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5135 poll_threads(); 5136 CU_ASSERT(g_bserrno == 0); 5137 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5138 snapshotid4 = g_blobid; 5139 5140 spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); 5141 poll_threads(); 5142 CU_ASSERT(g_bserrno == 0); 5143 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5144 snapshot4 = g_blob; 5145 5146 CU_ASSERT(snapshot4->parent_id == snapshotid3); 5147 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); 5148 5149 /* Check if clone is converted to the clone of snapshot4 and snapshot4 5150 * is a child of snapshot3 */ 5151 CU_ASSERT(clone->parent_id == snapshotid4); 5152 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); 5153 5154 count = SPDK_COUNTOF(ids); 5155 rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); 5156 CU_ASSERT(rc == 0); 5157 CU_ASSERT(count == 1); 5158 CU_ASSERT(ids[0] == cloneid); 5159 5160 /* 7. Remove snapshot 4 */ 5161 5162 ut_blob_close_and_delete(bs, snapshot4); 5163 5164 /* Check if relations are back to state from before creating snapshot 4 */ 5165 CU_ASSERT(clone->parent_id == snapshotid3); 5166 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5167 5168 count = SPDK_COUNTOF(ids); 5169 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5170 CU_ASSERT(rc == 0); 5171 CU_ASSERT(count == 1); 5172 CU_ASSERT(ids[0] == cloneid); 5173 5174 /* 8. 
Create second clone of snapshot 3 and try to remove snapshot 3 */ 5175 5176 spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); 5177 poll_threads(); 5178 CU_ASSERT(g_bserrno == 0); 5179 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5180 cloneid3 = g_blobid; 5181 5182 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5183 poll_threads(); 5184 CU_ASSERT(g_bserrno != 0); 5185 5186 /* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */ 5187 5188 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5189 poll_threads(); 5190 CU_ASSERT(g_bserrno == 0); 5191 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5192 snapshot3 = g_blob; 5193 5194 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5195 poll_threads(); 5196 CU_ASSERT(g_bserrno != 0); 5197 5198 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5199 poll_threads(); 5200 CU_ASSERT(g_bserrno == 0); 5201 5202 spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); 5203 poll_threads(); 5204 CU_ASSERT(g_bserrno == 0); 5205 5206 /* 10. Remove snapshot 1 */ 5207 5208 ut_blob_close_and_delete(bs, snapshot1); 5209 5210 /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ 5211 CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); 5212 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5213 5214 count = SPDK_COUNTOF(ids); 5215 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5216 CU_ASSERT(rc == 0); 5217 CU_ASSERT(count == 2); 5218 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5219 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5220 5221 /* 11. Try to create clone from read only blob */ 5222 5223 /* Mark blob as read only */ 5224 spdk_blob_set_read_only(blob); 5225 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5226 poll_threads(); 5227 CU_ASSERT(g_bserrno == 0); 5228 5229 /* Create clone from read only blob */ 5230 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5231 poll_threads(); 5232 CU_ASSERT(g_bserrno == 0); 5233 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5234 cloneid2 = g_blobid; 5235 5236 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5237 poll_threads(); 5238 CU_ASSERT(g_bserrno == 0); 5239 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5240 clone2 = g_blob; 5241 5242 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5243 5244 count = SPDK_COUNTOF(ids); 5245 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5246 CU_ASSERT(rc == 0); 5247 CU_ASSERT(count == 1); 5248 CU_ASSERT(ids[0] == cloneid2); 5249 5250 /* Close blobs */ 5251 5252 spdk_blob_close(clone2, blob_op_complete, NULL); 5253 poll_threads(); 5254 CU_ASSERT(g_bserrno == 0); 5255 5256 spdk_blob_close(blob, blob_op_complete, NULL); 5257 poll_threads(); 5258 CU_ASSERT(g_bserrno == 0); 5259 5260 spdk_blob_close(clone, blob_op_complete, NULL); 5261 poll_threads(); 5262 CU_ASSERT(g_bserrno == 0); 5263 5264 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5265 poll_threads(); 5266 CU_ASSERT(g_bserrno == 0); 5267 5268 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5269 poll_threads(); 5270 CU_ASSERT(g_bserrno == 0); 5271 5272 ut_bs_reload(&bs, &bs_opts); 5273 5274 /* Verify structure of loaded blob store */ 5275 5276 /* snapshot2 */ 5277 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5278 5279 count = SPDK_COUNTOF(ids); 5280 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5281 CU_ASSERT(rc == 0); 5282 
CU_ASSERT(count == 2);
5283 CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5284 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5285
5286 /* blob */
5287 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5288 count = SPDK_COUNTOF(ids);
5289 rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5290 CU_ASSERT(rc == 0);
5291 CU_ASSERT(count == 1);
5292 CU_ASSERT(ids[0] == cloneid2);
5293
5294 /* clone */
5295 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5296 count = SPDK_COUNTOF(ids);
5297 rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5298 CU_ASSERT(rc == 0);
5299 CU_ASSERT(count == 0);
5300
5301 /* snapshot3 */
5302 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5303 count = SPDK_COUNTOF(ids);
5304 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5305 CU_ASSERT(rc == 0);
5306 CU_ASSERT(count == 1);
5307 CU_ASSERT(ids[0] == cloneid);
5308
5309 /* clone2 */
5310 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5311 count = SPDK_COUNTOF(ids);
5312 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5313 CU_ASSERT(rc == 0);
5314 CU_ASSERT(count == 0);
5315
5316 /* Try to delete all blobs in the worst possible order */
5317
5318 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5319 poll_threads();
5320 CU_ASSERT(g_bserrno != 0);
5321
5322 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5323 poll_threads();
5324 CU_ASSERT(g_bserrno == 0);
5325
5326 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5327 poll_threads();
5328 CU_ASSERT(g_bserrno != 0);
5329
5330 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5331 poll_threads();
5332 CU_ASSERT(g_bserrno == 0);
5333
5334 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5335 poll_threads();
5336 CU_ASSERT(g_bserrno == 0);
5337
5338 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5339 poll_threads();
5340 CU_ASSERT(g_bserrno == 0);
5341
5342 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5343 poll_threads();
5344 CU_ASSERT(g_bserrno == 0);
5345
5346 spdk_bs_unload(bs, bs_op_complete, NULL);
5347 poll_threads();
5348 CU_ASSERT(g_bserrno == 0);
5349
5350 g_bs = NULL;
5351 }
5352
5353 static void
5354 blobstore_clean_power_failure(void)
5355 {
5356 struct spdk_blob_store *bs;
5357 struct spdk_blob *blob;
5358 struct spdk_power_failure_thresholds thresholds = {};
5359 bool clean = false;
5360 struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5361 struct spdk_bs_super_block super_copy = {};
5362
5363 thresholds.general_threshold = 1;
5364 while (!clean) {
5365 /* Create bs and blob */
5366 suite_blob_setup();
5367 SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5368 SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5369 bs = g_bs;
5370 blob = g_blob;
5371
5372 /* The super block should not change for the rest of the UT,
5373 * so save it and compare later. */
5374 memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5375 SPDK_CU_ASSERT_FATAL(super->clean == 0);
5376 SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5377
5378 /* Force the bs/super block into a clean state,
5379 * and mark the blob dirty to cause a blob persist.
*/ 5380 blob->state = SPDK_BLOB_STATE_DIRTY; 5381 bs->clean = 1; 5382 super->clean = 1; 5383 super->crc = blob_md_page_calc_crc(super); 5384 5385 g_bserrno = -1; 5386 dev_set_power_failure_thresholds(thresholds); 5387 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5388 poll_threads(); 5389 dev_reset_power_failure_event(); 5390 5391 if (g_bserrno == 0) { 5392 /* After successful md sync, both bs and super block 5393 * should be marked as not clean. */ 5394 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5395 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5396 clean = true; 5397 } 5398 5399 /* Depending on the point of failure, super block was either updated or not. */ 5400 super_copy.clean = super->clean; 5401 super_copy.crc = blob_md_page_calc_crc(&super_copy); 5402 /* Compare that the values in super block remained unchanged. */ 5403 SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block))); 5404 5405 /* Delete blob and unload bs */ 5406 suite_blob_cleanup(); 5407 5408 thresholds.general_threshold++; 5409 } 5410 } 5411 5412 static void 5413 blob_delete_snapshot_power_failure(void) 5414 { 5415 struct spdk_bs_dev *dev; 5416 struct spdk_blob_store *bs; 5417 struct spdk_blob_opts opts; 5418 struct spdk_blob *blob, *snapshot; 5419 struct spdk_power_failure_thresholds thresholds = {}; 5420 spdk_blob_id blobid, snapshotid; 5421 const void *value; 5422 size_t value_len; 5423 size_t count; 5424 spdk_blob_id ids[3] = {}; 5425 int rc; 5426 bool deleted = false; 5427 int delete_snapshot_bserrno = -1; 5428 5429 thresholds.general_threshold = 1; 5430 while (!deleted) { 5431 dev = init_dev(); 5432 5433 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5434 poll_threads(); 5435 CU_ASSERT(g_bserrno == 0); 5436 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5437 bs = g_bs; 5438 5439 /* Create blob */ 5440 ut_spdk_blob_opts_init(&opts); 5441 opts.num_clusters = 10; 5442 5443 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5444 poll_threads(); 5445 CU_ASSERT(g_bserrno == 0); 5446 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5447 blobid = g_blobid; 5448 5449 /* Create snapshot */ 5450 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5451 poll_threads(); 5452 CU_ASSERT(g_bserrno == 0); 5453 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5454 snapshotid = g_blobid; 5455 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5456 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5457 5458 dev_set_power_failure_thresholds(thresholds); 5459 5460 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5461 poll_threads(); 5462 delete_snapshot_bserrno = g_bserrno; 5463 5464 /* Do not shut down cleanly. Assumption is that after snapshot deletion 5465 * reports success, changes to both blobs should already persisted. 
*/ 5466 dev_reset_power_failure_event(); 5467 ut_bs_dirty_load(&bs, NULL); 5468 5469 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5470 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5471 5472 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5473 poll_threads(); 5474 CU_ASSERT(g_bserrno == 0); 5475 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5476 blob = g_blob; 5477 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5478 5479 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5480 poll_threads(); 5481 5482 if (g_bserrno == 0) { 5483 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5484 snapshot = g_blob; 5485 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5486 count = SPDK_COUNTOF(ids); 5487 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5488 CU_ASSERT(rc == 0); 5489 CU_ASSERT(count == 1); 5490 CU_ASSERT(ids[0] == blobid); 5491 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 5492 CU_ASSERT(rc != 0); 5493 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5494 5495 spdk_blob_close(snapshot, blob_op_complete, NULL); 5496 poll_threads(); 5497 CU_ASSERT(g_bserrno == 0); 5498 } else { 5499 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5500 /* Snapshot might have been left in unrecoverable state, so it does not open. 5501 * Yet delete might perform further changes to the clone after that. 5502 * This UT should test until snapshot is deleted and delete call succeeds. */ 5503 if (delete_snapshot_bserrno == 0) { 5504 deleted = true; 5505 } 5506 } 5507 5508 spdk_blob_close(blob, blob_op_complete, NULL); 5509 poll_threads(); 5510 CU_ASSERT(g_bserrno == 0); 5511 5512 spdk_bs_unload(bs, bs_op_complete, NULL); 5513 poll_threads(); 5514 CU_ASSERT(g_bserrno == 0); 5515 5516 thresholds.general_threshold++; 5517 } 5518 } 5519 5520 static void 5521 blob_create_snapshot_power_failure(void) 5522 { 5523 struct spdk_blob_store *bs = g_bs; 5524 struct spdk_bs_dev *dev; 5525 struct spdk_blob_opts opts; 5526 struct spdk_blob *blob, *snapshot; 5527 struct spdk_power_failure_thresholds thresholds = {}; 5528 spdk_blob_id blobid, snapshotid; 5529 const void *value; 5530 size_t value_len; 5531 size_t count; 5532 spdk_blob_id ids[3] = {}; 5533 int rc; 5534 bool created = false; 5535 int create_snapshot_bserrno = -1; 5536 5537 thresholds.general_threshold = 1; 5538 while (!created) { 5539 dev = init_dev(); 5540 5541 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5542 poll_threads(); 5543 CU_ASSERT(g_bserrno == 0); 5544 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5545 bs = g_bs; 5546 5547 /* Create blob */ 5548 ut_spdk_blob_opts_init(&opts); 5549 opts.num_clusters = 10; 5550 5551 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5552 poll_threads(); 5553 CU_ASSERT(g_bserrno == 0); 5554 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5555 blobid = g_blobid; 5556 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5557 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5558 5559 dev_set_power_failure_thresholds(thresholds); 5560 5561 /* Create snapshot */ 5562 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5563 poll_threads(); 5564 create_snapshot_bserrno = g_bserrno; 5565 snapshotid = g_blobid; 5566 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5567 
SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5568 5569 /* Do not shut down cleanly. Assumption is that after create snapshot 5570 * reports success, both blobs should be power-fail safe. */ 5571 dev_reset_power_failure_event(); 5572 ut_bs_dirty_load(&bs, NULL); 5573 5574 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5575 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5576 5577 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5578 poll_threads(); 5579 CU_ASSERT(g_bserrno == 0); 5580 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5581 blob = g_blob; 5582 5583 if (snapshotid != SPDK_BLOBID_INVALID) { 5584 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5585 poll_threads(); 5586 } 5587 5588 if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { 5589 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5590 snapshot = g_blob; 5591 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5592 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5593 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5594 count = SPDK_COUNTOF(ids); 5595 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5596 CU_ASSERT(rc == 0); 5597 CU_ASSERT(count == 1); 5598 CU_ASSERT(ids[0] == blobid); 5599 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); 5600 CU_ASSERT(rc != 0); 5601 5602 spdk_blob_close(snapshot, blob_op_complete, NULL); 5603 poll_threads(); 5604 CU_ASSERT(g_bserrno == 0); 5605 if (create_snapshot_bserrno == 0) { 5606 created = true; 5607 } 5608 } else { 5609 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5610 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); 5611 } 5612 5613 spdk_blob_close(blob, blob_op_complete, NULL); 5614 poll_threads(); 5615 CU_ASSERT(g_bserrno == 0); 5616 5617 spdk_bs_unload(bs, bs_op_complete, NULL); 5618 poll_threads(); 5619 CU_ASSERT(g_bserrno == 0); 5620 5621 thresholds.general_threshold++; 5622 } 5623 } 5624 5625 static void 5626 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5627 { 5628 uint8_t payload_ff[64 * 512]; 5629 uint8_t payload_aa[64 * 512]; 5630 uint8_t payload_00[64 * 512]; 5631 uint8_t *cluster0, *cluster1; 5632 5633 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5634 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5635 memset(payload_00, 0x00, sizeof(payload_00)); 5636 5637 /* Try to perform I/O with io unit = 512 */ 5638 spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); 5639 poll_threads(); 5640 CU_ASSERT(g_bserrno == 0); 5641 5642 /* If thin provisioned is set cluster should be allocated now */ 5643 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5644 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5645 5646 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5647 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). 
*/ 5648 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5649 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5650 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5651 5652 /* Verify write with offset on first page */ 5653 spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); 5654 poll_threads(); 5655 CU_ASSERT(g_bserrno == 0); 5656 5657 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5658 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5659 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5660 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5661 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5662 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5663 5664 /* Verify write with offset on first page */ 5665 spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); 5666 poll_threads(); 5667 5668 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5669 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5670 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5671 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5672 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5673 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5674 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5675 5676 /* Verify write with offset on second page */ 5677 spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); 5678 poll_threads(); 5679 5680 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5681 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5682 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5683 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5684 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5685 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5686 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5687 5688 /* Verify write across multiple pages */ 5689 spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); 5690 poll_threads(); 5691 5692 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ 5693 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5694 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5695 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5696 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5697 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5698 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5699 5700 /* Verify write across multiple clusters */ 5701 spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); 5702 poll_threads(); 5703 5704 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5705 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5706 5707 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5708 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5709 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5710 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5711 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5712 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5713 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5714 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 
4 * 512) == 0); 5715 5716 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5717 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5718 5719 /* Verify write to second cluster */ 5720 spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); 5721 poll_threads(); 5722 5723 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5724 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5725 5726 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5727 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5728 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5729 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5730 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5731 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5732 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5733 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5734 5735 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5736 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5737 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5738 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5739 } 5740 5741 static void 5742 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5743 { 5744 uint8_t payload_read[64 * 512]; 5745 uint8_t payload_ff[64 * 512]; 5746 uint8_t payload_aa[64 * 512]; 5747 uint8_t payload_00[64 * 512]; 5748 5749 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5750 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5751 memset(payload_00, 0x00, sizeof(payload_00)); 5752 5753 /* Read only first io unit */ 5754 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5755 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5756 * payload_read: F000 0000 | 0000 0000 ... */ 5757 memset(payload_read, 0x00, sizeof(payload_read)); 5758 spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); 5759 poll_threads(); 5760 CU_ASSERT(g_bserrno == 0); 5761 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5762 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5763 5764 /* Read four io_units starting from offset = 2 5765 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5766 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5767 * payload_read: F0AA 0000 | 0000 0000 ... */ 5768 5769 memset(payload_read, 0x00, sizeof(payload_read)); 5770 spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); 5771 poll_threads(); 5772 CU_ASSERT(g_bserrno == 0); 5773 5774 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5775 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5776 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5777 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5778 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5779 5780 /* Read eight io_units across multiple pages 5781 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5782 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5783 * payload_read: AAAA AAAA | 0000 0000 ... 
*/ 5784 memset(payload_read, 0x00, sizeof(payload_read)); 5785 spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); 5786 poll_threads(); 5787 CU_ASSERT(g_bserrno == 0); 5788 5789 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5790 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5791 5792 /* Read eight io_units across multiple clusters 5793 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5794 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5795 * payload_read: FFFF FFFF | 0000 0000 ... */ 5796 memset(payload_read, 0x00, sizeof(payload_read)); 5797 spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); 5798 poll_threads(); 5799 CU_ASSERT(g_bserrno == 0); 5800 5801 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 5802 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5803 5804 /* Read four io_units from second cluster 5805 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5806 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 5807 * payload_read: 00FF 0000 | 0000 0000 ... */ 5808 memset(payload_read, 0x00, sizeof(payload_read)); 5809 spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); 5810 poll_threads(); 5811 CU_ASSERT(g_bserrno == 0); 5812 5813 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 5814 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 5815 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5816 5817 /* Read second cluster 5818 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5819 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 5820 * payload_read: FFFF 0000 | 0000 FF00 ... 
*/ 5821 memset(payload_read, 0x00, sizeof(payload_read)); 5822 spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); 5823 poll_threads(); 5824 CU_ASSERT(g_bserrno == 0); 5825 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 5826 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 5827 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 5828 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 5829 5830 /* Read whole two clusters 5831 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5832 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 5833 memset(payload_read, 0x00, sizeof(payload_read)); 5834 spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); 5835 poll_threads(); 5836 CU_ASSERT(g_bserrno == 0); 5837 5838 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5839 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5840 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 5841 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 5842 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 5843 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 5844 5845 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 5846 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 5847 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 5848 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 5849 } 5850 5851 5852 static void 5853 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5854 { 5855 uint8_t payload_ff[64 * 512]; 5856 uint8_t payload_aa[64 * 512]; 5857 uint8_t payload_00[64 * 512]; 5858 uint8_t *cluster0, *cluster1; 5859 5860 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5861 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5862 memset(payload_00, 0x00, sizeof(payload_00)); 5863 5864 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5865 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5866 5867 /* Unmap */ 5868 spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); 5869 poll_threads(); 5870 5871 CU_ASSERT(g_bserrno == 0); 5872 5873 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5874 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5875 } 5876 5877 static void 5878 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5879 { 5880 uint8_t payload_ff[64 * 512]; 5881 uint8_t payload_aa[64 * 512]; 5882 uint8_t payload_00[64 * 512]; 5883 uint8_t *cluster0, *cluster1; 5884 5885 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5886 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5887 memset(payload_00, 0x00, sizeof(payload_00)); 5888 5889 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5890 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5891 5892 /* Write zeroes */ 5893 spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); 5894 poll_threads(); 5895 5896 CU_ASSERT(g_bserrno == 0); 5897 5898 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5899 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5900 } 5901 5902 5903 static void 5904 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 
5905 { 5906 uint8_t payload_ff[64 * 512]; 5907 uint8_t payload_aa[64 * 512]; 5908 uint8_t payload_00[64 * 512]; 5909 uint8_t *cluster0, *cluster1; 5910 struct iovec iov[4]; 5911 5912 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5913 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5914 memset(payload_00, 0x00, sizeof(payload_00)); 5915 5916 /* Try to perform I/O with io unit = 512 */ 5917 iov[0].iov_base = payload_ff; 5918 iov[0].iov_len = 1 * 512; 5919 spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5920 poll_threads(); 5921 CU_ASSERT(g_bserrno == 0); 5922 5923 /* If thin provisioned is set cluster should be allocated now */ 5924 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5925 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5926 5927 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5928 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ 5929 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5930 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5931 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5932 5933 /* Verify write with offset on first page */ 5934 iov[0].iov_base = payload_ff; 5935 iov[0].iov_len = 1 * 512; 5936 spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); 5937 poll_threads(); 5938 CU_ASSERT(g_bserrno == 0); 5939 5940 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5941 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5942 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5943 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5944 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5945 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5946 5947 /* Verify write with offset on first page */ 5948 iov[0].iov_base = payload_ff; 5949 iov[0].iov_len = 4 * 512; 5950 spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); 5951 poll_threads(); 5952 5953 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5954 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5955 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5956 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5957 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5958 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5959 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5960 5961 /* Verify write with offset on second page */ 5962 iov[0].iov_base = payload_ff; 5963 iov[0].iov_len = 4 * 512; 5964 spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); 5965 poll_threads(); 5966 5967 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5968 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5969 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5970 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5971 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5972 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5973 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5974 5975 /* Verify write across multiple pages */ 5976 iov[0].iov_base = payload_aa; 5977 iov[0].iov_len = 8 * 512; 5978 spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); 5979 poll_threads(); 5980 5981 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 
0000 | 0000 0000 ] */ 5982 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5983 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5984 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5985 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5986 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5987 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5988 5989 /* Verify write across multiple clusters */ 5990 5991 iov[0].iov_base = payload_ff; 5992 iov[0].iov_len = 8 * 512; 5993 spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); 5994 poll_threads(); 5995 5996 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5997 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5998 5999 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6000 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6001 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6002 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6003 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6004 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6005 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6006 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); 6007 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 6008 6009 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6010 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 6011 6012 /* Verify write to second cluster */ 6013 6014 iov[0].iov_base = payload_ff; 6015 iov[0].iov_len = 2 * 512; 6016 spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); 6017 poll_threads(); 6018 6019 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 6020 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6021 6022 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6023 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 6024 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6025 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6026 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6027 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6028 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6029 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 6030 6031 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6032 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 6033 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 6034 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 6035 } 6036 6037 static void 6038 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6039 { 6040 uint8_t payload_read[64 * 512]; 6041 uint8_t payload_ff[64 * 512]; 6042 uint8_t payload_aa[64 * 512]; 6043 uint8_t payload_00[64 * 512]; 6044 struct iovec iov[4]; 6045 6046 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6047 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6048 memset(payload_00, 0x00, sizeof(payload_00)); 6049 6050 /* Read only first io unit */ 6051 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6052 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6053 * payload_read: F000 0000 | 0000 0000 ... 
*/ 6054 memset(payload_read, 0x00, sizeof(payload_read)); 6055 iov[0].iov_base = payload_read; 6056 iov[0].iov_len = 1 * 512; 6057 spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 6058 poll_threads(); 6059 6060 CU_ASSERT(g_bserrno == 0); 6061 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6062 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 6063 6064 /* Read four io_units starting from offset = 2 6065 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6066 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6067 * payload_read: F0AA 0000 | 0000 0000 ... */ 6068 6069 memset(payload_read, 0x00, sizeof(payload_read)); 6070 iov[0].iov_base = payload_read; 6071 iov[0].iov_len = 4 * 512; 6072 spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); 6073 poll_threads(); 6074 CU_ASSERT(g_bserrno == 0); 6075 6076 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6077 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6078 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 6079 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 6080 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6081 6082 /* Read eight io_units across multiple pages 6083 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6084 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6085 * payload_read: AAAA AAAA | 0000 0000 ... */ 6086 memset(payload_read, 0x00, sizeof(payload_read)); 6087 iov[0].iov_base = payload_read; 6088 iov[0].iov_len = 4 * 512; 6089 iov[1].iov_base = payload_read + 4 * 512; 6090 iov[1].iov_len = 4 * 512; 6091 spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); 6092 poll_threads(); 6093 CU_ASSERT(g_bserrno == 0); 6094 6095 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 6096 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6097 6098 /* Read eight io_units across multiple clusters 6099 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 6100 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6101 * payload_read: FFFF FFFF | 0000 0000 ... */ 6102 memset(payload_read, 0x00, sizeof(payload_read)); 6103 iov[0].iov_base = payload_read; 6104 iov[0].iov_len = 2 * 512; 6105 iov[1].iov_base = payload_read + 2 * 512; 6106 iov[1].iov_len = 2 * 512; 6107 iov[2].iov_base = payload_read + 4 * 512; 6108 iov[2].iov_len = 2 * 512; 6109 iov[3].iov_base = payload_read + 6 * 512; 6110 iov[3].iov_len = 2 * 512; 6111 spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); 6112 poll_threads(); 6113 CU_ASSERT(g_bserrno == 0); 6114 6115 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 6116 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6117 6118 /* Read four io_units from second cluster 6119 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6120 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 6121 * payload_read: 00FF 0000 | 0000 0000 ... 
*/ 6122 memset(payload_read, 0x00, sizeof(payload_read)); 6123 iov[0].iov_base = payload_read; 6124 iov[0].iov_len = 1 * 512; 6125 iov[1].iov_base = payload_read + 1 * 512; 6126 iov[1].iov_len = 3 * 512; 6127 spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); 6128 poll_threads(); 6129 CU_ASSERT(g_bserrno == 0); 6130 6131 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 6132 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 6133 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6134 6135 /* Read second cluster 6136 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6137 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6138 * payload_read: FFFF 0000 | 0000 FF00 ... */ 6139 memset(payload_read, 0x00, sizeof(payload_read)); 6140 iov[0].iov_base = payload_read; 6141 iov[0].iov_len = 1 * 512; 6142 iov[1].iov_base = payload_read + 1 * 512; 6143 iov[1].iov_len = 2 * 512; 6144 iov[2].iov_base = payload_read + 3 * 512; 6145 iov[2].iov_len = 4 * 512; 6146 iov[3].iov_base = payload_read + 7 * 512; 6147 iov[3].iov_len = 25 * 512; 6148 spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); 6149 poll_threads(); 6150 CU_ASSERT(g_bserrno == 0); 6151 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6152 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6153 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6154 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6155 6156 /* Read whole two clusters 6157 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6158 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6159 memset(payload_read, 0x00, sizeof(payload_read)); 6160 iov[0].iov_base = payload_read; 6161 iov[0].iov_len = 1 * 512; 6162 iov[1].iov_base = payload_read + 1 * 512; 6163 iov[1].iov_len = 8 * 512; 6164 iov[2].iov_base = payload_read + 9 * 512; 6165 iov[2].iov_len = 16 * 512; 6166 iov[3].iov_base = payload_read + 25 * 512; 6167 iov[3].iov_len = 39 * 512; 6168 spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); 6169 poll_threads(); 6170 CU_ASSERT(g_bserrno == 0); 6171 6172 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6173 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6174 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6175 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6176 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6177 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6178 6179 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6180 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6181 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6182 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6183 } 6184 6185 static void 6186 blob_io_unit(void) 6187 { 6188 struct spdk_bs_opts bsopts; 6189 struct spdk_blob_opts opts; 6190 struct spdk_blob_store *bs; 6191 struct spdk_bs_dev *dev; 6192 struct spdk_blob *blob, *snapshot, *clone; 6193 spdk_blob_id blobid; 6194 struct spdk_io_channel *channel; 6195 6196 /* Create dev with 512 bytes io unit size */ 6197 6198 spdk_bs_opts_init(&bsopts); 6199 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6200 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), 
"TESTTYPE"); 6201 6202 /* Try to initialize a new blob store with unsupported io_unit */ 6203 dev = init_dev(); 6204 dev->blocklen = 512; 6205 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6206 6207 /* Initialize a new blob store */ 6208 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6209 poll_threads(); 6210 CU_ASSERT(g_bserrno == 0); 6211 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6212 bs = g_bs; 6213 6214 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6215 channel = spdk_bs_alloc_io_channel(bs); 6216 6217 /* Create thick provisioned blob */ 6218 ut_spdk_blob_opts_init(&opts); 6219 opts.thin_provision = false; 6220 opts.num_clusters = 32; 6221 6222 blob = ut_blob_create_and_open(bs, &opts); 6223 blobid = spdk_blob_get_id(blob); 6224 6225 test_io_write(dev, blob, channel); 6226 test_io_read(dev, blob, channel); 6227 test_io_zeroes(dev, blob, channel); 6228 6229 test_iov_write(dev, blob, channel); 6230 test_iov_read(dev, blob, channel); 6231 6232 test_io_unmap(dev, blob, channel); 6233 6234 spdk_blob_close(blob, blob_op_complete, NULL); 6235 poll_threads(); 6236 CU_ASSERT(g_bserrno == 0); 6237 blob = NULL; 6238 g_blob = NULL; 6239 6240 /* Create thin provisioned blob */ 6241 6242 ut_spdk_blob_opts_init(&opts); 6243 opts.thin_provision = true; 6244 opts.num_clusters = 32; 6245 6246 blob = ut_blob_create_and_open(bs, &opts); 6247 blobid = spdk_blob_get_id(blob); 6248 6249 test_io_write(dev, blob, channel); 6250 test_io_read(dev, blob, channel); 6251 6252 test_io_zeroes(dev, blob, channel); 6253 6254 test_iov_write(dev, blob, channel); 6255 test_iov_read(dev, blob, channel); 6256 6257 /* Create snapshot */ 6258 6259 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6260 poll_threads(); 6261 CU_ASSERT(g_bserrno == 0); 6262 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6263 blobid = g_blobid; 6264 6265 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6266 poll_threads(); 6267 CU_ASSERT(g_bserrno == 0); 6268 CU_ASSERT(g_blob != NULL); 6269 snapshot = g_blob; 6270 6271 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6272 poll_threads(); 6273 CU_ASSERT(g_bserrno == 0); 6274 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6275 blobid = g_blobid; 6276 6277 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6278 poll_threads(); 6279 CU_ASSERT(g_bserrno == 0); 6280 CU_ASSERT(g_blob != NULL); 6281 clone = g_blob; 6282 6283 test_io_read(dev, blob, channel); 6284 test_io_read(dev, snapshot, channel); 6285 test_io_read(dev, clone, channel); 6286 6287 test_iov_read(dev, blob, channel); 6288 test_iov_read(dev, snapshot, channel); 6289 test_iov_read(dev, clone, channel); 6290 6291 /* Inflate clone */ 6292 6293 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6294 poll_threads(); 6295 6296 CU_ASSERT(g_bserrno == 0); 6297 6298 test_io_read(dev, clone, channel); 6299 6300 test_io_unmap(dev, clone, channel); 6301 6302 test_iov_write(dev, clone, channel); 6303 test_iov_read(dev, clone, channel); 6304 6305 spdk_blob_close(blob, blob_op_complete, NULL); 6306 spdk_blob_close(snapshot, blob_op_complete, NULL); 6307 spdk_blob_close(clone, blob_op_complete, NULL); 6308 poll_threads(); 6309 CU_ASSERT(g_bserrno == 0); 6310 blob = NULL; 6311 g_blob = NULL; 6312 6313 spdk_bs_free_io_channel(channel); 6314 poll_threads(); 6315 6316 /* Unload the blob store */ 6317 spdk_bs_unload(bs, bs_op_complete, NULL); 6318 poll_threads(); 6319 CU_ASSERT(g_bserrno == 0); 6320 g_bs = NULL; 6321 g_blob = NULL; 6322 g_blobid = 0; 6323 
}
6324
6325 static void
6326 blob_io_unit_compatiblity(void)
6327 {
6328 struct spdk_bs_opts bsopts;
6329 struct spdk_blob_store *bs;
6330 struct spdk_bs_dev *dev;
6331 struct spdk_bs_super_block *super;
6332
6333 /* Create dev with 512 bytes io unit size */
6334
6335 spdk_bs_opts_init(&bsopts);
6336 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */
6337 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6338
6339 /* Try to initialize a new blob store with unsupported io_unit */
6340 dev = init_dev();
6341 dev->blocklen = 512;
6342 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
6343
6344 /* Initialize a new blob store */
6345 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6346 poll_threads();
6347 CU_ASSERT(g_bserrno == 0);
6348 SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6349 bs = g_bs;
6350
6351 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6352
6353 /* Unload the blob store */
6354 spdk_bs_unload(bs, bs_op_complete, NULL);
6355 poll_threads();
6356 CU_ASSERT(g_bserrno == 0);
6357
6358 /* Modify the super block to behave like an older version.
6359 * Check if the loaded io unit size equals SPDK_BS_PAGE_SIZE */
6360 super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6361 super->io_unit_size = 0;
6362 super->crc = blob_md_page_calc_crc(super);
6363
6364 dev = init_dev();
6365 dev->blocklen = 512;
6366 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
6367
6368 spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6369 poll_threads();
6370 CU_ASSERT(g_bserrno == 0);
6371 SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6372 bs = g_bs;
6373
6374 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6375
6376 /* Unload the blob store */
6377 spdk_bs_unload(bs, bs_op_complete, NULL);
6378 poll_threads();
6379 CU_ASSERT(g_bserrno == 0);
6380
6381 g_bs = NULL;
6382 g_blob = NULL;
6383 g_blobid = 0;
6384 }
6385
6386 static void
6387 blob_simultaneous_operations(void)
6388 {
6389 struct spdk_blob_store *bs = g_bs;
6390 struct spdk_blob_opts opts;
6391 struct spdk_blob *blob, *snapshot;
6392 spdk_blob_id blobid, snapshotid;
6393 struct spdk_io_channel *channel;
6394
6395 channel = spdk_bs_alloc_io_channel(bs);
6396 SPDK_CU_ASSERT_FATAL(channel != NULL);
6397
6398 ut_spdk_blob_opts_init(&opts);
6399 opts.num_clusters = 10;
6400
6401 blob = ut_blob_create_and_open(bs, &opts);
6402 blobid = spdk_blob_get_id(blob);
6403
6404 /* Create a snapshot and try to remove the blob at the same time:
6405 * - snapshot should be created successfully
6406 * - delete operation should fail with -EBUSY */
6407 CU_ASSERT(blob->locked_operation_in_progress == false);
6408 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6409 CU_ASSERT(blob->locked_operation_in_progress == true);
6410 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6411 CU_ASSERT(blob->locked_operation_in_progress == true);
6412 /* Deletion failure */
6413 CU_ASSERT(g_bserrno == -EBUSY);
6414 poll_threads();
6415 CU_ASSERT(blob->locked_operation_in_progress == false);
6416 /* Snapshot creation success */
6417 CU_ASSERT(g_bserrno == 0);
6418 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6419
6420 snapshotid = g_blobid;
6421
6422 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6423 poll_threads();
6424 CU_ASSERT(g_bserrno == 0);
6425 SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6426 snapshot = g_blob;
6427
6428 /* Inflate the blob and try to remove it at the same time:
6429 * - blob should be inflated successfully
6430 * - delete operation should fail with -EBUSY */
6431
CU_ASSERT(blob->locked_operation_in_progress == false);
6432 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6433 CU_ASSERT(blob->locked_operation_in_progress == true);
6434 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6435 CU_ASSERT(blob->locked_operation_in_progress == true);
6436 /* Deletion failure */
6437 CU_ASSERT(g_bserrno == -EBUSY);
6438 poll_threads();
6439 CU_ASSERT(blob->locked_operation_in_progress == false);
6440 /* Inflation success */
6441 CU_ASSERT(g_bserrno == 0);
6442
6443 /* Clone the snapshot and try to remove the snapshot at the same time:
6444 * - snapshot should be cloned successfully
6445 * - delete operation should fail with -EBUSY */
6446 CU_ASSERT(blob->locked_operation_in_progress == false);
6447 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
6448 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6449 /* Deletion failure */
6450 CU_ASSERT(g_bserrno == -EBUSY);
6451 poll_threads();
6452 CU_ASSERT(blob->locked_operation_in_progress == false);
6453 /* Clone created */
6454 CU_ASSERT(g_bserrno == 0);
6455
6456 /* Resize the blob and try to remove it at the same time:
6457 * - blob should be resized successfully
6458 * - delete operation should fail with -EBUSY */
6459 CU_ASSERT(blob->locked_operation_in_progress == false);
6460 spdk_blob_resize(blob, 50, blob_op_complete, NULL);
6461 CU_ASSERT(blob->locked_operation_in_progress == true);
6462 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6463 CU_ASSERT(blob->locked_operation_in_progress == true);
6464 /* Deletion failure */
6465 CU_ASSERT(g_bserrno == -EBUSY);
6466 poll_threads();
6467 CU_ASSERT(blob->locked_operation_in_progress == false);
6468 /* Blob resized successfully */
6469 CU_ASSERT(g_bserrno == 0);
6470
6471 /* Issue two consecutive blob syncs; neither should fail.
6472 * Force each sync to actually occur by marking the blob dirty each time.
6473 * Executing the sync call alone is not enough to complete the operation,
6474 * since disk I/O is required to complete it. */
6475 g_bserrno = -1;
6476
6477 blob->state = SPDK_BLOB_STATE_DIRTY;
6478 spdk_blob_sync_md(blob, blob_op_complete, NULL);
6479 SPDK_CU_ASSERT_FATAL(g_bserrno == -1);
6480
6481 blob->state = SPDK_BLOB_STATE_DIRTY;
6482 spdk_blob_sync_md(blob, blob_op_complete, NULL);
6483 SPDK_CU_ASSERT_FATAL(g_bserrno == -1);
6484
6485 uint32_t completions = 0;
6486 while (completions < 2) {
6487 SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1));
6488 if (g_bserrno == 0) {
6489 g_bserrno = -1;
6490 completions++;
6491 }
6492 /* g_bserrno should never be anything other than -1 here;
6493 * that would mean one of the syncs failed.
*/
6494 SPDK_CU_ASSERT_FATAL(g_bserrno == -1);
6495 }
6496
6497 spdk_bs_free_io_channel(channel);
6498 poll_threads();
6499
6500 ut_blob_close_and_delete(bs, snapshot);
6501 ut_blob_close_and_delete(bs, blob);
6502 }
6503
6504 static void
6505 blob_persist_test(void)
6506 {
6507 struct spdk_blob_store *bs = g_bs;
6508 struct spdk_blob_opts opts;
6509 struct spdk_blob *blob;
6510 spdk_blob_id blobid;
6511 struct spdk_io_channel *channel;
6512 char *xattr;
6513 size_t xattr_length;
6514 int rc;
6515 uint32_t page_count_clear, page_count_xattr;
6516 uint64_t poller_iterations;
6517 bool run_poller;
6518
6519 channel = spdk_bs_alloc_io_channel(bs);
6520 SPDK_CU_ASSERT_FATAL(channel != NULL);
6521
6522 ut_spdk_blob_opts_init(&opts);
6523 opts.num_clusters = 10;
6524
6525 blob = ut_blob_create_and_open(bs, &opts);
6526 blobid = spdk_blob_get_id(blob);
6527
6528 /* Save the number of md pages used after creation of the blob.
6529 * This should be consistent after removing the xattr. */
6530 page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
6531 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
6532 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
6533
6534 /* Add an xattr with the maximum descriptor length to exceed a single metadata page. */
6535 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
6536 strlen("large_xattr");
6537 xattr = calloc(xattr_length, sizeof(char));
6538 SPDK_CU_ASSERT_FATAL(xattr != NULL);
6539
6540 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6541 SPDK_CU_ASSERT_FATAL(rc == 0);
6542 spdk_blob_sync_md(blob, blob_op_complete, NULL);
6543 poll_threads();
6544 SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
6545
6546 /* Save the number of md pages used after adding the large xattr */
6547 page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
6548 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
6549 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
6550
6551 /* Add the xattr to the blob and sync it. While the sync is occurring, remove the xattr and sync again.
6552 * Interrupt the first sync after an increasing number of poller iterations, until it succeeds.
6553 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
6554 poller_iterations = 1;
6555 run_poller = true;
6556 while (run_poller) {
6557 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6558 SPDK_CU_ASSERT_FATAL(rc == 0);
6559 g_bserrno = -1;
6560 spdk_blob_sync_md(blob, blob_op_complete, NULL);
6561 poll_thread_times(0, poller_iterations);
6562 if (g_bserrno == 0) {
6563 /* The poller iteration count was high enough for the first sync to complete.
6564 * Verify that the blob takes up enough md pages to store the xattr.
*/ 6565 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6566 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6567 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr); 6568 run_poller = false; 6569 } 6570 rc = spdk_blob_remove_xattr(blob, "large_xattr"); 6571 SPDK_CU_ASSERT_FATAL(rc == 0); 6572 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6573 poll_threads(); 6574 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6575 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6576 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6577 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear); 6578 6579 /* Reload bs and re-open blob to verify that xattr was not persisted. */ 6580 spdk_blob_close(blob, blob_op_complete, NULL); 6581 poll_threads(); 6582 CU_ASSERT(g_bserrno == 0); 6583 6584 ut_bs_reload(&bs, NULL); 6585 6586 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6587 poll_threads(); 6588 CU_ASSERT(g_bserrno == 0); 6589 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6590 blob = g_blob; 6591 6592 rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length); 6593 SPDK_CU_ASSERT_FATAL(rc == -ENOENT); 6594 6595 poller_iterations++; 6596 /* Stop at high iteration count to prevent infinite loop. 6597 * This value should be enough for first md sync to complete in any case. */ 6598 SPDK_CU_ASSERT_FATAL(poller_iterations < 50); 6599 } 6600 6601 free(xattr); 6602 6603 ut_blob_close_and_delete(bs, blob); 6604 6605 spdk_bs_free_io_channel(channel); 6606 poll_threads(); 6607 } 6608 6609 static void 6610 suite_bs_setup(void) 6611 { 6612 struct spdk_bs_dev *dev; 6613 6614 dev = init_dev(); 6615 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6616 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 6617 poll_threads(); 6618 CU_ASSERT(g_bserrno == 0); 6619 CU_ASSERT(g_bs != NULL); 6620 } 6621 6622 static void 6623 suite_bs_cleanup(void) 6624 { 6625 spdk_bs_unload(g_bs, bs_op_complete, NULL); 6626 poll_threads(); 6627 CU_ASSERT(g_bserrno == 0); 6628 g_bs = NULL; 6629 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6630 } 6631 6632 static struct spdk_blob * 6633 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts) 6634 { 6635 struct spdk_blob *blob; 6636 struct spdk_blob_opts create_blob_opts; 6637 spdk_blob_id blobid; 6638 6639 if (blob_opts == NULL) { 6640 ut_spdk_blob_opts_init(&create_blob_opts); 6641 blob_opts = &create_blob_opts; 6642 } 6643 6644 spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL); 6645 poll_threads(); 6646 CU_ASSERT(g_bserrno == 0); 6647 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6648 blobid = g_blobid; 6649 g_blobid = -1; 6650 6651 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6652 poll_threads(); 6653 CU_ASSERT(g_bserrno == 0); 6654 CU_ASSERT(g_blob != NULL); 6655 blob = g_blob; 6656 6657 g_blob = NULL; 6658 g_bserrno = -1; 6659 6660 return blob; 6661 } 6662 6663 static void 6664 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob) 6665 { 6666 spdk_blob_id blobid = spdk_blob_get_id(blob); 6667 6668 spdk_blob_close(blob, blob_op_complete, NULL); 6669 poll_threads(); 6670 CU_ASSERT(g_bserrno == 0); 6671 g_blob = NULL; 6672 6673 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6674 poll_threads(); 6675 
CU_ASSERT(g_bserrno == 0); 6676 g_bserrno = -1; 6677 } 6678 6679 static void 6680 suite_blob_setup(void) 6681 { 6682 suite_bs_setup(); 6683 CU_ASSERT(g_bs != NULL); 6684 6685 g_blob = ut_blob_create_and_open(g_bs, NULL); 6686 CU_ASSERT(g_blob != NULL); 6687 } 6688 6689 static void 6690 suite_blob_cleanup(void) 6691 { 6692 ut_blob_close_and_delete(g_bs, g_blob); 6693 CU_ASSERT(g_blob == NULL); 6694 6695 suite_bs_cleanup(); 6696 CU_ASSERT(g_bs == NULL); 6697 } 6698 6699 int main(int argc, char **argv) 6700 { 6701 CU_pSuite suite, suite_bs, suite_blob; 6702 unsigned int num_failures; 6703 6704 CU_set_error_action(CUEA_ABORT); 6705 CU_initialize_registry(); 6706 6707 suite = CU_add_suite("blob", NULL, NULL); 6708 suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL, 6709 suite_bs_setup, suite_bs_cleanup); 6710 suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL, 6711 suite_blob_setup, suite_blob_cleanup); 6712 6713 CU_ADD_TEST(suite, blob_init); 6714 CU_ADD_TEST(suite_bs, blob_open); 6715 CU_ADD_TEST(suite_bs, blob_create); 6716 CU_ADD_TEST(suite_bs, blob_create_loop); 6717 CU_ADD_TEST(suite_bs, blob_create_fail); 6718 CU_ADD_TEST(suite_bs, blob_create_internal); 6719 CU_ADD_TEST(suite, blob_thin_provision); 6720 CU_ADD_TEST(suite_bs, blob_snapshot); 6721 CU_ADD_TEST(suite_bs, blob_clone); 6722 CU_ADD_TEST(suite_bs, blob_inflate); 6723 CU_ADD_TEST(suite_bs, blob_delete); 6724 CU_ADD_TEST(suite_bs, blob_resize_test); 6725 CU_ADD_TEST(suite, blob_read_only); 6726 CU_ADD_TEST(suite_bs, channel_ops); 6727 CU_ADD_TEST(suite_bs, blob_super); 6728 CU_ADD_TEST(suite_blob, blob_write); 6729 CU_ADD_TEST(suite_blob, blob_read); 6730 CU_ADD_TEST(suite_blob, blob_rw_verify); 6731 CU_ADD_TEST(suite_bs, blob_rw_verify_iov); 6732 CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem); 6733 CU_ADD_TEST(suite_blob, blob_rw_iov_read_only); 6734 CU_ADD_TEST(suite_bs, blob_unmap); 6735 CU_ADD_TEST(suite_bs, blob_iter); 6736 CU_ADD_TEST(suite_blob, blob_xattr); 6737 CU_ADD_TEST(suite_bs, blob_parse_md); 6738 CU_ADD_TEST(suite, bs_load); 6739 CU_ADD_TEST(suite_bs, bs_load_pending_removal); 6740 CU_ADD_TEST(suite, bs_load_custom_cluster_size); 6741 CU_ADD_TEST(suite_bs, bs_unload); 6742 CU_ADD_TEST(suite, bs_cluster_sz); 6743 CU_ADD_TEST(suite_bs, bs_usable_clusters); 6744 CU_ADD_TEST(suite, bs_resize_md); 6745 CU_ADD_TEST(suite, bs_destroy); 6746 CU_ADD_TEST(suite, bs_type); 6747 CU_ADD_TEST(suite, bs_super_block); 6748 CU_ADD_TEST(suite, blob_serialize_test); 6749 CU_ADD_TEST(suite_bs, blob_crc); 6750 CU_ADD_TEST(suite, super_block_crc); 6751 CU_ADD_TEST(suite_blob, blob_dirty_shutdown); 6752 CU_ADD_TEST(suite_bs, blob_flags); 6753 CU_ADD_TEST(suite_bs, bs_version); 6754 CU_ADD_TEST(suite_bs, blob_set_xattrs_test); 6755 CU_ADD_TEST(suite_bs, blob_thin_prov_alloc); 6756 CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test); 6757 CU_ADD_TEST(suite_bs, blob_thin_prov_rw); 6758 CU_ADD_TEST(suite_bs, blob_thin_prov_rle); 6759 CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov); 6760 CU_ADD_TEST(suite, bs_load_iter_test); 6761 CU_ADD_TEST(suite_bs, blob_snapshot_rw); 6762 CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov); 6763 CU_ADD_TEST(suite, blob_relations); 6764 CU_ADD_TEST(suite, blob_relations2); 6765 CU_ADD_TEST(suite, blobstore_clean_power_failure); 6766 CU_ADD_TEST(suite, blob_delete_snapshot_power_failure); 6767 CU_ADD_TEST(suite, blob_create_snapshot_power_failure); 6768 CU_ADD_TEST(suite_bs, blob_inflate_rw); 6769 CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io); 6770 CU_ADD_TEST(suite_bs, 
blob_operation_split_rw); 6771 CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov); 6772 CU_ADD_TEST(suite, blob_io_unit); 6773 CU_ADD_TEST(suite, blob_io_unit_compatiblity); 6774 CU_ADD_TEST(suite_bs, blob_simultaneous_operations); 6775 CU_ADD_TEST(suite_bs, blob_persist_test); 6776 6777 allocate_threads(2); 6778 set_thread(0); 6779 6780 g_dev_buffer = calloc(1, DEV_BUFFER_SIZE); 6781 6782 CU_basic_set_mode(CU_BRM_VERBOSE); 6783 g_use_extent_table = false; 6784 CU_basic_run_tests(); 6785 num_failures = CU_get_number_of_failures(); 6786 g_use_extent_table = true; 6787 CU_basic_run_tests(); 6788 num_failures += CU_get_number_of_failures(); 6789 CU_cleanup_registry(); 6790 6791 free(g_dev_buffer); 6792 6793 free_threads(); 6794 6795 return num_failures; 6796 } 6797
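
/*
 * Illustrative sketch (not part of the original suite): a new test case would
 * follow the same pattern used throughout this file -- create and open a blob
 * with the ut_ helpers, drive the asynchronous API, complete it with
 * poll_threads(), check g_bserrno, then clean up. The name blob_example below
 * is hypothetical; everything it calls is defined earlier in this file.
 *
 *   static void
 *   blob_example(void)
 *   {
 *           struct spdk_blob_store *bs = g_bs;
 *           struct spdk_blob *blob;
 *
 *           blob = ut_blob_create_and_open(bs, NULL);
 *
 *           // Mark the blob dirty so the md sync actually hits the dev.
 *           blob->state = SPDK_BLOB_STATE_DIRTY;
 *           spdk_blob_sync_md(blob, blob_op_complete, NULL);
 *           poll_threads();
 *           CU_ASSERT(g_bserrno == 0);
 *
 *           ut_blob_close_and_delete(bs, blob);
 *   }
 *
 * It would then be registered in main(), e.g. on the suite that provides a
 * loaded blob store:
 *
 *   CU_ADD_TEST(suite_bs, blob_example);
 */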