1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk_cunit.h" 37 #include "spdk/blob.h" 38 #include "spdk/string.h" 39 #include "spdk_internal/thread.h" 40 41 #include "common/lib/ut_multithread.c" 42 #include "../bs_dev_common.c" 43 #include "blob/blobstore.c" 44 #include "blob/request.c" 45 #include "blob/zeroes.c" 46 #include "blob/blob_bs_dev.c" 47 48 struct spdk_blob_store *g_bs; 49 spdk_blob_id g_blobid; 50 struct spdk_blob *g_blob, *g_blob2; 51 int g_bserrno, g_bserrno2; 52 struct spdk_xattr_names *g_names; 53 int g_done; 54 char *g_xattr_names[] = {"first", "second", "third"}; 55 char *g_xattr_values[] = {"one", "two", "three"}; 56 uint64_t g_ctx = 1729; 57 bool g_use_extent_table = false; 58 59 struct spdk_bs_super_block_ver1 { 60 uint8_t signature[8]; 61 uint32_t version; 62 uint32_t length; 63 uint32_t clean; /* If there was a clean shutdown, this is 1. 
*/ 64 spdk_blob_id super_blob; 65 66 uint32_t cluster_size; /* In bytes */ 67 68 uint32_t used_page_mask_start; /* Offset from beginning of disk, in pages */ 69 uint32_t used_page_mask_len; /* Count, in pages */ 70 71 uint32_t used_cluster_mask_start; /* Offset from beginning of disk, in pages */ 72 uint32_t used_cluster_mask_len; /* Count, in pages */ 73 74 uint32_t md_start; /* Offset from beginning of disk, in pages */ 75 uint32_t md_len; /* Count, in pages */ 76 77 uint8_t reserved[4036]; 78 uint32_t crc; 79 } __attribute__((packed)); 80 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size"); 81 82 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs, 83 struct spdk_blob_opts *blob_opts); 84 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob); 85 static void suite_blob_setup(void); 86 static void suite_blob_cleanup(void); 87 88 static void 89 _get_xattr_value(void *arg, const char *name, 90 const void **value, size_t *value_len) 91 { 92 uint64_t i; 93 94 SPDK_CU_ASSERT_FATAL(value_len != NULL); 95 SPDK_CU_ASSERT_FATAL(value != NULL); 96 CU_ASSERT(arg == &g_ctx); 97 98 for (i = 0; i < sizeof(g_xattr_names); i++) { 99 if (!strcmp(name, g_xattr_names[i])) { 100 *value_len = strlen(g_xattr_values[i]); 101 *value = g_xattr_values[i]; 102 break; 103 } 104 } 105 } 106 107 static void 108 _get_xattr_value_null(void *arg, const char *name, 109 const void **value, size_t *value_len) 110 { 111 SPDK_CU_ASSERT_FATAL(value_len != NULL); 112 SPDK_CU_ASSERT_FATAL(value != NULL); 113 CU_ASSERT(arg == NULL); 114 115 *value_len = 0; 116 *value = NULL; 117 } 118 119 static int 120 _get_snapshots_count(struct spdk_blob_store *bs) 121 { 122 struct spdk_blob_list *snapshot = NULL; 123 int count = 0; 124 125 TAILQ_FOREACH(snapshot, &bs->snapshots, link) { 126 count += 1; 127 } 128 129 return count; 130 } 131 132 static void 133 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts) 134 { 135 spdk_blob_opts_init(opts); 136 opts->use_extent_table = g_use_extent_table; 137 } 138 139 static void 140 bs_op_complete(void *cb_arg, int bserrno) 141 { 142 g_bserrno = bserrno; 143 } 144 145 static void 146 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs, 147 int bserrno) 148 { 149 g_bs = bs; 150 g_bserrno = bserrno; 151 } 152 153 static void 154 blob_op_complete(void *cb_arg, int bserrno) 155 { 156 g_bserrno = bserrno; 157 } 158 159 static void 160 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno) 161 { 162 g_blobid = blobid; 163 g_bserrno = bserrno; 164 } 165 166 static void 167 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno) 168 { 169 g_blob = blb; 170 g_bserrno = bserrno; 171 } 172 173 static void 174 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno) 175 { 176 if (g_blob == NULL) { 177 g_blob = blob; 178 g_bserrno = bserrno; 179 } else { 180 g_blob2 = blob; 181 g_bserrno2 = bserrno; 182 } 183 } 184 185 static void 186 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts) 187 { 188 struct spdk_bs_dev *dev; 189 190 /* Unload the blob store */ 191 spdk_bs_unload(*bs, bs_op_complete, NULL); 192 poll_threads(); 193 CU_ASSERT(g_bserrno == 0); 194 195 dev = init_dev(); 196 /* Load an existing blob store */ 197 spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL); 198 poll_threads(); 199 CU_ASSERT(g_bserrno == 0); 200 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 201 *bs = g_bs; 202 203 g_bserrno = -1; 204 } 205 
206 static void 207 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts) 208 { 209 struct spdk_bs_dev *dev; 210 211 /* Dirty shutdown */ 212 bs_free(*bs); 213 214 dev = init_dev(); 215 /* Load an existing blob store */ 216 spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL); 217 poll_threads(); 218 CU_ASSERT(g_bserrno == 0); 219 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 220 *bs = g_bs; 221 222 g_bserrno = -1; 223 } 224 225 static void 226 blob_init(void) 227 { 228 struct spdk_blob_store *bs; 229 struct spdk_bs_dev *dev; 230 231 dev = init_dev(); 232 233 /* should fail for an unsupported blocklen */ 234 dev->blocklen = 500; 235 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 236 poll_threads(); 237 CU_ASSERT(g_bserrno == -EINVAL); 238 239 dev = init_dev(); 240 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 241 poll_threads(); 242 CU_ASSERT(g_bserrno == 0); 243 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 244 bs = g_bs; 245 246 spdk_bs_unload(bs, bs_op_complete, NULL); 247 poll_threads(); 248 CU_ASSERT(g_bserrno == 0); 249 g_bs = NULL; 250 } 251 252 static void 253 blob_super(void) 254 { 255 struct spdk_blob_store *bs = g_bs; 256 spdk_blob_id blobid; 257 struct spdk_blob_opts blob_opts; 258 259 /* Get the super blob without having set one */ 260 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 261 poll_threads(); 262 CU_ASSERT(g_bserrno == -ENOENT); 263 CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID); 264 265 /* Create a blob */ 266 ut_spdk_blob_opts_init(&blob_opts); 267 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 268 poll_threads(); 269 CU_ASSERT(g_bserrno == 0); 270 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 271 blobid = g_blobid; 272 273 /* Set the blob as the super blob */ 274 spdk_bs_set_super(bs, blobid, blob_op_complete, NULL); 275 poll_threads(); 276 CU_ASSERT(g_bserrno == 0); 277 278 /* Get the super blob */ 279 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 280 poll_threads(); 281 CU_ASSERT(g_bserrno == 0); 282 CU_ASSERT(blobid == g_blobid); 283 } 284 285 static void 286 blob_open(void) 287 { 288 struct spdk_blob_store *bs = g_bs; 289 struct spdk_blob *blob; 290 struct spdk_blob_opts blob_opts; 291 spdk_blob_id blobid, blobid2; 292 293 ut_spdk_blob_opts_init(&blob_opts); 294 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 295 poll_threads(); 296 CU_ASSERT(g_bserrno == 0); 297 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 298 blobid = g_blobid; 299 300 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 301 poll_threads(); 302 CU_ASSERT(g_bserrno == 0); 303 CU_ASSERT(g_blob != NULL); 304 blob = g_blob; 305 306 blobid2 = spdk_blob_get_id(blob); 307 CU_ASSERT(blobid == blobid2); 308 309 /* Try to open file again. It should return success. */ 310 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 311 poll_threads(); 312 CU_ASSERT(g_bserrno == 0); 313 CU_ASSERT(blob == g_blob); 314 315 spdk_blob_close(blob, blob_op_complete, NULL); 316 poll_threads(); 317 CU_ASSERT(g_bserrno == 0); 318 319 /* 320 * Close the file a second time, releasing the second reference. This 321 * should succeed. 322 */ 323 blob = g_blob; 324 spdk_blob_close(blob, blob_op_complete, NULL); 325 poll_threads(); 326 CU_ASSERT(g_bserrno == 0); 327 328 /* 329 * Try to open file again. It should succeed. This tests the case 330 * where the file is opened, closed, then re-opened again. 
331 */ 332 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 333 poll_threads(); 334 CU_ASSERT(g_bserrno == 0); 335 CU_ASSERT(g_blob != NULL); 336 blob = g_blob; 337 spdk_blob_close(blob, blob_op_complete, NULL); 338 poll_threads(); 339 CU_ASSERT(g_bserrno == 0); 340 341 /* Try to open file twice in succession. This should return the same 342 * blob object. 343 */ 344 g_blob = NULL; 345 g_blob2 = NULL; 346 g_bserrno = -1; 347 g_bserrno2 = -1; 348 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL); 349 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL); 350 poll_threads(); 351 CU_ASSERT(g_bserrno == 0); 352 CU_ASSERT(g_bserrno2 == 0); 353 CU_ASSERT(g_blob != NULL); 354 CU_ASSERT(g_blob2 != NULL); 355 CU_ASSERT(g_blob == g_blob2); 356 357 g_bserrno = -1; 358 spdk_blob_close(g_blob, blob_op_complete, NULL); 359 poll_threads(); 360 CU_ASSERT(g_bserrno == 0); 361 362 ut_blob_close_and_delete(bs, g_blob); 363 } 364 365 static void 366 blob_create(void) 367 { 368 struct spdk_blob_store *bs = g_bs; 369 struct spdk_blob *blob; 370 struct spdk_blob_opts opts; 371 spdk_blob_id blobid; 372 373 /* Create blob with 10 clusters */ 374 375 ut_spdk_blob_opts_init(&opts); 376 opts.num_clusters = 10; 377 378 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 379 poll_threads(); 380 CU_ASSERT(g_bserrno == 0); 381 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 382 blobid = g_blobid; 383 384 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 385 poll_threads(); 386 CU_ASSERT(g_bserrno == 0); 387 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 388 blob = g_blob; 389 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 390 391 spdk_blob_close(blob, blob_op_complete, NULL); 392 poll_threads(); 393 CU_ASSERT(g_bserrno == 0); 394 395 /* Create blob with 0 clusters */ 396 397 ut_spdk_blob_opts_init(&opts); 398 opts.num_clusters = 0; 399 400 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 401 poll_threads(); 402 CU_ASSERT(g_bserrno == 0); 403 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 404 blobid = g_blobid; 405 406 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 407 poll_threads(); 408 CU_ASSERT(g_bserrno == 0); 409 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 410 blob = g_blob; 411 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 412 413 spdk_blob_close(blob, blob_op_complete, NULL); 414 poll_threads(); 415 CU_ASSERT(g_bserrno == 0); 416 417 /* Create blob with default options (opts == NULL) */ 418 419 spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL); 420 poll_threads(); 421 CU_ASSERT(g_bserrno == 0); 422 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 423 blobid = g_blobid; 424 425 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 426 poll_threads(); 427 CU_ASSERT(g_bserrno == 0); 428 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 429 blob = g_blob; 430 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 431 432 spdk_blob_close(blob, blob_op_complete, NULL); 433 poll_threads(); 434 CU_ASSERT(g_bserrno == 0); 435 436 /* Try to create blob with size larger than blobstore */ 437 438 ut_spdk_blob_opts_init(&opts); 439 opts.num_clusters = bs->total_clusters + 1; 440 441 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 442 poll_threads(); 443 CU_ASSERT(g_bserrno == -ENOSPC); 444 } 445 446 /* 447 * Create and delete one blob in a loop over and over again. This helps ensure 448 * that the internal bit masks tracking used clusters and md_pages are being 449 * tracked correctly. 
450 */ 451 static void 452 blob_create_loop(void) 453 { 454 struct spdk_blob_store *bs = g_bs; 455 struct spdk_blob_opts opts; 456 uint32_t i, loop_count; 457 458 loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages), 459 spdk_bit_pool_capacity(bs->used_clusters)); 460 461 for (i = 0; i < loop_count; i++) { 462 ut_spdk_blob_opts_init(&opts); 463 opts.num_clusters = 1; 464 g_bserrno = -1; 465 g_blobid = SPDK_BLOBID_INVALID; 466 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 467 poll_threads(); 468 CU_ASSERT(g_bserrno == 0); 469 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 470 spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL); 471 poll_threads(); 472 CU_ASSERT(g_bserrno == 0); 473 } 474 } 475 476 static void 477 blob_create_fail(void) 478 { 479 struct spdk_blob_store *bs = g_bs; 480 struct spdk_blob_opts opts; 481 spdk_blob_id blobid; 482 uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids); 483 uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages); 484 485 /* NULL callback */ 486 ut_spdk_blob_opts_init(&opts); 487 opts.xattrs.names = g_xattr_names; 488 opts.xattrs.get_value = NULL; 489 opts.xattrs.count = 1; 490 opts.xattrs.ctx = &g_ctx; 491 492 blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 493 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 494 poll_threads(); 495 CU_ASSERT(g_bserrno == -EINVAL); 496 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 497 CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count); 498 CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count); 499 500 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 501 poll_threads(); 502 CU_ASSERT(g_bserrno == -ENOENT); 503 SPDK_CU_ASSERT_FATAL(g_blob == NULL); 504 505 ut_bs_reload(&bs, NULL); 506 CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count); 507 CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count); 508 509 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 510 poll_threads(); 511 CU_ASSERT(g_blob == NULL); 512 CU_ASSERT(g_bserrno == -ENOENT); 513 } 514 515 static void 516 blob_create_internal(void) 517 { 518 struct spdk_blob_store *bs = g_bs; 519 struct spdk_blob *blob; 520 struct spdk_blob_opts opts; 521 struct spdk_blob_xattr_opts internal_xattrs; 522 const void *value; 523 size_t value_len; 524 spdk_blob_id blobid; 525 int rc; 526 527 /* Create blob with custom xattrs */ 528 529 ut_spdk_blob_opts_init(&opts); 530 blob_xattrs_init(&internal_xattrs); 531 internal_xattrs.count = 3; 532 internal_xattrs.names = g_xattr_names; 533 internal_xattrs.get_value = _get_xattr_value; 534 internal_xattrs.ctx = &g_ctx; 535 536 bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL); 537 poll_threads(); 538 CU_ASSERT(g_bserrno == 0); 539 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 540 blobid = g_blobid; 541 542 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 543 poll_threads(); 544 CU_ASSERT(g_bserrno == 0); 545 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 546 blob = g_blob; 547 548 rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true); 549 CU_ASSERT(rc == 0); 550 SPDK_CU_ASSERT_FATAL(value != NULL); 551 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 552 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 553 554 rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true); 555 CU_ASSERT(rc == 0); 556 
SPDK_CU_ASSERT_FATAL(value != NULL); 557 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 558 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 559 560 rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true); 561 CU_ASSERT(rc == 0); 562 SPDK_CU_ASSERT_FATAL(value != NULL); 563 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 564 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 565 566 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 567 CU_ASSERT(rc != 0); 568 569 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 570 CU_ASSERT(rc != 0); 571 572 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 573 CU_ASSERT(rc != 0); 574 575 spdk_blob_close(blob, blob_op_complete, NULL); 576 poll_threads(); 577 CU_ASSERT(g_bserrno == 0); 578 579 /* Create blob with NULL internal options */ 580 581 bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL); 582 poll_threads(); 583 CU_ASSERT(g_bserrno == 0); 584 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 585 blobid = g_blobid; 586 587 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 588 poll_threads(); 589 CU_ASSERT(g_bserrno == 0); 590 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 591 CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL); 592 593 blob = g_blob; 594 595 spdk_blob_close(blob, blob_op_complete, NULL); 596 poll_threads(); 597 CU_ASSERT(g_bserrno == 0); 598 } 599 600 static void 601 blob_thin_provision(void) 602 { 603 struct spdk_blob_store *bs; 604 struct spdk_bs_dev *dev; 605 struct spdk_blob *blob; 606 struct spdk_blob_opts opts; 607 struct spdk_bs_opts bs_opts; 608 spdk_blob_id blobid; 609 610 dev = init_dev(); 611 spdk_bs_opts_init(&bs_opts); 612 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 613 614 /* Initialize a new blob store */ 615 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 616 poll_threads(); 617 CU_ASSERT(g_bserrno == 0); 618 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 619 620 bs = g_bs; 621 622 /* Create blob with thin provisioning enabled */ 623 624 ut_spdk_blob_opts_init(&opts); 625 opts.thin_provision = true; 626 opts.num_clusters = 10; 627 628 blob = ut_blob_create_and_open(bs, &opts); 629 blobid = spdk_blob_get_id(blob); 630 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 631 632 spdk_blob_close(blob, blob_op_complete, NULL); 633 CU_ASSERT(g_bserrno == 0); 634 635 /* Do not shut down cleanly. This makes sure that when we load again 636 * and try to recover a valid used_cluster map, that blobstore will 637 * ignore clusters with index 0 since these are unallocated clusters. 
638 */ 639 ut_bs_dirty_load(&bs, &bs_opts); 640 641 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 642 poll_threads(); 643 CU_ASSERT(g_bserrno == 0); 644 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 645 blob = g_blob; 646 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 647 648 ut_blob_close_and_delete(bs, blob); 649 650 spdk_bs_unload(bs, bs_op_complete, NULL); 651 poll_threads(); 652 CU_ASSERT(g_bserrno == 0); 653 g_bs = NULL; 654 } 655 656 static void 657 blob_snapshot(void) 658 { 659 struct spdk_blob_store *bs = g_bs; 660 struct spdk_blob *blob; 661 struct spdk_blob *snapshot, *snapshot2; 662 struct spdk_blob_bs_dev *blob_bs_dev; 663 struct spdk_blob_opts opts; 664 struct spdk_blob_xattr_opts xattrs; 665 spdk_blob_id blobid; 666 spdk_blob_id snapshotid; 667 spdk_blob_id snapshotid2; 668 const void *value; 669 size_t value_len; 670 int rc; 671 spdk_blob_id ids[2]; 672 size_t count; 673 674 /* Create blob with 10 clusters */ 675 ut_spdk_blob_opts_init(&opts); 676 opts.num_clusters = 10; 677 678 blob = ut_blob_create_and_open(bs, &opts); 679 blobid = spdk_blob_get_id(blob); 680 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 681 682 /* Create snapshot from blob */ 683 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0); 684 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 685 poll_threads(); 686 CU_ASSERT(g_bserrno == 0); 687 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 688 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1); 689 snapshotid = g_blobid; 690 691 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 692 poll_threads(); 693 CU_ASSERT(g_bserrno == 0); 694 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 695 snapshot = g_blob; 696 CU_ASSERT(snapshot->data_ro == true); 697 CU_ASSERT(snapshot->md_ro == true); 698 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); 699 700 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 701 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 702 CU_ASSERT(spdk_mem_all_zero(blob->active.clusters, 703 blob->active.num_clusters * sizeof(blob->active.clusters[0]))); 704 705 /* Try to create snapshot from clone with xattrs */ 706 xattrs.names = g_xattr_names; 707 xattrs.get_value = _get_xattr_value; 708 xattrs.count = 3; 709 xattrs.ctx = &g_ctx; 710 spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL); 711 poll_threads(); 712 CU_ASSERT(g_bserrno == 0); 713 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 714 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2); 715 snapshotid2 = g_blobid; 716 717 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 718 CU_ASSERT(g_bserrno == 0); 719 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 720 snapshot2 = g_blob; 721 CU_ASSERT(snapshot2->data_ro == true); 722 CU_ASSERT(snapshot2->md_ro == true); 723 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10); 724 725 /* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */ 726 CU_ASSERT(snapshot->back_bs_dev == NULL); 727 SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL); 728 SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL); 729 730 blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 731 CU_ASSERT(blob_bs_dev->blob == snapshot2); 732 733 blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev; 734 CU_ASSERT(blob_bs_dev->blob == snapshot); 735 736 rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len); 737 CU_ASSERT(rc == 0); 738 SPDK_CU_ASSERT_FATAL(value != NULL); 739 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 740 
CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 741 742 rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len); 743 CU_ASSERT(rc == 0); 744 SPDK_CU_ASSERT_FATAL(value != NULL); 745 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 746 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 747 748 rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len); 749 CU_ASSERT(rc == 0); 750 SPDK_CU_ASSERT_FATAL(value != NULL); 751 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 752 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 753 754 /* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */ 755 count = 2; 756 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0); 757 CU_ASSERT(count == 1); 758 CU_ASSERT(ids[0] == blobid); 759 760 count = 2; 761 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 762 CU_ASSERT(count == 1); 763 CU_ASSERT(ids[0] == snapshotid2); 764 765 /* Try to create snapshot from snapshot */ 766 spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 767 poll_threads(); 768 CU_ASSERT(g_bserrno == -EINVAL); 769 CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID); 770 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2); 771 772 /* Delete blob and confirm that it is no longer on snapshot2 clone list */ 773 ut_blob_close_and_delete(bs, blob); 774 count = 2; 775 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0); 776 CU_ASSERT(count == 0); 777 778 /* Delete snapshot2 and confirm that it is no longer on snapshot clone list */ 779 ut_blob_close_and_delete(bs, snapshot2); 780 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1); 781 count = 2; 782 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0); 783 CU_ASSERT(count == 0); 784 785 ut_blob_close_and_delete(bs, snapshot); 786 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0); 787 } 788 789 static void 790 blob_snapshot_freeze_io(void) 791 { 792 struct spdk_io_channel *channel; 793 struct spdk_bs_channel *bs_channel; 794 struct spdk_blob_store *bs = g_bs; 795 struct spdk_blob *blob; 796 struct spdk_blob_opts opts; 797 spdk_blob_id blobid; 798 uint32_t num_of_pages = 10; 799 uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE]; 800 uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE]; 801 uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE]; 802 803 memset(payload_write, 0xE5, sizeof(payload_write)); 804 memset(payload_read, 0x00, sizeof(payload_read)); 805 memset(payload_zero, 0x00, sizeof(payload_zero)); 806 807 /* Test freeze I/O during snapshot */ 808 channel = spdk_bs_alloc_io_channel(bs); 809 bs_channel = spdk_io_channel_get_ctx(channel); 810 811 /* Create blob with 10 clusters */ 812 ut_spdk_blob_opts_init(&opts); 813 opts.num_clusters = 10; 814 opts.thin_provision = false; 815 816 blob = ut_blob_create_and_open(bs, &opts); 817 blobid = spdk_blob_get_id(blob); 818 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 819 820 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 821 822 /* This is implementation specific. 823 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback. 824 * Four async I/O operations happen before that. 
*/ 825 poll_thread_times(0, 3); 826 827 CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io)); 828 829 /* Blob I/O should be frozen here */ 830 CU_ASSERT(blob->frozen_refcnt == 1); 831 832 /* Write to the blob */ 833 spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL); 834 835 /* Verify that I/O is queued */ 836 CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io)); 837 /* Verify that payload is not written to disk */ 838 CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0]*SPDK_BS_PAGE_SIZE], 839 SPDK_BS_PAGE_SIZE) == 0); 840 841 /* Finish all operations including spdk_bs_create_snapshot */ 842 poll_threads(); 843 844 /* Verify snapshot */ 845 CU_ASSERT(g_bserrno == 0); 846 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 847 848 /* Verify that blob has unset frozen_io */ 849 CU_ASSERT(blob->frozen_refcnt == 0); 850 851 /* Verify that postponed I/O completed successfully by comparing payload */ 852 spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL); 853 poll_threads(); 854 CU_ASSERT(g_bserrno == 0); 855 CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0); 856 857 spdk_bs_free_io_channel(channel); 858 poll_threads(); 859 860 ut_blob_close_and_delete(bs, blob); 861 } 862 863 static void 864 blob_clone(void) 865 { 866 struct spdk_blob_store *bs = g_bs; 867 struct spdk_blob_opts opts; 868 struct spdk_blob *blob, *snapshot, *clone; 869 spdk_blob_id blobid, cloneid, snapshotid; 870 struct spdk_blob_xattr_opts xattrs; 871 const void *value; 872 size_t value_len; 873 int rc; 874 875 /* Create blob with 10 clusters */ 876 877 ut_spdk_blob_opts_init(&opts); 878 opts.num_clusters = 10; 879 880 blob = ut_blob_create_and_open(bs, &opts); 881 blobid = spdk_blob_get_id(blob); 882 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 883 884 /* Create snapshot */ 885 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 886 poll_threads(); 887 CU_ASSERT(g_bserrno == 0); 888 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 889 snapshotid = g_blobid; 890 891 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 892 poll_threads(); 893 CU_ASSERT(g_bserrno == 0); 894 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 895 snapshot = g_blob; 896 CU_ASSERT(snapshot->data_ro == true); 897 CU_ASSERT(snapshot->md_ro == true); 898 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); 899 900 spdk_blob_close(snapshot, blob_op_complete, NULL); 901 poll_threads(); 902 CU_ASSERT(g_bserrno == 0); 903 904 /* Create clone from snapshot with xattrs */ 905 xattrs.names = g_xattr_names; 906 xattrs.get_value = _get_xattr_value; 907 xattrs.count = 3; 908 xattrs.ctx = &g_ctx; 909 910 spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL); 911 poll_threads(); 912 CU_ASSERT(g_bserrno == 0); 913 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 914 cloneid = g_blobid; 915 916 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 917 poll_threads(); 918 CU_ASSERT(g_bserrno == 0); 919 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 920 clone = g_blob; 921 CU_ASSERT(clone->data_ro == false); 922 CU_ASSERT(clone->md_ro == false); 923 CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10); 924 925 rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len); 926 CU_ASSERT(rc == 0); 927 SPDK_CU_ASSERT_FATAL(value != NULL); 928 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 929 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 930 931 rc = 
spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len); 932 CU_ASSERT(rc == 0); 933 SPDK_CU_ASSERT_FATAL(value != NULL); 934 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 935 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 936 937 rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len); 938 CU_ASSERT(rc == 0); 939 SPDK_CU_ASSERT_FATAL(value != NULL); 940 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 941 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 942 943 944 spdk_blob_close(clone, blob_op_complete, NULL); 945 poll_threads(); 946 CU_ASSERT(g_bserrno == 0); 947 948 /* Try to create clone from not read only blob */ 949 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 950 poll_threads(); 951 CU_ASSERT(g_bserrno == -EINVAL); 952 CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID); 953 954 /* Mark blob as read only */ 955 spdk_blob_set_read_only(blob); 956 spdk_blob_sync_md(blob, blob_op_complete, NULL); 957 poll_threads(); 958 CU_ASSERT(g_bserrno == 0); 959 960 /* Create clone from read only blob */ 961 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 962 poll_threads(); 963 CU_ASSERT(g_bserrno == 0); 964 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 965 cloneid = g_blobid; 966 967 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 968 poll_threads(); 969 CU_ASSERT(g_bserrno == 0); 970 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 971 clone = g_blob; 972 CU_ASSERT(clone->data_ro == false); 973 CU_ASSERT(clone->md_ro == false); 974 CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10); 975 976 ut_blob_close_and_delete(bs, clone); 977 ut_blob_close_and_delete(bs, blob); 978 } 979 980 static void 981 _blob_inflate(bool decouple_parent) 982 { 983 struct spdk_blob_store *bs = g_bs; 984 struct spdk_blob_opts opts; 985 struct spdk_blob *blob, *snapshot; 986 spdk_blob_id blobid, snapshotid; 987 struct spdk_io_channel *channel; 988 uint64_t free_clusters; 989 990 channel = spdk_bs_alloc_io_channel(bs); 991 SPDK_CU_ASSERT_FATAL(channel != NULL); 992 993 /* Create blob with 10 clusters */ 994 995 ut_spdk_blob_opts_init(&opts); 996 opts.num_clusters = 10; 997 opts.thin_provision = true; 998 999 blob = ut_blob_create_and_open(bs, &opts); 1000 blobid = spdk_blob_get_id(blob); 1001 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 1002 CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true); 1003 1004 /* 1) Blob with no parent */ 1005 if (decouple_parent) { 1006 /* Decouple parent of blob with no parent (should fail) */ 1007 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 1008 poll_threads(); 1009 CU_ASSERT(g_bserrno != 0); 1010 } else { 1011 /* Inflate of thin blob with no parent should made it thick */ 1012 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 1013 poll_threads(); 1014 CU_ASSERT(g_bserrno == 0); 1015 CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false); 1016 } 1017 1018 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 1019 poll_threads(); 1020 CU_ASSERT(g_bserrno == 0); 1021 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1022 snapshotid = g_blobid; 1023 1024 CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true); 1025 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 1026 1027 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 1028 poll_threads(); 1029 CU_ASSERT(g_bserrno == 0); 1030 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 1031 snapshot = g_blob; 1032 
CU_ASSERT(snapshot->data_ro == true); 1033 CU_ASSERT(snapshot->md_ro == true); 1034 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); 1035 1036 spdk_blob_close(snapshot, blob_op_complete, NULL); 1037 poll_threads(); 1038 CU_ASSERT(g_bserrno == 0); 1039 1040 free_clusters = spdk_bs_free_cluster_count(bs); 1041 1042 /* 2) Blob with parent */ 1043 if (!decouple_parent) { 1044 /* Do full blob inflation */ 1045 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 1046 poll_threads(); 1047 CU_ASSERT(g_bserrno == 0); 1048 /* all 10 clusters should be allocated */ 1049 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10); 1050 } else { 1051 /* Decouple parent of blob */ 1052 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 1053 poll_threads(); 1054 CU_ASSERT(g_bserrno == 0); 1055 /* when only parent is removed, none of the clusters should be allocated */ 1056 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters); 1057 } 1058 1059 /* Now, it should be possible to delete snapshot */ 1060 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 1061 poll_threads(); 1062 CU_ASSERT(g_bserrno == 0); 1063 1064 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 1065 CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent); 1066 1067 spdk_bs_free_io_channel(channel); 1068 poll_threads(); 1069 1070 ut_blob_close_and_delete(bs, blob); 1071 } 1072 1073 static void 1074 blob_inflate(void) 1075 { 1076 _blob_inflate(false); 1077 _blob_inflate(true); 1078 } 1079 1080 static void 1081 blob_delete(void) 1082 { 1083 struct spdk_blob_store *bs = g_bs; 1084 struct spdk_blob_opts blob_opts; 1085 spdk_blob_id blobid; 1086 1087 /* Create a blob and then delete it. */ 1088 ut_spdk_blob_opts_init(&blob_opts); 1089 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 1090 poll_threads(); 1091 CU_ASSERT(g_bserrno == 0); 1092 CU_ASSERT(g_blobid > 0); 1093 blobid = g_blobid; 1094 1095 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 1096 poll_threads(); 1097 CU_ASSERT(g_bserrno == 0); 1098 1099 /* Try to open the blob */ 1100 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 1101 poll_threads(); 1102 CU_ASSERT(g_bserrno == -ENOENT); 1103 } 1104 1105 static void 1106 blob_resize_test(void) 1107 { 1108 struct spdk_blob_store *bs = g_bs; 1109 struct spdk_blob *blob; 1110 uint64_t free_clusters; 1111 1112 free_clusters = spdk_bs_free_cluster_count(bs); 1113 1114 blob = ut_blob_create_and_open(bs, NULL); 1115 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 1116 1117 /* Confirm that resize fails if blob is marked read-only. */ 1118 blob->md_ro = true; 1119 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 1120 poll_threads(); 1121 CU_ASSERT(g_bserrno == -EPERM); 1122 blob->md_ro = false; 1123 1124 /* The blob started at 0 clusters. Resize it to be 5. */ 1125 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 1126 poll_threads(); 1127 CU_ASSERT(g_bserrno == 0); 1128 CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs)); 1129 1130 /* Shrink the blob to 3 clusters. This will not actually release 1131 * the old clusters until the blob is synced. 
1132 */ 1133 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 1134 poll_threads(); 1135 CU_ASSERT(g_bserrno == 0); 1136 /* Verify there are still 5 clusters in use */ 1137 CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs)); 1138 1139 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1140 poll_threads(); 1141 CU_ASSERT(g_bserrno == 0); 1142 /* Now there are only 3 clusters in use */ 1143 CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs)); 1144 1145 /* Resize the blob to be 10 clusters. Growth takes effect immediately. */ 1146 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1147 poll_threads(); 1148 CU_ASSERT(g_bserrno == 0); 1149 CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs)); 1150 1151 /* Try to resize the blob to size larger than blobstore. */ 1152 spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL); 1153 poll_threads(); 1154 CU_ASSERT(g_bserrno == -ENOSPC); 1155 1156 ut_blob_close_and_delete(bs, blob); 1157 } 1158 1159 static void 1160 blob_read_only(void) 1161 { 1162 struct spdk_blob_store *bs; 1163 struct spdk_bs_dev *dev; 1164 struct spdk_blob *blob; 1165 struct spdk_bs_opts opts; 1166 spdk_blob_id blobid; 1167 int rc; 1168 1169 dev = init_dev(); 1170 spdk_bs_opts_init(&opts); 1171 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 1172 1173 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 1174 poll_threads(); 1175 CU_ASSERT(g_bserrno == 0); 1176 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 1177 bs = g_bs; 1178 1179 blob = ut_blob_create_and_open(bs, NULL); 1180 blobid = spdk_blob_get_id(blob); 1181 1182 rc = spdk_blob_set_read_only(blob); 1183 CU_ASSERT(rc == 0); 1184 1185 CU_ASSERT(blob->data_ro == false); 1186 CU_ASSERT(blob->md_ro == false); 1187 1188 spdk_blob_sync_md(blob, bs_op_complete, NULL); 1189 poll_threads(); 1190 1191 CU_ASSERT(blob->data_ro == true); 1192 CU_ASSERT(blob->md_ro == true); 1193 CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY); 1194 1195 spdk_blob_close(blob, blob_op_complete, NULL); 1196 poll_threads(); 1197 CU_ASSERT(g_bserrno == 0); 1198 1199 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 1200 poll_threads(); 1201 CU_ASSERT(g_bserrno == 0); 1202 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 1203 blob = g_blob; 1204 1205 CU_ASSERT(blob->data_ro == true); 1206 CU_ASSERT(blob->md_ro == true); 1207 CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY); 1208 1209 spdk_blob_close(blob, blob_op_complete, NULL); 1210 poll_threads(); 1211 CU_ASSERT(g_bserrno == 0); 1212 1213 ut_bs_reload(&bs, &opts); 1214 1215 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 1216 poll_threads(); 1217 CU_ASSERT(g_bserrno == 0); 1218 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 1219 blob = g_blob; 1220 1221 CU_ASSERT(blob->data_ro == true); 1222 CU_ASSERT(blob->md_ro == true); 1223 CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY); 1224 1225 ut_blob_close_and_delete(bs, blob); 1226 1227 spdk_bs_unload(bs, bs_op_complete, NULL); 1228 poll_threads(); 1229 CU_ASSERT(g_bserrno == 0); 1230 } 1231 1232 static void 1233 channel_ops(void) 1234 { 1235 struct spdk_blob_store *bs = g_bs; 1236 struct spdk_io_channel *channel; 1237 1238 channel = spdk_bs_alloc_io_channel(bs); 1239 CU_ASSERT(channel != NULL); 1240 1241 spdk_bs_free_io_channel(channel); 1242 poll_threads(); 1243 } 1244 1245 static void 1246 blob_write(void) 1247 { 1248 struct spdk_blob_store *bs = g_bs; 1249 struct spdk_blob *blob = g_blob; 1250 struct spdk_io_channel *channel; 1251 uint64_t pages_per_cluster; 
1252 uint8_t payload[10 * 4096]; 1253 1254 pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs); 1255 1256 channel = spdk_bs_alloc_io_channel(bs); 1257 CU_ASSERT(channel != NULL); 1258 1259 /* Write to a blob with 0 size */ 1260 spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1261 poll_threads(); 1262 CU_ASSERT(g_bserrno == -EINVAL); 1263 1264 /* Resize the blob */ 1265 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 1266 poll_threads(); 1267 CU_ASSERT(g_bserrno == 0); 1268 1269 /* Confirm that write fails if blob is marked read-only. */ 1270 blob->data_ro = true; 1271 spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1272 poll_threads(); 1273 CU_ASSERT(g_bserrno == -EPERM); 1274 blob->data_ro = false; 1275 1276 /* Write to the blob */ 1277 spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1278 poll_threads(); 1279 CU_ASSERT(g_bserrno == 0); 1280 1281 /* Write starting beyond the end */ 1282 spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete, 1283 NULL); 1284 poll_threads(); 1285 CU_ASSERT(g_bserrno == -EINVAL); 1286 1287 /* Write starting at a valid location but going off the end */ 1288 spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1, 1289 blob_op_complete, NULL); 1290 poll_threads(); 1291 CU_ASSERT(g_bserrno == -EINVAL); 1292 1293 spdk_bs_free_io_channel(channel); 1294 poll_threads(); 1295 } 1296 1297 static void 1298 blob_read(void) 1299 { 1300 struct spdk_blob_store *bs = g_bs; 1301 struct spdk_blob *blob = g_blob; 1302 struct spdk_io_channel *channel; 1303 uint64_t pages_per_cluster; 1304 uint8_t payload[10 * 4096]; 1305 1306 pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs); 1307 1308 channel = spdk_bs_alloc_io_channel(bs); 1309 CU_ASSERT(channel != NULL); 1310 1311 /* Read from a blob with 0 size */ 1312 spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1313 poll_threads(); 1314 CU_ASSERT(g_bserrno == -EINVAL); 1315 1316 /* Resize the blob */ 1317 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 1318 poll_threads(); 1319 CU_ASSERT(g_bserrno == 0); 1320 1321 /* Confirm that read passes if blob is marked read-only. 
*/ 1322 blob->data_ro = true; 1323 spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1324 poll_threads(); 1325 CU_ASSERT(g_bserrno == 0); 1326 blob->data_ro = false; 1327 1328 /* Read from the blob */ 1329 spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1330 poll_threads(); 1331 CU_ASSERT(g_bserrno == 0); 1332 1333 /* Read starting beyond the end */ 1334 spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete, 1335 NULL); 1336 poll_threads(); 1337 CU_ASSERT(g_bserrno == -EINVAL); 1338 1339 /* Read starting at a valid location but going off the end */ 1340 spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1, 1341 blob_op_complete, NULL); 1342 poll_threads(); 1343 CU_ASSERT(g_bserrno == -EINVAL); 1344 1345 spdk_bs_free_io_channel(channel); 1346 poll_threads(); 1347 } 1348 1349 static void 1350 blob_rw_verify(void) 1351 { 1352 struct spdk_blob_store *bs = g_bs; 1353 struct spdk_blob *blob = g_blob; 1354 struct spdk_io_channel *channel; 1355 uint8_t payload_read[10 * 4096]; 1356 uint8_t payload_write[10 * 4096]; 1357 1358 channel = spdk_bs_alloc_io_channel(bs); 1359 CU_ASSERT(channel != NULL); 1360 1361 spdk_blob_resize(blob, 32, blob_op_complete, NULL); 1362 poll_threads(); 1363 CU_ASSERT(g_bserrno == 0); 1364 1365 memset(payload_write, 0xE5, sizeof(payload_write)); 1366 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 1367 poll_threads(); 1368 CU_ASSERT(g_bserrno == 0); 1369 1370 memset(payload_read, 0x00, sizeof(payload_read)); 1371 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 1372 poll_threads(); 1373 CU_ASSERT(g_bserrno == 0); 1374 CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0); 1375 1376 spdk_bs_free_io_channel(channel); 1377 poll_threads(); 1378 } 1379 1380 static void 1381 blob_rw_verify_iov(void) 1382 { 1383 struct spdk_blob_store *bs = g_bs; 1384 struct spdk_blob *blob; 1385 struct spdk_io_channel *channel; 1386 uint8_t payload_read[10 * 4096]; 1387 uint8_t payload_write[10 * 4096]; 1388 struct iovec iov_read[3]; 1389 struct iovec iov_write[3]; 1390 void *buf; 1391 1392 channel = spdk_bs_alloc_io_channel(bs); 1393 CU_ASSERT(channel != NULL); 1394 1395 blob = ut_blob_create_and_open(bs, NULL); 1396 1397 spdk_blob_resize(blob, 2, blob_op_complete, NULL); 1398 poll_threads(); 1399 CU_ASSERT(g_bserrno == 0); 1400 1401 /* 1402 * Manually adjust the offset of the blob's second cluster. This allows 1403 * us to make sure that the readv/write code correctly accounts for I/O 1404 * that cross cluster boundaries. Start by asserting that the allocated 1405 * clusters are where we expect before modifying the second cluster. 1406 */ 1407 CU_ASSERT(blob->active.clusters[0] == 1 * 256); 1408 CU_ASSERT(blob->active.clusters[1] == 2 * 256); 1409 blob->active.clusters[1] = 3 * 256; 1410 1411 memset(payload_write, 0xE5, sizeof(payload_write)); 1412 iov_write[0].iov_base = payload_write; 1413 iov_write[0].iov_len = 1 * 4096; 1414 iov_write[1].iov_base = payload_write + 1 * 4096; 1415 iov_write[1].iov_len = 5 * 4096; 1416 iov_write[2].iov_base = payload_write + 6 * 4096; 1417 iov_write[2].iov_len = 4 * 4096; 1418 /* 1419 * Choose a page offset just before the cluster boundary. The first 6 pages of payload 1420 * will get written to the first cluster, the last 4 to the second cluster. 
1421 */ 1422 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 1423 poll_threads(); 1424 CU_ASSERT(g_bserrno == 0); 1425 1426 memset(payload_read, 0xAA, sizeof(payload_read)); 1427 iov_read[0].iov_base = payload_read; 1428 iov_read[0].iov_len = 3 * 4096; 1429 iov_read[1].iov_base = payload_read + 3 * 4096; 1430 iov_read[1].iov_len = 4 * 4096; 1431 iov_read[2].iov_base = payload_read + 7 * 4096; 1432 iov_read[2].iov_len = 3 * 4096; 1433 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 1434 poll_threads(); 1435 CU_ASSERT(g_bserrno == 0); 1436 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 1437 1438 buf = calloc(1, 256 * 4096); 1439 SPDK_CU_ASSERT_FATAL(buf != NULL); 1440 /* Check that cluster 2 on "disk" was not modified. */ 1441 CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0); 1442 free(buf); 1443 1444 spdk_blob_close(blob, blob_op_complete, NULL); 1445 poll_threads(); 1446 CU_ASSERT(g_bserrno == 0); 1447 1448 spdk_bs_free_io_channel(channel); 1449 poll_threads(); 1450 } 1451 1452 static uint32_t 1453 bs_channel_get_req_count(struct spdk_io_channel *_channel) 1454 { 1455 struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel); 1456 struct spdk_bs_request_set *set; 1457 uint32_t count = 0; 1458 1459 TAILQ_FOREACH(set, &channel->reqs, link) { 1460 count++; 1461 } 1462 1463 return count; 1464 } 1465 1466 static void 1467 blob_rw_verify_iov_nomem(void) 1468 { 1469 struct spdk_blob_store *bs = g_bs; 1470 struct spdk_blob *blob = g_blob; 1471 struct spdk_io_channel *channel; 1472 uint8_t payload_write[10 * 4096]; 1473 struct iovec iov_write[3]; 1474 uint32_t req_count; 1475 1476 channel = spdk_bs_alloc_io_channel(bs); 1477 CU_ASSERT(channel != NULL); 1478 1479 spdk_blob_resize(blob, 2, blob_op_complete, NULL); 1480 poll_threads(); 1481 CU_ASSERT(g_bserrno == 0); 1482 1483 /* 1484 * Choose a page offset just before the cluster boundary. The first 6 pages of payload 1485 * will get written to the first cluster, the last 4 to the second cluster. 1486 */ 1487 iov_write[0].iov_base = payload_write; 1488 iov_write[0].iov_len = 1 * 4096; 1489 iov_write[1].iov_base = payload_write + 1 * 4096; 1490 iov_write[1].iov_len = 5 * 4096; 1491 iov_write[2].iov_base = payload_write + 6 * 4096; 1492 iov_write[2].iov_len = 4 * 4096; 1493 MOCK_SET(calloc, NULL); 1494 req_count = bs_channel_get_req_count(channel); 1495 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 1496 poll_threads(); 1497 CU_ASSERT(g_bserrno = -ENOMEM); 1498 CU_ASSERT(req_count == bs_channel_get_req_count(channel)); 1499 MOCK_CLEAR(calloc); 1500 1501 spdk_bs_free_io_channel(channel); 1502 poll_threads(); 1503 } 1504 1505 static void 1506 blob_rw_iov_read_only(void) 1507 { 1508 struct spdk_blob_store *bs = g_bs; 1509 struct spdk_blob *blob = g_blob; 1510 struct spdk_io_channel *channel; 1511 uint8_t payload_read[4096]; 1512 uint8_t payload_write[4096]; 1513 struct iovec iov_read; 1514 struct iovec iov_write; 1515 1516 channel = spdk_bs_alloc_io_channel(bs); 1517 CU_ASSERT(channel != NULL); 1518 1519 spdk_blob_resize(blob, 2, blob_op_complete, NULL); 1520 poll_threads(); 1521 CU_ASSERT(g_bserrno == 0); 1522 1523 /* Verify that writev failed if read_only flag is set. 
*/ 1524 blob->data_ro = true; 1525 iov_write.iov_base = payload_write; 1526 iov_write.iov_len = sizeof(payload_write); 1527 spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL); 1528 poll_threads(); 1529 CU_ASSERT(g_bserrno == -EPERM); 1530 1531 /* Verify that reads pass if data_ro flag is set. */ 1532 iov_read.iov_base = payload_read; 1533 iov_read.iov_len = sizeof(payload_read); 1534 spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL); 1535 poll_threads(); 1536 CU_ASSERT(g_bserrno == 0); 1537 1538 spdk_bs_free_io_channel(channel); 1539 poll_threads(); 1540 } 1541 1542 static void 1543 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel, 1544 uint8_t *payload, uint64_t offset, uint64_t length, 1545 spdk_blob_op_complete cb_fn, void *cb_arg) 1546 { 1547 uint64_t i; 1548 uint8_t *buf; 1549 uint64_t page_size = spdk_bs_get_page_size(blob->bs); 1550 1551 /* To be sure that operation is NOT splitted, read one page at the time */ 1552 buf = payload; 1553 for (i = 0; i < length; i++) { 1554 spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL); 1555 poll_threads(); 1556 if (g_bserrno != 0) { 1557 /* Pass the error code up */ 1558 break; 1559 } 1560 buf += page_size; 1561 } 1562 1563 cb_fn(cb_arg, g_bserrno); 1564 } 1565 1566 static void 1567 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel, 1568 uint8_t *payload, uint64_t offset, uint64_t length, 1569 spdk_blob_op_complete cb_fn, void *cb_arg) 1570 { 1571 uint64_t i; 1572 uint8_t *buf; 1573 uint64_t page_size = spdk_bs_get_page_size(blob->bs); 1574 1575 /* To be sure that operation is NOT splitted, write one page at the time */ 1576 buf = payload; 1577 for (i = 0; i < length; i++) { 1578 spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL); 1579 poll_threads(); 1580 if (g_bserrno != 0) { 1581 /* Pass the error code up */ 1582 break; 1583 } 1584 buf += page_size; 1585 } 1586 1587 cb_fn(cb_arg, g_bserrno); 1588 } 1589 1590 static void 1591 blob_operation_split_rw(void) 1592 { 1593 struct spdk_blob_store *bs = g_bs; 1594 struct spdk_blob *blob; 1595 struct spdk_io_channel *channel; 1596 struct spdk_blob_opts opts; 1597 uint64_t cluster_size; 1598 1599 uint64_t payload_size; 1600 uint8_t *payload_read; 1601 uint8_t *payload_write; 1602 uint8_t *payload_pattern; 1603 1604 uint64_t page_size; 1605 uint64_t pages_per_cluster; 1606 uint64_t pages_per_payload; 1607 1608 uint64_t i; 1609 1610 cluster_size = spdk_bs_get_cluster_size(bs); 1611 page_size = spdk_bs_get_page_size(bs); 1612 pages_per_cluster = cluster_size / page_size; 1613 pages_per_payload = pages_per_cluster * 5; 1614 payload_size = cluster_size * 5; 1615 1616 payload_read = malloc(payload_size); 1617 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 1618 1619 payload_write = malloc(payload_size); 1620 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 1621 1622 payload_pattern = malloc(payload_size); 1623 SPDK_CU_ASSERT_FATAL(payload_pattern != NULL); 1624 1625 /* Prepare random pattern to write */ 1626 memset(payload_pattern, 0xFF, payload_size); 1627 for (i = 0; i < pages_per_payload; i++) { 1628 *((uint64_t *)(payload_pattern + page_size * i)) = (i + 1); 1629 } 1630 1631 channel = spdk_bs_alloc_io_channel(bs); 1632 SPDK_CU_ASSERT_FATAL(channel != NULL); 1633 1634 /* Create blob */ 1635 ut_spdk_blob_opts_init(&opts); 1636 opts.thin_provision = false; 1637 opts.num_clusters = 5; 1638 1639 blob = ut_blob_create_and_open(bs, &opts); 1640 
CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 1641 1642 /* Initial read should return zeroed payload */ 1643 memset(payload_read, 0xFF, payload_size); 1644 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1645 poll_threads(); 1646 CU_ASSERT(g_bserrno == 0); 1647 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 1648 1649 /* Fill whole blob except last page */ 1650 spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1, 1651 blob_op_complete, NULL); 1652 poll_threads(); 1653 CU_ASSERT(g_bserrno == 0); 1654 1655 /* Write last page with a pattern */ 1656 spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1, 1657 blob_op_complete, NULL); 1658 poll_threads(); 1659 CU_ASSERT(g_bserrno == 0); 1660 1661 /* Read whole blob and check consistency */ 1662 memset(payload_read, 0xFF, payload_size); 1663 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1664 poll_threads(); 1665 CU_ASSERT(g_bserrno == 0); 1666 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0); 1667 CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0); 1668 1669 /* Fill whole blob except first page */ 1670 spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1, 1671 blob_op_complete, NULL); 1672 poll_threads(); 1673 CU_ASSERT(g_bserrno == 0); 1674 1675 /* Write first page with a pattern */ 1676 spdk_blob_io_write(blob, channel, payload_pattern, 0, 1, 1677 blob_op_complete, NULL); 1678 poll_threads(); 1679 CU_ASSERT(g_bserrno == 0); 1680 1681 /* Read whole blob and check consistency */ 1682 memset(payload_read, 0xFF, payload_size); 1683 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1684 poll_threads(); 1685 CU_ASSERT(g_bserrno == 0); 1686 CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0); 1687 CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0); 1688 1689 1690 /* Fill whole blob with a pattern (5 clusters) */ 1691 1692 /* 1. Read test. */ 1693 _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload, 1694 blob_op_complete, NULL); 1695 poll_threads(); 1696 CU_ASSERT(g_bserrno == 0); 1697 1698 memset(payload_read, 0xFF, payload_size); 1699 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1700 poll_threads(); 1701 poll_threads(); 1702 CU_ASSERT(g_bserrno == 0); 1703 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1704 1705 /* 2. Write test. 
*/ 1706 spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload, 1707 blob_op_complete, NULL); 1708 poll_threads(); 1709 CU_ASSERT(g_bserrno == 0); 1710 1711 memset(payload_read, 0xFF, payload_size); 1712 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1713 poll_threads(); 1714 CU_ASSERT(g_bserrno == 0); 1715 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1716 1717 spdk_bs_free_io_channel(channel); 1718 poll_threads(); 1719 1720 g_blob = NULL; 1721 g_blobid = 0; 1722 1723 free(payload_read); 1724 free(payload_write); 1725 free(payload_pattern); 1726 1727 ut_blob_close_and_delete(bs, blob); 1728 } 1729 1730 static void 1731 blob_operation_split_rw_iov(void) 1732 { 1733 struct spdk_blob_store *bs = g_bs; 1734 struct spdk_blob *blob; 1735 struct spdk_io_channel *channel; 1736 struct spdk_blob_opts opts; 1737 uint64_t cluster_size; 1738 1739 uint64_t payload_size; 1740 uint8_t *payload_read; 1741 uint8_t *payload_write; 1742 uint8_t *payload_pattern; 1743 1744 uint64_t page_size; 1745 uint64_t pages_per_cluster; 1746 uint64_t pages_per_payload; 1747 1748 struct iovec iov_read[2]; 1749 struct iovec iov_write[2]; 1750 1751 uint64_t i, j; 1752 1753 cluster_size = spdk_bs_get_cluster_size(bs); 1754 page_size = spdk_bs_get_page_size(bs); 1755 pages_per_cluster = cluster_size / page_size; 1756 pages_per_payload = pages_per_cluster * 5; 1757 payload_size = cluster_size * 5; 1758 1759 payload_read = malloc(payload_size); 1760 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 1761 1762 payload_write = malloc(payload_size); 1763 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 1764 1765 payload_pattern = malloc(payload_size); 1766 SPDK_CU_ASSERT_FATAL(payload_pattern != NULL); 1767 1768 /* Prepare random pattern to write */ 1769 for (i = 0; i < pages_per_payload; i++) { 1770 for (j = 0; j < page_size / sizeof(uint64_t); j++) { 1771 uint64_t *tmp; 1772 1773 tmp = (uint64_t *)payload_pattern; 1774 tmp += ((page_size * i) / sizeof(uint64_t)) + j; 1775 *tmp = i + 1; 1776 } 1777 } 1778 1779 channel = spdk_bs_alloc_io_channel(bs); 1780 SPDK_CU_ASSERT_FATAL(channel != NULL); 1781 1782 /* Create blob */ 1783 ut_spdk_blob_opts_init(&opts); 1784 opts.thin_provision = false; 1785 opts.num_clusters = 5; 1786 1787 blob = ut_blob_create_and_open(bs, &opts); 1788 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 1789 1790 /* Initial read should return zeroes payload */ 1791 memset(payload_read, 0xFF, payload_size); 1792 iov_read[0].iov_base = payload_read; 1793 iov_read[0].iov_len = cluster_size * 3; 1794 iov_read[1].iov_base = payload_read + cluster_size * 3; 1795 iov_read[1].iov_len = cluster_size * 2; 1796 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1797 poll_threads(); 1798 CU_ASSERT(g_bserrno == 0); 1799 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 1800 1801 /* First of iovs fills whole blob except last page and second of iovs writes last page 1802 * with a pattern. 
*/ 1803 iov_write[0].iov_base = payload_pattern; 1804 iov_write[0].iov_len = payload_size - page_size; 1805 iov_write[1].iov_base = payload_pattern; 1806 iov_write[1].iov_len = page_size; 1807 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1808 poll_threads(); 1809 CU_ASSERT(g_bserrno == 0); 1810 1811 /* Read whole blob and check consistency */ 1812 memset(payload_read, 0xFF, payload_size); 1813 iov_read[0].iov_base = payload_read; 1814 iov_read[0].iov_len = cluster_size * 2; 1815 iov_read[1].iov_base = payload_read + cluster_size * 2; 1816 iov_read[1].iov_len = cluster_size * 3; 1817 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1818 poll_threads(); 1819 CU_ASSERT(g_bserrno == 0); 1820 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0); 1821 CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0); 1822 1823 /* First of iovs fills only first page and second of iovs writes whole blob except 1824 * first page with a pattern. */ 1825 iov_write[0].iov_base = payload_pattern; 1826 iov_write[0].iov_len = page_size; 1827 iov_write[1].iov_base = payload_pattern; 1828 iov_write[1].iov_len = payload_size - page_size; 1829 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1830 poll_threads(); 1831 CU_ASSERT(g_bserrno == 0); 1832 1833 /* Read whole blob and check consistency */ 1834 memset(payload_read, 0xFF, payload_size); 1835 iov_read[0].iov_base = payload_read; 1836 iov_read[0].iov_len = cluster_size * 4; 1837 iov_read[1].iov_base = payload_read + cluster_size * 4; 1838 iov_read[1].iov_len = cluster_size; 1839 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1840 poll_threads(); 1841 CU_ASSERT(g_bserrno == 0); 1842 CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0); 1843 CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0); 1844 1845 1846 /* Fill whole blob with a pattern (5 clusters) */ 1847 1848 /* 1. Read test. */ 1849 _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload, 1850 blob_op_complete, NULL); 1851 poll_threads(); 1852 CU_ASSERT(g_bserrno == 0); 1853 1854 memset(payload_read, 0xFF, payload_size); 1855 iov_read[0].iov_base = payload_read; 1856 iov_read[0].iov_len = cluster_size; 1857 iov_read[1].iov_base = payload_read + cluster_size; 1858 iov_read[1].iov_len = cluster_size * 4; 1859 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1860 poll_threads(); 1861 CU_ASSERT(g_bserrno == 0); 1862 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1863 1864 /* 2. Write test. 
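	 * payload_read currently holds the verified pattern, so it is reused below
	 * as the source for the split writev; the no-split read then cross-checks it.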
*/ 1865 iov_write[0].iov_base = payload_read; 1866 iov_write[0].iov_len = cluster_size * 2; 1867 iov_write[1].iov_base = payload_read + cluster_size * 2; 1868 iov_write[1].iov_len = cluster_size * 3; 1869 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1870 poll_threads(); 1871 CU_ASSERT(g_bserrno == 0); 1872 1873 memset(payload_read, 0xFF, payload_size); 1874 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1875 poll_threads(); 1876 CU_ASSERT(g_bserrno == 0); 1877 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1878 1879 spdk_bs_free_io_channel(channel); 1880 poll_threads(); 1881 1882 g_blob = NULL; 1883 g_blobid = 0; 1884 1885 free(payload_read); 1886 free(payload_write); 1887 free(payload_pattern); 1888 1889 ut_blob_close_and_delete(bs, blob); 1890 } 1891 1892 static void 1893 blob_unmap(void) 1894 { 1895 struct spdk_blob_store *bs = g_bs; 1896 struct spdk_blob *blob; 1897 struct spdk_io_channel *channel; 1898 struct spdk_blob_opts opts; 1899 uint8_t payload[4096]; 1900 int i; 1901 1902 channel = spdk_bs_alloc_io_channel(bs); 1903 CU_ASSERT(channel != NULL); 1904 1905 ut_spdk_blob_opts_init(&opts); 1906 opts.num_clusters = 10; 1907 1908 blob = ut_blob_create_and_open(bs, &opts); 1909 1910 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1911 poll_threads(); 1912 CU_ASSERT(g_bserrno == 0); 1913 1914 memset(payload, 0, sizeof(payload)); 1915 payload[0] = 0xFF; 1916 1917 /* 1918 * Set first byte of every cluster to 0xFF. 1919 * First cluster on device is reserved so let's start from cluster number 1 1920 */ 1921 for (i = 1; i < 11; i++) { 1922 g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF; 1923 } 1924 1925 /* Confirm writes */ 1926 for (i = 0; i < 10; i++) { 1927 payload[0] = 0; 1928 spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1, 1929 blob_op_complete, NULL); 1930 poll_threads(); 1931 CU_ASSERT(g_bserrno == 0); 1932 CU_ASSERT(payload[0] == 0xFF); 1933 } 1934 1935 /* Mark some clusters as unallocated */ 1936 blob->active.clusters[1] = 0; 1937 blob->active.clusters[2] = 0; 1938 blob->active.clusters[3] = 0; 1939 blob->active.clusters[6] = 0; 1940 blob->active.clusters[8] = 0; 1941 1942 /* Unmap clusters by resizing to 0 */ 1943 spdk_blob_resize(blob, 0, blob_op_complete, NULL); 1944 poll_threads(); 1945 CU_ASSERT(g_bserrno == 0); 1946 1947 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1948 poll_threads(); 1949 CU_ASSERT(g_bserrno == 0); 1950 1951 /* Confirm that only 'allocated' clusters were unmapped */ 1952 for (i = 1; i < 11; i++) { 1953 switch (i) { 1954 case 2: 1955 case 3: 1956 case 4: 1957 case 7: 1958 case 9: 1959 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF); 1960 break; 1961 default: 1962 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0); 1963 break; 1964 } 1965 } 1966 1967 spdk_bs_free_io_channel(channel); 1968 poll_threads(); 1969 1970 ut_blob_close_and_delete(bs, blob); 1971 } 1972 1973 static void 1974 blob_iter(void) 1975 { 1976 struct spdk_blob_store *bs = g_bs; 1977 struct spdk_blob *blob; 1978 spdk_blob_id blobid; 1979 struct spdk_blob_opts blob_opts; 1980 1981 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1982 poll_threads(); 1983 CU_ASSERT(g_blob == NULL); 1984 CU_ASSERT(g_bserrno == -ENOENT); 1985 1986 ut_spdk_blob_opts_init(&blob_opts); 1987 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 1988 poll_threads(); 1989 CU_ASSERT(g_bserrno == 0); 
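	/* The create callback stored the new blob's ID in g_blobid; remember it for
	 * the iterator checks below. */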
1990 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1991 blobid = g_blobid; 1992 1993 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1994 poll_threads(); 1995 CU_ASSERT(g_blob != NULL); 1996 CU_ASSERT(g_bserrno == 0); 1997 blob = g_blob; 1998 CU_ASSERT(spdk_blob_get_id(blob) == blobid); 1999 2000 spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL); 2001 poll_threads(); 2002 CU_ASSERT(g_blob == NULL); 2003 CU_ASSERT(g_bserrno == -ENOENT); 2004 } 2005 2006 static void 2007 blob_xattr(void) 2008 { 2009 struct spdk_blob_store *bs = g_bs; 2010 struct spdk_blob *blob = g_blob; 2011 spdk_blob_id blobid = spdk_blob_get_id(blob); 2012 uint64_t length; 2013 int rc; 2014 const char *name1, *name2; 2015 const void *value; 2016 size_t value_len; 2017 struct spdk_xattr_names *names; 2018 2019 /* Test that set_xattr fails if md_ro flag is set. */ 2020 blob->md_ro = true; 2021 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2022 CU_ASSERT(rc == -EPERM); 2023 2024 blob->md_ro = false; 2025 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2026 CU_ASSERT(rc == 0); 2027 2028 length = 2345; 2029 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2030 CU_ASSERT(rc == 0); 2031 2032 /* Overwrite "length" xattr. */ 2033 length = 3456; 2034 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2035 CU_ASSERT(rc == 0); 2036 2037 /* get_xattr should still work even if md_ro flag is set. */ 2038 value = NULL; 2039 blob->md_ro = true; 2040 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2041 CU_ASSERT(rc == 0); 2042 SPDK_CU_ASSERT_FATAL(value != NULL); 2043 CU_ASSERT(*(uint64_t *)value == length); 2044 CU_ASSERT(value_len == 8); 2045 blob->md_ro = false; 2046 2047 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2048 CU_ASSERT(rc == -ENOENT); 2049 2050 names = NULL; 2051 rc = spdk_blob_get_xattr_names(blob, &names); 2052 CU_ASSERT(rc == 0); 2053 SPDK_CU_ASSERT_FATAL(names != NULL); 2054 CU_ASSERT(spdk_xattr_names_get_count(names) == 2); 2055 name1 = spdk_xattr_names_get_name(names, 0); 2056 SPDK_CU_ASSERT_FATAL(name1 != NULL); 2057 CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length")); 2058 name2 = spdk_xattr_names_get_name(names, 1); 2059 SPDK_CU_ASSERT_FATAL(name2 != NULL); 2060 CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length")); 2061 CU_ASSERT(strcmp(name1, name2)); 2062 spdk_xattr_names_free(names); 2063 2064 /* Confirm that remove_xattr fails if md_ro is set to true. 
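	 * (the same -EPERM that set_xattr returned above).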
*/ 2065 blob->md_ro = true; 2066 rc = spdk_blob_remove_xattr(blob, "name"); 2067 CU_ASSERT(rc == -EPERM); 2068 2069 blob->md_ro = false; 2070 rc = spdk_blob_remove_xattr(blob, "name"); 2071 CU_ASSERT(rc == 0); 2072 2073 rc = spdk_blob_remove_xattr(blob, "foobar"); 2074 CU_ASSERT(rc == -ENOENT); 2075 2076 /* Set internal xattr */ 2077 length = 7898; 2078 rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true); 2079 CU_ASSERT(rc == 0); 2080 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2081 CU_ASSERT(rc == 0); 2082 CU_ASSERT(*(uint64_t *)value == length); 2083 /* try to get public xattr with same name */ 2084 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2085 CU_ASSERT(rc != 0); 2086 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false); 2087 CU_ASSERT(rc != 0); 2088 /* Check if SPDK_BLOB_INTERNAL_XATTR is set */ 2089 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 2090 SPDK_BLOB_INTERNAL_XATTR); 2091 2092 spdk_blob_close(blob, blob_op_complete, NULL); 2093 poll_threads(); 2094 2095 /* Check if xattrs are persisted */ 2096 ut_bs_reload(&bs, NULL); 2097 2098 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2099 poll_threads(); 2100 CU_ASSERT(g_bserrno == 0); 2101 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2102 blob = g_blob; 2103 2104 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2105 CU_ASSERT(rc == 0); 2106 CU_ASSERT(*(uint64_t *)value == length); 2107 2108 /* try to get internal xattr trough public call */ 2109 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2110 CU_ASSERT(rc != 0); 2111 2112 rc = blob_remove_xattr(blob, "internal", true); 2113 CU_ASSERT(rc == 0); 2114 2115 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0); 2116 } 2117 2118 static void 2119 bs_load(void) 2120 { 2121 struct spdk_blob_store *bs; 2122 struct spdk_bs_dev *dev; 2123 spdk_blob_id blobid; 2124 struct spdk_blob *blob; 2125 struct spdk_bs_super_block *super_block; 2126 uint64_t length; 2127 int rc; 2128 const void *value; 2129 size_t value_len; 2130 struct spdk_bs_opts opts; 2131 struct spdk_blob_opts blob_opts; 2132 2133 dev = init_dev(); 2134 spdk_bs_opts_init(&opts); 2135 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2136 2137 /* Initialize a new blob store */ 2138 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2139 poll_threads(); 2140 CU_ASSERT(g_bserrno == 0); 2141 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2142 bs = g_bs; 2143 2144 /* Try to open a blobid that does not exist */ 2145 spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL); 2146 poll_threads(); 2147 CU_ASSERT(g_bserrno == -ENOENT); 2148 CU_ASSERT(g_blob == NULL); 2149 2150 /* Create a blob */ 2151 blob = ut_blob_create_and_open(bs, NULL); 2152 blobid = spdk_blob_get_id(blob); 2153 2154 /* Try again to open valid blob but without the upper bit set */ 2155 spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL); 2156 poll_threads(); 2157 CU_ASSERT(g_bserrno == -ENOENT); 2158 CU_ASSERT(g_blob == NULL); 2159 2160 /* Set some xattrs */ 2161 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2162 CU_ASSERT(rc == 0); 2163 2164 length = 2345; 2165 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2166 CU_ASSERT(rc == 0); 2167 2168 /* Resize the blob */ 2169 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2170 poll_threads(); 2171 CU_ASSERT(g_bserrno == 0); 2172 2173 
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Load should fail for device with an unsupported blocklen */
	dev = init_dev();
	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_md_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts);
	opts.max_md_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_channel_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts);
	opts.max_channel_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Verify that blobstore is marked dirty after first metadata sync */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	CU_ASSERT(super_block->clean == 1);

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);

	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Load should fail: bdev size < saved size */
	dev = init_dev();
	dev->blockcnt /= 2;

	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == -EILSEQ);

	/* Load should succeed: bdev size > saved size */
	dev = init_dev();
	dev->blockcnt *= 4;

	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
2282 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2283 bs = g_bs; 2284 2285 CU_ASSERT(g_bserrno == 0); 2286 spdk_bs_unload(bs, bs_op_complete, NULL); 2287 poll_threads(); 2288 2289 2290 /* Test compatibility mode */ 2291 2292 dev = init_dev(); 2293 super_block->size = 0; 2294 super_block->crc = blob_md_page_calc_crc(super_block); 2295 2296 spdk_bs_opts_init(&opts); 2297 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2298 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2299 poll_threads(); 2300 CU_ASSERT(g_bserrno == 0); 2301 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2302 bs = g_bs; 2303 2304 /* Create a blob */ 2305 ut_spdk_blob_opts_init(&blob_opts); 2306 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2307 poll_threads(); 2308 CU_ASSERT(g_bserrno == 0); 2309 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2310 2311 /* Blobstore should update number of blocks in super_block */ 2312 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2313 CU_ASSERT(super_block->clean == 0); 2314 2315 spdk_bs_unload(bs, bs_op_complete, NULL); 2316 poll_threads(); 2317 CU_ASSERT(g_bserrno == 0); 2318 CU_ASSERT(super_block->clean == 1); 2319 g_bs = NULL; 2320 2321 } 2322 2323 static void 2324 bs_load_pending_removal(void) 2325 { 2326 struct spdk_blob_store *bs = g_bs; 2327 struct spdk_blob_opts opts; 2328 struct spdk_blob *blob, *snapshot; 2329 spdk_blob_id blobid, snapshotid; 2330 const void *value; 2331 size_t value_len; 2332 int rc; 2333 2334 /* Create blob */ 2335 ut_spdk_blob_opts_init(&opts); 2336 opts.num_clusters = 10; 2337 2338 blob = ut_blob_create_and_open(bs, &opts); 2339 blobid = spdk_blob_get_id(blob); 2340 2341 /* Create snapshot */ 2342 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 2343 poll_threads(); 2344 CU_ASSERT(g_bserrno == 0); 2345 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2346 snapshotid = g_blobid; 2347 2348 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2349 poll_threads(); 2350 CU_ASSERT(g_bserrno == 0); 2351 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2352 snapshot = g_blob; 2353 2354 /* Set SNAPSHOT_PENDING_REMOVAL xattr */ 2355 snapshot->md_ro = false; 2356 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2357 CU_ASSERT(rc == 0); 2358 snapshot->md_ro = true; 2359 2360 spdk_blob_close(snapshot, blob_op_complete, NULL); 2361 poll_threads(); 2362 CU_ASSERT(g_bserrno == 0); 2363 2364 spdk_blob_close(blob, blob_op_complete, NULL); 2365 poll_threads(); 2366 CU_ASSERT(g_bserrno == 0); 2367 2368 /* Reload blobstore */ 2369 ut_bs_reload(&bs, NULL); 2370 2371 /* Snapshot should not be removed as blob is still pointing to it */ 2372 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2373 poll_threads(); 2374 CU_ASSERT(g_bserrno == 0); 2375 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2376 snapshot = g_blob; 2377 2378 /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ 2379 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 2380 CU_ASSERT(rc != 0); 2381 2382 /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ 2383 snapshot->md_ro = false; 2384 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2385 CU_ASSERT(rc == 0); 2386 snapshot->md_ro = true; 2387 2388 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2389 poll_threads(); 2390 CU_ASSERT(g_bserrno == 0); 2391 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2392 blob = g_blob; 2393 2394 
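	/* The clone still references the snapshot through its BLOB_SNAPSHOT xattr,
	 * which is what kept the pending-removal snapshot alive across the reload. */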
/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ 2395 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 2396 2397 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2398 poll_threads(); 2399 CU_ASSERT(g_bserrno == 0); 2400 2401 spdk_blob_close(snapshot, blob_op_complete, NULL); 2402 poll_threads(); 2403 CU_ASSERT(g_bserrno == 0); 2404 2405 spdk_blob_close(blob, blob_op_complete, NULL); 2406 poll_threads(); 2407 CU_ASSERT(g_bserrno == 0); 2408 2409 /* Reload blobstore */ 2410 ut_bs_reload(&bs, NULL); 2411 2412 /* Snapshot should be removed as blob is not pointing to it anymore */ 2413 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2414 poll_threads(); 2415 CU_ASSERT(g_bserrno != 0); 2416 } 2417 2418 static void 2419 bs_load_custom_cluster_size(void) 2420 { 2421 struct spdk_blob_store *bs; 2422 struct spdk_bs_dev *dev; 2423 struct spdk_bs_super_block *super_block; 2424 struct spdk_bs_opts opts; 2425 uint32_t custom_cluster_size = 4194304; /* 4MiB */ 2426 uint32_t cluster_sz; 2427 uint64_t total_clusters; 2428 2429 dev = init_dev(); 2430 spdk_bs_opts_init(&opts); 2431 opts.cluster_sz = custom_cluster_size; 2432 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2433 2434 /* Initialize a new blob store */ 2435 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2436 poll_threads(); 2437 CU_ASSERT(g_bserrno == 0); 2438 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2439 bs = g_bs; 2440 cluster_sz = bs->cluster_sz; 2441 total_clusters = bs->total_clusters; 2442 2443 /* Unload the blob store */ 2444 spdk_bs_unload(bs, bs_op_complete, NULL); 2445 poll_threads(); 2446 CU_ASSERT(g_bserrno == 0); 2447 g_bs = NULL; 2448 g_blob = NULL; 2449 g_blobid = 0; 2450 2451 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2452 CU_ASSERT(super_block->clean == 1); 2453 2454 /* Load an existing blob store */ 2455 dev = init_dev(); 2456 spdk_bs_opts_init(&opts); 2457 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2458 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2459 poll_threads(); 2460 CU_ASSERT(g_bserrno == 0); 2461 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2462 bs = g_bs; 2463 /* Compare cluster size and number to one after initialization */ 2464 CU_ASSERT(cluster_sz == bs->cluster_sz); 2465 CU_ASSERT(total_clusters == bs->total_clusters); 2466 2467 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2468 CU_ASSERT(super_block->clean == 1); 2469 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2470 2471 spdk_bs_unload(bs, bs_op_complete, NULL); 2472 poll_threads(); 2473 CU_ASSERT(g_bserrno == 0); 2474 CU_ASSERT(super_block->clean == 1); 2475 g_bs = NULL; 2476 } 2477 2478 static void 2479 bs_type(void) 2480 { 2481 struct spdk_blob_store *bs; 2482 struct spdk_bs_dev *dev; 2483 struct spdk_bs_opts opts; 2484 2485 dev = init_dev(); 2486 spdk_bs_opts_init(&opts); 2487 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2488 2489 /* Initialize a new blob store */ 2490 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2491 poll_threads(); 2492 CU_ASSERT(g_bserrno == 0); 2493 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2494 bs = g_bs; 2495 2496 /* Unload the blob store */ 2497 spdk_bs_unload(bs, bs_op_complete, NULL); 2498 poll_threads(); 2499 CU_ASSERT(g_bserrno == 0); 2500 g_bs = NULL; 2501 g_blob = NULL; 2502 g_blobid = 0; 2503 2504 /* Load non existing blobstore type */ 2505 dev = init_dev(); 2506 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2507 
spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2508 poll_threads(); 2509 CU_ASSERT(g_bserrno != 0); 2510 2511 /* Load with empty blobstore type */ 2512 dev = init_dev(); 2513 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2514 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2515 poll_threads(); 2516 CU_ASSERT(g_bserrno == 0); 2517 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2518 bs = g_bs; 2519 2520 spdk_bs_unload(bs, bs_op_complete, NULL); 2521 poll_threads(); 2522 CU_ASSERT(g_bserrno == 0); 2523 g_bs = NULL; 2524 2525 /* Initialize a new blob store with empty bstype */ 2526 dev = init_dev(); 2527 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2528 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2529 poll_threads(); 2530 CU_ASSERT(g_bserrno == 0); 2531 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2532 bs = g_bs; 2533 2534 spdk_bs_unload(bs, bs_op_complete, NULL); 2535 poll_threads(); 2536 CU_ASSERT(g_bserrno == 0); 2537 g_bs = NULL; 2538 2539 /* Load non existing blobstore type */ 2540 dev = init_dev(); 2541 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2542 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2543 poll_threads(); 2544 CU_ASSERT(g_bserrno != 0); 2545 2546 /* Load with empty blobstore type */ 2547 dev = init_dev(); 2548 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2549 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2550 poll_threads(); 2551 CU_ASSERT(g_bserrno == 0); 2552 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2553 bs = g_bs; 2554 2555 spdk_bs_unload(bs, bs_op_complete, NULL); 2556 poll_threads(); 2557 CU_ASSERT(g_bserrno == 0); 2558 g_bs = NULL; 2559 } 2560 2561 static void 2562 bs_super_block(void) 2563 { 2564 struct spdk_blob_store *bs; 2565 struct spdk_bs_dev *dev; 2566 struct spdk_bs_super_block *super_block; 2567 struct spdk_bs_opts opts; 2568 struct spdk_bs_super_block_ver1 super_block_v1; 2569 2570 dev = init_dev(); 2571 spdk_bs_opts_init(&opts); 2572 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2573 2574 /* Initialize a new blob store */ 2575 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2576 poll_threads(); 2577 CU_ASSERT(g_bserrno == 0); 2578 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2579 bs = g_bs; 2580 2581 /* Unload the blob store */ 2582 spdk_bs_unload(bs, bs_op_complete, NULL); 2583 poll_threads(); 2584 CU_ASSERT(g_bserrno == 0); 2585 g_bs = NULL; 2586 g_blob = NULL; 2587 g_blobid = 0; 2588 2589 /* Load an existing blob store with version newer than supported */ 2590 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2591 super_block->version++; 2592 2593 dev = init_dev(); 2594 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2595 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2596 poll_threads(); 2597 CU_ASSERT(g_bserrno != 0); 2598 2599 /* Create a new blob store with super block version 1 */ 2600 dev = init_dev(); 2601 super_block_v1.version = 1; 2602 memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); 2603 super_block_v1.length = 0x1000; 2604 super_block_v1.clean = 1; 2605 super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; 2606 super_block_v1.cluster_size = 0x100000; 2607 super_block_v1.used_page_mask_start = 0x01; 2608 super_block_v1.used_page_mask_len = 0x01; 2609 super_block_v1.used_cluster_mask_start = 0x02; 2610 super_block_v1.used_cluster_mask_len = 0x01; 2611 super_block_v1.md_start = 0x03; 2612 super_block_v1.md_len = 0x40; 2613 memset(super_block_v1.reserved, 
0, 4036); 2614 super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1); 2615 memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); 2616 2617 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2618 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2619 poll_threads(); 2620 CU_ASSERT(g_bserrno == 0); 2621 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2622 bs = g_bs; 2623 2624 spdk_bs_unload(bs, bs_op_complete, NULL); 2625 poll_threads(); 2626 CU_ASSERT(g_bserrno == 0); 2627 g_bs = NULL; 2628 } 2629 2630 /* 2631 * Create a blobstore and then unload it. 2632 */ 2633 static void 2634 bs_unload(void) 2635 { 2636 struct spdk_blob_store *bs = g_bs; 2637 struct spdk_blob *blob; 2638 2639 /* Create a blob and open it. */ 2640 blob = ut_blob_create_and_open(bs, NULL); 2641 2642 /* Try to unload blobstore, should fail with open blob */ 2643 g_bserrno = -1; 2644 spdk_bs_unload(bs, bs_op_complete, NULL); 2645 poll_threads(); 2646 CU_ASSERT(g_bserrno == -EBUSY); 2647 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2648 2649 /* Close the blob, then successfully unload blobstore */ 2650 g_bserrno = -1; 2651 spdk_blob_close(blob, blob_op_complete, NULL); 2652 poll_threads(); 2653 CU_ASSERT(g_bserrno == 0); 2654 } 2655 2656 /* 2657 * Create a blobstore with a cluster size different than the default, and ensure it is 2658 * persisted. 2659 */ 2660 static void 2661 bs_cluster_sz(void) 2662 { 2663 struct spdk_blob_store *bs; 2664 struct spdk_bs_dev *dev; 2665 struct spdk_bs_opts opts; 2666 uint32_t cluster_sz; 2667 2668 /* Set cluster size to zero */ 2669 dev = init_dev(); 2670 spdk_bs_opts_init(&opts); 2671 opts.cluster_sz = 0; 2672 2673 /* Initialize a new blob store */ 2674 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2675 poll_threads(); 2676 CU_ASSERT(g_bserrno == -EINVAL); 2677 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2678 2679 /* 2680 * Set cluster size to blobstore page size, 2681 * to work it is required to be at least twice the blobstore page size. 2682 */ 2683 dev = init_dev(); 2684 spdk_bs_opts_init(&opts); 2685 opts.cluster_sz = SPDK_BS_PAGE_SIZE; 2686 2687 /* Initialize a new blob store */ 2688 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2689 poll_threads(); 2690 CU_ASSERT(g_bserrno == -ENOMEM); 2691 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2692 2693 /* 2694 * Set cluster size to lower than page size, 2695 * to work it is required to be at least twice the blobstore page size. 
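	 * Expect -EINVAL here, in contrast to the -ENOMEM returned above when the
	 * cluster size equals exactly one page.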
2696 */ 2697 dev = init_dev(); 2698 spdk_bs_opts_init(&opts); 2699 opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; 2700 2701 /* Initialize a new blob store */ 2702 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2703 poll_threads(); 2704 CU_ASSERT(g_bserrno == -EINVAL); 2705 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2706 2707 /* Set cluster size to twice the default */ 2708 dev = init_dev(); 2709 spdk_bs_opts_init(&opts); 2710 opts.cluster_sz *= 2; 2711 cluster_sz = opts.cluster_sz; 2712 2713 /* Initialize a new blob store */ 2714 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2715 poll_threads(); 2716 CU_ASSERT(g_bserrno == 0); 2717 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2718 bs = g_bs; 2719 2720 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2721 2722 ut_bs_reload(&bs, &opts); 2723 2724 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2725 2726 spdk_bs_unload(bs, bs_op_complete, NULL); 2727 poll_threads(); 2728 CU_ASSERT(g_bserrno == 0); 2729 g_bs = NULL; 2730 } 2731 2732 /* 2733 * Create a blobstore, reload it and ensure total usable cluster count 2734 * stays the same. 2735 */ 2736 static void 2737 bs_usable_clusters(void) 2738 { 2739 struct spdk_blob_store *bs = g_bs; 2740 struct spdk_blob *blob; 2741 uint32_t clusters; 2742 int i; 2743 2744 2745 clusters = spdk_bs_total_data_cluster_count(bs); 2746 2747 ut_bs_reload(&bs, NULL); 2748 2749 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2750 2751 /* Create and resize blobs to make sure that useable cluster count won't change */ 2752 for (i = 0; i < 4; i++) { 2753 g_bserrno = -1; 2754 g_blobid = SPDK_BLOBID_INVALID; 2755 blob = ut_blob_create_and_open(bs, NULL); 2756 2757 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2758 poll_threads(); 2759 CU_ASSERT(g_bserrno == 0); 2760 2761 g_bserrno = -1; 2762 spdk_blob_close(blob, blob_op_complete, NULL); 2763 poll_threads(); 2764 CU_ASSERT(g_bserrno == 0); 2765 2766 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2767 } 2768 2769 /* Reload the blob store to make sure that nothing changed */ 2770 ut_bs_reload(&bs, NULL); 2771 2772 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2773 } 2774 2775 /* 2776 * Test resizing of the metadata blob. This requires creating enough blobs 2777 * so that one cluster is not enough to fit the metadata for those blobs. 2778 * To induce this condition to happen more quickly, we reduce the cluster 2779 * size to 16KB, which means only 4 4KB blob metadata pages can fit. 
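 * With 16 blobs created below, the metadata can no longer fit in the first
 * metadata cluster and the md blob has to be resized.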
2780 */ 2781 static void 2782 bs_resize_md(void) 2783 { 2784 struct spdk_blob_store *bs; 2785 const int CLUSTER_PAGE_COUNT = 4; 2786 const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; 2787 struct spdk_bs_dev *dev; 2788 struct spdk_bs_opts opts; 2789 struct spdk_blob *blob; 2790 struct spdk_blob_opts blob_opts; 2791 uint32_t cluster_sz; 2792 spdk_blob_id blobids[NUM_BLOBS]; 2793 int i; 2794 2795 2796 dev = init_dev(); 2797 spdk_bs_opts_init(&opts); 2798 opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; 2799 cluster_sz = opts.cluster_sz; 2800 2801 /* Initialize a new blob store */ 2802 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2803 poll_threads(); 2804 CU_ASSERT(g_bserrno == 0); 2805 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2806 bs = g_bs; 2807 2808 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2809 2810 ut_spdk_blob_opts_init(&blob_opts); 2811 2812 for (i = 0; i < NUM_BLOBS; i++) { 2813 g_bserrno = -1; 2814 g_blobid = SPDK_BLOBID_INVALID; 2815 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2816 poll_threads(); 2817 CU_ASSERT(g_bserrno == 0); 2818 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2819 blobids[i] = g_blobid; 2820 } 2821 2822 ut_bs_reload(&bs, &opts); 2823 2824 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2825 2826 for (i = 0; i < NUM_BLOBS; i++) { 2827 g_bserrno = -1; 2828 g_blob = NULL; 2829 spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); 2830 poll_threads(); 2831 CU_ASSERT(g_bserrno == 0); 2832 CU_ASSERT(g_blob != NULL); 2833 blob = g_blob; 2834 g_bserrno = -1; 2835 spdk_blob_close(blob, blob_op_complete, NULL); 2836 poll_threads(); 2837 CU_ASSERT(g_bserrno == 0); 2838 } 2839 2840 spdk_bs_unload(bs, bs_op_complete, NULL); 2841 poll_threads(); 2842 CU_ASSERT(g_bserrno == 0); 2843 g_bs = NULL; 2844 } 2845 2846 static void 2847 bs_destroy(void) 2848 { 2849 struct spdk_blob_store *bs; 2850 struct spdk_bs_dev *dev; 2851 2852 /* Initialize a new blob store */ 2853 dev = init_dev(); 2854 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2855 poll_threads(); 2856 CU_ASSERT(g_bserrno == 0); 2857 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2858 bs = g_bs; 2859 2860 /* Destroy the blob store */ 2861 g_bserrno = -1; 2862 spdk_bs_destroy(bs, bs_op_complete, NULL); 2863 poll_threads(); 2864 CU_ASSERT(g_bserrno == 0); 2865 2866 /* Loading an non-existent blob store should fail. 
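	 * The blobstore on the device was just destroyed, so the load below is
	 * expected to report an error.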
*/ 2867 g_bs = NULL; 2868 dev = init_dev(); 2869 2870 g_bserrno = 0; 2871 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2872 poll_threads(); 2873 CU_ASSERT(g_bserrno != 0); 2874 } 2875 2876 /* Try to hit all of the corner cases associated with serializing 2877 * a blob to disk 2878 */ 2879 static void 2880 blob_serialize_test(void) 2881 { 2882 struct spdk_bs_dev *dev; 2883 struct spdk_bs_opts opts; 2884 struct spdk_blob_store *bs; 2885 spdk_blob_id blobid[2]; 2886 struct spdk_blob *blob[2]; 2887 uint64_t i; 2888 char *value; 2889 int rc; 2890 2891 dev = init_dev(); 2892 2893 /* Initialize a new blobstore with very small clusters */ 2894 spdk_bs_opts_init(&opts); 2895 opts.cluster_sz = dev->blocklen * 8; 2896 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2897 poll_threads(); 2898 CU_ASSERT(g_bserrno == 0); 2899 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2900 bs = g_bs; 2901 2902 /* Create and open two blobs */ 2903 for (i = 0; i < 2; i++) { 2904 blob[i] = ut_blob_create_and_open(bs, NULL); 2905 blobid[i] = spdk_blob_get_id(blob[i]); 2906 2907 /* Set a fairly large xattr on both blobs to eat up 2908 * metadata space 2909 */ 2910 value = calloc(dev->blocklen - 64, sizeof(char)); 2911 SPDK_CU_ASSERT_FATAL(value != NULL); 2912 memset(value, i, dev->blocklen / 2); 2913 rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); 2914 CU_ASSERT(rc == 0); 2915 free(value); 2916 } 2917 2918 /* Resize the blobs, alternating 1 cluster at a time. 2919 * This thwarts run length encoding and will cause spill 2920 * over of the extents. 2921 */ 2922 for (i = 0; i < 6; i++) { 2923 spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); 2924 poll_threads(); 2925 CU_ASSERT(g_bserrno == 0); 2926 } 2927 2928 for (i = 0; i < 2; i++) { 2929 spdk_blob_sync_md(blob[i], blob_op_complete, NULL); 2930 poll_threads(); 2931 CU_ASSERT(g_bserrno == 0); 2932 } 2933 2934 /* Close the blobs */ 2935 for (i = 0; i < 2; i++) { 2936 spdk_blob_close(blob[i], blob_op_complete, NULL); 2937 poll_threads(); 2938 CU_ASSERT(g_bserrno == 0); 2939 } 2940 2941 ut_bs_reload(&bs, &opts); 2942 2943 for (i = 0; i < 2; i++) { 2944 blob[i] = NULL; 2945 2946 spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); 2947 poll_threads(); 2948 CU_ASSERT(g_bserrno == 0); 2949 CU_ASSERT(g_blob != NULL); 2950 blob[i] = g_blob; 2951 2952 CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); 2953 2954 spdk_blob_close(blob[i], blob_op_complete, NULL); 2955 poll_threads(); 2956 CU_ASSERT(g_bserrno == 0); 2957 } 2958 2959 spdk_bs_unload(bs, bs_op_complete, NULL); 2960 poll_threads(); 2961 CU_ASSERT(g_bserrno == 0); 2962 g_bs = NULL; 2963 } 2964 2965 static void 2966 blob_crc(void) 2967 { 2968 struct spdk_blob_store *bs = g_bs; 2969 struct spdk_blob *blob; 2970 spdk_blob_id blobid; 2971 uint32_t page_num; 2972 int index; 2973 struct spdk_blob_md_page *page; 2974 2975 blob = ut_blob_create_and_open(bs, NULL); 2976 blobid = spdk_blob_get_id(blob); 2977 2978 spdk_blob_close(blob, blob_op_complete, NULL); 2979 poll_threads(); 2980 CU_ASSERT(g_bserrno == 0); 2981 2982 page_num = bs_blobid_to_page(blobid); 2983 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 2984 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 2985 page->crc = 0; 2986 2987 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2988 poll_threads(); 2989 CU_ASSERT(g_bserrno == -EINVAL); 2990 CU_ASSERT(g_blob == NULL); 2991 g_bserrno = 0; 2992 2993 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 2994 
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
}

static void
super_block_crc(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	super_block->crc = 0;
	dev = init_dev();

	/* Load an existing blob store */
	g_bserrno = 0;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EILSEQ);
}

/* For the blob dirty shutdown test case we run the following sub-tests:
 * 1 Initialize a new blob store and create one super blob with some xattrs,
 *   then dirty shutdown, reload the blob store and verify the xattrs.
 * 2 Resize the blob from 10 clusters to 20 clusters, then dirty shutdown,
 *   reload the blob store and verify the cluster count.
 * 3 Create a second blob, then dirty shutdown, reload the blob store
 *   and verify the second blob.
 * 4 Delete the second blob, then dirty shutdown, reload the blob store
 *   and verify that the second blob is invalid.
 * 5 Create the second blob again along with a third blob, corrupt the md of
 *   the second blob, then dirty shutdown and reload the blob store; verify
 *   that the second blob is invalid and that the third blob is correct.
 */
static void
blob_dirty_shutdown(void)
{
	int rc;
	int index;
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid1, blobid2, blobid3;
	struct spdk_blob *blob = g_blob;
	uint64_t length;
	uint64_t free_clusters;
	const void *value;
	size_t value_len;
	uint32_t page_num;
	struct spdk_blob_md_page *page;
	struct spdk_blob_opts blob_opts;

	/* Create first blob */
	blobid1 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Put an xattr that fits exactly in a single page.
	 * This results in adding additional pages to the MD.
	 * The first holds the flags and the smaller xattr, the second the large
	 * xattr, and the third just the extents.
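	 * (The 4072 below appears to be the descriptor area of a 4KB metadata page,
	 * so the large xattr descriptor exactly fills one page.)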
3073 */ 3074 size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - 3075 strlen("large_xattr"); 3076 char *xattr = calloc(xattr_length, sizeof(char)); 3077 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3078 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3079 free(xattr); 3080 SPDK_CU_ASSERT_FATAL(rc == 0); 3081 3082 /* Resize the blob */ 3083 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3084 poll_threads(); 3085 CU_ASSERT(g_bserrno == 0); 3086 3087 /* Set the blob as the super blob */ 3088 spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); 3089 poll_threads(); 3090 CU_ASSERT(g_bserrno == 0); 3091 3092 free_clusters = spdk_bs_free_cluster_count(bs); 3093 3094 spdk_blob_close(blob, blob_op_complete, NULL); 3095 poll_threads(); 3096 CU_ASSERT(g_bserrno == 0); 3097 blob = NULL; 3098 g_blob = NULL; 3099 g_blobid = SPDK_BLOBID_INVALID; 3100 3101 ut_bs_dirty_load(&bs, NULL); 3102 3103 /* Get the super blob */ 3104 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 3105 poll_threads(); 3106 CU_ASSERT(g_bserrno == 0); 3107 CU_ASSERT(blobid1 == g_blobid); 3108 3109 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3110 poll_threads(); 3111 CU_ASSERT(g_bserrno == 0); 3112 CU_ASSERT(g_blob != NULL); 3113 blob = g_blob; 3114 3115 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3116 3117 /* Get the xattrs */ 3118 value = NULL; 3119 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3120 CU_ASSERT(rc == 0); 3121 SPDK_CU_ASSERT_FATAL(value != NULL); 3122 CU_ASSERT(*(uint64_t *)value == length); 3123 CU_ASSERT(value_len == 8); 3124 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3125 3126 /* Resize the blob */ 3127 spdk_blob_resize(blob, 20, blob_op_complete, NULL); 3128 poll_threads(); 3129 CU_ASSERT(g_bserrno == 0); 3130 3131 free_clusters = spdk_bs_free_cluster_count(bs); 3132 3133 spdk_blob_close(blob, blob_op_complete, NULL); 3134 poll_threads(); 3135 CU_ASSERT(g_bserrno == 0); 3136 blob = NULL; 3137 g_blob = NULL; 3138 g_blobid = SPDK_BLOBID_INVALID; 3139 3140 ut_bs_dirty_load(&bs, NULL); 3141 3142 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3143 poll_threads(); 3144 CU_ASSERT(g_bserrno == 0); 3145 CU_ASSERT(g_blob != NULL); 3146 blob = g_blob; 3147 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); 3148 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3149 3150 spdk_blob_close(blob, blob_op_complete, NULL); 3151 poll_threads(); 3152 CU_ASSERT(g_bserrno == 0); 3153 blob = NULL; 3154 g_blob = NULL; 3155 g_blobid = SPDK_BLOBID_INVALID; 3156 3157 /* Create second blob */ 3158 blob = ut_blob_create_and_open(bs, NULL); 3159 blobid2 = spdk_blob_get_id(blob); 3160 3161 /* Set some xattrs */ 3162 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3163 CU_ASSERT(rc == 0); 3164 3165 length = 5432; 3166 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3167 CU_ASSERT(rc == 0); 3168 3169 /* Resize the blob */ 3170 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3171 poll_threads(); 3172 CU_ASSERT(g_bserrno == 0); 3173 3174 free_clusters = spdk_bs_free_cluster_count(bs); 3175 3176 spdk_blob_close(blob, blob_op_complete, NULL); 3177 poll_threads(); 3178 CU_ASSERT(g_bserrno == 0); 3179 blob = NULL; 3180 g_blob = NULL; 3181 g_blobid = SPDK_BLOBID_INVALID; 3182 3183 ut_bs_dirty_load(&bs, NULL); 3184 3185 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3186 poll_threads(); 3187 CU_ASSERT(g_bserrno == 0); 3188 
CU_ASSERT(g_blob != NULL); 3189 blob = g_blob; 3190 3191 /* Get the xattrs */ 3192 value = NULL; 3193 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3194 CU_ASSERT(rc == 0); 3195 SPDK_CU_ASSERT_FATAL(value != NULL); 3196 CU_ASSERT(*(uint64_t *)value == length); 3197 CU_ASSERT(value_len == 8); 3198 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3199 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3200 3201 ut_blob_close_and_delete(bs, blob); 3202 3203 free_clusters = spdk_bs_free_cluster_count(bs); 3204 3205 ut_bs_dirty_load(&bs, NULL); 3206 3207 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3208 poll_threads(); 3209 CU_ASSERT(g_bserrno != 0); 3210 CU_ASSERT(g_blob == NULL); 3211 3212 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3213 poll_threads(); 3214 CU_ASSERT(g_bserrno == 0); 3215 CU_ASSERT(g_blob != NULL); 3216 blob = g_blob; 3217 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3218 spdk_blob_close(blob, blob_op_complete, NULL); 3219 poll_threads(); 3220 CU_ASSERT(g_bserrno == 0); 3221 3222 ut_bs_reload(&bs, NULL); 3223 3224 /* Create second blob */ 3225 ut_spdk_blob_opts_init(&blob_opts); 3226 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3227 poll_threads(); 3228 CU_ASSERT(g_bserrno == 0); 3229 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3230 blobid2 = g_blobid; 3231 3232 /* Create third blob */ 3233 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3234 poll_threads(); 3235 CU_ASSERT(g_bserrno == 0); 3236 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3237 blobid3 = g_blobid; 3238 3239 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3240 poll_threads(); 3241 CU_ASSERT(g_bserrno == 0); 3242 CU_ASSERT(g_blob != NULL); 3243 blob = g_blob; 3244 3245 /* Set some xattrs for second blob */ 3246 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3247 CU_ASSERT(rc == 0); 3248 3249 length = 5432; 3250 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3251 CU_ASSERT(rc == 0); 3252 3253 spdk_blob_close(blob, blob_op_complete, NULL); 3254 poll_threads(); 3255 CU_ASSERT(g_bserrno == 0); 3256 blob = NULL; 3257 g_blob = NULL; 3258 g_blobid = SPDK_BLOBID_INVALID; 3259 3260 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3261 poll_threads(); 3262 CU_ASSERT(g_bserrno == 0); 3263 CU_ASSERT(g_blob != NULL); 3264 blob = g_blob; 3265 3266 /* Set some xattrs for third blob */ 3267 rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); 3268 CU_ASSERT(rc == 0); 3269 3270 length = 5432; 3271 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3272 CU_ASSERT(rc == 0); 3273 3274 spdk_blob_close(blob, blob_op_complete, NULL); 3275 poll_threads(); 3276 CU_ASSERT(g_bserrno == 0); 3277 blob = NULL; 3278 g_blob = NULL; 3279 g_blobid = SPDK_BLOBID_INVALID; 3280 3281 /* Mark second blob as invalid */ 3282 page_num = bs_blobid_to_page(blobid2); 3283 3284 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3285 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3286 page->sequence_num = 1; 3287 page->crc = blob_md_page_calc_crc(page); 3288 3289 free_clusters = spdk_bs_free_cluster_count(bs); 3290 3291 ut_bs_dirty_load(&bs, NULL); 3292 3293 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3294 poll_threads(); 3295 CU_ASSERT(g_bserrno != 0); 3296 CU_ASSERT(g_blob == NULL); 3297 3298 spdk_bs_open_blob(bs, blobid3, 
blob_op_with_handle_complete, NULL); 3299 poll_threads(); 3300 CU_ASSERT(g_bserrno == 0); 3301 CU_ASSERT(g_blob != NULL); 3302 blob = g_blob; 3303 3304 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3305 } 3306 3307 static void 3308 blob_flags(void) 3309 { 3310 struct spdk_blob_store *bs = g_bs; 3311 spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; 3312 struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; 3313 struct spdk_blob_opts blob_opts; 3314 int rc; 3315 3316 /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */ 3317 blob_invalid = ut_blob_create_and_open(bs, NULL); 3318 blobid_invalid = spdk_blob_get_id(blob_invalid); 3319 3320 blob_data_ro = ut_blob_create_and_open(bs, NULL); 3321 blobid_data_ro = spdk_blob_get_id(blob_data_ro); 3322 3323 ut_spdk_blob_opts_init(&blob_opts); 3324 blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; 3325 blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); 3326 blobid_md_ro = spdk_blob_get_id(blob_md_ro); 3327 CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); 3328 3329 /* Change the size of blob_data_ro to check if flags are serialized 3330 * when blob has non zero number of extents */ 3331 spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); 3332 poll_threads(); 3333 CU_ASSERT(g_bserrno == 0); 3334 3335 /* Set the xattr to check if flags are serialized 3336 * when blob has non zero number of xattrs */ 3337 rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); 3338 CU_ASSERT(rc == 0); 3339 3340 blob_invalid->invalid_flags = (1ULL << 63); 3341 blob_invalid->state = SPDK_BLOB_STATE_DIRTY; 3342 blob_data_ro->data_ro_flags = (1ULL << 62); 3343 blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; 3344 blob_md_ro->md_ro_flags = (1ULL << 61); 3345 blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; 3346 3347 g_bserrno = -1; 3348 spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); 3349 poll_threads(); 3350 CU_ASSERT(g_bserrno == 0); 3351 g_bserrno = -1; 3352 spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); 3353 poll_threads(); 3354 CU_ASSERT(g_bserrno == 0); 3355 g_bserrno = -1; 3356 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3357 poll_threads(); 3358 CU_ASSERT(g_bserrno == 0); 3359 3360 g_bserrno = -1; 3361 spdk_blob_close(blob_invalid, blob_op_complete, NULL); 3362 poll_threads(); 3363 CU_ASSERT(g_bserrno == 0); 3364 blob_invalid = NULL; 3365 g_bserrno = -1; 3366 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3367 poll_threads(); 3368 CU_ASSERT(g_bserrno == 0); 3369 blob_data_ro = NULL; 3370 g_bserrno = -1; 3371 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3372 poll_threads(); 3373 CU_ASSERT(g_bserrno == 0); 3374 blob_md_ro = NULL; 3375 3376 g_blob = NULL; 3377 g_blobid = SPDK_BLOBID_INVALID; 3378 3379 ut_bs_reload(&bs, NULL); 3380 3381 g_blob = NULL; 3382 g_bserrno = 0; 3383 spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); 3384 poll_threads(); 3385 CU_ASSERT(g_bserrno != 0); 3386 CU_ASSERT(g_blob == NULL); 3387 3388 g_blob = NULL; 3389 g_bserrno = -1; 3390 spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL); 3391 poll_threads(); 3392 CU_ASSERT(g_bserrno == 0); 3393 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3394 blob_data_ro = g_blob; 3395 /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. 
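	 * An unknown md_ro flag, by contrast, only forces md_ro (checked further below).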
*/ 3396 CU_ASSERT(blob_data_ro->data_ro == true); 3397 CU_ASSERT(blob_data_ro->md_ro == true); 3398 CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); 3399 3400 g_blob = NULL; 3401 g_bserrno = -1; 3402 spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); 3403 poll_threads(); 3404 CU_ASSERT(g_bserrno == 0); 3405 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3406 blob_md_ro = g_blob; 3407 CU_ASSERT(blob_md_ro->data_ro == false); 3408 CU_ASSERT(blob_md_ro->md_ro == true); 3409 3410 g_bserrno = -1; 3411 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3412 poll_threads(); 3413 CU_ASSERT(g_bserrno == 0); 3414 3415 ut_blob_close_and_delete(bs, blob_data_ro); 3416 ut_blob_close_and_delete(bs, blob_md_ro); 3417 } 3418 3419 static void 3420 bs_version(void) 3421 { 3422 struct spdk_bs_super_block *super; 3423 struct spdk_blob_store *bs = g_bs; 3424 struct spdk_bs_dev *dev; 3425 struct spdk_blob *blob; 3426 struct spdk_blob_opts blob_opts; 3427 spdk_blob_id blobid; 3428 3429 /* Unload the blob store */ 3430 spdk_bs_unload(bs, bs_op_complete, NULL); 3431 poll_threads(); 3432 CU_ASSERT(g_bserrno == 0); 3433 g_bs = NULL; 3434 3435 /* 3436 * Change the bs version on disk. This will allow us to 3437 * test that the version does not get modified automatically 3438 * when loading and unloading the blobstore. 3439 */ 3440 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 3441 CU_ASSERT(super->version == SPDK_BS_VERSION); 3442 CU_ASSERT(super->clean == 1); 3443 super->version = 2; 3444 /* 3445 * Version 2 metadata does not have a used blobid mask, so clear 3446 * those fields in the super block and zero the corresponding 3447 * region on "disk". We will use this to ensure blob IDs are 3448 * correctly reconstructed. 3449 */ 3450 memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, 3451 super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); 3452 super->used_blobid_mask_start = 0; 3453 super->used_blobid_mask_len = 0; 3454 super->crc = blob_md_page_calc_crc(super); 3455 3456 /* Load an existing blob store */ 3457 dev = init_dev(); 3458 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3459 poll_threads(); 3460 CU_ASSERT(g_bserrno == 0); 3461 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3462 CU_ASSERT(super->clean == 1); 3463 bs = g_bs; 3464 3465 /* 3466 * Create a blob - just to make sure that when we unload it 3467 * results in writing the super block (since metadata pages 3468 * were allocated. 
3469 */ 3470 ut_spdk_blob_opts_init(&blob_opts); 3471 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3472 poll_threads(); 3473 CU_ASSERT(g_bserrno == 0); 3474 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3475 blobid = g_blobid; 3476 3477 /* Unload the blob store */ 3478 spdk_bs_unload(bs, bs_op_complete, NULL); 3479 poll_threads(); 3480 CU_ASSERT(g_bserrno == 0); 3481 g_bs = NULL; 3482 CU_ASSERT(super->version == 2); 3483 CU_ASSERT(super->used_blobid_mask_start == 0); 3484 CU_ASSERT(super->used_blobid_mask_len == 0); 3485 3486 dev = init_dev(); 3487 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3488 poll_threads(); 3489 CU_ASSERT(g_bserrno == 0); 3490 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3491 bs = g_bs; 3492 3493 g_blob = NULL; 3494 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3495 poll_threads(); 3496 CU_ASSERT(g_bserrno == 0); 3497 CU_ASSERT(g_blob != NULL); 3498 blob = g_blob; 3499 3500 ut_blob_close_and_delete(bs, blob); 3501 3502 CU_ASSERT(super->version == 2); 3503 CU_ASSERT(super->used_blobid_mask_start == 0); 3504 CU_ASSERT(super->used_blobid_mask_len == 0); 3505 } 3506 3507 static void 3508 blob_set_xattrs_test(void) 3509 { 3510 struct spdk_blob_store *bs = g_bs; 3511 struct spdk_blob *blob; 3512 struct spdk_blob_opts opts; 3513 const void *value; 3514 size_t value_len; 3515 char *xattr; 3516 size_t xattr_length; 3517 int rc; 3518 3519 /* Create blob with extra attributes */ 3520 ut_spdk_blob_opts_init(&opts); 3521 3522 opts.xattrs.names = g_xattr_names; 3523 opts.xattrs.get_value = _get_xattr_value; 3524 opts.xattrs.count = 3; 3525 opts.xattrs.ctx = &g_ctx; 3526 3527 blob = ut_blob_create_and_open(bs, &opts); 3528 3529 /* Get the xattrs */ 3530 value = NULL; 3531 3532 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 3533 CU_ASSERT(rc == 0); 3534 SPDK_CU_ASSERT_FATAL(value != NULL); 3535 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 3536 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 3537 3538 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 3539 CU_ASSERT(rc == 0); 3540 SPDK_CU_ASSERT_FATAL(value != NULL); 3541 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 3542 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 3543 3544 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 3545 CU_ASSERT(rc == 0); 3546 SPDK_CU_ASSERT_FATAL(value != NULL); 3547 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 3548 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 3549 3550 /* Try to get non existing attribute */ 3551 3552 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 3553 CU_ASSERT(rc == -ENOENT); 3554 3555 /* Try xattr exceeding maximum length of descriptor in single page */ 3556 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 3557 strlen("large_xattr") + 1; 3558 xattr = calloc(xattr_length, sizeof(char)); 3559 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3560 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3561 free(xattr); 3562 SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); 3563 3564 spdk_blob_close(blob, blob_op_complete, NULL); 3565 poll_threads(); 3566 CU_ASSERT(g_bserrno == 0); 3567 blob = NULL; 3568 g_blob = NULL; 3569 g_blobid = SPDK_BLOBID_INVALID; 3570 3571 /* NULL callback */ 3572 ut_spdk_blob_opts_init(&opts); 3573 opts.xattrs.names = g_xattr_names; 3574 opts.xattrs.get_value = NULL; 3575 opts.xattrs.count = 1; 3576 
opts.xattrs.ctx = &g_ctx; 3577 3578 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3579 poll_threads(); 3580 CU_ASSERT(g_bserrno == -EINVAL); 3581 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3582 3583 /* NULL values */ 3584 ut_spdk_blob_opts_init(&opts); 3585 opts.xattrs.names = g_xattr_names; 3586 opts.xattrs.get_value = _get_xattr_value_null; 3587 opts.xattrs.count = 1; 3588 opts.xattrs.ctx = NULL; 3589 3590 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3591 poll_threads(); 3592 CU_ASSERT(g_bserrno == -EINVAL); 3593 } 3594 3595 static void 3596 blob_thin_prov_alloc(void) 3597 { 3598 struct spdk_blob_store *bs = g_bs; 3599 struct spdk_blob *blob; 3600 struct spdk_blob_opts opts; 3601 spdk_blob_id blobid; 3602 uint64_t free_clusters; 3603 3604 free_clusters = spdk_bs_free_cluster_count(bs); 3605 3606 /* Set blob as thin provisioned */ 3607 ut_spdk_blob_opts_init(&opts); 3608 opts.thin_provision = true; 3609 3610 blob = ut_blob_create_and_open(bs, &opts); 3611 blobid = spdk_blob_get_id(blob); 3612 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3613 3614 CU_ASSERT(blob->active.num_clusters == 0); 3615 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 3616 3617 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3618 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3619 poll_threads(); 3620 CU_ASSERT(g_bserrno == 0); 3621 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3622 CU_ASSERT(blob->active.num_clusters == 5); 3623 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 3624 3625 /* Grow it to 1TB - still unallocated */ 3626 spdk_blob_resize(blob, 262144, blob_op_complete, NULL); 3627 poll_threads(); 3628 CU_ASSERT(g_bserrno == 0); 3629 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3630 CU_ASSERT(blob->active.num_clusters == 262144); 3631 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3632 3633 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3634 poll_threads(); 3635 CU_ASSERT(g_bserrno == 0); 3636 /* Sync must not change anything */ 3637 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3638 CU_ASSERT(blob->active.num_clusters == 262144); 3639 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3640 /* Since clusters are not allocated, 3641 * number of metadata pages is expected to be minimal. 
3642 */ 3643 CU_ASSERT(blob->active.num_pages == 1); 3644 3645 /* Shrink the blob to 3 clusters - still unallocated */ 3646 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 3647 poll_threads(); 3648 CU_ASSERT(g_bserrno == 0); 3649 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3650 CU_ASSERT(blob->active.num_clusters == 3); 3651 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3652 3653 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3654 poll_threads(); 3655 CU_ASSERT(g_bserrno == 0); 3656 /* Sync must not change anything */ 3657 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3658 CU_ASSERT(blob->active.num_clusters == 3); 3659 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3660 3661 spdk_blob_close(blob, blob_op_complete, NULL); 3662 poll_threads(); 3663 CU_ASSERT(g_bserrno == 0); 3664 3665 ut_bs_reload(&bs, NULL); 3666 3667 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3668 poll_threads(); 3669 CU_ASSERT(g_bserrno == 0); 3670 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3671 blob = g_blob; 3672 3673 /* Check that clusters allocation and size is still the same */ 3674 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3675 CU_ASSERT(blob->active.num_clusters == 3); 3676 3677 ut_blob_close_and_delete(bs, blob); 3678 } 3679 3680 static void 3681 blob_insert_cluster_msg_test(void) 3682 { 3683 struct spdk_blob_store *bs = g_bs; 3684 struct spdk_blob *blob; 3685 struct spdk_blob_opts opts; 3686 spdk_blob_id blobid; 3687 uint64_t free_clusters; 3688 uint64_t new_cluster = 0; 3689 uint32_t cluster_num = 3; 3690 uint32_t extent_page = 0; 3691 3692 free_clusters = spdk_bs_free_cluster_count(bs); 3693 3694 /* Set blob as thin provisioned */ 3695 ut_spdk_blob_opts_init(&opts); 3696 opts.thin_provision = true; 3697 opts.num_clusters = 4; 3698 3699 blob = ut_blob_create_and_open(bs, &opts); 3700 blobid = spdk_blob_get_id(blob); 3701 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3702 3703 CU_ASSERT(blob->active.num_clusters == 4); 3704 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); 3705 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3706 3707 /* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread. 3708 * This is to simulate behaviour when cluster is allocated after blob creation. 3709 * Such as _spdk_bs_allocate_and_copy_cluster(). 
*/ 3710 bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); 3711 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3712 3713 blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, 3714 blob_op_complete, NULL); 3715 poll_threads(); 3716 3717 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3718 3719 spdk_blob_close(blob, blob_op_complete, NULL); 3720 poll_threads(); 3721 CU_ASSERT(g_bserrno == 0); 3722 3723 ut_bs_reload(&bs, NULL); 3724 3725 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3726 poll_threads(); 3727 CU_ASSERT(g_bserrno == 0); 3728 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3729 blob = g_blob; 3730 3731 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3732 3733 ut_blob_close_and_delete(bs, blob); 3734 } 3735 3736 static void 3737 blob_thin_prov_rw(void) 3738 { 3739 static const uint8_t zero[10 * 4096] = { 0 }; 3740 struct spdk_blob_store *bs = g_bs; 3741 struct spdk_blob *blob; 3742 struct spdk_io_channel *channel, *channel_thread1; 3743 struct spdk_blob_opts opts; 3744 uint64_t free_clusters; 3745 uint64_t page_size; 3746 uint8_t payload_read[10 * 4096]; 3747 uint8_t payload_write[10 * 4096]; 3748 uint64_t write_bytes; 3749 uint64_t read_bytes; 3750 3751 free_clusters = spdk_bs_free_cluster_count(bs); 3752 page_size = spdk_bs_get_page_size(bs); 3753 3754 channel = spdk_bs_alloc_io_channel(bs); 3755 CU_ASSERT(channel != NULL); 3756 3757 ut_spdk_blob_opts_init(&opts); 3758 opts.thin_provision = true; 3759 3760 blob = ut_blob_create_and_open(bs, &opts); 3761 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3762 3763 CU_ASSERT(blob->active.num_clusters == 0); 3764 3765 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3766 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3767 poll_threads(); 3768 CU_ASSERT(g_bserrno == 0); 3769 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3770 CU_ASSERT(blob->active.num_clusters == 5); 3771 3772 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3773 poll_threads(); 3774 CU_ASSERT(g_bserrno == 0); 3775 /* Sync must not change anything */ 3776 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3777 CU_ASSERT(blob->active.num_clusters == 5); 3778 3779 /* Payload should be all zeros from unallocated clusters */ 3780 memset(payload_read, 0xFF, sizeof(payload_read)); 3781 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3782 poll_threads(); 3783 CU_ASSERT(g_bserrno == 0); 3784 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3785 3786 write_bytes = g_dev_write_bytes; 3787 read_bytes = g_dev_read_bytes; 3788 3789 /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ 3790 set_thread(1); 3791 channel_thread1 = spdk_bs_alloc_io_channel(bs); 3792 CU_ASSERT(channel_thread1 != NULL); 3793 memset(payload_write, 0xE5, sizeof(payload_write)); 3794 spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); 3795 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3796 /* Perform write on thread 0. That will try to allocate cluster, 3797 * but fail due to another thread issuing the cluster allocation first. 
*/ 3798 set_thread(0); 3799 memset(payload_write, 0xE5, sizeof(payload_write)); 3800 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 3801 CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); 3802 poll_threads(); 3803 CU_ASSERT(g_bserrno == 0); 3804 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3805 /* For thin-provisioned blob we need to write 20 pages plus one page metadata and 3806 * read 0 bytes */ 3807 if (g_use_extent_table) { 3808 /* Add one more page for EXTENT_PAGE write */ 3809 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); 3810 } else { 3811 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); 3812 } 3813 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3814 3815 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3816 poll_threads(); 3817 CU_ASSERT(g_bserrno == 0); 3818 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3819 3820 ut_blob_close_and_delete(bs, blob); 3821 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3822 3823 set_thread(1); 3824 spdk_bs_free_io_channel(channel_thread1); 3825 set_thread(0); 3826 spdk_bs_free_io_channel(channel); 3827 poll_threads(); 3828 g_blob = NULL; 3829 g_blobid = 0; 3830 } 3831 3832 static void 3833 blob_thin_prov_rle(void) 3834 { 3835 static const uint8_t zero[10 * 4096] = { 0 }; 3836 struct spdk_blob_store *bs = g_bs; 3837 struct spdk_blob *blob; 3838 struct spdk_io_channel *channel; 3839 struct spdk_blob_opts opts; 3840 spdk_blob_id blobid; 3841 uint64_t free_clusters; 3842 uint64_t page_size; 3843 uint8_t payload_read[10 * 4096]; 3844 uint8_t payload_write[10 * 4096]; 3845 uint64_t write_bytes; 3846 uint64_t read_bytes; 3847 uint64_t io_unit; 3848 3849 free_clusters = spdk_bs_free_cluster_count(bs); 3850 page_size = spdk_bs_get_page_size(bs); 3851 3852 ut_spdk_blob_opts_init(&opts); 3853 opts.thin_provision = true; 3854 opts.num_clusters = 5; 3855 3856 blob = ut_blob_create_and_open(bs, &opts); 3857 blobid = spdk_blob_get_id(blob); 3858 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3859 3860 channel = spdk_bs_alloc_io_channel(bs); 3861 CU_ASSERT(channel != NULL); 3862 3863 /* Target specifically second cluster in a blob as first allocation */ 3864 io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs); 3865 3866 /* Payload should be all zeros from unallocated clusters */ 3867 memset(payload_read, 0xFF, sizeof(payload_read)); 3868 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3869 poll_threads(); 3870 CU_ASSERT(g_bserrno == 0); 3871 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3872 3873 write_bytes = g_dev_write_bytes; 3874 read_bytes = g_dev_read_bytes; 3875 3876 /* Issue write to second cluster in a blob */ 3877 memset(payload_write, 0xE5, sizeof(payload_write)); 3878 spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); 3879 poll_threads(); 3880 CU_ASSERT(g_bserrno == 0); 3881 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3882 /* For thin-provisioned blob we need to write 10 pages plus one page metadata and 3883 * read 0 bytes */ 3884 if (g_use_extent_table) { 3885 /* Add one more page for EXTENT_PAGE write */ 3886 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); 3887 } else { 3888 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); 3889 } 3890 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3891 3892 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, 
blob_op_complete, NULL); 3893 poll_threads(); 3894 CU_ASSERT(g_bserrno == 0); 3895 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3896 3897 spdk_bs_free_io_channel(channel); 3898 poll_threads(); 3899 3900 spdk_blob_close(blob, blob_op_complete, NULL); 3901 poll_threads(); 3902 CU_ASSERT(g_bserrno == 0); 3903 3904 ut_bs_reload(&bs, NULL); 3905 3906 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3907 poll_threads(); 3908 CU_ASSERT(g_bserrno == 0); 3909 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3910 blob = g_blob; 3911 3912 channel = spdk_bs_alloc_io_channel(bs); 3913 CU_ASSERT(channel != NULL); 3914 3915 /* Read second cluster after blob reload to confirm data written */ 3916 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3917 poll_threads(); 3918 CU_ASSERT(g_bserrno == 0); 3919 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3920 3921 spdk_bs_free_io_channel(channel); 3922 poll_threads(); 3923 3924 ut_blob_close_and_delete(bs, blob); 3925 } 3926 3927 static void 3928 blob_thin_prov_rw_iov(void) 3929 { 3930 static const uint8_t zero[10 * 4096] = { 0 }; 3931 struct spdk_blob_store *bs = g_bs; 3932 struct spdk_blob *blob; 3933 struct spdk_io_channel *channel; 3934 struct spdk_blob_opts opts; 3935 uint64_t free_clusters; 3936 uint8_t payload_read[10 * 4096]; 3937 uint8_t payload_write[10 * 4096]; 3938 struct iovec iov_read[3]; 3939 struct iovec iov_write[3]; 3940 3941 free_clusters = spdk_bs_free_cluster_count(bs); 3942 3943 channel = spdk_bs_alloc_io_channel(bs); 3944 CU_ASSERT(channel != NULL); 3945 3946 ut_spdk_blob_opts_init(&opts); 3947 opts.thin_provision = true; 3948 3949 blob = ut_blob_create_and_open(bs, &opts); 3950 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3951 3952 CU_ASSERT(blob->active.num_clusters == 0); 3953 3954 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. 
*/ 3955 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3956 poll_threads(); 3957 CU_ASSERT(g_bserrno == 0); 3958 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3959 CU_ASSERT(blob->active.num_clusters == 5); 3960 3961 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3962 poll_threads(); 3963 CU_ASSERT(g_bserrno == 0); 3964 /* Sync must not change anything */ 3965 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3966 CU_ASSERT(blob->active.num_clusters == 5); 3967 3968 /* Payload should be all zeros from unallocated clusters */ 3969 memset(payload_read, 0xAA, sizeof(payload_read)); 3970 iov_read[0].iov_base = payload_read; 3971 iov_read[0].iov_len = 3 * 4096; 3972 iov_read[1].iov_base = payload_read + 3 * 4096; 3973 iov_read[1].iov_len = 4 * 4096; 3974 iov_read[2].iov_base = payload_read + 7 * 4096; 3975 iov_read[2].iov_len = 3 * 4096; 3976 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 3977 poll_threads(); 3978 CU_ASSERT(g_bserrno == 0); 3979 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3980 3981 memset(payload_write, 0xE5, sizeof(payload_write)); 3982 iov_write[0].iov_base = payload_write; 3983 iov_write[0].iov_len = 1 * 4096; 3984 iov_write[1].iov_base = payload_write + 1 * 4096; 3985 iov_write[1].iov_len = 5 * 4096; 3986 iov_write[2].iov_base = payload_write + 6 * 4096; 3987 iov_write[2].iov_len = 4 * 4096; 3988 3989 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 3990 poll_threads(); 3991 CU_ASSERT(g_bserrno == 0); 3992 3993 memset(payload_read, 0xAA, sizeof(payload_read)); 3994 iov_read[0].iov_base = payload_read; 3995 iov_read[0].iov_len = 3 * 4096; 3996 iov_read[1].iov_base = payload_read + 3 * 4096; 3997 iov_read[1].iov_len = 4 * 4096; 3998 iov_read[2].iov_base = payload_read + 7 * 4096; 3999 iov_read[2].iov_len = 3 * 4096; 4000 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4001 poll_threads(); 4002 CU_ASSERT(g_bserrno == 0); 4003 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4004 4005 spdk_bs_free_io_channel(channel); 4006 poll_threads(); 4007 4008 ut_blob_close_and_delete(bs, blob); 4009 } 4010 4011 struct iter_ctx { 4012 int current_iter; 4013 spdk_blob_id blobid[4]; 4014 }; 4015 4016 static void 4017 test_iter(void *arg, struct spdk_blob *blob, int bserrno) 4018 { 4019 struct iter_ctx *iter_ctx = arg; 4020 spdk_blob_id blobid; 4021 4022 CU_ASSERT(bserrno == 0); 4023 blobid = spdk_blob_get_id(blob); 4024 CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); 4025 } 4026 4027 static void 4028 bs_load_iter_test(void) 4029 { 4030 struct spdk_blob_store *bs; 4031 struct spdk_bs_dev *dev; 4032 struct iter_ctx iter_ctx = { 0 }; 4033 struct spdk_blob *blob; 4034 int i, rc; 4035 struct spdk_bs_opts opts; 4036 4037 dev = init_dev(); 4038 spdk_bs_opts_init(&opts); 4039 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4040 4041 /* Initialize a new blob store */ 4042 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 4043 poll_threads(); 4044 CU_ASSERT(g_bserrno == 0); 4045 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4046 bs = g_bs; 4047 4048 for (i = 0; i < 4; i++) { 4049 blob = ut_blob_create_and_open(bs, NULL); 4050 iter_ctx.blobid[i] = spdk_blob_get_id(blob); 4051 4052 /* Just save the blobid as an xattr for testing purposes. 
*/ 4053 rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); 4054 CU_ASSERT(rc == 0); 4055 4056 /* Resize the blob */ 4057 spdk_blob_resize(blob, i, blob_op_complete, NULL); 4058 poll_threads(); 4059 CU_ASSERT(g_bserrno == 0); 4060 4061 spdk_blob_close(blob, blob_op_complete, NULL); 4062 poll_threads(); 4063 CU_ASSERT(g_bserrno == 0); 4064 } 4065 4066 g_bserrno = -1; 4067 spdk_bs_unload(bs, bs_op_complete, NULL); 4068 poll_threads(); 4069 CU_ASSERT(g_bserrno == 0); 4070 4071 dev = init_dev(); 4072 spdk_bs_opts_init(&opts); 4073 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4074 opts.iter_cb_fn = test_iter; 4075 opts.iter_cb_arg = &iter_ctx; 4076 4077 /* Test blob iteration during load after a clean shutdown. */ 4078 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4079 poll_threads(); 4080 CU_ASSERT(g_bserrno == 0); 4081 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4082 bs = g_bs; 4083 4084 /* Dirty shutdown */ 4085 bs_free(bs); 4086 4087 dev = init_dev(); 4088 spdk_bs_opts_init(&opts); 4089 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4090 opts.iter_cb_fn = test_iter; 4091 iter_ctx.current_iter = 0; 4092 opts.iter_cb_arg = &iter_ctx; 4093 4094 /* Test blob iteration during load after a dirty shutdown. */ 4095 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4096 poll_threads(); 4097 CU_ASSERT(g_bserrno == 0); 4098 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4099 bs = g_bs; 4100 4101 spdk_bs_unload(bs, bs_op_complete, NULL); 4102 poll_threads(); 4103 CU_ASSERT(g_bserrno == 0); 4104 g_bs = NULL; 4105 } 4106 4107 static void 4108 blob_snapshot_rw(void) 4109 { 4110 static const uint8_t zero[10 * 4096] = { 0 }; 4111 struct spdk_blob_store *bs = g_bs; 4112 struct spdk_blob *blob, *snapshot; 4113 struct spdk_io_channel *channel; 4114 struct spdk_blob_opts opts; 4115 spdk_blob_id blobid, snapshotid; 4116 uint64_t free_clusters; 4117 uint64_t cluster_size; 4118 uint64_t page_size; 4119 uint8_t payload_read[10 * 4096]; 4120 uint8_t payload_write[10 * 4096]; 4121 uint64_t write_bytes; 4122 uint64_t read_bytes; 4123 4124 free_clusters = spdk_bs_free_cluster_count(bs); 4125 cluster_size = spdk_bs_get_cluster_size(bs); 4126 page_size = spdk_bs_get_page_size(bs); 4127 4128 channel = spdk_bs_alloc_io_channel(bs); 4129 CU_ASSERT(channel != NULL); 4130 4131 ut_spdk_blob_opts_init(&opts); 4132 opts.thin_provision = true; 4133 opts.num_clusters = 5; 4134 4135 blob = ut_blob_create_and_open(bs, &opts); 4136 blobid = spdk_blob_get_id(blob); 4137 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4138 4139 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4140 4141 memset(payload_read, 0xFF, sizeof(payload_read)); 4142 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4143 poll_threads(); 4144 CU_ASSERT(g_bserrno == 0); 4145 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4146 4147 memset(payload_write, 0xE5, sizeof(payload_write)); 4148 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4149 poll_threads(); 4150 CU_ASSERT(g_bserrno == 0); 4151 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4152 4153 /* Create snapshot from blob */ 4154 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4155 poll_threads(); 4156 CU_ASSERT(g_bserrno == 0); 4157 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4158 snapshotid = g_blobid; 4159 4160 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4161 poll_threads(); 
4162 CU_ASSERT(g_bserrno == 0); 4163 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4164 snapshot = g_blob; 4165 CU_ASSERT(snapshot->data_ro == true); 4166 CU_ASSERT(snapshot->md_ro == true); 4167 4168 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4169 4170 write_bytes = g_dev_write_bytes; 4171 read_bytes = g_dev_read_bytes; 4172 4173 memset(payload_write, 0xAA, sizeof(payload_write)); 4174 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4175 poll_threads(); 4176 CU_ASSERT(g_bserrno == 0); 4177 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4178 4179 /* For a clone we need to allocate and copy one cluster, update one page of metadata 4180 * and then write 10 pages of payload. 4181 */ 4182 if (g_use_extent_table) { 4183 /* Add one more page for EXTENT_PAGE write */ 4184 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); 4185 } else { 4186 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); 4187 } 4188 CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); 4189 4190 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4191 poll_threads(); 4192 CU_ASSERT(g_bserrno == 0); 4193 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4194 4195 /* Data on snapshot should not change after write to clone */ 4196 memset(payload_write, 0xE5, sizeof(payload_write)); 4197 spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); 4198 poll_threads(); 4199 CU_ASSERT(g_bserrno == 0); 4200 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4201 4202 ut_blob_close_and_delete(bs, blob); 4203 ut_blob_close_and_delete(bs, snapshot); 4204 4205 spdk_bs_free_io_channel(channel); 4206 poll_threads(); 4207 g_blob = NULL; 4208 g_blobid = 0; 4209 } 4210 4211 static void 4212 blob_snapshot_rw_iov(void) 4213 { 4214 static const uint8_t zero[10 * 4096] = { 0 }; 4215 struct spdk_blob_store *bs = g_bs; 4216 struct spdk_blob *blob, *snapshot; 4217 struct spdk_io_channel *channel; 4218 struct spdk_blob_opts opts; 4219 spdk_blob_id blobid, snapshotid; 4220 uint64_t free_clusters; 4221 uint8_t payload_read[10 * 4096]; 4222 uint8_t payload_write[10 * 4096]; 4223 struct iovec iov_read[3]; 4224 struct iovec iov_write[3]; 4225 4226 free_clusters = spdk_bs_free_cluster_count(bs); 4227 4228 channel = spdk_bs_alloc_io_channel(bs); 4229 CU_ASSERT(channel != NULL); 4230 4231 ut_spdk_blob_opts_init(&opts); 4232 opts.thin_provision = true; 4233 opts.num_clusters = 5; 4234 4235 blob = ut_blob_create_and_open(bs, &opts); 4236 blobid = spdk_blob_get_id(blob); 4237 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4238 4239 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4240 4241 /* Create snapshot from blob */ 4242 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4243 poll_threads(); 4244 CU_ASSERT(g_bserrno == 0); 4245 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4246 snapshotid = g_blobid; 4247 4248 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4249 poll_threads(); 4250 CU_ASSERT(g_bserrno == 0); 4251 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4252 snapshot = g_blob; 4253 CU_ASSERT(snapshot->data_ro == true); 4254 CU_ASSERT(snapshot->md_ro == true); 4255 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4256 4257 /* Payload should be all zeros from unallocated clusters */ 4258 memset(payload_read, 0xAA, sizeof(payload_read)); 4259 iov_read[0].iov_base = payload_read; 4260 iov_read[0].iov_len = 3 * 4096; 4261 
iov_read[1].iov_base = payload_read + 3 * 4096; 4262 iov_read[1].iov_len = 4 * 4096; 4263 iov_read[2].iov_base = payload_read + 7 * 4096; 4264 iov_read[2].iov_len = 3 * 4096; 4265 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4266 poll_threads(); 4267 CU_ASSERT(g_bserrno == 0); 4268 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4269 4270 memset(payload_write, 0xE5, sizeof(payload_write)); 4271 iov_write[0].iov_base = payload_write; 4272 iov_write[0].iov_len = 1 * 4096; 4273 iov_write[1].iov_base = payload_write + 1 * 4096; 4274 iov_write[1].iov_len = 5 * 4096; 4275 iov_write[2].iov_base = payload_write + 6 * 4096; 4276 iov_write[2].iov_len = 4 * 4096; 4277 4278 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4279 poll_threads(); 4280 CU_ASSERT(g_bserrno == 0); 4281 4282 memset(payload_read, 0xAA, sizeof(payload_read)); 4283 iov_read[0].iov_base = payload_read; 4284 iov_read[0].iov_len = 3 * 4096; 4285 iov_read[1].iov_base = payload_read + 3 * 4096; 4286 iov_read[1].iov_len = 4 * 4096; 4287 iov_read[2].iov_base = payload_read + 7 * 4096; 4288 iov_read[2].iov_len = 3 * 4096; 4289 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4290 poll_threads(); 4291 CU_ASSERT(g_bserrno == 0); 4292 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4293 4294 spdk_bs_free_io_channel(channel); 4295 poll_threads(); 4296 4297 ut_blob_close_and_delete(bs, blob); 4298 ut_blob_close_and_delete(bs, snapshot); 4299 } 4300 4301 /** 4302 * Inflate / decouple parent rw unit tests. 4303 * 4304 * -------------- 4305 * original blob: 0 1 2 3 4 4306 * ,---------+---------+---------+---------+---------. 4307 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4308 * +---------+---------+---------+---------+---------+ 4309 * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | 4310 * +---------+---------+---------+---------+---------+ 4311 * blob | - |zzzzzzzzz| - | - | - | 4312 * '---------+---------+---------+---------+---------' 4313 * . . . . . . 4314 * -------- . . . . . . 4315 * inflate: . . . . . . 4316 * ,---------+---------+---------+---------+---------. 4317 * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| 4318 * '---------+---------+---------+---------+---------' 4319 * 4320 * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency 4321 * on snapshot2 and snapshot removed . . . 4322 * . . . . . . 4323 * ---------------- . . . . . . 4324 * decouple parent: . . . . . . 4325 * ,---------+---------+---------+---------+---------. 4326 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4327 * +---------+---------+---------+---------+---------+ 4328 * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | 4329 * '---------+---------+---------+---------+---------' 4330 * 4331 * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency 4332 * on snapshot2 removed and on snapshot still exists. Snapshot2 4333 * should remain a clone of snapshot. 
4334 */ 4335 static void 4336 _blob_inflate_rw(bool decouple_parent) 4337 { 4338 struct spdk_blob_store *bs = g_bs; 4339 struct spdk_blob *blob, *snapshot, *snapshot2; 4340 struct spdk_io_channel *channel; 4341 struct spdk_blob_opts opts; 4342 spdk_blob_id blobid, snapshotid, snapshot2id; 4343 uint64_t free_clusters; 4344 uint64_t cluster_size; 4345 4346 uint64_t payload_size; 4347 uint8_t *payload_read; 4348 uint8_t *payload_write; 4349 uint8_t *payload_clone; 4350 4351 uint64_t pages_per_cluster; 4352 uint64_t pages_per_payload; 4353 4354 int i; 4355 spdk_blob_id ids[2]; 4356 size_t count; 4357 4358 free_clusters = spdk_bs_free_cluster_count(bs); 4359 cluster_size = spdk_bs_get_cluster_size(bs); 4360 pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); 4361 pages_per_payload = pages_per_cluster * 5; 4362 4363 payload_size = cluster_size * 5; 4364 4365 payload_read = malloc(payload_size); 4366 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 4367 4368 payload_write = malloc(payload_size); 4369 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 4370 4371 payload_clone = malloc(payload_size); 4372 SPDK_CU_ASSERT_FATAL(payload_clone != NULL); 4373 4374 channel = spdk_bs_alloc_io_channel(bs); 4375 SPDK_CU_ASSERT_FATAL(channel != NULL); 4376 4377 /* Create blob */ 4378 ut_spdk_blob_opts_init(&opts); 4379 opts.thin_provision = true; 4380 opts.num_clusters = 5; 4381 4382 blob = ut_blob_create_and_open(bs, &opts); 4383 blobid = spdk_blob_get_id(blob); 4384 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4385 4386 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4387 4388 /* 1) Initial read should return zeroed payload */ 4389 memset(payload_read, 0xFF, payload_size); 4390 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4391 blob_op_complete, NULL); 4392 poll_threads(); 4393 CU_ASSERT(g_bserrno == 0); 4394 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 4395 4396 /* Fill whole blob with a pattern, except last cluster (to be sure it 4397 * isn't allocated) */ 4398 memset(payload_write, 0xE5, payload_size - cluster_size); 4399 spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - 4400 pages_per_cluster, blob_op_complete, NULL); 4401 poll_threads(); 4402 CU_ASSERT(g_bserrno == 0); 4403 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4404 4405 /* 2) Create snapshot from blob (first level) */ 4406 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4407 poll_threads(); 4408 CU_ASSERT(g_bserrno == 0); 4409 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4410 snapshotid = g_blobid; 4411 4412 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4413 poll_threads(); 4414 CU_ASSERT(g_bserrno == 0); 4415 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4416 snapshot = g_blob; 4417 CU_ASSERT(snapshot->data_ro == true); 4418 CU_ASSERT(snapshot->md_ro == true); 4419 4420 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4421 4422 /* Write every second cluster with a pattern. 4423 * 4424 * Last cluster shouldn't be written, to be sure that snapshot nor clone 4425 * doesn't allocate it. 4426 * 4427 * payload_clone stores expected result on "blob" read at the time and 4428 * is used only to check data consistency on clone before and after 4429 * inflation. Initially we fill it with a backing snapshots pattern 4430 * used before. 
4431 */ 4432 memset(payload_clone, 0xE5, payload_size - cluster_size); 4433 memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); 4434 memset(payload_write, 0xAA, payload_size); 4435 for (i = 1; i < 5; i += 2) { 4436 spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, 4437 pages_per_cluster, blob_op_complete, NULL); 4438 poll_threads(); 4439 CU_ASSERT(g_bserrno == 0); 4440 4441 /* Update expected result */ 4442 memcpy(payload_clone + (cluster_size * i), payload_write, 4443 cluster_size); 4444 } 4445 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4446 4447 /* Check data consistency on clone */ 4448 memset(payload_read, 0xFF, payload_size); 4449 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4450 blob_op_complete, NULL); 4451 poll_threads(); 4452 CU_ASSERT(g_bserrno == 0); 4453 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4454 4455 /* 3) Create second levels snapshot from blob */ 4456 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4457 poll_threads(); 4458 CU_ASSERT(g_bserrno == 0); 4459 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4460 snapshot2id = g_blobid; 4461 4462 spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); 4463 poll_threads(); 4464 CU_ASSERT(g_bserrno == 0); 4465 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4466 snapshot2 = g_blob; 4467 CU_ASSERT(snapshot2->data_ro == true); 4468 CU_ASSERT(snapshot2->md_ro == true); 4469 4470 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); 4471 4472 CU_ASSERT(snapshot2->parent_id == snapshotid); 4473 4474 /* Write one cluster on the top level blob. This cluster (1) covers 4475 * already allocated cluster in the snapshot2, so shouldn't be inflated 4476 * at all */ 4477 spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, 4478 pages_per_cluster, blob_op_complete, NULL); 4479 poll_threads(); 4480 CU_ASSERT(g_bserrno == 0); 4481 4482 /* Update expected result */ 4483 memcpy(payload_clone + cluster_size, payload_write, cluster_size); 4484 4485 /* Check data consistency on clone */ 4486 memset(payload_read, 0xFF, payload_size); 4487 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4488 blob_op_complete, NULL); 4489 poll_threads(); 4490 CU_ASSERT(g_bserrno == 0); 4491 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4492 4493 4494 /* Close all blobs */ 4495 spdk_blob_close(blob, blob_op_complete, NULL); 4496 poll_threads(); 4497 CU_ASSERT(g_bserrno == 0); 4498 4499 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4500 poll_threads(); 4501 CU_ASSERT(g_bserrno == 0); 4502 4503 spdk_blob_close(snapshot, blob_op_complete, NULL); 4504 poll_threads(); 4505 CU_ASSERT(g_bserrno == 0); 4506 4507 /* Check snapshot-clone relations */ 4508 count = 2; 4509 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4510 CU_ASSERT(count == 1); 4511 CU_ASSERT(ids[0] == snapshot2id); 4512 4513 count = 2; 4514 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4515 CU_ASSERT(count == 1); 4516 CU_ASSERT(ids[0] == blobid); 4517 4518 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); 4519 4520 free_clusters = spdk_bs_free_cluster_count(bs); 4521 if (!decouple_parent) { 4522 /* Do full blob inflation */ 4523 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 4524 poll_threads(); 4525 CU_ASSERT(g_bserrno == 0); 4526 4527 /* All clusters should be inflated (except one already allocated 4528 * in a top level blob) */ 4529 
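/* Accounting sketch (derived from the diagram above): the blob spans 5 clusters and cluster 1 was already written directly on the top-level blob, so a full inflation allocates the remaining 4 - clusters 0, 2 and 3 are copied from the snapshot chain and the never-written cluster 4 is simply zero-filled. */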
CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4); 4530 4531 /* Check if relation tree updated correctly */ 4532 count = 2; 4533 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4534 4535 /* snapshotid has one clone */ 4536 CU_ASSERT(count == 1); 4537 CU_ASSERT(ids[0] == snapshot2id); 4538 4539 /* snapshot2id has no clones */ 4540 count = 2; 4541 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4542 CU_ASSERT(count == 0); 4543 4544 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4545 } else { 4546 /* Decouple parent of blob */ 4547 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 4548 poll_threads(); 4549 CU_ASSERT(g_bserrno == 0); 4550 4551 /* Only one cluster from a parent should be inflated (second one 4552 * is covered by a cluster written on a top level blob, and 4553 * already allocated) */ 4554 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1); 4555 4556 /* Check if relation tree updated correctly */ 4557 count = 2; 4558 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4559 4560 /* snapshotid has two clones now */ 4561 CU_ASSERT(count == 2); 4562 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4563 CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id); 4564 4565 /* snapshot2id has no clones */ 4566 count = 2; 4567 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4568 CU_ASSERT(count == 0); 4569 4570 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4571 } 4572 4573 /* Try to delete snapshot2 (should pass) */ 4574 spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL); 4575 poll_threads(); 4576 CU_ASSERT(g_bserrno == 0); 4577 4578 /* Try to delete base snapshot */ 4579 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4580 poll_threads(); 4581 CU_ASSERT(g_bserrno == 0); 4582 4583 /* Reopen blob after snapshot deletion */ 4584 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4585 poll_threads(); 4586 CU_ASSERT(g_bserrno == 0); 4587 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4588 blob = g_blob; 4589 4590 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4591 4592 /* Check data consistency on inflated blob */ 4593 memset(payload_read, 0xFF, payload_size); 4594 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4595 blob_op_complete, NULL); 4596 poll_threads(); 4597 CU_ASSERT(g_bserrno == 0); 4598 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4599 4600 spdk_bs_free_io_channel(channel); 4601 poll_threads(); 4602 4603 free(payload_read); 4604 free(payload_write); 4605 free(payload_clone); 4606 4607 ut_blob_close_and_delete(bs, blob); 4608 } 4609 4610 static void 4611 blob_inflate_rw(void) 4612 { 4613 _blob_inflate_rw(false); 4614 _blob_inflate_rw(true); 4615 } 4616 4617 /** 4618 * Snapshot-clones relation test 4619 * 4620 * snapshot 4621 * | 4622 * +-----+-----+ 4623 * | | 4624 * blob(ro) snapshot2 4625 * | | 4626 * clone2 clone 4627 */ 4628 static void 4629 blob_relations(void) 4630 { 4631 struct spdk_blob_store *bs; 4632 struct spdk_bs_dev *dev; 4633 struct spdk_bs_opts bs_opts; 4634 struct spdk_blob_opts opts; 4635 struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2; 4636 spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2; 4637 int rc; 4638 size_t count; 4639 spdk_blob_id ids[10] = {}; 4640 4641 dev = init_dev(); 4642 spdk_bs_opts_init(&bs_opts); 4643 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype),
"TESTTYPE"); 4644 4645 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4646 poll_threads(); 4647 CU_ASSERT(g_bserrno == 0); 4648 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4649 bs = g_bs; 4650 4651 /* 1. Create blob with 10 clusters */ 4652 4653 ut_spdk_blob_opts_init(&opts); 4654 opts.num_clusters = 10; 4655 4656 blob = ut_blob_create_and_open(bs, &opts); 4657 blobid = spdk_blob_get_id(blob); 4658 4659 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4660 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4661 CU_ASSERT(!spdk_blob_is_clone(blob)); 4662 CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); 4663 4664 /* blob should not have underlying snapshot nor clones */ 4665 CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); 4666 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4667 count = SPDK_COUNTOF(ids); 4668 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4669 CU_ASSERT(rc == 0); 4670 CU_ASSERT(count == 0); 4671 4672 4673 /* 2. Create snapshot */ 4674 4675 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4676 poll_threads(); 4677 CU_ASSERT(g_bserrno == 0); 4678 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4679 snapshotid = g_blobid; 4680 4681 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4682 poll_threads(); 4683 CU_ASSERT(g_bserrno == 0); 4684 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4685 snapshot = g_blob; 4686 4687 CU_ASSERT(spdk_blob_is_read_only(snapshot)); 4688 CU_ASSERT(spdk_blob_is_snapshot(snapshot)); 4689 CU_ASSERT(!spdk_blob_is_clone(snapshot)); 4690 CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); 4691 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4692 4693 /* Check if original blob is converted to the clone of snapshot */ 4694 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4695 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4696 CU_ASSERT(spdk_blob_is_clone(blob)); 4697 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4698 CU_ASSERT(blob->parent_id == snapshotid); 4699 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4700 4701 count = SPDK_COUNTOF(ids); 4702 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4703 CU_ASSERT(rc == 0); 4704 CU_ASSERT(count == 1); 4705 CU_ASSERT(ids[0] == blobid); 4706 4707 4708 /* 3. Create clone from snapshot */ 4709 4710 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 4711 poll_threads(); 4712 CU_ASSERT(g_bserrno == 0); 4713 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4714 cloneid = g_blobid; 4715 4716 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4717 poll_threads(); 4718 CU_ASSERT(g_bserrno == 0); 4719 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4720 clone = g_blob; 4721 4722 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4723 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4724 CU_ASSERT(spdk_blob_is_clone(clone)); 4725 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4726 CU_ASSERT(clone->parent_id == snapshotid); 4727 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); 4728 4729 count = SPDK_COUNTOF(ids); 4730 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4731 CU_ASSERT(rc == 0); 4732 CU_ASSERT(count == 0); 4733 4734 /* Check if clone is on the snapshot's list */ 4735 count = SPDK_COUNTOF(ids); 4736 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4737 CU_ASSERT(rc == 0); 4738 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4739 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 4740 4741 4742 /* 4. 
Create snapshot of the clone */ 4743 4744 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 4745 poll_threads(); 4746 CU_ASSERT(g_bserrno == 0); 4747 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4748 snapshotid2 = g_blobid; 4749 4750 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4751 poll_threads(); 4752 CU_ASSERT(g_bserrno == 0); 4753 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4754 snapshot2 = g_blob; 4755 4756 CU_ASSERT(spdk_blob_is_read_only(snapshot2)); 4757 CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); 4758 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 4759 CU_ASSERT(snapshot2->parent_id == snapshotid); 4760 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4761 4762 /* Check if clone is converted to the clone of snapshot2 and snapshot2 4763 * is a child of snapshot */ 4764 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4765 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4766 CU_ASSERT(spdk_blob_is_clone(clone)); 4767 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4768 CU_ASSERT(clone->parent_id == snapshotid2); 4769 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4770 4771 count = SPDK_COUNTOF(ids); 4772 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4773 CU_ASSERT(rc == 0); 4774 CU_ASSERT(count == 1); 4775 CU_ASSERT(ids[0] == cloneid); 4776 4777 4778 /* 5. Try to create clone from read only blob */ 4779 4780 /* Mark blob as read only */ 4781 spdk_blob_set_read_only(blob); 4782 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4783 poll_threads(); 4784 CU_ASSERT(g_bserrno == 0); 4785 4786 /* Check if previously created blob is read only clone */ 4787 CU_ASSERT(spdk_blob_is_read_only(blob)); 4788 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4789 CU_ASSERT(spdk_blob_is_clone(blob)); 4790 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4791 4792 /* Create clone from read only blob */ 4793 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4794 poll_threads(); 4795 CU_ASSERT(g_bserrno == 0); 4796 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4797 cloneid2 = g_blobid; 4798 4799 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 4800 poll_threads(); 4801 CU_ASSERT(g_bserrno == 0); 4802 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4803 clone2 = g_blob; 4804 4805 CU_ASSERT(!spdk_blob_is_read_only(clone2)); 4806 CU_ASSERT(!spdk_blob_is_snapshot(clone2)); 4807 CU_ASSERT(spdk_blob_is_clone(clone2)); 4808 CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); 4809 4810 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4811 4812 count = SPDK_COUNTOF(ids); 4813 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4814 CU_ASSERT(rc == 0); 4815 4816 CU_ASSERT(count == 1); 4817 CU_ASSERT(ids[0] == cloneid2); 4818 4819 /* Close blobs */ 4820 4821 spdk_blob_close(clone2, blob_op_complete, NULL); 4822 poll_threads(); 4823 CU_ASSERT(g_bserrno == 0); 4824 4825 spdk_blob_close(blob, blob_op_complete, NULL); 4826 poll_threads(); 4827 CU_ASSERT(g_bserrno == 0); 4828 4829 spdk_blob_close(clone, blob_op_complete, NULL); 4830 poll_threads(); 4831 CU_ASSERT(g_bserrno == 0); 4832 4833 spdk_blob_close(snapshot, blob_op_complete, NULL); 4834 poll_threads(); 4835 CU_ASSERT(g_bserrno == 0); 4836 4837 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4838 poll_threads(); 4839 CU_ASSERT(g_bserrno == 0); 4840 4841 /* Try to delete snapshot with more than 1 clone */ 4842 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4843 poll_threads(); 4844 CU_ASSERT(g_bserrno != 0); 4845 4846 
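/* Reload the blob store so the checks below exercise the snapshot/clone relations as rebuilt from on-disk metadata rather than from in-memory state. The calls that follow also rely on the spdk_blob_get_clones() query pattern: with a NULL ids array (or one that is too small) it returns -ENOMEM and sets count to the number of clones, so a caller can size the array before asking again. */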
ut_bs_reload(&bs, &bs_opts); 4847 4848 /* NULL ids array should return number of clones in count */ 4849 count = SPDK_COUNTOF(ids); 4850 rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); 4851 CU_ASSERT(rc == -ENOMEM); 4852 CU_ASSERT(count == 2); 4853 4854 /* incorrect array size */ 4855 count = 1; 4856 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4857 CU_ASSERT(rc == -ENOMEM); 4858 CU_ASSERT(count == 2); 4859 4860 4861 /* Verify structure of loaded blob store */ 4862 4863 /* snapshot */ 4864 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4865 4866 count = SPDK_COUNTOF(ids); 4867 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4868 CU_ASSERT(rc == 0); 4869 CU_ASSERT(count == 2); 4870 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4871 CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); 4872 4873 /* blob */ 4874 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4875 count = SPDK_COUNTOF(ids); 4876 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4877 CU_ASSERT(rc == 0); 4878 CU_ASSERT(count == 1); 4879 CU_ASSERT(ids[0] == cloneid2); 4880 4881 /* clone */ 4882 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4883 count = SPDK_COUNTOF(ids); 4884 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4885 CU_ASSERT(rc == 0); 4886 CU_ASSERT(count == 0); 4887 4888 /* snapshot2 */ 4889 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4890 count = SPDK_COUNTOF(ids); 4891 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4892 CU_ASSERT(rc == 0); 4893 CU_ASSERT(count == 1); 4894 CU_ASSERT(ids[0] == cloneid); 4895 4896 /* clone2 */ 4897 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4898 count = SPDK_COUNTOF(ids); 4899 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 4900 CU_ASSERT(rc == 0); 4901 CU_ASSERT(count == 0); 4902 4903 /* Try to delete blob that user should not be able to remove */ 4904 4905 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4906 poll_threads(); 4907 CU_ASSERT(g_bserrno != 0); 4908 4909 /* Remove all blobs */ 4910 4911 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 4912 poll_threads(); 4913 CU_ASSERT(g_bserrno == 0); 4914 4915 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 4916 poll_threads(); 4917 CU_ASSERT(g_bserrno == 0); 4918 4919 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 4920 poll_threads(); 4921 CU_ASSERT(g_bserrno == 0); 4922 4923 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 4924 poll_threads(); 4925 CU_ASSERT(g_bserrno == 0); 4926 4927 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4928 poll_threads(); 4929 CU_ASSERT(g_bserrno == 0); 4930 4931 spdk_bs_unload(bs, bs_op_complete, NULL); 4932 poll_threads(); 4933 CU_ASSERT(g_bserrno == 0); 4934 4935 g_bs = NULL; 4936 } 4937 4938 /** 4939 * Snapshot-clones relation test 2 4940 * 4941 * snapshot1 4942 * | 4943 * snapshot2 4944 * | 4945 * +-----+-----+ 4946 * | | 4947 * blob(ro) snapshot3 4948 * | | 4949 * | snapshot4 4950 * | | | 4951 * clone2 clone clone3 4952 */ 4953 static void 4954 blob_relations2(void) 4955 { 4956 struct spdk_blob_store *bs; 4957 struct spdk_bs_dev *dev; 4958 struct spdk_bs_opts bs_opts; 4959 struct spdk_blob_opts opts; 4960 struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; 4961 spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, 4962 cloneid3; 4963 int rc; 4964 size_t count; 4965 
spdk_blob_id ids[10] = {}; 4966 4967 dev = init_dev(); 4968 spdk_bs_opts_init(&bs_opts); 4969 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 4970 4971 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4972 poll_threads(); 4973 CU_ASSERT(g_bserrno == 0); 4974 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4975 bs = g_bs; 4976 4977 /* 1. Create blob with 10 clusters */ 4978 4979 ut_spdk_blob_opts_init(&opts); 4980 opts.num_clusters = 10; 4981 4982 blob = ut_blob_create_and_open(bs, &opts); 4983 blobid = spdk_blob_get_id(blob); 4984 4985 /* 2. Create snapshot1 */ 4986 4987 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4988 poll_threads(); 4989 CU_ASSERT(g_bserrno == 0); 4990 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4991 snapshotid1 = g_blobid; 4992 4993 spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); 4994 poll_threads(); 4995 CU_ASSERT(g_bserrno == 0); 4996 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4997 snapshot1 = g_blob; 4998 4999 CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); 5000 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); 5001 5002 CU_ASSERT(blob->parent_id == snapshotid1); 5003 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5004 5005 /* Check if blob is the clone of snapshot1 */ 5006 CU_ASSERT(blob->parent_id == snapshotid1); 5007 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5008 5009 count = SPDK_COUNTOF(ids); 5010 rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); 5011 CU_ASSERT(rc == 0); 5012 CU_ASSERT(count == 1); 5013 CU_ASSERT(ids[0] == blobid); 5014 5015 /* 3. Create another snapshot */ 5016 5017 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5018 poll_threads(); 5019 CU_ASSERT(g_bserrno == 0); 5020 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5021 snapshotid2 = g_blobid; 5022 5023 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 5024 poll_threads(); 5025 CU_ASSERT(g_bserrno == 0); 5026 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5027 snapshot2 = g_blob; 5028 5029 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 5030 CU_ASSERT(snapshot2->parent_id == snapshotid1); 5031 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); 5032 5033 /* Check if snapshot2 is the clone of snapshot1 and blob 5034 * is a child of snapshot2 */ 5035 CU_ASSERT(blob->parent_id == snapshotid2); 5036 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5037 5038 count = SPDK_COUNTOF(ids); 5039 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5040 CU_ASSERT(rc == 0); 5041 CU_ASSERT(count == 1); 5042 CU_ASSERT(ids[0] == blobid); 5043 5044 /* 4. 
Create clone from snapshot */ 5045 5046 spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); 5047 poll_threads(); 5048 CU_ASSERT(g_bserrno == 0); 5049 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5050 cloneid = g_blobid; 5051 5052 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 5053 poll_threads(); 5054 CU_ASSERT(g_bserrno == 0); 5055 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5056 clone = g_blob; 5057 5058 CU_ASSERT(clone->parent_id == snapshotid2); 5059 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5060 5061 /* Check if clone is on the snapshot's list */ 5062 count = SPDK_COUNTOF(ids); 5063 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5064 CU_ASSERT(rc == 0); 5065 CU_ASSERT(count == 2); 5066 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5067 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5068 5069 /* 5. Create snapshot of the clone */ 5070 5071 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5072 poll_threads(); 5073 CU_ASSERT(g_bserrno == 0); 5074 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5075 snapshotid3 = g_blobid; 5076 5077 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5078 poll_threads(); 5079 CU_ASSERT(g_bserrno == 0); 5080 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5081 snapshot3 = g_blob; 5082 5083 CU_ASSERT(snapshot3->parent_id == snapshotid2); 5084 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5085 5086 /* Check if clone is converted to the clone of snapshot3 and snapshot3 5087 * is a child of snapshot2 */ 5088 CU_ASSERT(clone->parent_id == snapshotid3); 5089 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5090 5091 count = SPDK_COUNTOF(ids); 5092 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5093 CU_ASSERT(rc == 0); 5094 CU_ASSERT(count == 1); 5095 CU_ASSERT(ids[0] == cloneid); 5096 5097 /* 6. Create another snapshot of the clone */ 5098 5099 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5100 poll_threads(); 5101 CU_ASSERT(g_bserrno == 0); 5102 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5103 snapshotid4 = g_blobid; 5104 5105 spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); 5106 poll_threads(); 5107 CU_ASSERT(g_bserrno == 0); 5108 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5109 snapshot4 = g_blob; 5110 5111 CU_ASSERT(snapshot4->parent_id == snapshotid3); 5112 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); 5113 5114 /* Check if clone is converted to the clone of snapshot4 and snapshot4 5115 * is a child of snapshot3 */ 5116 CU_ASSERT(clone->parent_id == snapshotid4); 5117 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); 5118 5119 count = SPDK_COUNTOF(ids); 5120 rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); 5121 CU_ASSERT(rc == 0); 5122 CU_ASSERT(count == 1); 5123 CU_ASSERT(ids[0] == cloneid); 5124 5125 /* 7. Remove snapshot 4 */ 5126 5127 ut_blob_close_and_delete(bs, snapshot4); 5128 5129 /* Check if relations are back to state from before creating snapshot 4 */ 5130 CU_ASSERT(clone->parent_id == snapshotid3); 5131 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5132 5133 count = SPDK_COUNTOF(ids); 5134 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5135 CU_ASSERT(rc == 0); 5136 CU_ASSERT(count == 1); 5137 CU_ASSERT(ids[0] == cloneid); 5138 5139 /* 8. 
Create second clone of snapshot 3 and try to remove snapshot 3 */ 5140 5141 spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); 5142 poll_threads(); 5143 CU_ASSERT(g_bserrno == 0); 5144 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5145 cloneid3 = g_blobid; 5146 5147 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5148 poll_threads(); 5149 CU_ASSERT(g_bserrno != 0); 5150 5151 /* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */ 5152 5153 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5154 poll_threads(); 5155 CU_ASSERT(g_bserrno == 0); 5156 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5157 snapshot3 = g_blob; 5158 5159 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5160 poll_threads(); 5161 CU_ASSERT(g_bserrno != 0); 5162 5163 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5164 poll_threads(); 5165 CU_ASSERT(g_bserrno == 0); 5166 5167 spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); 5168 poll_threads(); 5169 CU_ASSERT(g_bserrno == 0); 5170 5171 /* 10. Remove snapshot 1 */ 5172 5173 ut_blob_close_and_delete(bs, snapshot1); 5174 5175 /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ 5176 CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); 5177 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5178 5179 count = SPDK_COUNTOF(ids); 5180 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5181 CU_ASSERT(rc == 0); 5182 CU_ASSERT(count == 2); 5183 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5184 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5185 5186 /* 11. Try to create clone from read only blob */ 5187 5188 /* Mark blob as read only */ 5189 spdk_blob_set_read_only(blob); 5190 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5191 poll_threads(); 5192 CU_ASSERT(g_bserrno == 0); 5193 5194 /* Create clone from read only blob */ 5195 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5196 poll_threads(); 5197 CU_ASSERT(g_bserrno == 0); 5198 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5199 cloneid2 = g_blobid; 5200 5201 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5202 poll_threads(); 5203 CU_ASSERT(g_bserrno == 0); 5204 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5205 clone2 = g_blob; 5206 5207 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5208 5209 count = SPDK_COUNTOF(ids); 5210 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5211 CU_ASSERT(rc == 0); 5212 CU_ASSERT(count == 1); 5213 CU_ASSERT(ids[0] == cloneid2); 5214 5215 /* Close blobs */ 5216 5217 spdk_blob_close(clone2, blob_op_complete, NULL); 5218 poll_threads(); 5219 CU_ASSERT(g_bserrno == 0); 5220 5221 spdk_blob_close(blob, blob_op_complete, NULL); 5222 poll_threads(); 5223 CU_ASSERT(g_bserrno == 0); 5224 5225 spdk_blob_close(clone, blob_op_complete, NULL); 5226 poll_threads(); 5227 CU_ASSERT(g_bserrno == 0); 5228 5229 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5230 poll_threads(); 5231 CU_ASSERT(g_bserrno == 0); 5232 5233 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5234 poll_threads(); 5235 CU_ASSERT(g_bserrno == 0); 5236 5237 ut_bs_reload(&bs, &bs_opts); 5238 5239 /* Verify structure of loaded blob store */ 5240 5241 /* snapshot2 */ 5242 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5243 5244 count = SPDK_COUNTOF(ids); 5245 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5246 CU_ASSERT(rc == 0); 5247 
CU_ASSERT(count == 2); 5248 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5249 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5250 5251 /* blob */ 5252 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5253 count = SPDK_COUNTOF(ids); 5254 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5255 CU_ASSERT(rc == 0); 5256 CU_ASSERT(count == 1); 5257 CU_ASSERT(ids[0] == cloneid2); 5258 5259 /* clone */ 5260 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5261 count = SPDK_COUNTOF(ids); 5262 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5263 CU_ASSERT(rc == 0); 5264 CU_ASSERT(count == 0); 5265 5266 /* snapshot3 */ 5267 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5268 count = SPDK_COUNTOF(ids); 5269 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5270 CU_ASSERT(rc == 0); 5271 CU_ASSERT(count == 1); 5272 CU_ASSERT(ids[0] == cloneid); 5273 5274 /* clone2 */ 5275 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5276 count = SPDK_COUNTOF(ids); 5277 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5278 CU_ASSERT(rc == 0); 5279 CU_ASSERT(count == 0); 5280 5281 /* Try to delete all blobs in the worst possible order */ 5282 5283 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5284 poll_threads(); 5285 CU_ASSERT(g_bserrno != 0); 5286 5287 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5288 poll_threads(); 5289 CU_ASSERT(g_bserrno == 0); 5290 5291 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5292 poll_threads(); 5293 CU_ASSERT(g_bserrno != 0); 5294 5295 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5296 poll_threads(); 5297 CU_ASSERT(g_bserrno == 0); 5298 5299 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5300 poll_threads(); 5301 CU_ASSERT(g_bserrno == 0); 5302 5303 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5304 poll_threads(); 5305 CU_ASSERT(g_bserrno == 0); 5306 5307 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5308 poll_threads(); 5309 CU_ASSERT(g_bserrno == 0); 5310 5311 spdk_bs_unload(bs, bs_op_complete, NULL); 5312 poll_threads(); 5313 CU_ASSERT(g_bserrno == 0); 5314 5315 g_bs = NULL; 5316 } 5317 5318 static void 5319 blobstore_clean_power_failure(void) 5320 { 5321 struct spdk_blob_store *bs; 5322 struct spdk_blob *blob; 5323 struct spdk_power_failure_thresholds thresholds = {}; 5324 bool clean = false; 5325 struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 5326 struct spdk_bs_super_block super_copy = {}; 5327 5328 thresholds.general_threshold = 1; 5329 while (!clean) { 5330 /* Create bs and blob */ 5331 suite_blob_setup(); 5332 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5333 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5334 bs = g_bs; 5335 blob = g_blob; 5336 5337 /* Super block should not change for rest of the UT, 5338 * save it and compare later. */ 5339 memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block)); 5340 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5341 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5342 5343 /* Force the bs/super block into a clean state. 5344 * Along with marking the blob dirty, to cause a blob persist.
*/ 5345 blob->state = SPDK_BLOB_STATE_DIRTY; 5346 bs->clean = 1; 5347 super->clean = 1; 5348 super->crc = blob_md_page_calc_crc(super); 5349 5350 g_bserrno = -1; 5351 dev_set_power_failure_thresholds(thresholds); 5352 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5353 poll_threads(); 5354 dev_reset_power_failure_event(); 5355 5356 if (g_bserrno == 0) { 5357 /* After successful md sync, both bs and super block 5358 * should be marked as not clean. */ 5359 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5360 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5361 clean = true; 5362 } 5363 5364 /* Depending on the point of failure, the super block was either updated or not. */ 5365 super_copy.clean = super->clean; 5366 super_copy.crc = blob_md_page_calc_crc(&super_copy); 5367 /* Verify that the values in the super block remained unchanged. */ 5368 SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block))); 5369 5370 /* Delete blob and unload bs */ 5371 suite_blob_cleanup(); 5372 5373 thresholds.general_threshold++; 5374 } 5375 } 5376 5377 static void 5378 blob_delete_snapshot_power_failure(void) 5379 { 5380 struct spdk_bs_dev *dev; 5381 struct spdk_blob_store *bs; 5382 struct spdk_blob_opts opts; 5383 struct spdk_blob *blob, *snapshot; 5384 struct spdk_power_failure_thresholds thresholds = {}; 5385 spdk_blob_id blobid, snapshotid; 5386 const void *value; 5387 size_t value_len; 5388 size_t count; 5389 spdk_blob_id ids[3] = {}; 5390 int rc; 5391 bool deleted = false; 5392 int delete_snapshot_bserrno = -1; 5393 5394 thresholds.general_threshold = 1; 5395 while (!deleted) { 5396 dev = init_dev(); 5397 5398 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5399 poll_threads(); 5400 CU_ASSERT(g_bserrno == 0); 5401 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5402 bs = g_bs; 5403 5404 /* Create blob */ 5405 ut_spdk_blob_opts_init(&opts); 5406 opts.num_clusters = 10; 5407 5408 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5409 poll_threads(); 5410 CU_ASSERT(g_bserrno == 0); 5411 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5412 blobid = g_blobid; 5413 5414 /* Create snapshot */ 5415 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5416 poll_threads(); 5417 CU_ASSERT(g_bserrno == 0); 5418 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5419 snapshotid = g_blobid; 5420 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5421 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5422 5423 dev_set_power_failure_thresholds(thresholds); 5424 5425 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5426 poll_threads(); 5427 delete_snapshot_bserrno = g_bserrno; 5428 5429 /* Do not shut down cleanly. Assumption is that after snapshot deletion 5430 * reports success, changes to both blobs should already be persisted.
*/ 5431 dev_reset_power_failure_event(); 5432 ut_bs_dirty_load(&bs, NULL); 5433 5434 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5435 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5436 5437 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5438 poll_threads(); 5439 CU_ASSERT(g_bserrno == 0); 5440 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5441 blob = g_blob; 5442 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5443 5444 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5445 poll_threads(); 5446 5447 if (g_bserrno == 0) { 5448 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5449 snapshot = g_blob; 5450 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5451 count = SPDK_COUNTOF(ids); 5452 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5453 CU_ASSERT(rc == 0); 5454 CU_ASSERT(count == 1); 5455 CU_ASSERT(ids[0] == blobid); 5456 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 5457 CU_ASSERT(rc != 0); 5458 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5459 5460 spdk_blob_close(snapshot, blob_op_complete, NULL); 5461 poll_threads(); 5462 CU_ASSERT(g_bserrno == 0); 5463 } else { 5464 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5465 /* Snapshot might have been left in unrecoverable state, so it does not open. 5466 * Yet delete might perform further changes to the clone after that. 5467 * This UT should test until snapshot is deleted and delete call succeeds. */ 5468 if (delete_snapshot_bserrno == 0) { 5469 deleted = true; 5470 } 5471 } 5472 5473 spdk_blob_close(blob, blob_op_complete, NULL); 5474 poll_threads(); 5475 CU_ASSERT(g_bserrno == 0); 5476 5477 spdk_bs_unload(bs, bs_op_complete, NULL); 5478 poll_threads(); 5479 CU_ASSERT(g_bserrno == 0); 5480 5481 thresholds.general_threshold++; 5482 } 5483 } 5484 5485 static void 5486 blob_create_snapshot_power_failure(void) 5487 { 5488 struct spdk_blob_store *bs = g_bs; 5489 struct spdk_bs_dev *dev; 5490 struct spdk_blob_opts opts; 5491 struct spdk_blob *blob, *snapshot; 5492 struct spdk_power_failure_thresholds thresholds = {}; 5493 spdk_blob_id blobid, snapshotid; 5494 const void *value; 5495 size_t value_len; 5496 size_t count; 5497 spdk_blob_id ids[3] = {}; 5498 int rc; 5499 bool created = false; 5500 int create_snapshot_bserrno = -1; 5501 5502 thresholds.general_threshold = 1; 5503 while (!created) { 5504 dev = init_dev(); 5505 5506 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5507 poll_threads(); 5508 CU_ASSERT(g_bserrno == 0); 5509 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5510 bs = g_bs; 5511 5512 /* Create blob */ 5513 ut_spdk_blob_opts_init(&opts); 5514 opts.num_clusters = 10; 5515 5516 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5517 poll_threads(); 5518 CU_ASSERT(g_bserrno == 0); 5519 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5520 blobid = g_blobid; 5521 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5522 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5523 5524 dev_set_power_failure_thresholds(thresholds); 5525 5526 /* Create snapshot */ 5527 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5528 poll_threads(); 5529 create_snapshot_bserrno = g_bserrno; 5530 snapshotid = g_blobid; 5531 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5532 
SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5533 5534 /* Do not shut down cleanly. Assumption is that after create snapshot 5535 * reports success, both blobs should be power-fail safe. */ 5536 dev_reset_power_failure_event(); 5537 ut_bs_dirty_load(&bs, NULL); 5538 5539 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5540 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5541 5542 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5543 poll_threads(); 5544 CU_ASSERT(g_bserrno == 0); 5545 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5546 blob = g_blob; 5547 5548 if (snapshotid != SPDK_BLOBID_INVALID) { 5549 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5550 poll_threads(); 5551 } 5552 5553 if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { 5554 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5555 snapshot = g_blob; 5556 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5557 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5558 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5559 count = SPDK_COUNTOF(ids); 5560 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5561 CU_ASSERT(rc == 0); 5562 CU_ASSERT(count == 1); 5563 CU_ASSERT(ids[0] == blobid); 5564 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); 5565 CU_ASSERT(rc != 0); 5566 5567 spdk_blob_close(snapshot, blob_op_complete, NULL); 5568 poll_threads(); 5569 CU_ASSERT(g_bserrno == 0); 5570 if (create_snapshot_bserrno == 0) { 5571 created = true; 5572 } 5573 } else { 5574 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5575 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); 5576 } 5577 5578 spdk_blob_close(blob, blob_op_complete, NULL); 5579 poll_threads(); 5580 CU_ASSERT(g_bserrno == 0); 5581 5582 spdk_bs_unload(bs, bs_op_complete, NULL); 5583 poll_threads(); 5584 CU_ASSERT(g_bserrno == 0); 5585 5586 thresholds.general_threshold++; 5587 } 5588 } 5589 5590 static void 5591 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5592 { 5593 uint8_t payload_ff[64 * 512]; 5594 uint8_t payload_aa[64 * 512]; 5595 uint8_t payload_00[64 * 512]; 5596 uint8_t *cluster0, *cluster1; 5597 5598 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5599 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5600 memset(payload_00, 0x00, sizeof(payload_00)); 5601 5602 /* Try to perform I/O with io unit = 512 */ 5603 spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); 5604 poll_threads(); 5605 CU_ASSERT(g_bserrno == 0); 5606 5607 /* If thin provisioned is set cluster should be allocated now */ 5608 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5609 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5610 5611 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5612 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). 
*/ 5613 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5614 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5615 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5616 5617 /* Verify write with offset on first page */ 5618 spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); 5619 poll_threads(); 5620 CU_ASSERT(g_bserrno == 0); 5621 5622 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5623 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5624 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5625 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5626 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5627 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5628 5629 /* Verify write with offset on first page */ 5630 spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); 5631 poll_threads(); 5632 5633 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5634 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5635 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5636 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5637 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5638 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5639 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5640 5641 /* Verify write with offset on second page */ 5642 spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); 5643 poll_threads(); 5644 5645 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5646 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5647 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5648 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5649 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5650 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5651 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5652 5653 /* Verify write across multiple pages */ 5654 spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); 5655 poll_threads(); 5656 5657 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ 5658 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5659 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5660 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5661 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5662 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5663 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5664 5665 /* Verify write across multiple clusters */ 5666 spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); 5667 poll_threads(); 5668 5669 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5670 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5671 5672 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5673 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5674 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5675 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5676 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5677 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5678 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5679 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 
4 * 512) == 0); 5680 5681 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5682 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5683 5684 /* Verify write to second cluster */ 5685 spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); 5686 poll_threads(); 5687 5688 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5689 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5690 5691 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5692 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5693 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5694 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5695 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5696 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5697 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5698 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5699 5700 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5701 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5702 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5703 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5704 } 5705 5706 static void 5707 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5708 { 5709 uint8_t payload_read[64 * 512]; 5710 uint8_t payload_ff[64 * 512]; 5711 uint8_t payload_aa[64 * 512]; 5712 uint8_t payload_00[64 * 512]; 5713 5714 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5715 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5716 memset(payload_00, 0x00, sizeof(payload_00)); 5717 5718 /* Read only first io unit */ 5719 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5720 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5721 * payload_read: F000 0000 | 0000 0000 ... */ 5722 memset(payload_read, 0x00, sizeof(payload_read)); 5723 spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); 5724 poll_threads(); 5725 CU_ASSERT(g_bserrno == 0); 5726 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5727 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5728 5729 /* Read four io_units starting from offset = 2 5730 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5731 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5732 * payload_read: F0AA 0000 | 0000 0000 ... */ 5733 5734 memset(payload_read, 0x00, sizeof(payload_read)); 5735 spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); 5736 poll_threads(); 5737 CU_ASSERT(g_bserrno == 0); 5738 5739 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5740 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5741 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5742 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5743 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5744 5745 /* Read eight io_units across multiple pages 5746 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5747 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5748 * payload_read: AAAA AAAA | 0000 0000 ... 
*/ 5749 memset(payload_read, 0x00, sizeof(payload_read)); 5750 spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); 5751 poll_threads(); 5752 CU_ASSERT(g_bserrno == 0); 5753 5754 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5755 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5756 5757 /* Read eight io_units across multiple clusters 5758 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5759 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5760 * payload_read: FFFF FFFF | 0000 0000 ... */ 5761 memset(payload_read, 0x00, sizeof(payload_read)); 5762 spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); 5763 poll_threads(); 5764 CU_ASSERT(g_bserrno == 0); 5765 5766 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 5767 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5768 5769 /* Read four io_units from second cluster 5770 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5771 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 5772 * payload_read: 00FF 0000 | 0000 0000 ... */ 5773 memset(payload_read, 0x00, sizeof(payload_read)); 5774 spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); 5775 poll_threads(); 5776 CU_ASSERT(g_bserrno == 0); 5777 5778 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 5779 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 5780 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5781 5782 /* Read second cluster 5783 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5784 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 5785 * payload_read: FFFF 0000 | 0000 FF00 ... 
*/ 5786 memset(payload_read, 0x00, sizeof(payload_read)); 5787 spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); 5788 poll_threads(); 5789 CU_ASSERT(g_bserrno == 0); 5790 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 5791 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 5792 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 5793 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 5794 5795 /* Read whole two clusters 5796 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5797 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 5798 memset(payload_read, 0x00, sizeof(payload_read)); 5799 spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); 5800 poll_threads(); 5801 CU_ASSERT(g_bserrno == 0); 5802 5803 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5804 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5805 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 5806 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 5807 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 5808 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 5809 5810 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 5811 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 5812 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 5813 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 5814 } 5815 5816 5817 static void 5818 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5819 { 5820 uint8_t payload_ff[64 * 512]; 5821 uint8_t payload_aa[64 * 512]; 5822 uint8_t payload_00[64 * 512]; 5823 uint8_t *cluster0, *cluster1; 5824 5825 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5826 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5827 memset(payload_00, 0x00, sizeof(payload_00)); 5828 5829 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5830 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5831 5832 /* Unmap */ 5833 spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); 5834 poll_threads(); 5835 5836 CU_ASSERT(g_bserrno == 0); 5837 5838 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5839 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5840 } 5841 5842 static void 5843 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5844 { 5845 uint8_t payload_ff[64 * 512]; 5846 uint8_t payload_aa[64 * 512]; 5847 uint8_t payload_00[64 * 512]; 5848 uint8_t *cluster0, *cluster1; 5849 5850 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5851 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5852 memset(payload_00, 0x00, sizeof(payload_00)); 5853 5854 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5855 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5856 5857 /* Write zeroes */ 5858 spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); 5859 poll_threads(); 5860 5861 CU_ASSERT(g_bserrno == 0); 5862 5863 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5864 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5865 } 5866 5867 5868 static void 5869 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 
5870 { 5871 uint8_t payload_ff[64 * 512]; 5872 uint8_t payload_aa[64 * 512]; 5873 uint8_t payload_00[64 * 512]; 5874 uint8_t *cluster0, *cluster1; 5875 struct iovec iov[4]; 5876 5877 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5878 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5879 memset(payload_00, 0x00, sizeof(payload_00)); 5880 5881 /* Try to perform I/O with io unit = 512 */ 5882 iov[0].iov_base = payload_ff; 5883 iov[0].iov_len = 1 * 512; 5884 spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5885 poll_threads(); 5886 CU_ASSERT(g_bserrno == 0); 5887 5888 /* If thin provisioned is set cluster should be allocated now */ 5889 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5890 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5891 5892 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5893 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ 5894 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5895 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5896 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5897 5898 /* Verify write with offset on first page */ 5899 iov[0].iov_base = payload_ff; 5900 iov[0].iov_len = 1 * 512; 5901 spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); 5902 poll_threads(); 5903 CU_ASSERT(g_bserrno == 0); 5904 5905 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5906 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5907 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5908 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5909 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5910 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5911 5912 /* Verify write with offset on first page */ 5913 iov[0].iov_base = payload_ff; 5914 iov[0].iov_len = 4 * 512; 5915 spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); 5916 poll_threads(); 5917 5918 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5919 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5920 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5921 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5922 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5923 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5924 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5925 5926 /* Verify write with offset on second page */ 5927 iov[0].iov_base = payload_ff; 5928 iov[0].iov_len = 4 * 512; 5929 spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); 5930 poll_threads(); 5931 5932 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5933 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5934 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5935 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5936 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5937 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5938 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5939 5940 /* Verify write across multiple pages */ 5941 iov[0].iov_base = payload_aa; 5942 iov[0].iov_len = 8 * 512; 5943 spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); 5944 poll_threads(); 5945 5946 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 
0000 | 0000 0000 ] */ 5947 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5948 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5949 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5950 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5951 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5952 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5953 5954 /* Verify write across multiple clusters */ 5955 5956 iov[0].iov_base = payload_ff; 5957 iov[0].iov_len = 8 * 512; 5958 spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); 5959 poll_threads(); 5960 5961 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5962 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5963 5964 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5965 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5966 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5967 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5968 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5969 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5970 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5971 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); 5972 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5973 5974 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5975 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5976 5977 /* Verify write to second cluster */ 5978 5979 iov[0].iov_base = payload_ff; 5980 iov[0].iov_len = 2 * 512; 5981 spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); 5982 poll_threads(); 5983 5984 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5985 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5986 5987 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5988 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5989 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5990 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5991 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5992 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5993 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5994 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5995 5996 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5997 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5998 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5999 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 6000 } 6001 6002 static void 6003 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6004 { 6005 uint8_t payload_read[64 * 512]; 6006 uint8_t payload_ff[64 * 512]; 6007 uint8_t payload_aa[64 * 512]; 6008 uint8_t payload_00[64 * 512]; 6009 struct iovec iov[4]; 6010 6011 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6012 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6013 memset(payload_00, 0x00, sizeof(payload_00)); 6014 6015 /* Read only first io unit */ 6016 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6017 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6018 * payload_read: F000 0000 | 0000 0000 ... 
*/ 6019 memset(payload_read, 0x00, sizeof(payload_read)); 6020 iov[0].iov_base = payload_read; 6021 iov[0].iov_len = 1 * 512; 6022 spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 6023 poll_threads(); 6024 6025 CU_ASSERT(g_bserrno == 0); 6026 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6027 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 6028 6029 /* Read four io_units starting from offset = 2 6030 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6031 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6032 * payload_read: F0AA 0000 | 0000 0000 ... */ 6033 6034 memset(payload_read, 0x00, sizeof(payload_read)); 6035 iov[0].iov_base = payload_read; 6036 iov[0].iov_len = 4 * 512; 6037 spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); 6038 poll_threads(); 6039 CU_ASSERT(g_bserrno == 0); 6040 6041 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6042 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6043 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 6044 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 6045 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6046 6047 /* Read eight io_units across multiple pages 6048 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6049 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6050 * payload_read: AAAA AAAA | 0000 0000 ... */ 6051 memset(payload_read, 0x00, sizeof(payload_read)); 6052 iov[0].iov_base = payload_read; 6053 iov[0].iov_len = 4 * 512; 6054 iov[1].iov_base = payload_read + 4 * 512; 6055 iov[1].iov_len = 4 * 512; 6056 spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); 6057 poll_threads(); 6058 CU_ASSERT(g_bserrno == 0); 6059 6060 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 6061 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6062 6063 /* Read eight io_units across multiple clusters 6064 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 6065 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6066 * payload_read: FFFF FFFF | 0000 0000 ... */ 6067 memset(payload_read, 0x00, sizeof(payload_read)); 6068 iov[0].iov_base = payload_read; 6069 iov[0].iov_len = 2 * 512; 6070 iov[1].iov_base = payload_read + 2 * 512; 6071 iov[1].iov_len = 2 * 512; 6072 iov[2].iov_base = payload_read + 4 * 512; 6073 iov[2].iov_len = 2 * 512; 6074 iov[3].iov_base = payload_read + 6 * 512; 6075 iov[3].iov_len = 2 * 512; 6076 spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); 6077 poll_threads(); 6078 CU_ASSERT(g_bserrno == 0); 6079 6080 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 6081 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6082 6083 /* Read four io_units from second cluster 6084 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6085 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 6086 * payload_read: 00FF 0000 | 0000 0000 ... 
*/ 6087 memset(payload_read, 0x00, sizeof(payload_read)); 6088 iov[0].iov_base = payload_read; 6089 iov[0].iov_len = 1 * 512; 6090 iov[1].iov_base = payload_read + 1 * 512; 6091 iov[1].iov_len = 3 * 512; 6092 spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); 6093 poll_threads(); 6094 CU_ASSERT(g_bserrno == 0); 6095 6096 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 6097 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 6098 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6099 6100 /* Read second cluster 6101 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6102 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6103 * payload_read: FFFF 0000 | 0000 FF00 ... */ 6104 memset(payload_read, 0x00, sizeof(payload_read)); 6105 iov[0].iov_base = payload_read; 6106 iov[0].iov_len = 1 * 512; 6107 iov[1].iov_base = payload_read + 1 * 512; 6108 iov[1].iov_len = 2 * 512; 6109 iov[2].iov_base = payload_read + 3 * 512; 6110 iov[2].iov_len = 4 * 512; 6111 iov[3].iov_base = payload_read + 7 * 512; 6112 iov[3].iov_len = 25 * 512; 6113 spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); 6114 poll_threads(); 6115 CU_ASSERT(g_bserrno == 0); 6116 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6117 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6118 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6119 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6120 6121 /* Read whole two clusters 6122 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6123 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6124 memset(payload_read, 0x00, sizeof(payload_read)); 6125 iov[0].iov_base = payload_read; 6126 iov[0].iov_len = 1 * 512; 6127 iov[1].iov_base = payload_read + 1 * 512; 6128 iov[1].iov_len = 8 * 512; 6129 iov[2].iov_base = payload_read + 9 * 512; 6130 iov[2].iov_len = 16 * 512; 6131 iov[3].iov_base = payload_read + 25 * 512; 6132 iov[3].iov_len = 39 * 512; 6133 spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); 6134 poll_threads(); 6135 CU_ASSERT(g_bserrno == 0); 6136 6137 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6138 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6139 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6140 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6141 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6142 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6143 6144 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6145 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6146 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6147 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6148 } 6149 6150 static void 6151 blob_io_unit(void) 6152 { 6153 struct spdk_bs_opts bsopts; 6154 struct spdk_blob_opts opts; 6155 struct spdk_blob_store *bs; 6156 struct spdk_bs_dev *dev; 6157 struct spdk_blob *blob, *snapshot, *clone; 6158 spdk_blob_id blobid; 6159 struct spdk_io_channel *channel; 6160 6161 /* Create dev with 512 bytes io unit size */ 6162 6163 spdk_bs_opts_init(&bsopts); 6164 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6165 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), 
"TESTTYPE"); 6166 6167 /* Try to initialize a new blob store with unsupported io_unit */ 6168 dev = init_dev(); 6169 dev->blocklen = 512; 6170 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6171 6172 /* Initialize a new blob store */ 6173 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6174 poll_threads(); 6175 CU_ASSERT(g_bserrno == 0); 6176 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6177 bs = g_bs; 6178 6179 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6180 channel = spdk_bs_alloc_io_channel(bs); 6181 6182 /* Create thick provisioned blob */ 6183 ut_spdk_blob_opts_init(&opts); 6184 opts.thin_provision = false; 6185 opts.num_clusters = 32; 6186 6187 blob = ut_blob_create_and_open(bs, &opts); 6188 blobid = spdk_blob_get_id(blob); 6189 6190 test_io_write(dev, blob, channel); 6191 test_io_read(dev, blob, channel); 6192 test_io_zeroes(dev, blob, channel); 6193 6194 test_iov_write(dev, blob, channel); 6195 test_iov_read(dev, blob, channel); 6196 6197 test_io_unmap(dev, blob, channel); 6198 6199 spdk_blob_close(blob, blob_op_complete, NULL); 6200 poll_threads(); 6201 CU_ASSERT(g_bserrno == 0); 6202 blob = NULL; 6203 g_blob = NULL; 6204 6205 /* Create thin provisioned blob */ 6206 6207 ut_spdk_blob_opts_init(&opts); 6208 opts.thin_provision = true; 6209 opts.num_clusters = 32; 6210 6211 blob = ut_blob_create_and_open(bs, &opts); 6212 blobid = spdk_blob_get_id(blob); 6213 6214 test_io_write(dev, blob, channel); 6215 test_io_read(dev, blob, channel); 6216 6217 test_io_zeroes(dev, blob, channel); 6218 6219 test_iov_write(dev, blob, channel); 6220 test_iov_read(dev, blob, channel); 6221 6222 /* Create snapshot */ 6223 6224 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6225 poll_threads(); 6226 CU_ASSERT(g_bserrno == 0); 6227 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6228 blobid = g_blobid; 6229 6230 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6231 poll_threads(); 6232 CU_ASSERT(g_bserrno == 0); 6233 CU_ASSERT(g_blob != NULL); 6234 snapshot = g_blob; 6235 6236 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6237 poll_threads(); 6238 CU_ASSERT(g_bserrno == 0); 6239 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6240 blobid = g_blobid; 6241 6242 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6243 poll_threads(); 6244 CU_ASSERT(g_bserrno == 0); 6245 CU_ASSERT(g_blob != NULL); 6246 clone = g_blob; 6247 6248 test_io_read(dev, blob, channel); 6249 test_io_read(dev, snapshot, channel); 6250 test_io_read(dev, clone, channel); 6251 6252 test_iov_read(dev, blob, channel); 6253 test_iov_read(dev, snapshot, channel); 6254 test_iov_read(dev, clone, channel); 6255 6256 /* Inflate clone */ 6257 6258 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6259 poll_threads(); 6260 6261 CU_ASSERT(g_bserrno == 0); 6262 6263 test_io_read(dev, clone, channel); 6264 6265 test_io_unmap(dev, clone, channel); 6266 6267 test_iov_write(dev, clone, channel); 6268 test_iov_read(dev, clone, channel); 6269 6270 spdk_blob_close(blob, blob_op_complete, NULL); 6271 spdk_blob_close(snapshot, blob_op_complete, NULL); 6272 spdk_blob_close(clone, blob_op_complete, NULL); 6273 poll_threads(); 6274 CU_ASSERT(g_bserrno == 0); 6275 blob = NULL; 6276 g_blob = NULL; 6277 6278 spdk_bs_free_io_channel(channel); 6279 poll_threads(); 6280 6281 /* Unload the blob store */ 6282 spdk_bs_unload(bs, bs_op_complete, NULL); 6283 poll_threads(); 6284 CU_ASSERT(g_bserrno == 0); 6285 g_bs = NULL; 6286 g_blob = NULL; 6287 g_blobid = 0; 6288 
} 6289 6290 static void 6291 blob_io_unit_compatiblity(void) 6292 { 6293 struct spdk_bs_opts bsopts; 6294 struct spdk_blob_store *bs; 6295 struct spdk_bs_dev *dev; 6296 struct spdk_bs_super_block *super; 6297 6298 /* Create dev with 512 bytes io unit size */ 6299 6300 spdk_bs_opts_init(&bsopts); 6301 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6302 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE"); 6303 6304 /* Try to initialize a new blob store with unsupported io_unit */ 6305 dev = init_dev(); 6306 dev->blocklen = 512; 6307 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6308 6309 /* Initialize a new blob store */ 6310 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6311 poll_threads(); 6312 CU_ASSERT(g_bserrno == 0); 6313 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6314 bs = g_bs; 6315 6316 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6317 6318 /* Unload the blob store */ 6319 spdk_bs_unload(bs, bs_op_complete, NULL); 6320 poll_threads(); 6321 CU_ASSERT(g_bserrno == 0); 6322 6323 /* Modify super block to behave like an older version. 6324 * Check that the loaded io unit size equals SPDK_BS_PAGE_SIZE */ 6325 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 6326 super->io_unit_size = 0; 6327 super->crc = blob_md_page_calc_crc(super); 6328 6329 dev = init_dev(); 6330 dev->blocklen = 512; 6331 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6332 6333 spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL); 6334 poll_threads(); 6335 CU_ASSERT(g_bserrno == 0); 6336 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6337 bs = g_bs; 6338 6339 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE); 6340 6341 /* Unload the blob store */ 6342 spdk_bs_unload(bs, bs_op_complete, NULL); 6343 poll_threads(); 6344 CU_ASSERT(g_bserrno == 0); 6345 6346 g_bs = NULL; 6347 g_blob = NULL; 6348 g_blobid = 0; 6349 } 6350 6351 static void 6352 blob_simultaneous_operations(void) 6353 { 6354 struct spdk_blob_store *bs = g_bs; 6355 struct spdk_blob_opts opts; 6356 struct spdk_blob *blob, *snapshot; 6357 spdk_blob_id blobid, snapshotid; 6358 struct spdk_io_channel *channel; 6359 6360 channel = spdk_bs_alloc_io_channel(bs); 6361 SPDK_CU_ASSERT_FATAL(channel != NULL); 6362 6363 ut_spdk_blob_opts_init(&opts); 6364 opts.num_clusters = 10; 6365 6366 blob = ut_blob_create_and_open(bs, &opts); 6367 blobid = spdk_blob_get_id(blob); 6368 6369 /* Create snapshot and try to remove blob at the same time: 6370 * - snapshot should be created successfully 6371 * - delete operation should fail with -EBUSY */ 6372 CU_ASSERT(blob->locked_operation_in_progress == false); 6373 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6374 CU_ASSERT(blob->locked_operation_in_progress == true); 6375 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6376 CU_ASSERT(blob->locked_operation_in_progress == true); 6377 /* Deletion failure */ 6378 CU_ASSERT(g_bserrno == -EBUSY); 6379 poll_threads(); 6380 CU_ASSERT(blob->locked_operation_in_progress == false); 6381 /* Snapshot creation success */ 6382 CU_ASSERT(g_bserrno == 0); 6383 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6384 6385 snapshotid = g_blobid; 6386 6387 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 6388 poll_threads(); 6389 CU_ASSERT(g_bserrno == 0); 6390 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6391 snapshot = g_blob; 6392 6393 /* Inflate blob and try to remove blob at the same time: 6394 * - blob should be inflated successfully 6395 * - delete operation should fail with -EBUSY */ 6396
CU_ASSERT(blob->locked_operation_in_progress == false); 6397 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6398 CU_ASSERT(blob->locked_operation_in_progress == true); 6399 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6400 CU_ASSERT(blob->locked_operation_in_progress == true); 6401 /* Deletion failure */ 6402 CU_ASSERT(g_bserrno == -EBUSY); 6403 poll_threads(); 6404 CU_ASSERT(blob->locked_operation_in_progress == false); 6405 /* Inflation success */ 6406 CU_ASSERT(g_bserrno == 0); 6407 6408 /* Clone snapshot and try to remove snapshot at the same time: 6409 * - snapshot should be cloned successfully 6410 * - delete operation should fail with -EBUSY */ 6411 CU_ASSERT(blob->locked_operation_in_progress == false); 6412 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 6413 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 6414 /* Deletion failure */ 6415 CU_ASSERT(g_bserrno == -EBUSY); 6416 poll_threads(); 6417 CU_ASSERT(blob->locked_operation_in_progress == false); 6418 /* Clone created */ 6419 CU_ASSERT(g_bserrno == 0); 6420 6421 /* Resize blob and try to remove blob at the same time: 6422 * - blob should be resized successfully 6423 * - delete operation should fail with -EBUSY */ 6424 CU_ASSERT(blob->locked_operation_in_progress == false); 6425 spdk_blob_resize(blob, 50, blob_op_complete, NULL); 6426 CU_ASSERT(blob->locked_operation_in_progress == true); 6427 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6428 CU_ASSERT(blob->locked_operation_in_progress == true); 6429 /* Deletion failure */ 6430 CU_ASSERT(g_bserrno == -EBUSY); 6431 poll_threads(); 6432 CU_ASSERT(blob->locked_operation_in_progress == false); 6433 /* Blob resized successfully */ 6434 CU_ASSERT(g_bserrno == 0); 6435 6436 /* Issue two consecutive blob syncs; neither should fail. 6437 * Force each sync to actually occur by marking the blob dirty each time. 6438 * Merely issuing the sync call is not enough to complete the operation, 6439 * since disk I/O is required to complete it. */ 6440 g_bserrno = -1; 6441 6442 blob->state = SPDK_BLOB_STATE_DIRTY; 6443 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6444 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6445 6446 blob->state = SPDK_BLOB_STATE_DIRTY; 6447 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6448 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6449 6450 uint32_t completions = 0; 6451 while (completions < 2) { 6452 SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1)); 6453 if (g_bserrno == 0) { 6454 g_bserrno = -1; 6455 completions++; 6456 } 6457 /* At this point g_bserrno should never be anything other than -1; 6458 * any other value would mean that one of the syncs failed.
*/ 6459 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6460 } 6461 6462 spdk_bs_free_io_channel(channel); 6463 poll_threads(); 6464 6465 ut_blob_close_and_delete(bs, snapshot); 6466 ut_blob_close_and_delete(bs, blob); 6467 } 6468 6469 static void 6470 blob_persist_test(void) 6471 { 6472 struct spdk_blob_store *bs = g_bs; 6473 struct spdk_blob_opts opts; 6474 struct spdk_blob *blob; 6475 spdk_blob_id blobid; 6476 struct spdk_io_channel *channel; 6477 char *xattr; 6478 size_t xattr_length; 6479 int rc; 6480 uint32_t page_count_clear, page_count_xattr; 6481 uint64_t poller_iterations; 6482 bool run_poller; 6483 6484 channel = spdk_bs_alloc_io_channel(bs); 6485 SPDK_CU_ASSERT_FATAL(channel != NULL); 6486 6487 ut_spdk_blob_opts_init(&opts); 6488 opts.num_clusters = 10; 6489 6490 blob = ut_blob_create_and_open(bs, &opts); 6491 blobid = spdk_blob_get_id(blob); 6492 6493 /* Save the number of md pages used after creation of a blob. 6494 * This should be consistent after removing the xattr. */ 6495 page_count_clear = spdk_bit_array_count_set(bs->used_md_pages); 6496 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6497 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6498 6499 /* Add xattr with maximum length of descriptor to exceed single metadata page. */ 6500 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 6501 strlen("large_xattr"); 6502 xattr = calloc(xattr_length, sizeof(char)); 6503 SPDK_CU_ASSERT_FATAL(xattr != NULL); 6504 6505 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6506 SPDK_CU_ASSERT_FATAL(rc == 0); 6507 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6508 poll_threads(); 6509 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6510 6511 /* Save the number of md pages used after adding the large xattr */ 6512 page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages); 6513 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6514 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6515 6516 /* Add xattr to a blob and sync it. While the sync is occurring, remove the xattr and sync again. 6517 * Interrupt the first sync after an increasing number of poller iterations, until it succeeds. 6518 * The expectation is that after the second sync completes, no xattr is saved in metadata. */ 6519 poller_iterations = 1; 6520 run_poller = true; 6521 while (run_poller) { 6522 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6523 SPDK_CU_ASSERT_FATAL(rc == 0); 6524 g_bserrno = -1; 6525 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6526 poll_thread_times(0, poller_iterations); 6527 if (g_bserrno == 0) { 6528 /* Poller iteration count was high enough for the first sync to complete. 6529 * Verify that the blob takes up enough md_pages to store the xattr.
*/ 6530 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6531 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6532 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr); 6533 run_poller = false; 6534 } 6535 rc = spdk_blob_remove_xattr(blob, "large_xattr"); 6536 SPDK_CU_ASSERT_FATAL(rc == 0); 6537 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6538 poll_threads(); 6539 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6540 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6541 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6542 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear); 6543 6544 /* Reload bs and re-open blob to verify that xattr was not persisted. */ 6545 spdk_blob_close(blob, blob_op_complete, NULL); 6546 poll_threads(); 6547 CU_ASSERT(g_bserrno == 0); 6548 6549 ut_bs_reload(&bs, NULL); 6550 6551 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6552 poll_threads(); 6553 CU_ASSERT(g_bserrno == 0); 6554 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6555 blob = g_blob; 6556 6557 rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length); 6558 SPDK_CU_ASSERT_FATAL(rc == -ENOENT); 6559 6560 poller_iterations++; 6561 /* Stop at high iteration count to prevent infinite loop. 6562 * This value should be enough for first md sync to complete in any case. */ 6563 SPDK_CU_ASSERT_FATAL(poller_iterations < 50); 6564 } 6565 6566 free(xattr); 6567 6568 ut_blob_close_and_delete(bs, blob); 6569 6570 spdk_bs_free_io_channel(channel); 6571 poll_threads(); 6572 } 6573 6574 static void 6575 suite_bs_setup(void) 6576 { 6577 struct spdk_bs_dev *dev; 6578 6579 dev = init_dev(); 6580 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6581 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 6582 poll_threads(); 6583 CU_ASSERT(g_bserrno == 0); 6584 CU_ASSERT(g_bs != NULL); 6585 } 6586 6587 static void 6588 suite_bs_cleanup(void) 6589 { 6590 spdk_bs_unload(g_bs, bs_op_complete, NULL); 6591 poll_threads(); 6592 CU_ASSERT(g_bserrno == 0); 6593 g_bs = NULL; 6594 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6595 } 6596 6597 static struct spdk_blob * 6598 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts) 6599 { 6600 struct spdk_blob *blob; 6601 struct spdk_blob_opts create_blob_opts; 6602 spdk_blob_id blobid; 6603 6604 if (blob_opts == NULL) { 6605 ut_spdk_blob_opts_init(&create_blob_opts); 6606 blob_opts = &create_blob_opts; 6607 } 6608 6609 spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL); 6610 poll_threads(); 6611 CU_ASSERT(g_bserrno == 0); 6612 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6613 blobid = g_blobid; 6614 g_blobid = -1; 6615 6616 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6617 poll_threads(); 6618 CU_ASSERT(g_bserrno == 0); 6619 CU_ASSERT(g_blob != NULL); 6620 blob = g_blob; 6621 6622 g_blob = NULL; 6623 g_bserrno = -1; 6624 6625 return blob; 6626 } 6627 6628 static void 6629 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob) 6630 { 6631 spdk_blob_id blobid = spdk_blob_get_id(blob); 6632 6633 spdk_blob_close(blob, blob_op_complete, NULL); 6634 poll_threads(); 6635 CU_ASSERT(g_bserrno == 0); 6636 g_blob = NULL; 6637 6638 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6639 poll_threads(); 6640 
CU_ASSERT(g_bserrno == 0); 6641 g_bserrno = -1; 6642 } 6643 6644 static void 6645 suite_blob_setup(void) 6646 { 6647 suite_bs_setup(); 6648 CU_ASSERT(g_bs != NULL); 6649 6650 g_blob = ut_blob_create_and_open(g_bs, NULL); 6651 CU_ASSERT(g_blob != NULL); 6652 } 6653 6654 static void 6655 suite_blob_cleanup(void) 6656 { 6657 ut_blob_close_and_delete(g_bs, g_blob); 6658 CU_ASSERT(g_blob == NULL); 6659 6660 suite_bs_cleanup(); 6661 CU_ASSERT(g_bs == NULL); 6662 } 6663 6664 int main(int argc, char **argv) 6665 { 6666 CU_pSuite suite, suite_bs, suite_blob; 6667 unsigned int num_failures; 6668 6669 CU_set_error_action(CUEA_ABORT); 6670 CU_initialize_registry(); 6671 6672 suite = CU_add_suite("blob", NULL, NULL); 6673 suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL, 6674 suite_bs_setup, suite_bs_cleanup); 6675 suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL, 6676 suite_blob_setup, suite_blob_cleanup); 6677 6678 CU_ADD_TEST(suite, blob_init); 6679 CU_ADD_TEST(suite_bs, blob_open); 6680 CU_ADD_TEST(suite_bs, blob_create); 6681 CU_ADD_TEST(suite_bs, blob_create_loop); 6682 CU_ADD_TEST(suite_bs, blob_create_fail); 6683 CU_ADD_TEST(suite_bs, blob_create_internal); 6684 CU_ADD_TEST(suite, blob_thin_provision); 6685 CU_ADD_TEST(suite_bs, blob_snapshot); 6686 CU_ADD_TEST(suite_bs, blob_clone); 6687 CU_ADD_TEST(suite_bs, blob_inflate); 6688 CU_ADD_TEST(suite_bs, blob_delete); 6689 CU_ADD_TEST(suite_bs, blob_resize_test); 6690 CU_ADD_TEST(suite, blob_read_only); 6691 CU_ADD_TEST(suite_bs, channel_ops); 6692 CU_ADD_TEST(suite_bs, blob_super); 6693 CU_ADD_TEST(suite_blob, blob_write); 6694 CU_ADD_TEST(suite_blob, blob_read); 6695 CU_ADD_TEST(suite_blob, blob_rw_verify); 6696 CU_ADD_TEST(suite_bs, blob_rw_verify_iov); 6697 CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem); 6698 CU_ADD_TEST(suite_blob, blob_rw_iov_read_only); 6699 CU_ADD_TEST(suite_bs, blob_unmap); 6700 CU_ADD_TEST(suite_bs, blob_iter); 6701 CU_ADD_TEST(suite_blob, blob_xattr); 6702 CU_ADD_TEST(suite, bs_load); 6703 CU_ADD_TEST(suite_bs, bs_load_pending_removal); 6704 CU_ADD_TEST(suite, bs_load_custom_cluster_size); 6705 CU_ADD_TEST(suite_bs, bs_unload); 6706 CU_ADD_TEST(suite, bs_cluster_sz); 6707 CU_ADD_TEST(suite_bs, bs_usable_clusters); 6708 CU_ADD_TEST(suite, bs_resize_md); 6709 CU_ADD_TEST(suite, bs_destroy); 6710 CU_ADD_TEST(suite, bs_type); 6711 CU_ADD_TEST(suite, bs_super_block); 6712 CU_ADD_TEST(suite, blob_serialize_test); 6713 CU_ADD_TEST(suite_bs, blob_crc); 6714 CU_ADD_TEST(suite, super_block_crc); 6715 CU_ADD_TEST(suite_blob, blob_dirty_shutdown); 6716 CU_ADD_TEST(suite_bs, blob_flags); 6717 CU_ADD_TEST(suite_bs, bs_version); 6718 CU_ADD_TEST(suite_bs, blob_set_xattrs_test); 6719 CU_ADD_TEST(suite_bs, blob_thin_prov_alloc); 6720 CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test); 6721 CU_ADD_TEST(suite_bs, blob_thin_prov_rw); 6722 CU_ADD_TEST(suite_bs, blob_thin_prov_rle); 6723 CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov); 6724 CU_ADD_TEST(suite, bs_load_iter_test); 6725 CU_ADD_TEST(suite_bs, blob_snapshot_rw); 6726 CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov); 6727 CU_ADD_TEST(suite, blob_relations); 6728 CU_ADD_TEST(suite, blob_relations2); 6729 CU_ADD_TEST(suite, blobstore_clean_power_failure); 6730 CU_ADD_TEST(suite, blob_delete_snapshot_power_failure); 6731 CU_ADD_TEST(suite, blob_create_snapshot_power_failure); 6732 CU_ADD_TEST(suite_bs, blob_inflate_rw); 6733 CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io); 6734 CU_ADD_TEST(suite_bs, blob_operation_split_rw); 6735 
CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov); 6736 CU_ADD_TEST(suite, blob_io_unit); 6737 CU_ADD_TEST(suite, blob_io_unit_compatiblity); 6738 CU_ADD_TEST(suite_bs, blob_simultaneous_operations); 6739 CU_ADD_TEST(suite_bs, blob_persist_test); 6740 6741 allocate_threads(2); 6742 set_thread(0); 6743 6744 g_dev_buffer = calloc(1, DEV_BUFFER_SIZE); 6745 6746 CU_basic_set_mode(CU_BRM_VERBOSE); 6747 g_use_extent_table = false; 6748 CU_basic_run_tests(); 6749 num_failures = CU_get_number_of_failures(); 6750 g_use_extent_table = true; 6751 CU_basic_run_tests(); 6752 num_failures += CU_get_number_of_failures(); 6753 CU_cleanup_registry(); 6754 6755 free(g_dev_buffer); 6756 6757 free_threads(); 6758 6759 return num_failures; 6760 } 6761