/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk/blob.h"
#include "spdk/string.h"

#include "common/lib/ut_multithread.c"
#include "../bs_dev_common.c"
#include "blob/blobstore.c"
#include "blob/request.c"
#include "blob/zeroes.c"
#include "blob/blob_bs_dev.c"

struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob, *g_blob2;
int g_bserrno, g_bserrno2;
struct spdk_xattr_names *g_names;
int g_done;
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;
bool g_use_extent_table = false;

struct spdk_bs_super_block_ver1 {
	uint8_t  signature[8];
	uint32_t version;
	uint32_t length;
	uint32_t clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id super_blob;

	uint32_t cluster_size; /* In bytes */

	uint32_t used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t used_page_mask_len; /* Count, in pages */

	uint32_t used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t used_cluster_mask_len; /* Count, in pages */

	uint32_t md_start; /* Offset from beginning of disk, in pages */
	uint32_t md_len; /* Count, in pages */

	uint8_t reserved[4036];
	uint32_t crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");

static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
		struct spdk_blob_opts *blob_opts);
static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
static void suite_blob_setup(void);
static void suite_blob_cleanup(void);

static void
_get_xattr_value(void *arg, const char *name,
		 const void **value, size_t *value_len)
{
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == &g_ctx);

	/* Iterate over the number of entries, not the byte size of the array. */
	for (i = 0; i < sizeof(g_xattr_names) / sizeof(*g_xattr_names); i++) {
		if (!strcmp(name, g_xattr_names[i])) {
			*value_len = strlen(g_xattr_values[i]);
			*value = g_xattr_values[i];
			break;
		}
	}
}

static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value_len = 0;
	*value = NULL;
}

static int
_get_snapshots_count(struct spdk_blob_store *bs)
{
	struct spdk_blob_list *snapshot = NULL;
	int count = 0;

	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
		count += 1;
	}

	return count;
}

static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts, sizeof(*opts));
	opts->use_extent_table = g_use_extent_table;
}

static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}

static void
blob_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	if (g_blob == NULL) {
		g_blob = blob;
		g_bserrno = bserrno;
	} else {
		g_blob2 = blob;
		g_bserrno2 = bserrno;
	}
}

static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}

static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open the blob again. It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the blob a second time, releasing the second reference. This
	 * should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open the blob again. It should succeed. This tests the case
	 * where the blob is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob twice in succession. This should return the same
	 * blob object.
	 */
	g_blob = NULL;
	g_blob2 = NULL;
	g_bserrno = -1;
	g_bserrno2 = -1;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bserrno2 == 0);
	CU_ASSERT(g_blob != NULL);
	CU_ASSERT(g_blob2 != NULL);
	CU_ASSERT(g_blob == g_blob2);

	g_bserrno = -1;
	spdk_blob_close(g_blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, g_blob);
}

static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}

/*
 * Create and delete one blob in a loop over and over again. This helps ensure
 * that the internal bit masks tracking used clusters and md_pages are being
 * maintained correctly.
 */
static void
blob_create_loop(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	uint32_t i, loop_count;

	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
				  spdk_bit_pool_capacity(bs->used_clusters));

	for (i = 0; i < loop_count; i++) {
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 1;
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
}

static void
blob_create_fail(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);

	/* NULL callback */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = NULL;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = &g_ctx;

	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	SPDK_CU_ASSERT_FATAL(g_blob == NULL);

	ut_bs_reload(&bs, NULL);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly. This makes sure that when we load again
	 * and try to recover a valid used_cluster map, the blobstore will
	 * ignore clusters with index 0 since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob *snapshot, *snapshot2;
	struct spdk_blob_bs_dev *blob_bs_dev;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts xattrs;
	spdk_blob_id blobid;
	spdk_blob_id snapshotid;
	spdk_blob_id snapshotid2;
	const void *value;
	size_t value_len;
	int rc;
	spdk_blob_id ids[2];
	size_t count;

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot from blob */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));

	/* Try to create snapshot from clone with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;
	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
	snapshotid2 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);

	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
	CU_ASSERT(snapshot->back_bs_dev == NULL);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);

	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot2);

	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshotid2);

	/* Try to create snapshot from snapshot */
	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);

	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
	ut_blob_close_and_delete(bs, blob);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
	ut_blob_close_and_delete(bs, snapshot2);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
}

static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that.
	 */
	poll_thread_times(0, 3);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk */
	CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0]*SPDK_BS_PAGE_SIZE],
			 SPDK_BS_PAGE_SIZE) == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);


	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create a clone from a blob that is not read-only */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read-only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from the read-only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}

static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflating a thin blob with no parent should make it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
	}

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only the parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
	}

	/* Now, it should be possible to delete the snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_inflate(void)
{
	_blob_inflate(false);
	_blob_inflate(true);
}

static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
channel_ops(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster. This allows
	 * us to make sure that the readv/writev code correctly accounts for I/O
	 * that crosses cluster boundaries. Start by asserting that the allocated
	 * clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	buf = calloc(1, 256 * 4096);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static uint32_t
bs_channel_get_req_count(struct spdk_io_channel *_channel)
{
	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
	struct spdk_bs_request_set *set;
	uint32_t count = 0;

	TAILQ_FOREACH(set, &channel->reqs, link) {
		count++;
	}

	return count;
}

static void
blob_rw_verify_iov_nomem(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_write[10 * 4096];
	struct iovec iov_write[3];
	uint32_t req_count;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	MOCK_SET(calloc, NULL);
	req_count = bs_channel_get_req_count(channel);
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	/* The writev must fail with -ENOMEM since calloc is mocked to return NULL. */
	CU_ASSERT(g_bserrno == -ENOMEM);
	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
	MOCK_CLEAR(calloc);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[4096];
	uint8_t payload_write[4096];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev fails if the read_only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       uint8_t *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, read one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint8_t *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, write one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare a pattern to write */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_operation_split_rw_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	struct iovec iov_read[2];
	struct iovec iov_write[2];

	uint64_t i, j;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare a pattern to write */
	for (i = 0; i < pages_per_payload; i++) {
		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
			uint64_t *tmp;

			tmp = (uint64_t *)payload_pattern;
			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
			*tmp = i + 1;
		}
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 3;
	iov_read[1].iov_base = payload_read + cluster_size * 3;
	iov_read[1].iov_len = cluster_size * 2;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* The first iov fills the whole blob except the last page, and the second iov
	 * writes the last page with a pattern.
	 */
*/ 1802 iov_write[0].iov_base = payload_pattern; 1803 iov_write[0].iov_len = payload_size - page_size; 1804 iov_write[1].iov_base = payload_pattern; 1805 iov_write[1].iov_len = page_size; 1806 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1807 poll_threads(); 1808 CU_ASSERT(g_bserrno == 0); 1809 1810 /* Read whole blob and check consistency */ 1811 memset(payload_read, 0xFF, payload_size); 1812 iov_read[0].iov_base = payload_read; 1813 iov_read[0].iov_len = cluster_size * 2; 1814 iov_read[1].iov_base = payload_read + cluster_size * 2; 1815 iov_read[1].iov_len = cluster_size * 3; 1816 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1817 poll_threads(); 1818 CU_ASSERT(g_bserrno == 0); 1819 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0); 1820 CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0); 1821 1822 /* First of iovs fills only first page and second of iovs writes whole blob except 1823 * first page with a pattern. */ 1824 iov_write[0].iov_base = payload_pattern; 1825 iov_write[0].iov_len = page_size; 1826 iov_write[1].iov_base = payload_pattern; 1827 iov_write[1].iov_len = payload_size - page_size; 1828 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1829 poll_threads(); 1830 CU_ASSERT(g_bserrno == 0); 1831 1832 /* Read whole blob and check consistency */ 1833 memset(payload_read, 0xFF, payload_size); 1834 iov_read[0].iov_base = payload_read; 1835 iov_read[0].iov_len = cluster_size * 4; 1836 iov_read[1].iov_base = payload_read + cluster_size * 4; 1837 iov_read[1].iov_len = cluster_size; 1838 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1839 poll_threads(); 1840 CU_ASSERT(g_bserrno == 0); 1841 CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0); 1842 CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0); 1843 1844 1845 /* Fill whole blob with a pattern (5 clusters) */ 1846 1847 /* 1. Read test. */ 1848 _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload, 1849 blob_op_complete, NULL); 1850 poll_threads(); 1851 CU_ASSERT(g_bserrno == 0); 1852 1853 memset(payload_read, 0xFF, payload_size); 1854 iov_read[0].iov_base = payload_read; 1855 iov_read[0].iov_len = cluster_size; 1856 iov_read[1].iov_base = payload_read + cluster_size; 1857 iov_read[1].iov_len = cluster_size * 4; 1858 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1859 poll_threads(); 1860 CU_ASSERT(g_bserrno == 0); 1861 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1862 1863 /* 2. Write test. 
*/ 1864 iov_write[0].iov_base = payload_read; 1865 iov_write[0].iov_len = cluster_size * 2; 1866 iov_write[1].iov_base = payload_read + cluster_size * 2; 1867 iov_write[1].iov_len = cluster_size * 3; 1868 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1869 poll_threads(); 1870 CU_ASSERT(g_bserrno == 0); 1871 1872 memset(payload_read, 0xFF, payload_size); 1873 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1874 poll_threads(); 1875 CU_ASSERT(g_bserrno == 0); 1876 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1877 1878 spdk_bs_free_io_channel(channel); 1879 poll_threads(); 1880 1881 g_blob = NULL; 1882 g_blobid = 0; 1883 1884 free(payload_read); 1885 free(payload_write); 1886 free(payload_pattern); 1887 1888 ut_blob_close_and_delete(bs, blob); 1889 } 1890 1891 static void 1892 blob_unmap(void) 1893 { 1894 struct spdk_blob_store *bs = g_bs; 1895 struct spdk_blob *blob; 1896 struct spdk_io_channel *channel; 1897 struct spdk_blob_opts opts; 1898 uint8_t payload[4096]; 1899 int i; 1900 1901 channel = spdk_bs_alloc_io_channel(bs); 1902 CU_ASSERT(channel != NULL); 1903 1904 ut_spdk_blob_opts_init(&opts); 1905 opts.num_clusters = 10; 1906 1907 blob = ut_blob_create_and_open(bs, &opts); 1908 1909 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1910 poll_threads(); 1911 CU_ASSERT(g_bserrno == 0); 1912 1913 memset(payload, 0, sizeof(payload)); 1914 payload[0] = 0xFF; 1915 1916 /* 1917 * Set first byte of every cluster to 0xFF. 1918 * First cluster on device is reserved so let's start from cluster number 1 1919 */ 1920 for (i = 1; i < 11; i++) { 1921 g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF; 1922 } 1923 1924 /* Confirm writes */ 1925 for (i = 0; i < 10; i++) { 1926 payload[0] = 0; 1927 spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1, 1928 blob_op_complete, NULL); 1929 poll_threads(); 1930 CU_ASSERT(g_bserrno == 0); 1931 CU_ASSERT(payload[0] == 0xFF); 1932 } 1933 1934 /* Mark some clusters as unallocated */ 1935 blob->active.clusters[1] = 0; 1936 blob->active.clusters[2] = 0; 1937 blob->active.clusters[3] = 0; 1938 blob->active.clusters[6] = 0; 1939 blob->active.clusters[8] = 0; 1940 1941 /* Unmap clusters by resizing to 0 */ 1942 spdk_blob_resize(blob, 0, blob_op_complete, NULL); 1943 poll_threads(); 1944 CU_ASSERT(g_bserrno == 0); 1945 1946 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1947 poll_threads(); 1948 CU_ASSERT(g_bserrno == 0); 1949 1950 /* Confirm that only 'allocated' clusters were unmapped */ 1951 for (i = 1; i < 11; i++) { 1952 switch (i) { 1953 case 2: 1954 case 3: 1955 case 4: 1956 case 7: 1957 case 9: 1958 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF); 1959 break; 1960 default: 1961 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0); 1962 break; 1963 } 1964 } 1965 1966 spdk_bs_free_io_channel(channel); 1967 poll_threads(); 1968 1969 ut_blob_close_and_delete(bs, blob); 1970 } 1971 1972 static void 1973 blob_iter(void) 1974 { 1975 struct spdk_blob_store *bs = g_bs; 1976 struct spdk_blob *blob; 1977 spdk_blob_id blobid; 1978 struct spdk_blob_opts blob_opts; 1979 1980 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1981 poll_threads(); 1982 CU_ASSERT(g_blob == NULL); 1983 CU_ASSERT(g_bserrno == -ENOENT); 1984 1985 ut_spdk_blob_opts_init(&blob_opts); 1986 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 1987 poll_threads(); 1988 CU_ASSERT(g_bserrno == 0); 
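/* Note: blob_op_with_id_complete() stores the ID reported by spdk_bs_create_blob_ext() in g_blobid
 * once poll_threads() has driven the operation to completion, so the check below confirms that a
 * valid blob now exists before the iterator is exercised again. */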
1989 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1990 blobid = g_blobid; 1991 1992 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1993 poll_threads(); 1994 CU_ASSERT(g_blob != NULL); 1995 CU_ASSERT(g_bserrno == 0); 1996 blob = g_blob; 1997 CU_ASSERT(spdk_blob_get_id(blob) == blobid); 1998 1999 spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL); 2000 poll_threads(); 2001 CU_ASSERT(g_blob == NULL); 2002 CU_ASSERT(g_bserrno == -ENOENT); 2003 } 2004 2005 static void 2006 blob_xattr(void) 2007 { 2008 struct spdk_blob_store *bs = g_bs; 2009 struct spdk_blob *blob = g_blob; 2010 spdk_blob_id blobid = spdk_blob_get_id(blob); 2011 uint64_t length; 2012 int rc; 2013 const char *name1, *name2; 2014 const void *value; 2015 size_t value_len; 2016 struct spdk_xattr_names *names; 2017 2018 /* Test that set_xattr fails if md_ro flag is set. */ 2019 blob->md_ro = true; 2020 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2021 CU_ASSERT(rc == -EPERM); 2022 2023 blob->md_ro = false; 2024 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2025 CU_ASSERT(rc == 0); 2026 2027 length = 2345; 2028 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2029 CU_ASSERT(rc == 0); 2030 2031 /* Overwrite "length" xattr. */ 2032 length = 3456; 2033 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2034 CU_ASSERT(rc == 0); 2035 2036 /* get_xattr should still work even if md_ro flag is set. */ 2037 value = NULL; 2038 blob->md_ro = true; 2039 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2040 CU_ASSERT(rc == 0); 2041 SPDK_CU_ASSERT_FATAL(value != NULL); 2042 CU_ASSERT(*(uint64_t *)value == length); 2043 CU_ASSERT(value_len == 8); 2044 blob->md_ro = false; 2045 2046 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2047 CU_ASSERT(rc == -ENOENT); 2048 2049 names = NULL; 2050 rc = spdk_blob_get_xattr_names(blob, &names); 2051 CU_ASSERT(rc == 0); 2052 SPDK_CU_ASSERT_FATAL(names != NULL); 2053 CU_ASSERT(spdk_xattr_names_get_count(names) == 2); 2054 name1 = spdk_xattr_names_get_name(names, 0); 2055 SPDK_CU_ASSERT_FATAL(name1 != NULL); 2056 CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length")); 2057 name2 = spdk_xattr_names_get_name(names, 1); 2058 SPDK_CU_ASSERT_FATAL(name2 != NULL); 2059 CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length")); 2060 CU_ASSERT(strcmp(name1, name2)); 2061 spdk_xattr_names_free(names); 2062 2063 /* Confirm that remove_xattr fails if md_ro is set to true. 
*/ 2064 blob->md_ro = true; 2065 rc = spdk_blob_remove_xattr(blob, "name"); 2066 CU_ASSERT(rc == -EPERM); 2067 2068 blob->md_ro = false; 2069 rc = spdk_blob_remove_xattr(blob, "name"); 2070 CU_ASSERT(rc == 0); 2071 2072 rc = spdk_blob_remove_xattr(blob, "foobar"); 2073 CU_ASSERT(rc == -ENOENT); 2074 2075 /* Set internal xattr */ 2076 length = 7898; 2077 rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true); 2078 CU_ASSERT(rc == 0); 2079 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2080 CU_ASSERT(rc == 0); 2081 CU_ASSERT(*(uint64_t *)value == length); 2082 /* try to get public xattr with same name */ 2083 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2084 CU_ASSERT(rc != 0); 2085 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false); 2086 CU_ASSERT(rc != 0); 2087 /* Check if SPDK_BLOB_INTERNAL_XATTR is set */ 2088 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 2089 SPDK_BLOB_INTERNAL_XATTR); 2090 2091 spdk_blob_close(blob, blob_op_complete, NULL); 2092 poll_threads(); 2093 2094 /* Check if xattrs are persisted */ 2095 ut_bs_reload(&bs, NULL); 2096 2097 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2098 poll_threads(); 2099 CU_ASSERT(g_bserrno == 0); 2100 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2101 blob = g_blob; 2102 2103 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2104 CU_ASSERT(rc == 0); 2105 CU_ASSERT(*(uint64_t *)value == length); 2106 2107 /* try to get internal xattr trough public call */ 2108 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2109 CU_ASSERT(rc != 0); 2110 2111 rc = blob_remove_xattr(blob, "internal", true); 2112 CU_ASSERT(rc == 0); 2113 2114 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0); 2115 } 2116 2117 static void 2118 blob_parse_md(void) 2119 { 2120 struct spdk_blob_store *bs = g_bs; 2121 struct spdk_blob *blob; 2122 int rc; 2123 uint32_t used_pages; 2124 size_t xattr_length; 2125 char *xattr; 2126 2127 used_pages = spdk_bit_array_count_set(bs->used_md_pages); 2128 blob = ut_blob_create_and_open(bs, NULL); 2129 2130 /* Create large extent to force more than 1 page of metadata. */ 2131 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 2132 strlen("large_xattr"); 2133 xattr = calloc(xattr_length, sizeof(char)); 2134 SPDK_CU_ASSERT_FATAL(xattr != NULL); 2135 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 2136 free(xattr); 2137 SPDK_CU_ASSERT_FATAL(rc == 0); 2138 2139 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2140 poll_threads(); 2141 2142 /* Delete the blob and verify that number of pages returned to before its creation. 
*/ 2143 SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages)); 2144 ut_blob_close_and_delete(bs, blob); 2145 SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages)); 2146 } 2147 2148 static void 2149 bs_load(void) 2150 { 2151 struct spdk_blob_store *bs; 2152 struct spdk_bs_dev *dev; 2153 spdk_blob_id blobid; 2154 struct spdk_blob *blob; 2155 struct spdk_bs_super_block *super_block; 2156 uint64_t length; 2157 int rc; 2158 const void *value; 2159 size_t value_len; 2160 struct spdk_bs_opts opts; 2161 struct spdk_blob_opts blob_opts; 2162 2163 dev = init_dev(); 2164 spdk_bs_opts_init(&opts, sizeof(opts)); 2165 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2166 2167 /* Initialize a new blob store */ 2168 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2169 poll_threads(); 2170 CU_ASSERT(g_bserrno == 0); 2171 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2172 bs = g_bs; 2173 2174 /* Try to open a blobid that does not exist */ 2175 spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL); 2176 poll_threads(); 2177 CU_ASSERT(g_bserrno == -ENOENT); 2178 CU_ASSERT(g_blob == NULL); 2179 2180 /* Create a blob */ 2181 blob = ut_blob_create_and_open(bs, NULL); 2182 blobid = spdk_blob_get_id(blob); 2183 2184 /* Try again to open a valid blob but without the upper bit set */ 2185 spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL); 2186 poll_threads(); 2187 CU_ASSERT(g_bserrno == -ENOENT); 2188 CU_ASSERT(g_blob == NULL); 2189 2190 /* Set some xattrs */ 2191 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2192 CU_ASSERT(rc == 0); 2193 2194 length = 2345; 2195 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2196 CU_ASSERT(rc == 0); 2197 2198 /* Resize the blob */ 2199 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2200 poll_threads(); 2201 CU_ASSERT(g_bserrno == 0); 2202 2203 spdk_blob_close(blob, blob_op_complete, NULL); 2204 poll_threads(); 2205 CU_ASSERT(g_bserrno == 0); 2206 blob = NULL; 2207 g_blob = NULL; 2208 g_blobid = SPDK_BLOBID_INVALID; 2209 2210 /* Unload the blob store */ 2211 spdk_bs_unload(bs, bs_op_complete, NULL); 2212 poll_threads(); 2213 CU_ASSERT(g_bserrno == 0); 2214 g_bs = NULL; 2215 g_blob = NULL; 2216 g_blobid = 0; 2217 2218 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2219 CU_ASSERT(super_block->clean == 1); 2220 2221 /* Load should fail for device with an unsupported blocklen */ 2222 dev = init_dev(); 2223 dev->blocklen = SPDK_BS_PAGE_SIZE * 2; 2224 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2225 poll_threads(); 2226 CU_ASSERT(g_bserrno == -EINVAL); 2227 2228 /* Load should fail when max_md_ops is set to zero */ 2229 dev = init_dev(); 2230 spdk_bs_opts_init(&opts, sizeof(opts)); 2231 opts.max_md_ops = 0; 2232 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2233 poll_threads(); 2234 CU_ASSERT(g_bserrno == -EINVAL); 2235 2236 /* Load should fail when max_channel_ops is set to zero */ 2237 dev = init_dev(); 2238 spdk_bs_opts_init(&opts, sizeof(opts)); 2239 opts.max_channel_ops = 0; 2240 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2241 poll_threads(); 2242 CU_ASSERT(g_bserrno == -EINVAL); 2243 2244 /* Load an existing blob store */ 2245 dev = init_dev(); 2246 spdk_bs_opts_init(&opts, sizeof(opts)); 2247 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2248 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2249 poll_threads(); 2250 CU_ASSERT(g_bserrno == 0);
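/* The three -EINVAL failures above come from up-front validation of the device and load options
 * (unsupported blocklen, zero max_md_ops, zero max_channel_ops); this load succeeds because the
 * options are sane again and opts.bstype matches the "TESTTYPE" recorded in the super block when
 * the store was initialized. */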
2251 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2252 bs = g_bs; 2253 2254 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2255 CU_ASSERT(super_block->clean == 1); 2256 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2257 2258 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2259 poll_threads(); 2260 CU_ASSERT(g_bserrno == 0); 2261 CU_ASSERT(g_blob != NULL); 2262 blob = g_blob; 2263 2264 /* Verify that blobstore is marked dirty after first metadata sync */ 2265 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2266 CU_ASSERT(super_block->clean == 1); 2267 2268 /* Get the xattrs */ 2269 value = NULL; 2270 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2271 CU_ASSERT(rc == 0); 2272 SPDK_CU_ASSERT_FATAL(value != NULL); 2273 CU_ASSERT(*(uint64_t *)value == length); 2274 CU_ASSERT(value_len == 8); 2275 2276 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2277 CU_ASSERT(rc == -ENOENT); 2278 2279 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 2280 2281 spdk_blob_close(blob, blob_op_complete, NULL); 2282 poll_threads(); 2283 CU_ASSERT(g_bserrno == 0); 2284 blob = NULL; 2285 g_blob = NULL; 2286 2287 spdk_bs_unload(bs, bs_op_complete, NULL); 2288 poll_threads(); 2289 CU_ASSERT(g_bserrno == 0); 2290 g_bs = NULL; 2291 2292 /* Load should fail: bdev size < saved size */ 2293 dev = init_dev(); 2294 dev->blockcnt /= 2; 2295 2296 spdk_bs_opts_init(&opts, sizeof(opts)); 2297 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2298 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2299 poll_threads(); 2300 2301 CU_ASSERT(g_bserrno == -EILSEQ); 2302 2303 /* Load should succeed: bdev size > saved size */ 2304 dev = init_dev(); 2305 dev->blockcnt *= 4; 2306 2307 spdk_bs_opts_init(&opts, sizeof(opts)); 2308 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2309 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2310 poll_threads(); 2311 CU_ASSERT(g_bserrno == 0); 2312 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2313 bs = g_bs; 2314 2315 CU_ASSERT(g_bserrno == 0); 2316 spdk_bs_unload(bs, bs_op_complete, NULL); 2317 poll_threads(); 2318 2319 2320 /* Test compatibility mode */ 2321 2322 dev = init_dev(); 2323 super_block->size = 0; 2324 super_block->crc = blob_md_page_calc_crc(super_block); 2325 2326 spdk_bs_opts_init(&opts, sizeof(opts)); 2327 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2328 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2329 poll_threads(); 2330 CU_ASSERT(g_bserrno == 0); 2331 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2332 bs = g_bs; 2333 2334 /* Create a blob */ 2335 ut_spdk_blob_opts_init(&blob_opts); 2336 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2337 poll_threads(); 2338 CU_ASSERT(g_bserrno == 0); 2339 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2340 2341 /* Blobstore should update number of blocks in super_block */ 2342 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2343 CU_ASSERT(super_block->clean == 0); 2344 2345 spdk_bs_unload(bs, bs_op_complete, NULL); 2346 poll_threads(); 2347 CU_ASSERT(g_bserrno == 0); 2348 CU_ASSERT(super_block->clean == 1); 2349 g_bs = NULL; 2350 2351 } 2352 2353 static void 2354 bs_load_pending_removal(void) 2355 { 2356 struct spdk_blob_store *bs = g_bs; 2357 struct spdk_blob_opts opts; 2358 struct spdk_blob *blob, *snapshot; 2359 spdk_blob_id blobid, snapshotid; 2360 const void *value; 2361 size_t value_len; 2362 int rc; 2363 2364 /* Create blob */ 2365 
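/* This blob and the snapshot taken from it below are used to exercise the SNAPSHOT_PENDING_REMOVAL
 * xattr, i.e. the state left behind when a snapshot deletion is interrupted before it completes. */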
ut_spdk_blob_opts_init(&opts); 2366 opts.num_clusters = 10; 2367 2368 blob = ut_blob_create_and_open(bs, &opts); 2369 blobid = spdk_blob_get_id(blob); 2370 2371 /* Create snapshot */ 2372 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 2373 poll_threads(); 2374 CU_ASSERT(g_bserrno == 0); 2375 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2376 snapshotid = g_blobid; 2377 2378 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2379 poll_threads(); 2380 CU_ASSERT(g_bserrno == 0); 2381 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2382 snapshot = g_blob; 2383 2384 /* Set SNAPSHOT_PENDING_REMOVAL xattr */ 2385 snapshot->md_ro = false; 2386 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2387 CU_ASSERT(rc == 0); 2388 snapshot->md_ro = true; 2389 2390 spdk_blob_close(snapshot, blob_op_complete, NULL); 2391 poll_threads(); 2392 CU_ASSERT(g_bserrno == 0); 2393 2394 spdk_blob_close(blob, blob_op_complete, NULL); 2395 poll_threads(); 2396 CU_ASSERT(g_bserrno == 0); 2397 2398 /* Reload blobstore */ 2399 ut_bs_reload(&bs, NULL); 2400 2401 /* Snapshot should not be removed as blob is still pointing to it */ 2402 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2403 poll_threads(); 2404 CU_ASSERT(g_bserrno == 0); 2405 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2406 snapshot = g_blob; 2407 2408 /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ 2409 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 2410 CU_ASSERT(rc != 0); 2411 2412 /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ 2413 snapshot->md_ro = false; 2414 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2415 CU_ASSERT(rc == 0); 2416 snapshot->md_ro = true; 2417 2418 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2419 poll_threads(); 2420 CU_ASSERT(g_bserrno == 0); 2421 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2422 blob = g_blob; 2423 2424 /* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ 2425 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 2426 2427 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2428 poll_threads(); 2429 CU_ASSERT(g_bserrno == 0); 2430 2431 spdk_blob_close(snapshot, blob_op_complete, NULL); 2432 poll_threads(); 2433 CU_ASSERT(g_bserrno == 0); 2434 2435 spdk_blob_close(blob, blob_op_complete, NULL); 2436 poll_threads(); 2437 CU_ASSERT(g_bserrno == 0); 2438 2439 /* Reload blobstore */ 2440 ut_bs_reload(&bs, NULL); 2441 2442 /* Snapshot should be removed as blob is not pointing to it anymore */ 2443 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2444 poll_threads(); 2445 CU_ASSERT(g_bserrno != 0); 2446 } 2447 2448 static void 2449 bs_load_custom_cluster_size(void) 2450 { 2451 struct spdk_blob_store *bs; 2452 struct spdk_bs_dev *dev; 2453 struct spdk_bs_super_block *super_block; 2454 struct spdk_bs_opts opts; 2455 uint32_t custom_cluster_size = 4194304; /* 4MiB */ 2456 uint32_t cluster_sz; 2457 uint64_t total_clusters; 2458 2459 dev = init_dev(); 2460 spdk_bs_opts_init(&opts, sizeof(opts)); 2461 opts.cluster_sz = custom_cluster_size; 2462 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2463 2464 /* Initialize a new blob store */ 2465 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2466 poll_threads(); 2467 CU_ASSERT(g_bserrno == 0); 2468 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2469 bs = g_bs; 2470 cluster_sz = bs->cluster_sz; 2471 total_clusters = 
bs->total_clusters; 2472 2473 /* Unload the blob store */ 2474 spdk_bs_unload(bs, bs_op_complete, NULL); 2475 poll_threads(); 2476 CU_ASSERT(g_bserrno == 0); 2477 g_bs = NULL; 2478 g_blob = NULL; 2479 g_blobid = 0; 2480 2481 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2482 CU_ASSERT(super_block->clean == 1); 2483 2484 /* Load an existing blob store */ 2485 dev = init_dev(); 2486 spdk_bs_opts_init(&opts, sizeof(opts)); 2487 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2488 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2489 poll_threads(); 2490 CU_ASSERT(g_bserrno == 0); 2491 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2492 bs = g_bs; 2493 /* Compare cluster size and number to one after initialization */ 2494 CU_ASSERT(cluster_sz == bs->cluster_sz); 2495 CU_ASSERT(total_clusters == bs->total_clusters); 2496 2497 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2498 CU_ASSERT(super_block->clean == 1); 2499 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2500 2501 spdk_bs_unload(bs, bs_op_complete, NULL); 2502 poll_threads(); 2503 CU_ASSERT(g_bserrno == 0); 2504 CU_ASSERT(super_block->clean == 1); 2505 g_bs = NULL; 2506 } 2507 2508 static void 2509 bs_type(void) 2510 { 2511 struct spdk_blob_store *bs; 2512 struct spdk_bs_dev *dev; 2513 struct spdk_bs_opts opts; 2514 2515 dev = init_dev(); 2516 spdk_bs_opts_init(&opts, sizeof(opts)); 2517 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2518 2519 /* Initialize a new blob store */ 2520 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2521 poll_threads(); 2522 CU_ASSERT(g_bserrno == 0); 2523 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2524 bs = g_bs; 2525 2526 /* Unload the blob store */ 2527 spdk_bs_unload(bs, bs_op_complete, NULL); 2528 poll_threads(); 2529 CU_ASSERT(g_bserrno == 0); 2530 g_bs = NULL; 2531 g_blob = NULL; 2532 g_blobid = 0; 2533 2534 /* Load non existing blobstore type */ 2535 dev = init_dev(); 2536 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2537 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2538 poll_threads(); 2539 CU_ASSERT(g_bserrno != 0); 2540 2541 /* Load with empty blobstore type */ 2542 dev = init_dev(); 2543 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2544 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2545 poll_threads(); 2546 CU_ASSERT(g_bserrno == 0); 2547 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2548 bs = g_bs; 2549 2550 spdk_bs_unload(bs, bs_op_complete, NULL); 2551 poll_threads(); 2552 CU_ASSERT(g_bserrno == 0); 2553 g_bs = NULL; 2554 2555 /* Initialize a new blob store with empty bstype */ 2556 dev = init_dev(); 2557 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2558 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2559 poll_threads(); 2560 CU_ASSERT(g_bserrno == 0); 2561 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2562 bs = g_bs; 2563 2564 spdk_bs_unload(bs, bs_op_complete, NULL); 2565 poll_threads(); 2566 CU_ASSERT(g_bserrno == 0); 2567 g_bs = NULL; 2568 2569 /* Load non existing blobstore type */ 2570 dev = init_dev(); 2571 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2572 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2573 poll_threads(); 2574 CU_ASSERT(g_bserrno != 0); 2575 2576 /* Load with empty blobstore type */ 2577 dev = init_dev(); 2578 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2579 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2580 poll_threads(); 2581 
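/* An all-zero bstype in the load options is treated as "don't care", so this load is expected to
 * succeed even though the store on disk was initialized with "TESTTYPE". Conceptually the check
 * behaves like the sketch below (a hedged paraphrase using a hypothetical helper name, not the
 * exact blobstore.c implementation):
 *
 *	static bool
 *	bstype_accepts(const struct spdk_bs_type *stored, const struct spdk_bs_type *expected)
 *	{
 *		static const struct spdk_bs_type none = { "" };
 *
 *		if (memcmp(expected, &none, sizeof(none)) == 0) {
 *			return true;	(empty expected type matches any store)
 *		}
 *		return memcmp(stored, expected, sizeof(*stored)) == 0;
 *	}
 */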
CU_ASSERT(g_bserrno == 0); 2582 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2583 bs = g_bs; 2584 2585 spdk_bs_unload(bs, bs_op_complete, NULL); 2586 poll_threads(); 2587 CU_ASSERT(g_bserrno == 0); 2588 g_bs = NULL; 2589 } 2590 2591 static void 2592 bs_super_block(void) 2593 { 2594 struct spdk_blob_store *bs; 2595 struct spdk_bs_dev *dev; 2596 struct spdk_bs_super_block *super_block; 2597 struct spdk_bs_opts opts; 2598 struct spdk_bs_super_block_ver1 super_block_v1; 2599 2600 dev = init_dev(); 2601 spdk_bs_opts_init(&opts, sizeof(opts)); 2602 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2603 2604 /* Initialize a new blob store */ 2605 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2606 poll_threads(); 2607 CU_ASSERT(g_bserrno == 0); 2608 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2609 bs = g_bs; 2610 2611 /* Unload the blob store */ 2612 spdk_bs_unload(bs, bs_op_complete, NULL); 2613 poll_threads(); 2614 CU_ASSERT(g_bserrno == 0); 2615 g_bs = NULL; 2616 g_blob = NULL; 2617 g_blobid = 0; 2618 2619 /* Load an existing blob store with version newer than supported */ 2620 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2621 super_block->version++; 2622 2623 dev = init_dev(); 2624 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2625 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2626 poll_threads(); 2627 CU_ASSERT(g_bserrno != 0); 2628 2629 /* Create a new blob store with super block version 1 */ 2630 dev = init_dev(); 2631 super_block_v1.version = 1; 2632 memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); 2633 super_block_v1.length = 0x1000; 2634 super_block_v1.clean = 1; 2635 super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; 2636 super_block_v1.cluster_size = 0x100000; 2637 super_block_v1.used_page_mask_start = 0x01; 2638 super_block_v1.used_page_mask_len = 0x01; 2639 super_block_v1.used_cluster_mask_start = 0x02; 2640 super_block_v1.used_cluster_mask_len = 0x01; 2641 super_block_v1.md_start = 0x03; 2642 super_block_v1.md_len = 0x40; 2643 memset(super_block_v1.reserved, 0, 4036); 2644 super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1); 2645 memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); 2646 2647 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2648 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2649 poll_threads(); 2650 CU_ASSERT(g_bserrno == 0); 2651 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2652 bs = g_bs; 2653 2654 spdk_bs_unload(bs, bs_op_complete, NULL); 2655 poll_threads(); 2656 CU_ASSERT(g_bserrno == 0); 2657 g_bs = NULL; 2658 } 2659 2660 /* 2661 * Create a blobstore and then unload it. 2662 */ 2663 static void 2664 bs_unload(void) 2665 { 2666 struct spdk_blob_store *bs = g_bs; 2667 struct spdk_blob *blob; 2668 2669 /* Create a blob and open it. */ 2670 blob = ut_blob_create_and_open(bs, NULL); 2671 2672 /* Try to unload blobstore, should fail with open blob */ 2673 g_bserrno = -1; 2674 spdk_bs_unload(bs, bs_op_complete, NULL); 2675 poll_threads(); 2676 CU_ASSERT(g_bserrno == -EBUSY); 2677 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2678 2679 /* Close the blob, then successfully unload blobstore */ 2680 g_bserrno = -1; 2681 spdk_blob_close(blob, blob_op_complete, NULL); 2682 poll_threads(); 2683 CU_ASSERT(g_bserrno == 0); 2684 } 2685 2686 /* 2687 * Create a blobstore with a cluster size different than the default, and ensure it is 2688 * persisted. 
2689 */ 2690 static void 2691 bs_cluster_sz(void) 2692 { 2693 struct spdk_blob_store *bs; 2694 struct spdk_bs_dev *dev; 2695 struct spdk_bs_opts opts; 2696 uint32_t cluster_sz; 2697 2698 /* Set cluster size to zero */ 2699 dev = init_dev(); 2700 spdk_bs_opts_init(&opts, sizeof(opts)); 2701 opts.cluster_sz = 0; 2702 2703 /* Initialize a new blob store */ 2704 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2705 poll_threads(); 2706 CU_ASSERT(g_bserrno == -EINVAL); 2707 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2708 2709 /* 2710 * Set cluster size to blobstore page size, 2711 * to work it is required to be at least twice the blobstore page size. 2712 */ 2713 dev = init_dev(); 2714 spdk_bs_opts_init(&opts, sizeof(opts)); 2715 opts.cluster_sz = SPDK_BS_PAGE_SIZE; 2716 2717 /* Initialize a new blob store */ 2718 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2719 poll_threads(); 2720 CU_ASSERT(g_bserrno == -ENOMEM); 2721 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2722 2723 /* 2724 * Set cluster size to lower than page size, 2725 * to work it is required to be at least twice the blobstore page size. 2726 */ 2727 dev = init_dev(); 2728 spdk_bs_opts_init(&opts, sizeof(opts)); 2729 opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; 2730 2731 /* Initialize a new blob store */ 2732 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2733 poll_threads(); 2734 CU_ASSERT(g_bserrno == -EINVAL); 2735 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2736 2737 /* Set cluster size to twice the default */ 2738 dev = init_dev(); 2739 spdk_bs_opts_init(&opts, sizeof(opts)); 2740 opts.cluster_sz *= 2; 2741 cluster_sz = opts.cluster_sz; 2742 2743 /* Initialize a new blob store */ 2744 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2745 poll_threads(); 2746 CU_ASSERT(g_bserrno == 0); 2747 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2748 bs = g_bs; 2749 2750 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2751 2752 ut_bs_reload(&bs, &opts); 2753 2754 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2755 2756 spdk_bs_unload(bs, bs_op_complete, NULL); 2757 poll_threads(); 2758 CU_ASSERT(g_bserrno == 0); 2759 g_bs = NULL; 2760 } 2761 2762 /* 2763 * Create a blobstore, reload it and ensure total usable cluster count 2764 * stays the same. 2765 */ 2766 static void 2767 bs_usable_clusters(void) 2768 { 2769 struct spdk_blob_store *bs = g_bs; 2770 struct spdk_blob *blob; 2771 uint32_t clusters; 2772 int i; 2773 2774 2775 clusters = spdk_bs_total_data_cluster_count(bs); 2776 2777 ut_bs_reload(&bs, NULL); 2778 2779 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2780 2781 /* Create and resize blobs to make sure that useable cluster count won't change */ 2782 for (i = 0; i < 4; i++) { 2783 g_bserrno = -1; 2784 g_blobid = SPDK_BLOBID_INVALID; 2785 blob = ut_blob_create_and_open(bs, NULL); 2786 2787 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2788 poll_threads(); 2789 CU_ASSERT(g_bserrno == 0); 2790 2791 g_bserrno = -1; 2792 spdk_blob_close(blob, blob_op_complete, NULL); 2793 poll_threads(); 2794 CU_ASSERT(g_bserrno == 0); 2795 2796 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2797 } 2798 2799 /* Reload the blob store to make sure that nothing changed */ 2800 ut_bs_reload(&bs, NULL); 2801 2802 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2803 } 2804 2805 /* 2806 * Test resizing of the metadata blob. This requires creating enough blobs 2807 * so that one cluster is not enough to fit the metadata for those blobs. 
2808 * To induce this condition to happen more quickly, we reduce the cluster 2809 * size to 16KB, which means only 4 4KB blob metadata pages can fit. 2810 */ 2811 static void 2812 bs_resize_md(void) 2813 { 2814 struct spdk_blob_store *bs; 2815 const int CLUSTER_PAGE_COUNT = 4; 2816 const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; 2817 struct spdk_bs_dev *dev; 2818 struct spdk_bs_opts opts; 2819 struct spdk_blob *blob; 2820 struct spdk_blob_opts blob_opts; 2821 uint32_t cluster_sz; 2822 spdk_blob_id blobids[NUM_BLOBS]; 2823 int i; 2824 2825 2826 dev = init_dev(); 2827 spdk_bs_opts_init(&opts, sizeof(opts)); 2828 opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; 2829 cluster_sz = opts.cluster_sz; 2830 2831 /* Initialize a new blob store */ 2832 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2833 poll_threads(); 2834 CU_ASSERT(g_bserrno == 0); 2835 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2836 bs = g_bs; 2837 2838 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2839 2840 ut_spdk_blob_opts_init(&blob_opts); 2841 2842 for (i = 0; i < NUM_BLOBS; i++) { 2843 g_bserrno = -1; 2844 g_blobid = SPDK_BLOBID_INVALID; 2845 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2846 poll_threads(); 2847 CU_ASSERT(g_bserrno == 0); 2848 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2849 blobids[i] = g_blobid; 2850 } 2851 2852 ut_bs_reload(&bs, &opts); 2853 2854 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2855 2856 for (i = 0; i < NUM_BLOBS; i++) { 2857 g_bserrno = -1; 2858 g_blob = NULL; 2859 spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); 2860 poll_threads(); 2861 CU_ASSERT(g_bserrno == 0); 2862 CU_ASSERT(g_blob != NULL); 2863 blob = g_blob; 2864 g_bserrno = -1; 2865 spdk_blob_close(blob, blob_op_complete, NULL); 2866 poll_threads(); 2867 CU_ASSERT(g_bserrno == 0); 2868 } 2869 2870 spdk_bs_unload(bs, bs_op_complete, NULL); 2871 poll_threads(); 2872 CU_ASSERT(g_bserrno == 0); 2873 g_bs = NULL; 2874 } 2875 2876 static void 2877 bs_destroy(void) 2878 { 2879 struct spdk_blob_store *bs; 2880 struct spdk_bs_dev *dev; 2881 2882 /* Initialize a new blob store */ 2883 dev = init_dev(); 2884 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2885 poll_threads(); 2886 CU_ASSERT(g_bserrno == 0); 2887 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2888 bs = g_bs; 2889 2890 /* Destroy the blob store */ 2891 g_bserrno = -1; 2892 spdk_bs_destroy(bs, bs_op_complete, NULL); 2893 poll_threads(); 2894 CU_ASSERT(g_bserrno == 0); 2895 2896 /* Loading an non-existent blob store should fail. 
*/ 2897 g_bs = NULL; 2898 dev = init_dev(); 2899 2900 g_bserrno = 0; 2901 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2902 poll_threads(); 2903 CU_ASSERT(g_bserrno != 0); 2904 } 2905 2906 /* Try to hit all of the corner cases associated with serializing 2907 * a blob to disk 2908 */ 2909 static void 2910 blob_serialize_test(void) 2911 { 2912 struct spdk_bs_dev *dev; 2913 struct spdk_bs_opts opts; 2914 struct spdk_blob_store *bs; 2915 spdk_blob_id blobid[2]; 2916 struct spdk_blob *blob[2]; 2917 uint64_t i; 2918 char *value; 2919 int rc; 2920 2921 dev = init_dev(); 2922 2923 /* Initialize a new blobstore with very small clusters */ 2924 spdk_bs_opts_init(&opts, sizeof(opts)); 2925 opts.cluster_sz = dev->blocklen * 8; 2926 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2927 poll_threads(); 2928 CU_ASSERT(g_bserrno == 0); 2929 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2930 bs = g_bs; 2931 2932 /* Create and open two blobs */ 2933 for (i = 0; i < 2; i++) { 2934 blob[i] = ut_blob_create_and_open(bs, NULL); 2935 blobid[i] = spdk_blob_get_id(blob[i]); 2936 2937 /* Set a fairly large xattr on both blobs to eat up 2938 * metadata space 2939 */ 2940 value = calloc(dev->blocklen - 64, sizeof(char)); 2941 SPDK_CU_ASSERT_FATAL(value != NULL); 2942 memset(value, i, dev->blocklen / 2); 2943 rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); 2944 CU_ASSERT(rc == 0); 2945 free(value); 2946 } 2947 2948 /* Resize the blobs, alternating 1 cluster at a time. 2949 * This thwarts run length encoding and will cause spill 2950 * over of the extents. 2951 */ 2952 for (i = 0; i < 6; i++) { 2953 spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); 2954 poll_threads(); 2955 CU_ASSERT(g_bserrno == 0); 2956 } 2957 2958 for (i = 0; i < 2; i++) { 2959 spdk_blob_sync_md(blob[i], blob_op_complete, NULL); 2960 poll_threads(); 2961 CU_ASSERT(g_bserrno == 0); 2962 } 2963 2964 /* Close the blobs */ 2965 for (i = 0; i < 2; i++) { 2966 spdk_blob_close(blob[i], blob_op_complete, NULL); 2967 poll_threads(); 2968 CU_ASSERT(g_bserrno == 0); 2969 } 2970 2971 ut_bs_reload(&bs, &opts); 2972 2973 for (i = 0; i < 2; i++) { 2974 blob[i] = NULL; 2975 2976 spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); 2977 poll_threads(); 2978 CU_ASSERT(g_bserrno == 0); 2979 CU_ASSERT(g_blob != NULL); 2980 blob[i] = g_blob; 2981 2982 CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); 2983 2984 spdk_blob_close(blob[i], blob_op_complete, NULL); 2985 poll_threads(); 2986 CU_ASSERT(g_bserrno == 0); 2987 } 2988 2989 spdk_bs_unload(bs, bs_op_complete, NULL); 2990 poll_threads(); 2991 CU_ASSERT(g_bserrno == 0); 2992 g_bs = NULL; 2993 } 2994 2995 static void 2996 blob_crc(void) 2997 { 2998 struct spdk_blob_store *bs = g_bs; 2999 struct spdk_blob *blob; 3000 spdk_blob_id blobid; 3001 uint32_t page_num; 3002 int index; 3003 struct spdk_blob_md_page *page; 3004 3005 blob = ut_blob_create_and_open(bs, NULL); 3006 blobid = spdk_blob_get_id(blob); 3007 3008 spdk_blob_close(blob, blob_op_complete, NULL); 3009 poll_threads(); 3010 CU_ASSERT(g_bserrno == 0); 3011 3012 page_num = bs_blobid_to_page(blobid); 3013 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3014 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3015 page->crc = 0; 3016 3017 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3018 poll_threads(); 3019 CU_ASSERT(g_bserrno == -EINVAL); 3020 CU_ASSERT(g_blob == NULL); 3021 g_bserrno = 0; 3022 3023 spdk_bs_delete_blob(bs, blobid, blob_op_complete, 
NULL); 3024 poll_threads(); 3025 CU_ASSERT(g_bserrno == -EINVAL); 3026 } 3027 3028 static void 3029 super_block_crc(void) 3030 { 3031 struct spdk_blob_store *bs; 3032 struct spdk_bs_dev *dev; 3033 struct spdk_bs_super_block *super_block; 3034 3035 dev = init_dev(); 3036 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 3037 poll_threads(); 3038 CU_ASSERT(g_bserrno == 0); 3039 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3040 bs = g_bs; 3041 3042 spdk_bs_unload(bs, bs_op_complete, NULL); 3043 poll_threads(); 3044 CU_ASSERT(g_bserrno == 0); 3045 g_bs = NULL; 3046 3047 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 3048 super_block->crc = 0; 3049 dev = init_dev(); 3050 3051 /* Load an existing blob store */ 3052 g_bserrno = 0; 3053 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3054 poll_threads(); 3055 CU_ASSERT(g_bserrno == -EILSEQ); 3056 } 3057 3058 /* For the blob dirty shutdown test case we run the following sub-tests: 3059 * 1 Initialize a new blob store and create 1 super blob with some xattrs, then 3060 * dirty shutdown and reload the blob store and verify the xattrs. 3061 * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown, 3062 * reload the blob store and verify the cluster count. 3063 * 3 Create the second blob and then dirty shutdown, reload the blob store 3064 * and verify the second blob. 3065 * 4 Delete the second blob and then dirty shutdown, reload the blob store 3066 * and verify the second blob is invalid. 3067 * 5 Create the second blob again and also create the third blob, modify the 3068 * md of the second blob so that it becomes invalid, and then dirty shutdown, 3069 * reload the blob store and verify the second blob, which should be invalid, and also 3070 * verify the third blob, which should be correct. 3071 */ 3072 static void 3073 blob_dirty_shutdown(void) 3074 { 3075 int rc; 3076 int index; 3077 struct spdk_blob_store *bs = g_bs; 3078 spdk_blob_id blobid1, blobid2, blobid3; 3079 struct spdk_blob *blob = g_blob; 3080 uint64_t length; 3081 uint64_t free_clusters; 3082 const void *value; 3083 size_t value_len; 3084 uint32_t page_num; 3085 struct spdk_blob_md_page *page; 3086 struct spdk_blob_opts blob_opts; 3087 3088 /* Create first blob */ 3089 blobid1 = spdk_blob_get_id(blob); 3090 3091 /* Set some xattrs */ 3092 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 3093 CU_ASSERT(rc == 0); 3094 3095 length = 2345; 3096 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3097 CU_ASSERT(rc == 0); 3098 3099 /* Put an xattr that fits exactly in a single page. 3100 * This results in adding additional pages to MD. 3101 * The first holds the flags and smaller xattrs, the second the large xattr, 3102 * and the third just the extents.
3103 */ 3104 size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - 3105 strlen("large_xattr"); 3106 char *xattr = calloc(xattr_length, sizeof(char)); 3107 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3108 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3109 free(xattr); 3110 SPDK_CU_ASSERT_FATAL(rc == 0); 3111 3112 /* Resize the blob */ 3113 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3114 poll_threads(); 3115 CU_ASSERT(g_bserrno == 0); 3116 3117 /* Set the blob as the super blob */ 3118 spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); 3119 poll_threads(); 3120 CU_ASSERT(g_bserrno == 0); 3121 3122 free_clusters = spdk_bs_free_cluster_count(bs); 3123 3124 spdk_blob_close(blob, blob_op_complete, NULL); 3125 poll_threads(); 3126 CU_ASSERT(g_bserrno == 0); 3127 blob = NULL; 3128 g_blob = NULL; 3129 g_blobid = SPDK_BLOBID_INVALID; 3130 3131 ut_bs_dirty_load(&bs, NULL); 3132 3133 /* Get the super blob */ 3134 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 3135 poll_threads(); 3136 CU_ASSERT(g_bserrno == 0); 3137 CU_ASSERT(blobid1 == g_blobid); 3138 3139 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3140 poll_threads(); 3141 CU_ASSERT(g_bserrno == 0); 3142 CU_ASSERT(g_blob != NULL); 3143 blob = g_blob; 3144 3145 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3146 3147 /* Get the xattrs */ 3148 value = NULL; 3149 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3150 CU_ASSERT(rc == 0); 3151 SPDK_CU_ASSERT_FATAL(value != NULL); 3152 CU_ASSERT(*(uint64_t *)value == length); 3153 CU_ASSERT(value_len == 8); 3154 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3155 3156 /* Resize the blob */ 3157 spdk_blob_resize(blob, 20, blob_op_complete, NULL); 3158 poll_threads(); 3159 CU_ASSERT(g_bserrno == 0); 3160 3161 free_clusters = spdk_bs_free_cluster_count(bs); 3162 3163 spdk_blob_close(blob, blob_op_complete, NULL); 3164 poll_threads(); 3165 CU_ASSERT(g_bserrno == 0); 3166 blob = NULL; 3167 g_blob = NULL; 3168 g_blobid = SPDK_BLOBID_INVALID; 3169 3170 ut_bs_dirty_load(&bs, NULL); 3171 3172 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3173 poll_threads(); 3174 CU_ASSERT(g_bserrno == 0); 3175 CU_ASSERT(g_blob != NULL); 3176 blob = g_blob; 3177 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); 3178 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3179 3180 spdk_blob_close(blob, blob_op_complete, NULL); 3181 poll_threads(); 3182 CU_ASSERT(g_bserrno == 0); 3183 blob = NULL; 3184 g_blob = NULL; 3185 g_blobid = SPDK_BLOBID_INVALID; 3186 3187 /* Create second blob */ 3188 blob = ut_blob_create_and_open(bs, NULL); 3189 blobid2 = spdk_blob_get_id(blob); 3190 3191 /* Set some xattrs */ 3192 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3193 CU_ASSERT(rc == 0); 3194 3195 length = 5432; 3196 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3197 CU_ASSERT(rc == 0); 3198 3199 /* Resize the blob */ 3200 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3201 poll_threads(); 3202 CU_ASSERT(g_bserrno == 0); 3203 3204 free_clusters = spdk_bs_free_cluster_count(bs); 3205 3206 spdk_blob_close(blob, blob_op_complete, NULL); 3207 poll_threads(); 3208 CU_ASSERT(g_bserrno == 0); 3209 blob = NULL; 3210 g_blob = NULL; 3211 g_blobid = SPDK_BLOBID_INVALID; 3212 3213 ut_bs_dirty_load(&bs, NULL); 3214 3215 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3216 poll_threads(); 3217 CU_ASSERT(g_bserrno == 0); 3218 
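/* Because the store was reloaded without a clean unload, ut_bs_dirty_load() forces the recovery
 * path, which rebuilds blob state by scanning the metadata region instead of trusting what was
 * written at the last clean shutdown. The xattrs and cluster count checked below must survive
 * that recovery. */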
CU_ASSERT(g_blob != NULL); 3219 blob = g_blob; 3220 3221 /* Get the xattrs */ 3222 value = NULL; 3223 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3224 CU_ASSERT(rc == 0); 3225 SPDK_CU_ASSERT_FATAL(value != NULL); 3226 CU_ASSERT(*(uint64_t *)value == length); 3227 CU_ASSERT(value_len == 8); 3228 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3229 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3230 3231 ut_blob_close_and_delete(bs, blob); 3232 3233 free_clusters = spdk_bs_free_cluster_count(bs); 3234 3235 ut_bs_dirty_load(&bs, NULL); 3236 3237 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3238 poll_threads(); 3239 CU_ASSERT(g_bserrno != 0); 3240 CU_ASSERT(g_blob == NULL); 3241 3242 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3243 poll_threads(); 3244 CU_ASSERT(g_bserrno == 0); 3245 CU_ASSERT(g_blob != NULL); 3246 blob = g_blob; 3247 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3248 spdk_blob_close(blob, blob_op_complete, NULL); 3249 poll_threads(); 3250 CU_ASSERT(g_bserrno == 0); 3251 3252 ut_bs_reload(&bs, NULL); 3253 3254 /* Create second blob */ 3255 ut_spdk_blob_opts_init(&blob_opts); 3256 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3257 poll_threads(); 3258 CU_ASSERT(g_bserrno == 0); 3259 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3260 blobid2 = g_blobid; 3261 3262 /* Create third blob */ 3263 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3264 poll_threads(); 3265 CU_ASSERT(g_bserrno == 0); 3266 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3267 blobid3 = g_blobid; 3268 3269 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3270 poll_threads(); 3271 CU_ASSERT(g_bserrno == 0); 3272 CU_ASSERT(g_blob != NULL); 3273 blob = g_blob; 3274 3275 /* Set some xattrs for second blob */ 3276 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3277 CU_ASSERT(rc == 0); 3278 3279 length = 5432; 3280 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3281 CU_ASSERT(rc == 0); 3282 3283 spdk_blob_close(blob, blob_op_complete, NULL); 3284 poll_threads(); 3285 CU_ASSERT(g_bserrno == 0); 3286 blob = NULL; 3287 g_blob = NULL; 3288 g_blobid = SPDK_BLOBID_INVALID; 3289 3290 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3291 poll_threads(); 3292 CU_ASSERT(g_bserrno == 0); 3293 CU_ASSERT(g_blob != NULL); 3294 blob = g_blob; 3295 3296 /* Set some xattrs for third blob */ 3297 rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); 3298 CU_ASSERT(rc == 0); 3299 3300 length = 5432; 3301 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3302 CU_ASSERT(rc == 0); 3303 3304 spdk_blob_close(blob, blob_op_complete, NULL); 3305 poll_threads(); 3306 CU_ASSERT(g_bserrno == 0); 3307 blob = NULL; 3308 g_blob = NULL; 3309 g_blobid = SPDK_BLOBID_INVALID; 3310 3311 /* Mark second blob as invalid */ 3312 page_num = bs_blobid_to_page(blobid2); 3313 3314 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3315 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3316 page->sequence_num = 1; 3317 page->crc = blob_md_page_calc_crc(page); 3318 3319 free_clusters = spdk_bs_free_cluster_count(bs); 3320 3321 ut_bs_dirty_load(&bs, NULL); 3322 3323 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3324 poll_threads(); 3325 CU_ASSERT(g_bserrno != 0); 3326 CU_ASSERT(g_blob == NULL); 3327 3328 spdk_bs_open_blob(bs, blobid3, 
blob_op_with_handle_complete, NULL); 3329 poll_threads(); 3330 CU_ASSERT(g_bserrno == 0); 3331 CU_ASSERT(g_blob != NULL); 3332 blob = g_blob; 3333 3334 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3335 } 3336 3337 static void 3338 blob_flags(void) 3339 { 3340 struct spdk_blob_store *bs = g_bs; 3341 spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; 3342 struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; 3343 struct spdk_blob_opts blob_opts; 3344 int rc; 3345 3346 /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */ 3347 blob_invalid = ut_blob_create_and_open(bs, NULL); 3348 blobid_invalid = spdk_blob_get_id(blob_invalid); 3349 3350 blob_data_ro = ut_blob_create_and_open(bs, NULL); 3351 blobid_data_ro = spdk_blob_get_id(blob_data_ro); 3352 3353 ut_spdk_blob_opts_init(&blob_opts); 3354 blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; 3355 blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); 3356 blobid_md_ro = spdk_blob_get_id(blob_md_ro); 3357 CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); 3358 3359 /* Change the size of blob_data_ro to check if flags are serialized 3360 * when blob has non zero number of extents */ 3361 spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); 3362 poll_threads(); 3363 CU_ASSERT(g_bserrno == 0); 3364 3365 /* Set the xattr to check if flags are serialized 3366 * when blob has non zero number of xattrs */ 3367 rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); 3368 CU_ASSERT(rc == 0); 3369 3370 blob_invalid->invalid_flags = (1ULL << 63); 3371 blob_invalid->state = SPDK_BLOB_STATE_DIRTY; 3372 blob_data_ro->data_ro_flags = (1ULL << 62); 3373 blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; 3374 blob_md_ro->md_ro_flags = (1ULL << 61); 3375 blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; 3376 3377 g_bserrno = -1; 3378 spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); 3379 poll_threads(); 3380 CU_ASSERT(g_bserrno == 0); 3381 g_bserrno = -1; 3382 spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); 3383 poll_threads(); 3384 CU_ASSERT(g_bserrno == 0); 3385 g_bserrno = -1; 3386 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3387 poll_threads(); 3388 CU_ASSERT(g_bserrno == 0); 3389 3390 g_bserrno = -1; 3391 spdk_blob_close(blob_invalid, blob_op_complete, NULL); 3392 poll_threads(); 3393 CU_ASSERT(g_bserrno == 0); 3394 blob_invalid = NULL; 3395 g_bserrno = -1; 3396 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3397 poll_threads(); 3398 CU_ASSERT(g_bserrno == 0); 3399 blob_data_ro = NULL; 3400 g_bserrno = -1; 3401 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3402 poll_threads(); 3403 CU_ASSERT(g_bserrno == 0); 3404 blob_md_ro = NULL; 3405 3406 g_blob = NULL; 3407 g_blobid = SPDK_BLOBID_INVALID; 3408 3409 ut_bs_reload(&bs, NULL); 3410 3411 g_blob = NULL; 3412 g_bserrno = 0; 3413 spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); 3414 poll_threads(); 3415 CU_ASSERT(g_bserrno != 0); 3416 CU_ASSERT(g_blob == NULL); 3417 3418 g_blob = NULL; 3419 g_bserrno = -1; 3420 spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL); 3421 poll_threads(); 3422 CU_ASSERT(g_bserrno == 0); 3423 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3424 blob_data_ro = g_blob; 3425 /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. 
*/ 3426 CU_ASSERT(blob_data_ro->data_ro == true); 3427 CU_ASSERT(blob_data_ro->md_ro == true); 3428 CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); 3429 3430 g_blob = NULL; 3431 g_bserrno = -1; 3432 spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); 3433 poll_threads(); 3434 CU_ASSERT(g_bserrno == 0); 3435 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3436 blob_md_ro = g_blob; 3437 CU_ASSERT(blob_md_ro->data_ro == false); 3438 CU_ASSERT(blob_md_ro->md_ro == true); 3439 3440 g_bserrno = -1; 3441 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3442 poll_threads(); 3443 CU_ASSERT(g_bserrno == 0); 3444 3445 ut_blob_close_and_delete(bs, blob_data_ro); 3446 ut_blob_close_and_delete(bs, blob_md_ro); 3447 } 3448 3449 static void 3450 bs_version(void) 3451 { 3452 struct spdk_bs_super_block *super; 3453 struct spdk_blob_store *bs = g_bs; 3454 struct spdk_bs_dev *dev; 3455 struct spdk_blob *blob; 3456 struct spdk_blob_opts blob_opts; 3457 spdk_blob_id blobid; 3458 3459 /* Unload the blob store */ 3460 spdk_bs_unload(bs, bs_op_complete, NULL); 3461 poll_threads(); 3462 CU_ASSERT(g_bserrno == 0); 3463 g_bs = NULL; 3464 3465 /* 3466 * Change the bs version on disk. This will allow us to 3467 * test that the version does not get modified automatically 3468 * when loading and unloading the blobstore. 3469 */ 3470 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 3471 CU_ASSERT(super->version == SPDK_BS_VERSION); 3472 CU_ASSERT(super->clean == 1); 3473 super->version = 2; 3474 /* 3475 * Version 2 metadata does not have a used blobid mask, so clear 3476 * those fields in the super block and zero the corresponding 3477 * region on "disk". We will use this to ensure blob IDs are 3478 * correctly reconstructed. 3479 */ 3480 memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, 3481 super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); 3482 super->used_blobid_mask_start = 0; 3483 super->used_blobid_mask_len = 0; 3484 super->crc = blob_md_page_calc_crc(super); 3485 3486 /* Load an existing blob store */ 3487 dev = init_dev(); 3488 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3489 poll_threads(); 3490 CU_ASSERT(g_bserrno == 0); 3491 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3492 CU_ASSERT(super->clean == 1); 3493 bs = g_bs; 3494 3495 /* 3496 * Create a blob - just to make sure that when we unload it 3497 * results in writing the super block (since metadata pages 3498 * were allocated. 
3499 */ 3500 ut_spdk_blob_opts_init(&blob_opts); 3501 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3502 poll_threads(); 3503 CU_ASSERT(g_bserrno == 0); 3504 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3505 blobid = g_blobid; 3506 3507 /* Unload the blob store */ 3508 spdk_bs_unload(bs, bs_op_complete, NULL); 3509 poll_threads(); 3510 CU_ASSERT(g_bserrno == 0); 3511 g_bs = NULL; 3512 CU_ASSERT(super->version == 2); 3513 CU_ASSERT(super->used_blobid_mask_start == 0); 3514 CU_ASSERT(super->used_blobid_mask_len == 0); 3515 3516 dev = init_dev(); 3517 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3518 poll_threads(); 3519 CU_ASSERT(g_bserrno == 0); 3520 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3521 bs = g_bs; 3522 3523 g_blob = NULL; 3524 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3525 poll_threads(); 3526 CU_ASSERT(g_bserrno == 0); 3527 CU_ASSERT(g_blob != NULL); 3528 blob = g_blob; 3529 3530 ut_blob_close_and_delete(bs, blob); 3531 3532 CU_ASSERT(super->version == 2); 3533 CU_ASSERT(super->used_blobid_mask_start == 0); 3534 CU_ASSERT(super->used_blobid_mask_len == 0); 3535 } 3536 3537 static void 3538 blob_set_xattrs_test(void) 3539 { 3540 struct spdk_blob_store *bs = g_bs; 3541 struct spdk_blob *blob; 3542 struct spdk_blob_opts opts; 3543 const void *value; 3544 size_t value_len; 3545 char *xattr; 3546 size_t xattr_length; 3547 int rc; 3548 3549 /* Create blob with extra attributes */ 3550 ut_spdk_blob_opts_init(&opts); 3551 3552 opts.xattrs.names = g_xattr_names; 3553 opts.xattrs.get_value = _get_xattr_value; 3554 opts.xattrs.count = 3; 3555 opts.xattrs.ctx = &g_ctx; 3556 3557 blob = ut_blob_create_and_open(bs, &opts); 3558 3559 /* Get the xattrs */ 3560 value = NULL; 3561 3562 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 3563 CU_ASSERT(rc == 0); 3564 SPDK_CU_ASSERT_FATAL(value != NULL); 3565 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 3566 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 3567 3568 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 3569 CU_ASSERT(rc == 0); 3570 SPDK_CU_ASSERT_FATAL(value != NULL); 3571 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 3572 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 3573 3574 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 3575 CU_ASSERT(rc == 0); 3576 SPDK_CU_ASSERT_FATAL(value != NULL); 3577 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 3578 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 3579 3580 /* Try to get non existing attribute */ 3581 3582 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 3583 CU_ASSERT(rc == -ENOENT); 3584 3585 /* Try xattr exceeding maximum length of descriptor in single page */ 3586 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 3587 strlen("large_xattr") + 1; 3588 xattr = calloc(xattr_length, sizeof(char)); 3589 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3590 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3591 free(xattr); 3592 SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); 3593 3594 spdk_blob_close(blob, blob_op_complete, NULL); 3595 poll_threads(); 3596 CU_ASSERT(g_bserrno == 0); 3597 blob = NULL; 3598 g_blob = NULL; 3599 g_blobid = SPDK_BLOBID_INVALID; 3600 3601 /* NULL callback */ 3602 ut_spdk_blob_opts_init(&opts); 3603 opts.xattrs.names = g_xattr_names; 3604 opts.xattrs.get_value = NULL; 3605 opts.xattrs.count = 1; 3606 
opts.xattrs.ctx = &g_ctx; 3607 3608 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3609 poll_threads(); 3610 CU_ASSERT(g_bserrno == -EINVAL); 3611 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3612 3613 /* NULL values */ 3614 ut_spdk_blob_opts_init(&opts); 3615 opts.xattrs.names = g_xattr_names; 3616 opts.xattrs.get_value = _get_xattr_value_null; 3617 opts.xattrs.count = 1; 3618 opts.xattrs.ctx = NULL; 3619 3620 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3621 poll_threads(); 3622 CU_ASSERT(g_bserrno == -EINVAL); 3623 } 3624 3625 static void 3626 blob_thin_prov_alloc(void) 3627 { 3628 struct spdk_blob_store *bs = g_bs; 3629 struct spdk_blob *blob; 3630 struct spdk_blob_opts opts; 3631 spdk_blob_id blobid; 3632 uint64_t free_clusters; 3633 3634 free_clusters = spdk_bs_free_cluster_count(bs); 3635 3636 /* Set blob as thin provisioned */ 3637 ut_spdk_blob_opts_init(&opts); 3638 opts.thin_provision = true; 3639 3640 blob = ut_blob_create_and_open(bs, &opts); 3641 blobid = spdk_blob_get_id(blob); 3642 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3643 3644 CU_ASSERT(blob->active.num_clusters == 0); 3645 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 3646 3647 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3648 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3649 poll_threads(); 3650 CU_ASSERT(g_bserrno == 0); 3651 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3652 CU_ASSERT(blob->active.num_clusters == 5); 3653 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 3654 3655 /* Grow it to 1TB - still unallocated */ 3656 spdk_blob_resize(blob, 262144, blob_op_complete, NULL); 3657 poll_threads(); 3658 CU_ASSERT(g_bserrno == 0); 3659 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3660 CU_ASSERT(blob->active.num_clusters == 262144); 3661 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3662 3663 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3664 poll_threads(); 3665 CU_ASSERT(g_bserrno == 0); 3666 /* Sync must not change anything */ 3667 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3668 CU_ASSERT(blob->active.num_clusters == 262144); 3669 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3670 /* Since clusters are not allocated, 3671 * number of metadata pages is expected to be minimal. 
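 * Even at the 262144-cluster logical size, a single metadata page is enough here,
 * which the assert below checks.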
3672 */ 3673 CU_ASSERT(blob->active.num_pages == 1); 3674 3675 /* Shrink the blob to 3 clusters - still unallocated */ 3676 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 3677 poll_threads(); 3678 CU_ASSERT(g_bserrno == 0); 3679 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3680 CU_ASSERT(blob->active.num_clusters == 3); 3681 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3682 3683 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3684 poll_threads(); 3685 CU_ASSERT(g_bserrno == 0); 3686 /* Sync must not change anything */ 3687 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3688 CU_ASSERT(blob->active.num_clusters == 3); 3689 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3690 3691 spdk_blob_close(blob, blob_op_complete, NULL); 3692 poll_threads(); 3693 CU_ASSERT(g_bserrno == 0); 3694 3695 ut_bs_reload(&bs, NULL); 3696 3697 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3698 poll_threads(); 3699 CU_ASSERT(g_bserrno == 0); 3700 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3701 blob = g_blob; 3702 3703 /* Check that clusters allocation and size is still the same */ 3704 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3705 CU_ASSERT(blob->active.num_clusters == 3); 3706 3707 ut_blob_close_and_delete(bs, blob); 3708 } 3709 3710 static void 3711 blob_insert_cluster_msg_test(void) 3712 { 3713 struct spdk_blob_store *bs = g_bs; 3714 struct spdk_blob *blob; 3715 struct spdk_blob_opts opts; 3716 spdk_blob_id blobid; 3717 uint64_t free_clusters; 3718 uint64_t new_cluster = 0; 3719 uint32_t cluster_num = 3; 3720 uint32_t extent_page = 0; 3721 3722 free_clusters = spdk_bs_free_cluster_count(bs); 3723 3724 /* Set blob as thin provisioned */ 3725 ut_spdk_blob_opts_init(&opts); 3726 opts.thin_provision = true; 3727 opts.num_clusters = 4; 3728 3729 blob = ut_blob_create_and_open(bs, &opts); 3730 blobid = spdk_blob_get_id(blob); 3731 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3732 3733 CU_ASSERT(blob->active.num_clusters == 4); 3734 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); 3735 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3736 3737 /* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread. 3738 * This is to simulate behaviour when cluster is allocated after blob creation. 3739 * Such as _spdk_bs_allocate_and_copy_cluster(). 
*/ 3740 bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); 3741 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3742 3743 blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, 3744 blob_op_complete, NULL); 3745 poll_threads(); 3746 3747 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3748 3749 spdk_blob_close(blob, blob_op_complete, NULL); 3750 poll_threads(); 3751 CU_ASSERT(g_bserrno == 0); 3752 3753 ut_bs_reload(&bs, NULL); 3754 3755 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3756 poll_threads(); 3757 CU_ASSERT(g_bserrno == 0); 3758 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3759 blob = g_blob; 3760 3761 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3762 3763 ut_blob_close_and_delete(bs, blob); 3764 } 3765 3766 static void 3767 blob_thin_prov_rw(void) 3768 { 3769 static const uint8_t zero[10 * 4096] = { 0 }; 3770 struct spdk_blob_store *bs = g_bs; 3771 struct spdk_blob *blob, *blob_id0; 3772 struct spdk_io_channel *channel, *channel_thread1; 3773 struct spdk_blob_opts opts; 3774 uint64_t free_clusters; 3775 uint64_t page_size; 3776 uint8_t payload_read[10 * 4096]; 3777 uint8_t payload_write[10 * 4096]; 3778 uint64_t write_bytes; 3779 uint64_t read_bytes; 3780 3781 free_clusters = spdk_bs_free_cluster_count(bs); 3782 page_size = spdk_bs_get_page_size(bs); 3783 3784 channel = spdk_bs_alloc_io_channel(bs); 3785 CU_ASSERT(channel != NULL); 3786 3787 ut_spdk_blob_opts_init(&opts); 3788 opts.thin_provision = true; 3789 3790 /* Create and delete blob at md page 0, so that next md page allocation 3791 * for extent will use that. */ 3792 blob_id0 = ut_blob_create_and_open(bs, &opts); 3793 blob = ut_blob_create_and_open(bs, &opts); 3794 ut_blob_close_and_delete(bs, blob_id0); 3795 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3796 3797 CU_ASSERT(blob->active.num_clusters == 0); 3798 3799 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3800 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3801 poll_threads(); 3802 CU_ASSERT(g_bserrno == 0); 3803 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3804 CU_ASSERT(blob->active.num_clusters == 5); 3805 3806 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3807 poll_threads(); 3808 CU_ASSERT(g_bserrno == 0); 3809 /* Sync must not change anything */ 3810 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3811 CU_ASSERT(blob->active.num_clusters == 5); 3812 3813 /* Payload should be all zeros from unallocated clusters */ 3814 memset(payload_read, 0xFF, sizeof(payload_read)); 3815 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3816 poll_threads(); 3817 CU_ASSERT(g_bserrno == 0); 3818 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3819 3820 write_bytes = g_dev_write_bytes; 3821 read_bytes = g_dev_read_bytes; 3822 3823 /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ 3824 set_thread(1); 3825 channel_thread1 = spdk_bs_alloc_io_channel(bs); 3826 CU_ASSERT(channel_thread1 != NULL); 3827 memset(payload_write, 0xE5, sizeof(payload_write)); 3828 spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); 3829 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3830 /* Perform write on thread 0. That will try to allocate cluster, 3831 * but fail due to another thread issuing the cluster allocation first. 
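 * The cluster claimed for the losing write is released again, so after poll_threads()
 * the free count settles at free_clusters - 1.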
*/ 3832 set_thread(0); 3833 memset(payload_write, 0xE5, sizeof(payload_write)); 3834 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 3835 CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); 3836 poll_threads(); 3837 CU_ASSERT(g_bserrno == 0); 3838 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3839 /* For thin-provisioned blob we need to write 20 pages plus one page metadata and 3840 * read 0 bytes */ 3841 if (g_use_extent_table) { 3842 /* Add one more page for EXTENT_PAGE write */ 3843 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); 3844 } else { 3845 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); 3846 } 3847 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3848 3849 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3850 poll_threads(); 3851 CU_ASSERT(g_bserrno == 0); 3852 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3853 3854 ut_blob_close_and_delete(bs, blob); 3855 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3856 3857 set_thread(1); 3858 spdk_bs_free_io_channel(channel_thread1); 3859 set_thread(0); 3860 spdk_bs_free_io_channel(channel); 3861 poll_threads(); 3862 g_blob = NULL; 3863 g_blobid = 0; 3864 } 3865 3866 static void 3867 blob_thin_prov_rle(void) 3868 { 3869 static const uint8_t zero[10 * 4096] = { 0 }; 3870 struct spdk_blob_store *bs = g_bs; 3871 struct spdk_blob *blob; 3872 struct spdk_io_channel *channel; 3873 struct spdk_blob_opts opts; 3874 spdk_blob_id blobid; 3875 uint64_t free_clusters; 3876 uint64_t page_size; 3877 uint8_t payload_read[10 * 4096]; 3878 uint8_t payload_write[10 * 4096]; 3879 uint64_t write_bytes; 3880 uint64_t read_bytes; 3881 uint64_t io_unit; 3882 3883 free_clusters = spdk_bs_free_cluster_count(bs); 3884 page_size = spdk_bs_get_page_size(bs); 3885 3886 ut_spdk_blob_opts_init(&opts); 3887 opts.thin_provision = true; 3888 opts.num_clusters = 5; 3889 3890 blob = ut_blob_create_and_open(bs, &opts); 3891 blobid = spdk_blob_get_id(blob); 3892 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3893 3894 channel = spdk_bs_alloc_io_channel(bs); 3895 CU_ASSERT(channel != NULL); 3896 3897 /* Target specifically second cluster in a blob as first allocation */ 3898 io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs); 3899 3900 /* Payload should be all zeros from unallocated clusters */ 3901 memset(payload_read, 0xFF, sizeof(payload_read)); 3902 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3903 poll_threads(); 3904 CU_ASSERT(g_bserrno == 0); 3905 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3906 3907 write_bytes = g_dev_write_bytes; 3908 read_bytes = g_dev_read_bytes; 3909 3910 /* Issue write to second cluster in a blob */ 3911 memset(payload_write, 0xE5, sizeof(payload_write)); 3912 spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); 3913 poll_threads(); 3914 CU_ASSERT(g_bserrno == 0); 3915 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3916 /* For thin-provisioned blob we need to write 10 pages plus one page metadata and 3917 * read 0 bytes */ 3918 if (g_use_extent_table) { 3919 /* Add one more page for EXTENT_PAGE write */ 3920 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); 3921 } else { 3922 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); 3923 } 3924 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3925 3926 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, 
blob_op_complete, NULL); 3927 poll_threads(); 3928 CU_ASSERT(g_bserrno == 0); 3929 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3930 3931 spdk_bs_free_io_channel(channel); 3932 poll_threads(); 3933 3934 spdk_blob_close(blob, blob_op_complete, NULL); 3935 poll_threads(); 3936 CU_ASSERT(g_bserrno == 0); 3937 3938 ut_bs_reload(&bs, NULL); 3939 3940 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3941 poll_threads(); 3942 CU_ASSERT(g_bserrno == 0); 3943 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3944 blob = g_blob; 3945 3946 channel = spdk_bs_alloc_io_channel(bs); 3947 CU_ASSERT(channel != NULL); 3948 3949 /* Read second cluster after blob reload to confirm data written */ 3950 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3951 poll_threads(); 3952 CU_ASSERT(g_bserrno == 0); 3953 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3954 3955 spdk_bs_free_io_channel(channel); 3956 poll_threads(); 3957 3958 ut_blob_close_and_delete(bs, blob); 3959 } 3960 3961 static void 3962 blob_thin_prov_rw_iov(void) 3963 { 3964 static const uint8_t zero[10 * 4096] = { 0 }; 3965 struct spdk_blob_store *bs = g_bs; 3966 struct spdk_blob *blob; 3967 struct spdk_io_channel *channel; 3968 struct spdk_blob_opts opts; 3969 uint64_t free_clusters; 3970 uint8_t payload_read[10 * 4096]; 3971 uint8_t payload_write[10 * 4096]; 3972 struct iovec iov_read[3]; 3973 struct iovec iov_write[3]; 3974 3975 free_clusters = spdk_bs_free_cluster_count(bs); 3976 3977 channel = spdk_bs_alloc_io_channel(bs); 3978 CU_ASSERT(channel != NULL); 3979 3980 ut_spdk_blob_opts_init(&opts); 3981 opts.thin_provision = true; 3982 3983 blob = ut_blob_create_and_open(bs, &opts); 3984 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3985 3986 CU_ASSERT(blob->active.num_clusters == 0); 3987 3988 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. 
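As in blob_thin_prov_rw(), the resize itself claims no clusters, which the free-cluster count assert below confirms.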
*/ 3989 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3990 poll_threads(); 3991 CU_ASSERT(g_bserrno == 0); 3992 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3993 CU_ASSERT(blob->active.num_clusters == 5); 3994 3995 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3996 poll_threads(); 3997 CU_ASSERT(g_bserrno == 0); 3998 /* Sync must not change anything */ 3999 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4000 CU_ASSERT(blob->active.num_clusters == 5); 4001 4002 /* Payload should be all zeros from unallocated clusters */ 4003 memset(payload_read, 0xAA, sizeof(payload_read)); 4004 iov_read[0].iov_base = payload_read; 4005 iov_read[0].iov_len = 3 * 4096; 4006 iov_read[1].iov_base = payload_read + 3 * 4096; 4007 iov_read[1].iov_len = 4 * 4096; 4008 iov_read[2].iov_base = payload_read + 7 * 4096; 4009 iov_read[2].iov_len = 3 * 4096; 4010 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4011 poll_threads(); 4012 CU_ASSERT(g_bserrno == 0); 4013 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4014 4015 memset(payload_write, 0xE5, sizeof(payload_write)); 4016 iov_write[0].iov_base = payload_write; 4017 iov_write[0].iov_len = 1 * 4096; 4018 iov_write[1].iov_base = payload_write + 1 * 4096; 4019 iov_write[1].iov_len = 5 * 4096; 4020 iov_write[2].iov_base = payload_write + 6 * 4096; 4021 iov_write[2].iov_len = 4 * 4096; 4022 4023 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4024 poll_threads(); 4025 CU_ASSERT(g_bserrno == 0); 4026 4027 memset(payload_read, 0xAA, sizeof(payload_read)); 4028 iov_read[0].iov_base = payload_read; 4029 iov_read[0].iov_len = 3 * 4096; 4030 iov_read[1].iov_base = payload_read + 3 * 4096; 4031 iov_read[1].iov_len = 4 * 4096; 4032 iov_read[2].iov_base = payload_read + 7 * 4096; 4033 iov_read[2].iov_len = 3 * 4096; 4034 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4035 poll_threads(); 4036 CU_ASSERT(g_bserrno == 0); 4037 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4038 4039 spdk_bs_free_io_channel(channel); 4040 poll_threads(); 4041 4042 ut_blob_close_and_delete(bs, blob); 4043 } 4044 4045 struct iter_ctx { 4046 int current_iter; 4047 spdk_blob_id blobid[4]; 4048 }; 4049 4050 static void 4051 test_iter(void *arg, struct spdk_blob *blob, int bserrno) 4052 { 4053 struct iter_ctx *iter_ctx = arg; 4054 spdk_blob_id blobid; 4055 4056 CU_ASSERT(bserrno == 0); 4057 blobid = spdk_blob_get_id(blob); 4058 CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); 4059 } 4060 4061 static void 4062 bs_load_iter_test(void) 4063 { 4064 struct spdk_blob_store *bs; 4065 struct spdk_bs_dev *dev; 4066 struct iter_ctx iter_ctx = { 0 }; 4067 struct spdk_blob *blob; 4068 int i, rc; 4069 struct spdk_bs_opts opts; 4070 4071 dev = init_dev(); 4072 spdk_bs_opts_init(&opts, sizeof(opts)); 4073 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4074 4075 /* Initialize a new blob store */ 4076 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 4077 poll_threads(); 4078 CU_ASSERT(g_bserrno == 0); 4079 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4080 bs = g_bs; 4081 4082 for (i = 0; i < 4; i++) { 4083 blob = ut_blob_create_and_open(bs, NULL); 4084 iter_ctx.blobid[i] = spdk_blob_get_id(blob); 4085 4086 /* Just save the blobid as an xattr for testing purposes. 
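The xattr is not read back; test_iter() only verifies that iteration returns the blob IDs in the order they were created.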
*/ 4087 rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); 4088 CU_ASSERT(rc == 0); 4089 4090 /* Resize the blob */ 4091 spdk_blob_resize(blob, i, blob_op_complete, NULL); 4092 poll_threads(); 4093 CU_ASSERT(g_bserrno == 0); 4094 4095 spdk_blob_close(blob, blob_op_complete, NULL); 4096 poll_threads(); 4097 CU_ASSERT(g_bserrno == 0); 4098 } 4099 4100 g_bserrno = -1; 4101 spdk_bs_unload(bs, bs_op_complete, NULL); 4102 poll_threads(); 4103 CU_ASSERT(g_bserrno == 0); 4104 4105 dev = init_dev(); 4106 spdk_bs_opts_init(&opts, sizeof(opts)); 4107 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4108 opts.iter_cb_fn = test_iter; 4109 opts.iter_cb_arg = &iter_ctx; 4110 4111 /* Test blob iteration during load after a clean shutdown. */ 4112 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4113 poll_threads(); 4114 CU_ASSERT(g_bserrno == 0); 4115 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4116 bs = g_bs; 4117 4118 /* Dirty shutdown */ 4119 bs_free(bs); 4120 4121 dev = init_dev(); 4122 spdk_bs_opts_init(&opts, sizeof(opts)); 4123 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4124 opts.iter_cb_fn = test_iter; 4125 iter_ctx.current_iter = 0; 4126 opts.iter_cb_arg = &iter_ctx; 4127 4128 /* Test blob iteration during load after a dirty shutdown. */ 4129 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4130 poll_threads(); 4131 CU_ASSERT(g_bserrno == 0); 4132 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4133 bs = g_bs; 4134 4135 spdk_bs_unload(bs, bs_op_complete, NULL); 4136 poll_threads(); 4137 CU_ASSERT(g_bserrno == 0); 4138 g_bs = NULL; 4139 } 4140 4141 static void 4142 blob_snapshot_rw(void) 4143 { 4144 static const uint8_t zero[10 * 4096] = { 0 }; 4145 struct spdk_blob_store *bs = g_bs; 4146 struct spdk_blob *blob, *snapshot; 4147 struct spdk_io_channel *channel; 4148 struct spdk_blob_opts opts; 4149 spdk_blob_id blobid, snapshotid; 4150 uint64_t free_clusters; 4151 uint64_t cluster_size; 4152 uint64_t page_size; 4153 uint8_t payload_read[10 * 4096]; 4154 uint8_t payload_write[10 * 4096]; 4155 uint64_t write_bytes; 4156 uint64_t read_bytes; 4157 4158 free_clusters = spdk_bs_free_cluster_count(bs); 4159 cluster_size = spdk_bs_get_cluster_size(bs); 4160 page_size = spdk_bs_get_page_size(bs); 4161 4162 channel = spdk_bs_alloc_io_channel(bs); 4163 CU_ASSERT(channel != NULL); 4164 4165 ut_spdk_blob_opts_init(&opts); 4166 opts.thin_provision = true; 4167 opts.num_clusters = 5; 4168 4169 blob = ut_blob_create_and_open(bs, &opts); 4170 blobid = spdk_blob_get_id(blob); 4171 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4172 4173 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4174 4175 memset(payload_read, 0xFF, sizeof(payload_read)); 4176 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4177 poll_threads(); 4178 CU_ASSERT(g_bserrno == 0); 4179 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4180 4181 memset(payload_write, 0xE5, sizeof(payload_write)); 4182 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4183 poll_threads(); 4184 CU_ASSERT(g_bserrno == 0); 4185 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4186 4187 /* Create snapshot from blob */ 4188 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4189 poll_threads(); 4190 CU_ASSERT(g_bserrno == 0); 4191 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4192 snapshotid = g_blobid; 4193 4194 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, 
NULL); 4195 poll_threads(); 4196 CU_ASSERT(g_bserrno == 0); 4197 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4198 snapshot = g_blob; 4199 CU_ASSERT(snapshot->data_ro == true); 4200 CU_ASSERT(snapshot->md_ro == true); 4201 4202 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4203 4204 write_bytes = g_dev_write_bytes; 4205 read_bytes = g_dev_read_bytes; 4206 4207 memset(payload_write, 0xAA, sizeof(payload_write)); 4208 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4209 poll_threads(); 4210 CU_ASSERT(g_bserrno == 0); 4211 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4212 4213 /* For a clone we need to allocate and copy one cluster, update one page of metadata 4214 * and then write 10 pages of payload. 4215 */ 4216 if (g_use_extent_table) { 4217 /* Add one more page for EXTENT_PAGE write */ 4218 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); 4219 } else { 4220 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); 4221 } 4222 CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); 4223 4224 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4225 poll_threads(); 4226 CU_ASSERT(g_bserrno == 0); 4227 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4228 4229 /* Data on snapshot should not change after write to clone */ 4230 memset(payload_write, 0xE5, sizeof(payload_write)); 4231 spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); 4232 poll_threads(); 4233 CU_ASSERT(g_bserrno == 0); 4234 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4235 4236 ut_blob_close_and_delete(bs, blob); 4237 ut_blob_close_and_delete(bs, snapshot); 4238 4239 spdk_bs_free_io_channel(channel); 4240 poll_threads(); 4241 g_blob = NULL; 4242 g_blobid = 0; 4243 } 4244 4245 static void 4246 blob_snapshot_rw_iov(void) 4247 { 4248 static const uint8_t zero[10 * 4096] = { 0 }; 4249 struct spdk_blob_store *bs = g_bs; 4250 struct spdk_blob *blob, *snapshot; 4251 struct spdk_io_channel *channel; 4252 struct spdk_blob_opts opts; 4253 spdk_blob_id blobid, snapshotid; 4254 uint64_t free_clusters; 4255 uint8_t payload_read[10 * 4096]; 4256 uint8_t payload_write[10 * 4096]; 4257 struct iovec iov_read[3]; 4258 struct iovec iov_write[3]; 4259 4260 free_clusters = spdk_bs_free_cluster_count(bs); 4261 4262 channel = spdk_bs_alloc_io_channel(bs); 4263 CU_ASSERT(channel != NULL); 4264 4265 ut_spdk_blob_opts_init(&opts); 4266 opts.thin_provision = true; 4267 opts.num_clusters = 5; 4268 4269 blob = ut_blob_create_and_open(bs, &opts); 4270 blobid = spdk_blob_get_id(blob); 4271 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4272 4273 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4274 4275 /* Create snapshot from blob */ 4276 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4277 poll_threads(); 4278 CU_ASSERT(g_bserrno == 0); 4279 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4280 snapshotid = g_blobid; 4281 4282 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4283 poll_threads(); 4284 CU_ASSERT(g_bserrno == 0); 4285 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4286 snapshot = g_blob; 4287 CU_ASSERT(snapshot->data_ro == true); 4288 CU_ASSERT(snapshot->md_ro == true); 4289 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4290 4291 /* Payload should be all zeros from unallocated clusters */ 4292 memset(payload_read, 0xAA, sizeof(payload_read)); 4293 iov_read[0].iov_base = payload_read; 4294 
iov_read[0].iov_len = 3 * 4096; 4295 iov_read[1].iov_base = payload_read + 3 * 4096; 4296 iov_read[1].iov_len = 4 * 4096; 4297 iov_read[2].iov_base = payload_read + 7 * 4096; 4298 iov_read[2].iov_len = 3 * 4096; 4299 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4300 poll_threads(); 4301 CU_ASSERT(g_bserrno == 0); 4302 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4303 4304 memset(payload_write, 0xE5, sizeof(payload_write)); 4305 iov_write[0].iov_base = payload_write; 4306 iov_write[0].iov_len = 1 * 4096; 4307 iov_write[1].iov_base = payload_write + 1 * 4096; 4308 iov_write[1].iov_len = 5 * 4096; 4309 iov_write[2].iov_base = payload_write + 6 * 4096; 4310 iov_write[2].iov_len = 4 * 4096; 4311 4312 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4313 poll_threads(); 4314 CU_ASSERT(g_bserrno == 0); 4315 4316 memset(payload_read, 0xAA, sizeof(payload_read)); 4317 iov_read[0].iov_base = payload_read; 4318 iov_read[0].iov_len = 3 * 4096; 4319 iov_read[1].iov_base = payload_read + 3 * 4096; 4320 iov_read[1].iov_len = 4 * 4096; 4321 iov_read[2].iov_base = payload_read + 7 * 4096; 4322 iov_read[2].iov_len = 3 * 4096; 4323 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4324 poll_threads(); 4325 CU_ASSERT(g_bserrno == 0); 4326 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4327 4328 spdk_bs_free_io_channel(channel); 4329 poll_threads(); 4330 4331 ut_blob_close_and_delete(bs, blob); 4332 ut_blob_close_and_delete(bs, snapshot); 4333 } 4334 4335 /** 4336 * Inflate / decouple parent rw unit tests. 4337 * 4338 * -------------- 4339 * original blob: 0 1 2 3 4 4340 * ,---------+---------+---------+---------+---------. 4341 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4342 * +---------+---------+---------+---------+---------+ 4343 * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | 4344 * +---------+---------+---------+---------+---------+ 4345 * blob | - |zzzzzzzzz| - | - | - | 4346 * '---------+---------+---------+---------+---------' 4347 * . . . . . . 4348 * -------- . . . . . . 4349 * inflate: . . . . . . 4350 * ,---------+---------+---------+---------+---------. 4351 * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| 4352 * '---------+---------+---------+---------+---------' 4353 * 4354 * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency 4355 * on snapshot2 and snapshot removed . . . 4356 * . . . . . . 4357 * ---------------- . . . . . . 4358 * decouple parent: . . . . . . 4359 * ,---------+---------+---------+---------+---------. 4360 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4361 * +---------+---------+---------+---------+---------+ 4362 * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | 4363 * '---------+---------+---------+---------+---------' 4364 * 4365 * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency 4366 * on snapshot2 removed and on snapshot still exists. Snapshot2 4367 * should remain a clone of snapshot. 
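 * In both variants reads of the last cluster keep returning zeroes: inflate allocates
 * it zero-filled, while decouple parent leaves it unallocated.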
4368 */ 4369 static void 4370 _blob_inflate_rw(bool decouple_parent) 4371 { 4372 struct spdk_blob_store *bs = g_bs; 4373 struct spdk_blob *blob, *snapshot, *snapshot2; 4374 struct spdk_io_channel *channel; 4375 struct spdk_blob_opts opts; 4376 spdk_blob_id blobid, snapshotid, snapshot2id; 4377 uint64_t free_clusters; 4378 uint64_t cluster_size; 4379 4380 uint64_t payload_size; 4381 uint8_t *payload_read; 4382 uint8_t *payload_write; 4383 uint8_t *payload_clone; 4384 4385 uint64_t pages_per_cluster; 4386 uint64_t pages_per_payload; 4387 4388 int i; 4389 spdk_blob_id ids[2]; 4390 size_t count; 4391 4392 free_clusters = spdk_bs_free_cluster_count(bs); 4393 cluster_size = spdk_bs_get_cluster_size(bs); 4394 pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); 4395 pages_per_payload = pages_per_cluster * 5; 4396 4397 payload_size = cluster_size * 5; 4398 4399 payload_read = malloc(payload_size); 4400 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 4401 4402 payload_write = malloc(payload_size); 4403 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 4404 4405 payload_clone = malloc(payload_size); 4406 SPDK_CU_ASSERT_FATAL(payload_clone != NULL); 4407 4408 channel = spdk_bs_alloc_io_channel(bs); 4409 SPDK_CU_ASSERT_FATAL(channel != NULL); 4410 4411 /* Create blob */ 4412 ut_spdk_blob_opts_init(&opts); 4413 opts.thin_provision = true; 4414 opts.num_clusters = 5; 4415 4416 blob = ut_blob_create_and_open(bs, &opts); 4417 blobid = spdk_blob_get_id(blob); 4418 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4419 4420 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4421 4422 /* 1) Initial read should return zeroed payload */ 4423 memset(payload_read, 0xFF, payload_size); 4424 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4425 blob_op_complete, NULL); 4426 poll_threads(); 4427 CU_ASSERT(g_bserrno == 0); 4428 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 4429 4430 /* Fill whole blob with a pattern, except last cluster (to be sure it 4431 * isn't allocated) */ 4432 memset(payload_write, 0xE5, payload_size - cluster_size); 4433 spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - 4434 pages_per_cluster, blob_op_complete, NULL); 4435 poll_threads(); 4436 CU_ASSERT(g_bserrno == 0); 4437 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4438 4439 /* 2) Create snapshot from blob (first level) */ 4440 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4441 poll_threads(); 4442 CU_ASSERT(g_bserrno == 0); 4443 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4444 snapshotid = g_blobid; 4445 4446 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4447 poll_threads(); 4448 CU_ASSERT(g_bserrno == 0); 4449 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4450 snapshot = g_blob; 4451 CU_ASSERT(snapshot->data_ro == true); 4452 CU_ASSERT(snapshot->md_ro == true); 4453 4454 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4455 4456 /* Write every second cluster with a pattern. 4457 * 4458 * Last cluster shouldn't be written, to be sure that snapshot nor clone 4459 * doesn't allocate it. 4460 * 4461 * payload_clone stores expected result on "blob" read at the time and 4462 * is used only to check data consistency on clone before and after 4463 * inflation. Initially we fill it with a backing snapshots pattern 4464 * used before. 
4465 */ 4466 memset(payload_clone, 0xE5, payload_size - cluster_size); 4467 memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); 4468 memset(payload_write, 0xAA, payload_size); 4469 for (i = 1; i < 5; i += 2) { 4470 spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, 4471 pages_per_cluster, blob_op_complete, NULL); 4472 poll_threads(); 4473 CU_ASSERT(g_bserrno == 0); 4474 4475 /* Update expected result */ 4476 memcpy(payload_clone + (cluster_size * i), payload_write, 4477 cluster_size); 4478 } 4479 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4480 4481 /* Check data consistency on clone */ 4482 memset(payload_read, 0xFF, payload_size); 4483 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4484 blob_op_complete, NULL); 4485 poll_threads(); 4486 CU_ASSERT(g_bserrno == 0); 4487 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4488 4489 /* 3) Create second levels snapshot from blob */ 4490 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4491 poll_threads(); 4492 CU_ASSERT(g_bserrno == 0); 4493 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4494 snapshot2id = g_blobid; 4495 4496 spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); 4497 poll_threads(); 4498 CU_ASSERT(g_bserrno == 0); 4499 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4500 snapshot2 = g_blob; 4501 CU_ASSERT(snapshot2->data_ro == true); 4502 CU_ASSERT(snapshot2->md_ro == true); 4503 4504 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); 4505 4506 CU_ASSERT(snapshot2->parent_id == snapshotid); 4507 4508 /* Write one cluster on the top level blob. This cluster (1) covers 4509 * already allocated cluster in the snapshot2, so shouldn't be inflated 4510 * at all */ 4511 spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, 4512 pages_per_cluster, blob_op_complete, NULL); 4513 poll_threads(); 4514 CU_ASSERT(g_bserrno == 0); 4515 4516 /* Update expected result */ 4517 memcpy(payload_clone + cluster_size, payload_write, cluster_size); 4518 4519 /* Check data consistency on clone */ 4520 memset(payload_read, 0xFF, payload_size); 4521 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4522 blob_op_complete, NULL); 4523 poll_threads(); 4524 CU_ASSERT(g_bserrno == 0); 4525 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4526 4527 4528 /* Close all blobs */ 4529 spdk_blob_close(blob, blob_op_complete, NULL); 4530 poll_threads(); 4531 CU_ASSERT(g_bserrno == 0); 4532 4533 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4534 poll_threads(); 4535 CU_ASSERT(g_bserrno == 0); 4536 4537 spdk_blob_close(snapshot, blob_op_complete, NULL); 4538 poll_threads(); 4539 CU_ASSERT(g_bserrno == 0); 4540 4541 /* Check snapshot-clone relations */ 4542 count = 2; 4543 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4544 CU_ASSERT(count == 1); 4545 CU_ASSERT(ids[0] == snapshot2id); 4546 4547 count = 2; 4548 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4549 CU_ASSERT(count == 1); 4550 CU_ASSERT(ids[0] == blobid); 4551 4552 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); 4553 4554 free_clusters = spdk_bs_free_cluster_count(bs); 4555 if (!decouple_parent) { 4556 /* Do full blob inflation */ 4557 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 4558 poll_threads(); 4559 CU_ASSERT(g_bserrno == 0); 4560 4561 /* All clusters should be inflated (except one already allocated 4562 * in a top level blob) */ 4563 
CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4); 4564 4565 /* Check if relation tree updated correctly */ 4566 count = 2; 4567 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4568 4569 /* snapshotid have one clone */ 4570 CU_ASSERT(count == 1); 4571 CU_ASSERT(ids[0] == snapshot2id); 4572 4573 /* snapshot2id have no clones */ 4574 count = 2; 4575 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4576 CU_ASSERT(count == 0); 4577 4578 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4579 } else { 4580 /* Decouple parent of blob */ 4581 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 4582 poll_threads(); 4583 CU_ASSERT(g_bserrno == 0); 4584 4585 /* Only one cluster from a parent should be inflated (second one 4586 * is covered by a cluster written on a top level blob, and 4587 * already allocated) */ 4588 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1); 4589 4590 /* Check if relation tree updated correctly */ 4591 count = 2; 4592 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4593 4594 /* snapshotid have two clones now */ 4595 CU_ASSERT(count == 2); 4596 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4597 CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id); 4598 4599 /* snapshot2id have no clones */ 4600 count = 2; 4601 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4602 CU_ASSERT(count == 0); 4603 4604 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4605 } 4606 4607 /* Try to delete snapshot2 (should pass) */ 4608 spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL); 4609 poll_threads(); 4610 CU_ASSERT(g_bserrno == 0); 4611 4612 /* Try to delete base snapshot */ 4613 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4614 poll_threads(); 4615 CU_ASSERT(g_bserrno == 0); 4616 4617 /* Reopen blob after snapshot deletion */ 4618 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4619 poll_threads(); 4620 CU_ASSERT(g_bserrno == 0); 4621 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4622 blob = g_blob; 4623 4624 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4625 4626 /* Check data consistency on inflated blob */ 4627 memset(payload_read, 0xFF, payload_size); 4628 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4629 blob_op_complete, NULL); 4630 poll_threads(); 4631 CU_ASSERT(g_bserrno == 0); 4632 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4633 4634 spdk_bs_free_io_channel(channel); 4635 poll_threads(); 4636 4637 free(payload_read); 4638 free(payload_write); 4639 free(payload_clone); 4640 4641 ut_blob_close_and_delete(bs, blob); 4642 } 4643 4644 static void 4645 blob_inflate_rw(void) 4646 { 4647 _blob_inflate_rw(false); 4648 _blob_inflate_rw(true); 4649 } 4650 4651 /** 4652 * Snapshot-clones relation test 4653 * 4654 * snapshot 4655 * | 4656 * +-----+-----+ 4657 * | | 4658 * blob(ro) snapshot2 4659 * | | 4660 * clone2 clone 4661 */ 4662 static void 4663 blob_relations(void) 4664 { 4665 struct spdk_blob_store *bs; 4666 struct spdk_bs_dev *dev; 4667 struct spdk_bs_opts bs_opts; 4668 struct spdk_blob_opts opts; 4669 struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2; 4670 spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2; 4671 int rc; 4672 size_t count; 4673 spdk_blob_id ids[10] = {}; 4674 4675 dev = init_dev(); 4676 spdk_bs_opts_init(&bs_opts, sizeof(opts)); 4677 snprintf(bs_opts.bstype.bstype, 
sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 4678 4679 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4680 poll_threads(); 4681 CU_ASSERT(g_bserrno == 0); 4682 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4683 bs = g_bs; 4684 4685 /* 1. Create blob with 10 clusters */ 4686 4687 ut_spdk_blob_opts_init(&opts); 4688 opts.num_clusters = 10; 4689 4690 blob = ut_blob_create_and_open(bs, &opts); 4691 blobid = spdk_blob_get_id(blob); 4692 4693 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4694 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4695 CU_ASSERT(!spdk_blob_is_clone(blob)); 4696 CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); 4697 4698 /* blob should not have underlying snapshot nor clones */ 4699 CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); 4700 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4701 count = SPDK_COUNTOF(ids); 4702 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4703 CU_ASSERT(rc == 0); 4704 CU_ASSERT(count == 0); 4705 4706 4707 /* 2. Create snapshot */ 4708 4709 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4710 poll_threads(); 4711 CU_ASSERT(g_bserrno == 0); 4712 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4713 snapshotid = g_blobid; 4714 4715 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4716 poll_threads(); 4717 CU_ASSERT(g_bserrno == 0); 4718 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4719 snapshot = g_blob; 4720 4721 CU_ASSERT(spdk_blob_is_read_only(snapshot)); 4722 CU_ASSERT(spdk_blob_is_snapshot(snapshot)); 4723 CU_ASSERT(!spdk_blob_is_clone(snapshot)); 4724 CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); 4725 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4726 4727 /* Check if original blob is converted to the clone of snapshot */ 4728 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4729 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4730 CU_ASSERT(spdk_blob_is_clone(blob)); 4731 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4732 CU_ASSERT(blob->parent_id == snapshotid); 4733 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4734 4735 count = SPDK_COUNTOF(ids); 4736 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4737 CU_ASSERT(rc == 0); 4738 CU_ASSERT(count == 1); 4739 CU_ASSERT(ids[0] == blobid); 4740 4741 4742 /* 3. Create clone from snapshot */ 4743 4744 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 4745 poll_threads(); 4746 CU_ASSERT(g_bserrno == 0); 4747 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4748 cloneid = g_blobid; 4749 4750 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4751 poll_threads(); 4752 CU_ASSERT(g_bserrno == 0); 4753 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4754 clone = g_blob; 4755 4756 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4757 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4758 CU_ASSERT(spdk_blob_is_clone(clone)); 4759 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4760 CU_ASSERT(clone->parent_id == snapshotid); 4761 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); 4762 4763 count = SPDK_COUNTOF(ids); 4764 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4765 CU_ASSERT(rc == 0); 4766 CU_ASSERT(count == 0); 4767 4768 /* Check if clone is on the snapshot's list */ 4769 count = SPDK_COUNTOF(ids); 4770 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4771 CU_ASSERT(rc == 0); 4772 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4773 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 4774 4775 4776 /* 4. 
Create snapshot of the clone */ 4777 4778 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 4779 poll_threads(); 4780 CU_ASSERT(g_bserrno == 0); 4781 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4782 snapshotid2 = g_blobid; 4783 4784 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4785 poll_threads(); 4786 CU_ASSERT(g_bserrno == 0); 4787 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4788 snapshot2 = g_blob; 4789 4790 CU_ASSERT(spdk_blob_is_read_only(snapshot2)); 4791 CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); 4792 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 4793 CU_ASSERT(snapshot2->parent_id == snapshotid); 4794 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4795 4796 /* Check if clone is converted to the clone of snapshot2 and snapshot2 4797 * is a child of snapshot */ 4798 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4799 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4800 CU_ASSERT(spdk_blob_is_clone(clone)); 4801 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4802 CU_ASSERT(clone->parent_id == snapshotid2); 4803 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4804 4805 count = SPDK_COUNTOF(ids); 4806 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4807 CU_ASSERT(rc == 0); 4808 CU_ASSERT(count == 1); 4809 CU_ASSERT(ids[0] == cloneid); 4810 4811 4812 /* 5. Try to create clone from read only blob */ 4813 4814 /* Mark blob as read only */ 4815 spdk_blob_set_read_only(blob); 4816 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4817 poll_threads(); 4818 CU_ASSERT(g_bserrno == 0); 4819 4820 /* Check if previously created blob is read only clone */ 4821 CU_ASSERT(spdk_blob_is_read_only(blob)); 4822 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4823 CU_ASSERT(spdk_blob_is_clone(blob)); 4824 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4825 4826 /* Create clone from read only blob */ 4827 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4828 poll_threads(); 4829 CU_ASSERT(g_bserrno == 0); 4830 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4831 cloneid2 = g_blobid; 4832 4833 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 4834 poll_threads(); 4835 CU_ASSERT(g_bserrno == 0); 4836 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4837 clone2 = g_blob; 4838 4839 CU_ASSERT(!spdk_blob_is_read_only(clone2)); 4840 CU_ASSERT(!spdk_blob_is_snapshot(clone2)); 4841 CU_ASSERT(spdk_blob_is_clone(clone2)); 4842 CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); 4843 4844 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4845 4846 count = SPDK_COUNTOF(ids); 4847 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4848 CU_ASSERT(rc == 0); 4849 4850 CU_ASSERT(count == 1); 4851 CU_ASSERT(ids[0] == cloneid2); 4852 4853 /* Close blobs */ 4854 4855 spdk_blob_close(clone2, blob_op_complete, NULL); 4856 poll_threads(); 4857 CU_ASSERT(g_bserrno == 0); 4858 4859 spdk_blob_close(blob, blob_op_complete, NULL); 4860 poll_threads(); 4861 CU_ASSERT(g_bserrno == 0); 4862 4863 spdk_blob_close(clone, blob_op_complete, NULL); 4864 poll_threads(); 4865 CU_ASSERT(g_bserrno == 0); 4866 4867 spdk_blob_close(snapshot, blob_op_complete, NULL); 4868 poll_threads(); 4869 CU_ASSERT(g_bserrno == 0); 4870 4871 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4872 poll_threads(); 4873 CU_ASSERT(g_bserrno == 0); 4874 4875 /* Try to delete snapshot with more than 1 clone */ 4876 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4877 poll_threads(); 4878 CU_ASSERT(g_bserrno != 0); 4879 4880 
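/* Reload the blobstore so the snapshot/clone relations verified below are rebuilt purely
 * from the persisted metadata. */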
ut_bs_reload(&bs, &bs_opts); 4881 4882 /* NULL ids array should return number of clones in count */ 4883 count = SPDK_COUNTOF(ids); 4884 rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); 4885 CU_ASSERT(rc == -ENOMEM); 4886 CU_ASSERT(count == 2); 4887 4888 /* incorrect array size */ 4889 count = 1; 4890 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4891 CU_ASSERT(rc == -ENOMEM); 4892 CU_ASSERT(count == 2); 4893 4894 4895 /* Verify structure of loaded blob store */ 4896 4897 /* snapshot */ 4898 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4899 4900 count = SPDK_COUNTOF(ids); 4901 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4902 CU_ASSERT(rc == 0); 4903 CU_ASSERT(count == 2); 4904 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4905 CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); 4906 4907 /* blob */ 4908 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4909 count = SPDK_COUNTOF(ids); 4910 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4911 CU_ASSERT(rc == 0); 4912 CU_ASSERT(count == 1); 4913 CU_ASSERT(ids[0] == cloneid2); 4914 4915 /* clone */ 4916 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4917 count = SPDK_COUNTOF(ids); 4918 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4919 CU_ASSERT(rc == 0); 4920 CU_ASSERT(count == 0); 4921 4922 /* snapshot2 */ 4923 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4924 count = SPDK_COUNTOF(ids); 4925 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4926 CU_ASSERT(rc == 0); 4927 CU_ASSERT(count == 1); 4928 CU_ASSERT(ids[0] == cloneid); 4929 4930 /* clone2 */ 4931 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4932 count = SPDK_COUNTOF(ids); 4933 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 4934 CU_ASSERT(rc == 0); 4935 CU_ASSERT(count == 0); 4936 4937 /* Try to delete blob that user should not be able to remove */ 4938 4939 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4940 poll_threads(); 4941 CU_ASSERT(g_bserrno != 0); 4942 4943 /* Remove all blobs */ 4944 4945 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 4946 poll_threads(); 4947 CU_ASSERT(g_bserrno == 0); 4948 4949 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 4950 poll_threads(); 4951 CU_ASSERT(g_bserrno == 0); 4952 4953 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 4954 poll_threads(); 4955 CU_ASSERT(g_bserrno == 0); 4956 4957 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 4958 poll_threads(); 4959 CU_ASSERT(g_bserrno == 0); 4960 4961 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4962 poll_threads(); 4963 CU_ASSERT(g_bserrno == 0); 4964 4965 spdk_bs_unload(bs, bs_op_complete, NULL); 4966 poll_threads(); 4967 CU_ASSERT(g_bserrno == 0); 4968 4969 g_bs = NULL; 4970 } 4971 4972 /** 4973 * Snapshot-clones relation test 2 4974 * 4975 * snapshot1 4976 * | 4977 * snapshot2 4978 * | 4979 * +-----+-----+ 4980 * | | 4981 * blob(ro) snapshot3 4982 * | | 4983 * | snapshot4 4984 * | | | 4985 * clone2 clone clone3 4986 */ 4987 static void 4988 blob_relations2(void) 4989 { 4990 struct spdk_blob_store *bs; 4991 struct spdk_bs_dev *dev; 4992 struct spdk_bs_opts bs_opts; 4993 struct spdk_blob_opts opts; 4994 struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; 4995 spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, 4996 cloneid3; 4997 int rc; 4998 size_t count; 4999 
spdk_blob_id ids[10] = {}; 5000 5001 dev = init_dev(); 5002 spdk_bs_opts_init(&bs_opts, sizeof(bs_opts)); 5003 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 5004 5005 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 5006 poll_threads(); 5007 CU_ASSERT(g_bserrno == 0); 5008 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5009 bs = g_bs; 5010 5011 /* 1. Create blob with 10 clusters */ 5012 5013 ut_spdk_blob_opts_init(&opts); 5014 opts.num_clusters = 10; 5015 5016 blob = ut_blob_create_and_open(bs, &opts); 5017 blobid = spdk_blob_get_id(blob); 5018 5019 /* 2. Create snapshot1 */ 5020 5021 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5022 poll_threads(); 5023 CU_ASSERT(g_bserrno == 0); 5024 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5025 snapshotid1 = g_blobid; 5026 5027 spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); 5028 poll_threads(); 5029 CU_ASSERT(g_bserrno == 0); 5030 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5031 snapshot1 = g_blob; 5032 5033 CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); 5034 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); 5035 5036 CU_ASSERT(blob->parent_id == snapshotid1); 5037 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5038 5039 /* Check if blob is the clone of snapshot1 */ 5040 CU_ASSERT(blob->parent_id == snapshotid1); 5041 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5042 5043 count = SPDK_COUNTOF(ids); 5044 rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); 5045 CU_ASSERT(rc == 0); 5046 CU_ASSERT(count == 1); 5047 CU_ASSERT(ids[0] == blobid); 5048 5049 /* 3. Create another snapshot */ 5050 5051 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5052 poll_threads(); 5053 CU_ASSERT(g_bserrno == 0); 5054 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5055 snapshotid2 = g_blobid; 5056 5057 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 5058 poll_threads(); 5059 CU_ASSERT(g_bserrno == 0); 5060 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5061 snapshot2 = g_blob; 5062 5063 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 5064 CU_ASSERT(snapshot2->parent_id == snapshotid1); 5065 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); 5066 5067 /* Check if snapshot2 is the clone of snapshot1 and blob 5068 * is a child of snapshot2 */ 5069 CU_ASSERT(blob->parent_id == snapshotid2); 5070 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5071 5072 count = SPDK_COUNTOF(ids); 5073 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5074 CU_ASSERT(rc == 0); 5075 CU_ASSERT(count == 1); 5076 CU_ASSERT(ids[0] == blobid); 5077 5078 /* 4. 
Create clone from snapshot */ 5079 5080 spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); 5081 poll_threads(); 5082 CU_ASSERT(g_bserrno == 0); 5083 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5084 cloneid = g_blobid; 5085 5086 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 5087 poll_threads(); 5088 CU_ASSERT(g_bserrno == 0); 5089 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5090 clone = g_blob; 5091 5092 CU_ASSERT(clone->parent_id == snapshotid2); 5093 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5094 5095 /* Check if clone is on the snapshot's list */ 5096 count = SPDK_COUNTOF(ids); 5097 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5098 CU_ASSERT(rc == 0); 5099 CU_ASSERT(count == 2); 5100 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5101 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5102 5103 /* 5. Create snapshot of the clone */ 5104 5105 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5106 poll_threads(); 5107 CU_ASSERT(g_bserrno == 0); 5108 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5109 snapshotid3 = g_blobid; 5110 5111 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5112 poll_threads(); 5113 CU_ASSERT(g_bserrno == 0); 5114 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5115 snapshot3 = g_blob; 5116 5117 CU_ASSERT(snapshot3->parent_id == snapshotid2); 5118 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5119 5120 /* Check if clone is converted to the clone of snapshot3 and snapshot3 5121 * is a child of snapshot2 */ 5122 CU_ASSERT(clone->parent_id == snapshotid3); 5123 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5124 5125 count = SPDK_COUNTOF(ids); 5126 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5127 CU_ASSERT(rc == 0); 5128 CU_ASSERT(count == 1); 5129 CU_ASSERT(ids[0] == cloneid); 5130 5131 /* 6. Create another snapshot of the clone */ 5132 5133 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5134 poll_threads(); 5135 CU_ASSERT(g_bserrno == 0); 5136 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5137 snapshotid4 = g_blobid; 5138 5139 spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); 5140 poll_threads(); 5141 CU_ASSERT(g_bserrno == 0); 5142 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5143 snapshot4 = g_blob; 5144 5145 CU_ASSERT(snapshot4->parent_id == snapshotid3); 5146 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); 5147 5148 /* Check if clone is converted to the clone of snapshot4 and snapshot4 5149 * is a child of snapshot3 */ 5150 CU_ASSERT(clone->parent_id == snapshotid4); 5151 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); 5152 5153 count = SPDK_COUNTOF(ids); 5154 rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); 5155 CU_ASSERT(rc == 0); 5156 CU_ASSERT(count == 1); 5157 CU_ASSERT(ids[0] == cloneid); 5158 5159 /* 7. Remove snapshot 4 */ 5160 5161 ut_blob_close_and_delete(bs, snapshot4); 5162 5163 /* Check if relations are back to state from before creating snapshot 4 */ 5164 CU_ASSERT(clone->parent_id == snapshotid3); 5165 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5166 5167 count = SPDK_COUNTOF(ids); 5168 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5169 CU_ASSERT(rc == 0); 5170 CU_ASSERT(count == 1); 5171 CU_ASSERT(ids[0] == cloneid); 5172 5173 /* 8. 
Create second clone of snapshot 3 and try to remove snapshot 3 */ 5174 5175 spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); 5176 poll_threads(); 5177 CU_ASSERT(g_bserrno == 0); 5178 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5179 cloneid3 = g_blobid; 5180 5181 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5182 poll_threads(); 5183 CU_ASSERT(g_bserrno != 0); 5184 5185 /* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */ 5186 5187 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5188 poll_threads(); 5189 CU_ASSERT(g_bserrno == 0); 5190 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5191 snapshot3 = g_blob; 5192 5193 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5194 poll_threads(); 5195 CU_ASSERT(g_bserrno != 0); 5196 5197 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5198 poll_threads(); 5199 CU_ASSERT(g_bserrno == 0); 5200 5201 spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); 5202 poll_threads(); 5203 CU_ASSERT(g_bserrno == 0); 5204 5205 /* 10. Remove snapshot 1 */ 5206 5207 ut_blob_close_and_delete(bs, snapshot1); 5208 5209 /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ 5210 CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); 5211 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5212 5213 count = SPDK_COUNTOF(ids); 5214 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5215 CU_ASSERT(rc == 0); 5216 CU_ASSERT(count == 2); 5217 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5218 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5219 5220 /* 11. Try to create clone from read only blob */ 5221 5222 /* Mark blob as read only */ 5223 spdk_blob_set_read_only(blob); 5224 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5225 poll_threads(); 5226 CU_ASSERT(g_bserrno == 0); 5227 5228 /* Create clone from read only blob */ 5229 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5230 poll_threads(); 5231 CU_ASSERT(g_bserrno == 0); 5232 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5233 cloneid2 = g_blobid; 5234 5235 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5236 poll_threads(); 5237 CU_ASSERT(g_bserrno == 0); 5238 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5239 clone2 = g_blob; 5240 5241 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5242 5243 count = SPDK_COUNTOF(ids); 5244 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5245 CU_ASSERT(rc == 0); 5246 CU_ASSERT(count == 1); 5247 CU_ASSERT(ids[0] == cloneid2); 5248 5249 /* Close blobs */ 5250 5251 spdk_blob_close(clone2, blob_op_complete, NULL); 5252 poll_threads(); 5253 CU_ASSERT(g_bserrno == 0); 5254 5255 spdk_blob_close(blob, blob_op_complete, NULL); 5256 poll_threads(); 5257 CU_ASSERT(g_bserrno == 0); 5258 5259 spdk_blob_close(clone, blob_op_complete, NULL); 5260 poll_threads(); 5261 CU_ASSERT(g_bserrno == 0); 5262 5263 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5264 poll_threads(); 5265 CU_ASSERT(g_bserrno == 0); 5266 5267 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5268 poll_threads(); 5269 CU_ASSERT(g_bserrno == 0); 5270 5271 ut_bs_reload(&bs, &bs_opts); 5272 5273 /* Verify structure of loaded blob store */ 5274 5275 /* snapshot2 */ 5276 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5277 5278 count = SPDK_COUNTOF(ids); 5279 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5280 CU_ASSERT(rc == 0); 5281 
CU_ASSERT(count == 2); 5282 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5283 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5284 5285 /* blob */ 5286 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5287 count = SPDK_COUNTOF(ids); 5288 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5289 CU_ASSERT(rc == 0); 5290 CU_ASSERT(count == 1); 5291 CU_ASSERT(ids[0] == cloneid2); 5292 5293 /* clone */ 5294 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5295 count = SPDK_COUNTOF(ids); 5296 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5297 CU_ASSERT(rc == 0); 5298 CU_ASSERT(count == 0); 5299 5300 /* snapshot3 */ 5301 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5302 count = SPDK_COUNTOF(ids); 5303 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5304 CU_ASSERT(rc == 0); 5305 CU_ASSERT(count == 1); 5306 CU_ASSERT(ids[0] == cloneid); 5307 5308 /* clone2 */ 5309 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5310 count = SPDK_COUNTOF(ids); 5311 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5312 CU_ASSERT(rc == 0); 5313 CU_ASSERT(count == 0); 5314 5315 /* Try to delete all blobs in the worse possible order */ 5316 5317 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5318 poll_threads(); 5319 CU_ASSERT(g_bserrno != 0); 5320 5321 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5322 poll_threads(); 5323 CU_ASSERT(g_bserrno == 0); 5324 5325 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5326 poll_threads(); 5327 CU_ASSERT(g_bserrno != 0); 5328 5329 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5330 poll_threads(); 5331 CU_ASSERT(g_bserrno == 0); 5332 5333 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5334 poll_threads(); 5335 CU_ASSERT(g_bserrno == 0); 5336 5337 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5338 poll_threads(); 5339 CU_ASSERT(g_bserrno == 0); 5340 5341 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5342 poll_threads(); 5343 CU_ASSERT(g_bserrno == 0); 5344 5345 spdk_bs_unload(bs, bs_op_complete, NULL); 5346 poll_threads(); 5347 CU_ASSERT(g_bserrno == 0); 5348 5349 g_bs = NULL; 5350 } 5351 5352 static void 5353 blobstore_clean_power_failure(void) 5354 { 5355 struct spdk_blob_store *bs; 5356 struct spdk_blob *blob; 5357 struct spdk_power_failure_thresholds thresholds = {}; 5358 bool clean = false; 5359 struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 5360 struct spdk_bs_super_block super_copy = {}; 5361 5362 thresholds.general_threshold = 1; 5363 while (!clean) { 5364 /* Create bs and blob */ 5365 suite_blob_setup(); 5366 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5367 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5368 bs = g_bs; 5369 blob = g_blob; 5370 5371 /* Super block should not change for rest of the UT, 5372 * save it and compare later. */ 5373 memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block)); 5374 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5375 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5376 5377 /* Force bs/super block in a clean state. 5378 * Along with marking blob dirty, to cause blob persist. 
*/ 5379 blob->state = SPDK_BLOB_STATE_DIRTY; 5380 bs->clean = 1; 5381 super->clean = 1; 5382 super->crc = blob_md_page_calc_crc(super); 5383 5384 g_bserrno = -1; 5385 dev_set_power_failure_thresholds(thresholds); 5386 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5387 poll_threads(); 5388 dev_reset_power_failure_event(); 5389 5390 if (g_bserrno == 0) { 5391 /* After successful md sync, both bs and super block 5392 * should be marked as not clean. */ 5393 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5394 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5395 clean = true; 5396 } 5397 5398 /* Depending on the point of failure, super block was either updated or not. */ 5399 super_copy.clean = super->clean; 5400 super_copy.crc = blob_md_page_calc_crc(&super_copy); 5401 /* Compare that the values in super block remained unchanged. */ 5402 SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block))); 5403 5404 /* Delete blob and unload bs */ 5405 suite_blob_cleanup(); 5406 5407 thresholds.general_threshold++; 5408 } 5409 } 5410 5411 static void 5412 blob_delete_snapshot_power_failure(void) 5413 { 5414 struct spdk_bs_dev *dev; 5415 struct spdk_blob_store *bs; 5416 struct spdk_blob_opts opts; 5417 struct spdk_blob *blob, *snapshot; 5418 struct spdk_power_failure_thresholds thresholds = {}; 5419 spdk_blob_id blobid, snapshotid; 5420 const void *value; 5421 size_t value_len; 5422 size_t count; 5423 spdk_blob_id ids[3] = {}; 5424 int rc; 5425 bool deleted = false; 5426 int delete_snapshot_bserrno = -1; 5427 5428 thresholds.general_threshold = 1; 5429 while (!deleted) { 5430 dev = init_dev(); 5431 5432 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5433 poll_threads(); 5434 CU_ASSERT(g_bserrno == 0); 5435 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5436 bs = g_bs; 5437 5438 /* Create blob */ 5439 ut_spdk_blob_opts_init(&opts); 5440 opts.num_clusters = 10; 5441 5442 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5443 poll_threads(); 5444 CU_ASSERT(g_bserrno == 0); 5445 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5446 blobid = g_blobid; 5447 5448 /* Create snapshot */ 5449 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5450 poll_threads(); 5451 CU_ASSERT(g_bserrno == 0); 5452 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5453 snapshotid = g_blobid; 5454 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5455 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5456 5457 dev_set_power_failure_thresholds(thresholds); 5458 5459 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5460 poll_threads(); 5461 delete_snapshot_bserrno = g_bserrno; 5462 5463 /* Do not shut down cleanly. Assumption is that after snapshot deletion 5464 * reports success, changes to both blobs should already persisted. 
*/ 5465 dev_reset_power_failure_event(); 5466 ut_bs_dirty_load(&bs, NULL); 5467 5468 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5469 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5470 5471 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5472 poll_threads(); 5473 CU_ASSERT(g_bserrno == 0); 5474 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5475 blob = g_blob; 5476 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5477 5478 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5479 poll_threads(); 5480 5481 if (g_bserrno == 0) { 5482 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5483 snapshot = g_blob; 5484 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5485 count = SPDK_COUNTOF(ids); 5486 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5487 CU_ASSERT(rc == 0); 5488 CU_ASSERT(count == 1); 5489 CU_ASSERT(ids[0] == blobid); 5490 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 5491 CU_ASSERT(rc != 0); 5492 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5493 5494 spdk_blob_close(snapshot, blob_op_complete, NULL); 5495 poll_threads(); 5496 CU_ASSERT(g_bserrno == 0); 5497 } else { 5498 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5499 /* Snapshot might have been left in unrecoverable state, so it does not open. 5500 * Yet delete might perform further changes to the clone after that. 5501 * This UT should test until snapshot is deleted and delete call succeeds. */ 5502 if (delete_snapshot_bserrno == 0) { 5503 deleted = true; 5504 } 5505 } 5506 5507 spdk_blob_close(blob, blob_op_complete, NULL); 5508 poll_threads(); 5509 CU_ASSERT(g_bserrno == 0); 5510 5511 spdk_bs_unload(bs, bs_op_complete, NULL); 5512 poll_threads(); 5513 CU_ASSERT(g_bserrno == 0); 5514 5515 thresholds.general_threshold++; 5516 } 5517 } 5518 5519 static void 5520 blob_create_snapshot_power_failure(void) 5521 { 5522 struct spdk_blob_store *bs = g_bs; 5523 struct spdk_bs_dev *dev; 5524 struct spdk_blob_opts opts; 5525 struct spdk_blob *blob, *snapshot; 5526 struct spdk_power_failure_thresholds thresholds = {}; 5527 spdk_blob_id blobid, snapshotid; 5528 const void *value; 5529 size_t value_len; 5530 size_t count; 5531 spdk_blob_id ids[3] = {}; 5532 int rc; 5533 bool created = false; 5534 int create_snapshot_bserrno = -1; 5535 5536 thresholds.general_threshold = 1; 5537 while (!created) { 5538 dev = init_dev(); 5539 5540 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5541 poll_threads(); 5542 CU_ASSERT(g_bserrno == 0); 5543 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5544 bs = g_bs; 5545 5546 /* Create blob */ 5547 ut_spdk_blob_opts_init(&opts); 5548 opts.num_clusters = 10; 5549 5550 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5551 poll_threads(); 5552 CU_ASSERT(g_bserrno == 0); 5553 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5554 blobid = g_blobid; 5555 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5556 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5557 5558 dev_set_power_failure_thresholds(thresholds); 5559 5560 /* Create snapshot */ 5561 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5562 poll_threads(); 5563 create_snapshot_bserrno = g_bserrno; 5564 snapshotid = g_blobid; 5565 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5566 
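/* Reading aid (a restatement of the used_clusters checks around this
 * point, not an extra assumption): the blob was created with
 * opts.num_clusters = 10, so exactly those ten data clusters should be in
 * use both before and after the simulated power failure. Cluster index 1
 * (inside the blob's allocation) must stay allocated and index 11 (just
 * past it) must stay free, i.e. an interrupted snapshot creation must
 * neither leak extra clusters nor release the ones backing the data. */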
SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5567 5568 /* Do not shut down cleanly. Assumption is that after create snapshot 5569 * reports success, both blobs should be power-fail safe. */ 5570 dev_reset_power_failure_event(); 5571 ut_bs_dirty_load(&bs, NULL); 5572 5573 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5574 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5575 5576 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5577 poll_threads(); 5578 CU_ASSERT(g_bserrno == 0); 5579 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5580 blob = g_blob; 5581 5582 if (snapshotid != SPDK_BLOBID_INVALID) { 5583 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5584 poll_threads(); 5585 } 5586 5587 if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { 5588 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5589 snapshot = g_blob; 5590 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5591 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5592 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5593 count = SPDK_COUNTOF(ids); 5594 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5595 CU_ASSERT(rc == 0); 5596 CU_ASSERT(count == 1); 5597 CU_ASSERT(ids[0] == blobid); 5598 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); 5599 CU_ASSERT(rc != 0); 5600 5601 spdk_blob_close(snapshot, blob_op_complete, NULL); 5602 poll_threads(); 5603 CU_ASSERT(g_bserrno == 0); 5604 if (create_snapshot_bserrno == 0) { 5605 created = true; 5606 } 5607 } else { 5608 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5609 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); 5610 } 5611 5612 spdk_blob_close(blob, blob_op_complete, NULL); 5613 poll_threads(); 5614 CU_ASSERT(g_bserrno == 0); 5615 5616 spdk_bs_unload(bs, bs_op_complete, NULL); 5617 poll_threads(); 5618 CU_ASSERT(g_bserrno == 0); 5619 5620 thresholds.general_threshold++; 5621 } 5622 } 5623 5624 static void 5625 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5626 { 5627 uint8_t payload_ff[64 * 512]; 5628 uint8_t payload_aa[64 * 512]; 5629 uint8_t payload_00[64 * 512]; 5630 uint8_t *cluster0, *cluster1; 5631 5632 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5633 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5634 memset(payload_00, 0x00, sizeof(payload_00)); 5635 5636 /* Try to perform I/O with io unit = 512 */ 5637 spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); 5638 poll_threads(); 5639 CU_ASSERT(g_bserrno == 0); 5640 5641 /* If thin provisioned is set cluster should be allocated now */ 5642 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5643 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5644 5645 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5646 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). 
*/ 5647 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5648 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5649 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5650 5651 /* Verify write with offset on first page */ 5652 spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); 5653 poll_threads(); 5654 CU_ASSERT(g_bserrno == 0); 5655 5656 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5657 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5658 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5659 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5660 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5661 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5662 5663 /* Verify write with offset on first page */ 5664 spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); 5665 poll_threads(); 5666 5667 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5668 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5669 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5670 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5671 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5672 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5673 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5674 5675 /* Verify write with offset on second page */ 5676 spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); 5677 poll_threads(); 5678 5679 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5680 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5681 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5682 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5683 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5684 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5685 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5686 5687 /* Verify write across multiple pages */ 5688 spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); 5689 poll_threads(); 5690 5691 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ 5692 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5693 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5694 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5695 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5696 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5697 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5698 5699 /* Verify write across multiple clusters */ 5700 spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); 5701 poll_threads(); 5702 5703 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5704 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5705 5706 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5707 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5708 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5709 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5710 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5711 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5712 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5713 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 
4 * 512) == 0); 5714 5715 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5716 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5717 5718 /* Verify write to second cluster */ 5719 spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); 5720 poll_threads(); 5721 5722 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5723 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5724 5725 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5726 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5727 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5728 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5729 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5730 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5731 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5732 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5733 5734 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5735 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5736 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5737 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5738 } 5739 5740 static void 5741 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5742 { 5743 uint8_t payload_read[64 * 512]; 5744 uint8_t payload_ff[64 * 512]; 5745 uint8_t payload_aa[64 * 512]; 5746 uint8_t payload_00[64 * 512]; 5747 5748 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5749 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5750 memset(payload_00, 0x00, sizeof(payload_00)); 5751 5752 /* Read only first io unit */ 5753 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5754 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5755 * payload_read: F000 0000 | 0000 0000 ... */ 5756 memset(payload_read, 0x00, sizeof(payload_read)); 5757 spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); 5758 poll_threads(); 5759 CU_ASSERT(g_bserrno == 0); 5760 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5761 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5762 5763 /* Read four io_units starting from offset = 2 5764 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5765 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5766 * payload_read: F0AA 0000 | 0000 0000 ... */ 5767 5768 memset(payload_read, 0x00, sizeof(payload_read)); 5769 spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); 5770 poll_threads(); 5771 CU_ASSERT(g_bserrno == 0); 5772 5773 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5774 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5775 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5776 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5777 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5778 5779 /* Read eight io_units across multiple pages 5780 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5781 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5782 * payload_read: AAAA AAAA | 0000 0000 ... 
*/ 5783 memset(payload_read, 0x00, sizeof(payload_read)); 5784 spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); 5785 poll_threads(); 5786 CU_ASSERT(g_bserrno == 0); 5787 5788 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5789 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5790 5791 /* Read eight io_units across multiple clusters 5792 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5793 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5794 * payload_read: FFFF FFFF | 0000 0000 ... */ 5795 memset(payload_read, 0x00, sizeof(payload_read)); 5796 spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); 5797 poll_threads(); 5798 CU_ASSERT(g_bserrno == 0); 5799 5800 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 5801 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5802 5803 /* Read four io_units from second cluster 5804 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5805 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 5806 * payload_read: 00FF 0000 | 0000 0000 ... */ 5807 memset(payload_read, 0x00, sizeof(payload_read)); 5808 spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); 5809 poll_threads(); 5810 CU_ASSERT(g_bserrno == 0); 5811 5812 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 5813 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 5814 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5815 5816 /* Read second cluster 5817 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5818 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 5819 * payload_read: FFFF 0000 | 0000 FF00 ... 
*/ 5820 memset(payload_read, 0x00, sizeof(payload_read)); 5821 spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); 5822 poll_threads(); 5823 CU_ASSERT(g_bserrno == 0); 5824 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 5825 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 5826 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 5827 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 5828 5829 /* Read whole two clusters 5830 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5831 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 5832 memset(payload_read, 0x00, sizeof(payload_read)); 5833 spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); 5834 poll_threads(); 5835 CU_ASSERT(g_bserrno == 0); 5836 5837 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5838 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5839 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 5840 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 5841 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 5842 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 5843 5844 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 5845 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 5846 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 5847 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 5848 } 5849 5850 5851 static void 5852 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5853 { 5854 uint8_t payload_ff[64 * 512]; 5855 uint8_t payload_aa[64 * 512]; 5856 uint8_t payload_00[64 * 512]; 5857 uint8_t *cluster0, *cluster1; 5858 5859 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5860 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5861 memset(payload_00, 0x00, sizeof(payload_00)); 5862 5863 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5864 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5865 5866 /* Unmap */ 5867 spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); 5868 poll_threads(); 5869 5870 CU_ASSERT(g_bserrno == 0); 5871 5872 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5873 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5874 } 5875 5876 static void 5877 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5878 { 5879 uint8_t payload_ff[64 * 512]; 5880 uint8_t payload_aa[64 * 512]; 5881 uint8_t payload_00[64 * 512]; 5882 uint8_t *cluster0, *cluster1; 5883 5884 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5885 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5886 memset(payload_00, 0x00, sizeof(payload_00)); 5887 5888 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5889 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5890 5891 /* Write zeroes */ 5892 spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); 5893 poll_threads(); 5894 5895 CU_ASSERT(g_bserrno == 0); 5896 5897 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5898 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5899 } 5900 5901 5902 static void 5903 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 
5904 { 5905 uint8_t payload_ff[64 * 512]; 5906 uint8_t payload_aa[64 * 512]; 5907 uint8_t payload_00[64 * 512]; 5908 uint8_t *cluster0, *cluster1; 5909 struct iovec iov[4]; 5910 5911 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5912 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5913 memset(payload_00, 0x00, sizeof(payload_00)); 5914 5915 /* Try to perform I/O with io unit = 512 */ 5916 iov[0].iov_base = payload_ff; 5917 iov[0].iov_len = 1 * 512; 5918 spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5919 poll_threads(); 5920 CU_ASSERT(g_bserrno == 0); 5921 5922 /* If thin provisioned is set cluster should be allocated now */ 5923 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5924 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5925 5926 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5927 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ 5928 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5929 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5930 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5931 5932 /* Verify write with offset on first page */ 5933 iov[0].iov_base = payload_ff; 5934 iov[0].iov_len = 1 * 512; 5935 spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); 5936 poll_threads(); 5937 CU_ASSERT(g_bserrno == 0); 5938 5939 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5940 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5941 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5942 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5943 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5944 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5945 5946 /* Verify write with offset on first page */ 5947 iov[0].iov_base = payload_ff; 5948 iov[0].iov_len = 4 * 512; 5949 spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); 5950 poll_threads(); 5951 5952 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5953 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5954 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5955 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5956 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5957 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5958 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5959 5960 /* Verify write with offset on second page */ 5961 iov[0].iov_base = payload_ff; 5962 iov[0].iov_len = 4 * 512; 5963 spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); 5964 poll_threads(); 5965 5966 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5967 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5968 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5969 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5970 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5971 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5972 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5973 5974 /* Verify write across multiple pages */ 5975 iov[0].iov_base = payload_aa; 5976 iov[0].iov_len = 8 * 512; 5977 spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); 5978 poll_threads(); 5979 5980 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 
0000 | 0000 0000 ] */ 5981 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5982 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5983 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5984 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5985 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5986 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5987 5988 /* Verify write across multiple clusters */ 5989 5990 iov[0].iov_base = payload_ff; 5991 iov[0].iov_len = 8 * 512; 5992 spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); 5993 poll_threads(); 5994 5995 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5996 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5997 5998 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5999 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6000 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6001 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6002 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6003 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6004 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6005 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); 6006 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 6007 6008 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6009 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 6010 6011 /* Verify write to second cluster */ 6012 6013 iov[0].iov_base = payload_ff; 6014 iov[0].iov_len = 2 * 512; 6015 spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); 6016 poll_threads(); 6017 6018 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 6019 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6020 6021 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6022 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 6023 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6024 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6025 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6026 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6027 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6028 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 6029 6030 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6031 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 6032 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 6033 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 6034 } 6035 6036 static void 6037 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6038 { 6039 uint8_t payload_read[64 * 512]; 6040 uint8_t payload_ff[64 * 512]; 6041 uint8_t payload_aa[64 * 512]; 6042 uint8_t payload_00[64 * 512]; 6043 struct iovec iov[4]; 6044 6045 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6046 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6047 memset(payload_00, 0x00, sizeof(payload_00)); 6048 6049 /* Read only first io unit */ 6050 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6051 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6052 * payload_read: F000 0000 | 0000 0000 ... 
*/ 6053 memset(payload_read, 0x00, sizeof(payload_read)); 6054 iov[0].iov_base = payload_read; 6055 iov[0].iov_len = 1 * 512; 6056 spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 6057 poll_threads(); 6058 6059 CU_ASSERT(g_bserrno == 0); 6060 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6061 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 6062 6063 /* Read four io_units starting from offset = 2 6064 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6065 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6066 * payload_read: F0AA 0000 | 0000 0000 ... */ 6067 6068 memset(payload_read, 0x00, sizeof(payload_read)); 6069 iov[0].iov_base = payload_read; 6070 iov[0].iov_len = 4 * 512; 6071 spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); 6072 poll_threads(); 6073 CU_ASSERT(g_bserrno == 0); 6074 6075 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6076 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6077 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 6078 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 6079 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6080 6081 /* Read eight io_units across multiple pages 6082 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6083 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6084 * payload_read: AAAA AAAA | 0000 0000 ... */ 6085 memset(payload_read, 0x00, sizeof(payload_read)); 6086 iov[0].iov_base = payload_read; 6087 iov[0].iov_len = 4 * 512; 6088 iov[1].iov_base = payload_read + 4 * 512; 6089 iov[1].iov_len = 4 * 512; 6090 spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); 6091 poll_threads(); 6092 CU_ASSERT(g_bserrno == 0); 6093 6094 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 6095 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6096 6097 /* Read eight io_units across multiple clusters 6098 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 6099 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6100 * payload_read: FFFF FFFF | 0000 0000 ... */ 6101 memset(payload_read, 0x00, sizeof(payload_read)); 6102 iov[0].iov_base = payload_read; 6103 iov[0].iov_len = 2 * 512; 6104 iov[1].iov_base = payload_read + 2 * 512; 6105 iov[1].iov_len = 2 * 512; 6106 iov[2].iov_base = payload_read + 4 * 512; 6107 iov[2].iov_len = 2 * 512; 6108 iov[3].iov_base = payload_read + 6 * 512; 6109 iov[3].iov_len = 2 * 512; 6110 spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); 6111 poll_threads(); 6112 CU_ASSERT(g_bserrno == 0); 6113 6114 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 6115 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6116 6117 /* Read four io_units from second cluster 6118 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6119 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 6120 * payload_read: 00FF 0000 | 0000 0000 ... 
*/ 6121 memset(payload_read, 0x00, sizeof(payload_read)); 6122 iov[0].iov_base = payload_read; 6123 iov[0].iov_len = 1 * 512; 6124 iov[1].iov_base = payload_read + 1 * 512; 6125 iov[1].iov_len = 3 * 512; 6126 spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); 6127 poll_threads(); 6128 CU_ASSERT(g_bserrno == 0); 6129 6130 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 6131 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 6132 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6133 6134 /* Read second cluster 6135 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6136 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6137 * payload_read: FFFF 0000 | 0000 FF00 ... */ 6138 memset(payload_read, 0x00, sizeof(payload_read)); 6139 iov[0].iov_base = payload_read; 6140 iov[0].iov_len = 1 * 512; 6141 iov[1].iov_base = payload_read + 1 * 512; 6142 iov[1].iov_len = 2 * 512; 6143 iov[2].iov_base = payload_read + 3 * 512; 6144 iov[2].iov_len = 4 * 512; 6145 iov[3].iov_base = payload_read + 7 * 512; 6146 iov[3].iov_len = 25 * 512; 6147 spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); 6148 poll_threads(); 6149 CU_ASSERT(g_bserrno == 0); 6150 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6151 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6152 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6153 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6154 6155 /* Read whole two clusters 6156 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6157 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6158 memset(payload_read, 0x00, sizeof(payload_read)); 6159 iov[0].iov_base = payload_read; 6160 iov[0].iov_len = 1 * 512; 6161 iov[1].iov_base = payload_read + 1 * 512; 6162 iov[1].iov_len = 8 * 512; 6163 iov[2].iov_base = payload_read + 9 * 512; 6164 iov[2].iov_len = 16 * 512; 6165 iov[3].iov_base = payload_read + 25 * 512; 6166 iov[3].iov_len = 39 * 512; 6167 spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); 6168 poll_threads(); 6169 CU_ASSERT(g_bserrno == 0); 6170 6171 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6172 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6173 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6174 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6175 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6176 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6177 6178 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6179 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6180 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6181 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6182 } 6183 6184 static void 6185 blob_io_unit(void) 6186 { 6187 struct spdk_bs_opts bsopts; 6188 struct spdk_blob_opts opts; 6189 struct spdk_blob_store *bs; 6190 struct spdk_bs_dev *dev; 6191 struct spdk_blob *blob, *snapshot, *clone; 6192 spdk_blob_id blobid; 6193 struct spdk_io_channel *channel; 6194 6195 /* Create dev with 512 bytes io unit size */ 6196 6197 spdk_bs_opts_init(&bsopts, sizeof(bsopts)); 6198 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6199 snprintf(bsopts.bstype.bstype, 
sizeof(bsopts.bstype.bstype), "TESTTYPE"); 6200 6201 /* Try to initialize a new blob store with unsupported io_unit */ 6202 dev = init_dev(); 6203 dev->blocklen = 512; 6204 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6205 6206 /* Initialize a new blob store */ 6207 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6208 poll_threads(); 6209 CU_ASSERT(g_bserrno == 0); 6210 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6211 bs = g_bs; 6212 6213 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6214 channel = spdk_bs_alloc_io_channel(bs); 6215 6216 /* Create thick provisioned blob */ 6217 ut_spdk_blob_opts_init(&opts); 6218 opts.thin_provision = false; 6219 opts.num_clusters = 32; 6220 6221 blob = ut_blob_create_and_open(bs, &opts); 6222 blobid = spdk_blob_get_id(blob); 6223 6224 test_io_write(dev, blob, channel); 6225 test_io_read(dev, blob, channel); 6226 test_io_zeroes(dev, blob, channel); 6227 6228 test_iov_write(dev, blob, channel); 6229 test_iov_read(dev, blob, channel); 6230 6231 test_io_unmap(dev, blob, channel); 6232 6233 spdk_blob_close(blob, blob_op_complete, NULL); 6234 poll_threads(); 6235 CU_ASSERT(g_bserrno == 0); 6236 blob = NULL; 6237 g_blob = NULL; 6238 6239 /* Create thin provisioned blob */ 6240 6241 ut_spdk_blob_opts_init(&opts); 6242 opts.thin_provision = true; 6243 opts.num_clusters = 32; 6244 6245 blob = ut_blob_create_and_open(bs, &opts); 6246 blobid = spdk_blob_get_id(blob); 6247 6248 test_io_write(dev, blob, channel); 6249 test_io_read(dev, blob, channel); 6250 6251 test_io_zeroes(dev, blob, channel); 6252 6253 test_iov_write(dev, blob, channel); 6254 test_iov_read(dev, blob, channel); 6255 6256 /* Create snapshot */ 6257 6258 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6259 poll_threads(); 6260 CU_ASSERT(g_bserrno == 0); 6261 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6262 blobid = g_blobid; 6263 6264 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6265 poll_threads(); 6266 CU_ASSERT(g_bserrno == 0); 6267 CU_ASSERT(g_blob != NULL); 6268 snapshot = g_blob; 6269 6270 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6271 poll_threads(); 6272 CU_ASSERT(g_bserrno == 0); 6273 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6274 blobid = g_blobid; 6275 6276 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6277 poll_threads(); 6278 CU_ASSERT(g_bserrno == 0); 6279 CU_ASSERT(g_blob != NULL); 6280 clone = g_blob; 6281 6282 test_io_read(dev, blob, channel); 6283 test_io_read(dev, snapshot, channel); 6284 test_io_read(dev, clone, channel); 6285 6286 test_iov_read(dev, blob, channel); 6287 test_iov_read(dev, snapshot, channel); 6288 test_iov_read(dev, clone, channel); 6289 6290 /* Inflate clone */ 6291 6292 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6293 poll_threads(); 6294 6295 CU_ASSERT(g_bserrno == 0); 6296 6297 test_io_read(dev, clone, channel); 6298 6299 test_io_unmap(dev, clone, channel); 6300 6301 test_iov_write(dev, clone, channel); 6302 test_iov_read(dev, clone, channel); 6303 6304 spdk_blob_close(blob, blob_op_complete, NULL); 6305 spdk_blob_close(snapshot, blob_op_complete, NULL); 6306 spdk_blob_close(clone, blob_op_complete, NULL); 6307 poll_threads(); 6308 CU_ASSERT(g_bserrno == 0); 6309 blob = NULL; 6310 g_blob = NULL; 6311 6312 spdk_bs_free_io_channel(channel); 6313 poll_threads(); 6314 6315 /* Unload the blob store */ 6316 spdk_bs_unload(bs, bs_op_complete, NULL); 6317 poll_threads(); 6318 CU_ASSERT(g_bserrno == 0); 6319 g_bs = NULL; 6320 g_blob = 
NULL; 6321 g_blobid = 0; 6322 } 6323 6324 static void 6325 blob_io_unit_compatiblity(void) 6326 { 6327 struct spdk_bs_opts bsopts; 6328 struct spdk_blob_store *bs; 6329 struct spdk_bs_dev *dev; 6330 struct spdk_bs_super_block *super; 6331 6332 /* Create dev with 512 bytes io unit size */ 6333 6334 spdk_bs_opts_init(&bsopts, sizeof(bsopts)); 6335 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6336 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE"); 6337 6338 /* Try to initialize a new blob store with unsupported io_unit */ 6339 dev = init_dev(); 6340 dev->blocklen = 512; 6341 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6342 6343 /* Initialize a new blob store */ 6344 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6345 poll_threads(); 6346 CU_ASSERT(g_bserrno == 0); 6347 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6348 bs = g_bs; 6349 6350 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6351 6352 /* Unload the blob store */ 6353 spdk_bs_unload(bs, bs_op_complete, NULL); 6354 poll_threads(); 6355 CU_ASSERT(g_bserrno == 0); 6356 6357 /* Modify super block to behave like older version. 6358 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */ 6359 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 6360 super->io_unit_size = 0; 6361 super->crc = blob_md_page_calc_crc(super); 6362 6363 dev = init_dev(); 6364 dev->blocklen = 512; 6365 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6366 6367 spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL); 6368 poll_threads(); 6369 CU_ASSERT(g_bserrno == 0); 6370 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6371 bs = g_bs; 6372 6373 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE); 6374 6375 /* Unload the blob store */ 6376 spdk_bs_unload(bs, bs_op_complete, NULL); 6377 poll_threads(); 6378 CU_ASSERT(g_bserrno == 0); 6379 6380 g_bs = NULL; 6381 g_blob = NULL; 6382 g_blobid = 0; 6383 } 6384 6385 static void 6386 blob_simultaneous_operations(void) 6387 { 6388 struct spdk_blob_store *bs = g_bs; 6389 struct spdk_blob_opts opts; 6390 struct spdk_blob *blob, *snapshot; 6391 spdk_blob_id blobid, snapshotid; 6392 struct spdk_io_channel *channel; 6393 6394 channel = spdk_bs_alloc_io_channel(bs); 6395 SPDK_CU_ASSERT_FATAL(channel != NULL); 6396 6397 ut_spdk_blob_opts_init(&opts); 6398 opts.num_clusters = 10; 6399 6400 blob = ut_blob_create_and_open(bs, &opts); 6401 blobid = spdk_blob_get_id(blob); 6402 6403 /* Create snapshot and try to remove blob in the same time: 6404 * - snapshot should be created successfully 6405 * - delete operation should fail w -EBUSY */ 6406 CU_ASSERT(blob->locked_operation_in_progress == false); 6407 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6408 CU_ASSERT(blob->locked_operation_in_progress == true); 6409 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6410 CU_ASSERT(blob->locked_operation_in_progress == true); 6411 /* Deletion failure */ 6412 CU_ASSERT(g_bserrno == -EBUSY); 6413 poll_threads(); 6414 CU_ASSERT(blob->locked_operation_in_progress == false); 6415 /* Snapshot creation success */ 6416 CU_ASSERT(g_bserrno == 0); 6417 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6418 6419 snapshotid = g_blobid; 6420 6421 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 6422 poll_threads(); 6423 CU_ASSERT(g_bserrno == 0); 6424 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6425 snapshot = g_blob; 6426 6427 /* Inflate blob and try to remove blob in the same time: 6428 * - blob should be inflated successfully 6429 * - 
delete operation should fail with -EBUSY */ 6430 CU_ASSERT(blob->locked_operation_in_progress == false); 6431 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6432 CU_ASSERT(blob->locked_operation_in_progress == true); 6433 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6434 CU_ASSERT(blob->locked_operation_in_progress == true); 6435 /* Deletion failure */ 6436 CU_ASSERT(g_bserrno == -EBUSY); 6437 poll_threads(); 6438 CU_ASSERT(blob->locked_operation_in_progress == false); 6439 /* Inflation success */ 6440 CU_ASSERT(g_bserrno == 0); 6441 6442 /* Clone snapshot and try to remove snapshot at the same time: 6443 * - snapshot should be cloned successfully 6444 * - delete operation should fail with -EBUSY */ 6445 CU_ASSERT(blob->locked_operation_in_progress == false); 6446 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 6447 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 6448 /* Deletion failure */ 6449 CU_ASSERT(g_bserrno == -EBUSY); 6450 poll_threads(); 6451 CU_ASSERT(blob->locked_operation_in_progress == false); 6452 /* Clone created */ 6453 CU_ASSERT(g_bserrno == 0); 6454 6455 /* Resize blob and try to remove blob at the same time: 6456 * - blob should be resized successfully 6457 * - delete operation should fail with -EBUSY */ 6458 CU_ASSERT(blob->locked_operation_in_progress == false); 6459 spdk_blob_resize(blob, 50, blob_op_complete, NULL); 6460 CU_ASSERT(blob->locked_operation_in_progress == true); 6461 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6462 CU_ASSERT(blob->locked_operation_in_progress == true); 6463 /* Deletion failure */ 6464 CU_ASSERT(g_bserrno == -EBUSY); 6465 poll_threads(); 6466 CU_ASSERT(blob->locked_operation_in_progress == false); 6467 /* Blob resized successfully */ 6468 CU_ASSERT(g_bserrno == 0); 6469 6470 /* Issue two consecutive blob syncs; neither should fail. 6471 * Force sync to actually occur by marking blob dirty each time. 6472 * Execution of sync should not be enough to complete the operation, 6473 * since disk I/O is required to complete it. */ 6474 g_bserrno = -1; 6475 6476 blob->state = SPDK_BLOB_STATE_DIRTY; 6477 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6478 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6479 6480 blob->state = SPDK_BLOB_STATE_DIRTY; 6481 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6482 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6483 6484 uint32_t completions = 0; 6485 while (completions < 2) { 6486 SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1)); 6487 if (g_bserrno == 0) { 6488 g_bserrno = -1; 6489 completions++; 6490 } 6491 /* g_bserrno should never be anything other than -1 here; 6492 * that would mean that one of the syncs failed.
*/ 6493 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6494 } 6495 6496 spdk_bs_free_io_channel(channel); 6497 poll_threads(); 6498 6499 ut_blob_close_and_delete(bs, snapshot); 6500 ut_blob_close_and_delete(bs, blob); 6501 } 6502 6503 static void 6504 blob_persist_test(void) 6505 { 6506 struct spdk_blob_store *bs = g_bs; 6507 struct spdk_blob_opts opts; 6508 struct spdk_blob *blob; 6509 spdk_blob_id blobid; 6510 struct spdk_io_channel *channel; 6511 char *xattr; 6512 size_t xattr_length; 6513 int rc; 6514 uint32_t page_count_clear, page_count_xattr; 6515 uint64_t poller_iterations; 6516 bool run_poller; 6517 6518 channel = spdk_bs_alloc_io_channel(bs); 6519 SPDK_CU_ASSERT_FATAL(channel != NULL); 6520 6521 ut_spdk_blob_opts_init(&opts); 6522 opts.num_clusters = 10; 6523 6524 blob = ut_blob_create_and_open(bs, &opts); 6525 blobid = spdk_blob_get_id(blob); 6526 6527 /* Save the amount of md pages used after creation of a blob. 6528 * This should be consistent after removing xattr. */ 6529 page_count_clear = spdk_bit_array_count_set(bs->used_md_pages); 6530 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6531 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6532 6533 /* Add xattr with maximum length of descriptor to exceed single metadata page. */ 6534 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 6535 strlen("large_xattr"); 6536 xattr = calloc(xattr_length, sizeof(char)); 6537 SPDK_CU_ASSERT_FATAL(xattr != NULL); 6538 6539 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6540 SPDK_CU_ASSERT_FATAL(rc == 0); 6541 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6542 poll_threads(); 6543 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6544 6545 /* Save the amount of md pages used after adding the large xattr */ 6546 page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages); 6547 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6548 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6549 6550 /* Add xattr to a blob and sync it. While sync is occuring, remove the xattr and sync again. 6551 * Interrupt the first sync after increasing number of poller iterations, until it succeeds. 6552 * Expectation is that after second sync completes no xattr is saved in metadata. */ 6553 poller_iterations = 1; 6554 run_poller = true; 6555 while (run_poller) { 6556 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6557 SPDK_CU_ASSERT_FATAL(rc == 0); 6558 g_bserrno = -1; 6559 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6560 poll_thread_times(0, poller_iterations); 6561 if (g_bserrno == 0) { 6562 /* Poller iteration count was high enough for first sync to complete. 6563 * Verify that blob takes up enough of md_pages to store the xattr. 
*/ 6564 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6565 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6566 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr); 6567 run_poller = false; 6568 } 6569 rc = spdk_blob_remove_xattr(blob, "large_xattr"); 6570 SPDK_CU_ASSERT_FATAL(rc == 0); 6571 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6572 poll_threads(); 6573 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6574 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6575 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6576 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear); 6577 6578 /* Reload bs and re-open blob to verify that xattr was not persisted. */ 6579 spdk_blob_close(blob, blob_op_complete, NULL); 6580 poll_threads(); 6581 CU_ASSERT(g_bserrno == 0); 6582 6583 ut_bs_reload(&bs, NULL); 6584 6585 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6586 poll_threads(); 6587 CU_ASSERT(g_bserrno == 0); 6588 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6589 blob = g_blob; 6590 6591 rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length); 6592 SPDK_CU_ASSERT_FATAL(rc == -ENOENT); 6593 6594 poller_iterations++; 6595 /* Stop at high iteration count to prevent infinite loop. 6596 * This value should be enough for first md sync to complete in any case. */ 6597 SPDK_CU_ASSERT_FATAL(poller_iterations < 50); 6598 } 6599 6600 free(xattr); 6601 6602 ut_blob_close_and_delete(bs, blob); 6603 6604 spdk_bs_free_io_channel(channel); 6605 poll_threads(); 6606 } 6607 6608 static void 6609 suite_bs_setup(void) 6610 { 6611 struct spdk_bs_dev *dev; 6612 6613 dev = init_dev(); 6614 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6615 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 6616 poll_threads(); 6617 CU_ASSERT(g_bserrno == 0); 6618 CU_ASSERT(g_bs != NULL); 6619 } 6620 6621 static void 6622 suite_bs_cleanup(void) 6623 { 6624 spdk_bs_unload(g_bs, bs_op_complete, NULL); 6625 poll_threads(); 6626 CU_ASSERT(g_bserrno == 0); 6627 g_bs = NULL; 6628 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6629 } 6630 6631 static struct spdk_blob * 6632 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts) 6633 { 6634 struct spdk_blob *blob; 6635 struct spdk_blob_opts create_blob_opts; 6636 spdk_blob_id blobid; 6637 6638 if (blob_opts == NULL) { 6639 ut_spdk_blob_opts_init(&create_blob_opts); 6640 blob_opts = &create_blob_opts; 6641 } 6642 6643 spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL); 6644 poll_threads(); 6645 CU_ASSERT(g_bserrno == 0); 6646 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6647 blobid = g_blobid; 6648 g_blobid = -1; 6649 6650 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6651 poll_threads(); 6652 CU_ASSERT(g_bserrno == 0); 6653 CU_ASSERT(g_blob != NULL); 6654 blob = g_blob; 6655 6656 g_blob = NULL; 6657 g_bserrno = -1; 6658 6659 return blob; 6660 } 6661 6662 static void 6663 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob) 6664 { 6665 spdk_blob_id blobid = spdk_blob_get_id(blob); 6666 6667 spdk_blob_close(blob, blob_op_complete, NULL); 6668 poll_threads(); 6669 CU_ASSERT(g_bserrno == 0); 6670 g_blob = NULL; 6671 6672 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6673 poll_threads(); 6674 
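/* As with every blobstore call in these tests, spdk_bs_delete_blob() only
 * starts an asynchronous operation; its completion callback
 * (blob_op_complete) is driven by poll_threads() above, so g_bserrno is
 * only expected to hold the result once polling has finished. */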
CU_ASSERT(g_bserrno == 0); 6675 g_bserrno = -1; 6676 } 6677 6678 static void 6679 suite_blob_setup(void) 6680 { 6681 suite_bs_setup(); 6682 CU_ASSERT(g_bs != NULL); 6683 6684 g_blob = ut_blob_create_and_open(g_bs, NULL); 6685 CU_ASSERT(g_blob != NULL); 6686 } 6687 6688 static void 6689 suite_blob_cleanup(void) 6690 { 6691 ut_blob_close_and_delete(g_bs, g_blob); 6692 CU_ASSERT(g_blob == NULL); 6693 6694 suite_bs_cleanup(); 6695 CU_ASSERT(g_bs == NULL); 6696 } 6697 6698 int main(int argc, char **argv) 6699 { 6700 CU_pSuite suite, suite_bs, suite_blob; 6701 unsigned int num_failures; 6702 6703 CU_set_error_action(CUEA_ABORT); 6704 CU_initialize_registry(); 6705 6706 suite = CU_add_suite("blob", NULL, NULL); 6707 suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL, 6708 suite_bs_setup, suite_bs_cleanup); 6709 suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL, 6710 suite_blob_setup, suite_blob_cleanup); 6711 6712 CU_ADD_TEST(suite, blob_init); 6713 CU_ADD_TEST(suite_bs, blob_open); 6714 CU_ADD_TEST(suite_bs, blob_create); 6715 CU_ADD_TEST(suite_bs, blob_create_loop); 6716 CU_ADD_TEST(suite_bs, blob_create_fail); 6717 CU_ADD_TEST(suite_bs, blob_create_internal); 6718 CU_ADD_TEST(suite, blob_thin_provision); 6719 CU_ADD_TEST(suite_bs, blob_snapshot); 6720 CU_ADD_TEST(suite_bs, blob_clone); 6721 CU_ADD_TEST(suite_bs, blob_inflate); 6722 CU_ADD_TEST(suite_bs, blob_delete); 6723 CU_ADD_TEST(suite_bs, blob_resize_test); 6724 CU_ADD_TEST(suite, blob_read_only); 6725 CU_ADD_TEST(suite_bs, channel_ops); 6726 CU_ADD_TEST(suite_bs, blob_super); 6727 CU_ADD_TEST(suite_blob, blob_write); 6728 CU_ADD_TEST(suite_blob, blob_read); 6729 CU_ADD_TEST(suite_blob, blob_rw_verify); 6730 CU_ADD_TEST(suite_bs, blob_rw_verify_iov); 6731 CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem); 6732 CU_ADD_TEST(suite_blob, blob_rw_iov_read_only); 6733 CU_ADD_TEST(suite_bs, blob_unmap); 6734 CU_ADD_TEST(suite_bs, blob_iter); 6735 CU_ADD_TEST(suite_blob, blob_xattr); 6736 CU_ADD_TEST(suite_bs, blob_parse_md); 6737 CU_ADD_TEST(suite, bs_load); 6738 CU_ADD_TEST(suite_bs, bs_load_pending_removal); 6739 CU_ADD_TEST(suite, bs_load_custom_cluster_size); 6740 CU_ADD_TEST(suite_bs, bs_unload); 6741 CU_ADD_TEST(suite, bs_cluster_sz); 6742 CU_ADD_TEST(suite_bs, bs_usable_clusters); 6743 CU_ADD_TEST(suite, bs_resize_md); 6744 CU_ADD_TEST(suite, bs_destroy); 6745 CU_ADD_TEST(suite, bs_type); 6746 CU_ADD_TEST(suite, bs_super_block); 6747 CU_ADD_TEST(suite, blob_serialize_test); 6748 CU_ADD_TEST(suite_bs, blob_crc); 6749 CU_ADD_TEST(suite, super_block_crc); 6750 CU_ADD_TEST(suite_blob, blob_dirty_shutdown); 6751 CU_ADD_TEST(suite_bs, blob_flags); 6752 CU_ADD_TEST(suite_bs, bs_version); 6753 CU_ADD_TEST(suite_bs, blob_set_xattrs_test); 6754 CU_ADD_TEST(suite_bs, blob_thin_prov_alloc); 6755 CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test); 6756 CU_ADD_TEST(suite_bs, blob_thin_prov_rw); 6757 CU_ADD_TEST(suite_bs, blob_thin_prov_rle); 6758 CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov); 6759 CU_ADD_TEST(suite, bs_load_iter_test); 6760 CU_ADD_TEST(suite_bs, blob_snapshot_rw); 6761 CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov); 6762 CU_ADD_TEST(suite, blob_relations); 6763 CU_ADD_TEST(suite, blob_relations2); 6764 CU_ADD_TEST(suite, blobstore_clean_power_failure); 6765 CU_ADD_TEST(suite, blob_delete_snapshot_power_failure); 6766 CU_ADD_TEST(suite, blob_create_snapshot_power_failure); 6767 CU_ADD_TEST(suite_bs, blob_inflate_rw); 6768 CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io); 6769 CU_ADD_TEST(suite_bs, 
blob_operation_split_rw); 6770 CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov); 6771 CU_ADD_TEST(suite, blob_io_unit); 6772 CU_ADD_TEST(suite, blob_io_unit_compatiblity); 6773 CU_ADD_TEST(suite_bs, blob_simultaneous_operations); 6774 CU_ADD_TEST(suite_bs, blob_persist_test); 6775 6776 allocate_threads(2); 6777 set_thread(0); 6778 6779 g_dev_buffer = calloc(1, DEV_BUFFER_SIZE); 6780 6781 CU_basic_set_mode(CU_BRM_VERBOSE); 6782 g_use_extent_table = false; 6783 CU_basic_run_tests(); 6784 num_failures = CU_get_number_of_failures(); 6785 g_use_extent_table = true; 6786 CU_basic_run_tests(); 6787 num_failures += CU_get_number_of_failures(); 6788 CU_cleanup_registry(); 6789 6790 free(g_dev_buffer); 6791 6792 free_threads(); 6793 6794 return num_failures; 6795 } 6796
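/*
 * Summary of the harness wiring above (a restatement of main(), nothing new
 * is assumed): suite "blob" holds tests that set up their own blobstore,
 * suite "blob_bs" gives every test a freshly initialized blobstore through
 * suite_bs_setup()/suite_bs_cleanup(), and suite "blob_blob" additionally
 * opens a single blob per test through suite_blob_setup()/suite_blob_cleanup().
 * The whole registry is executed twice, first with g_use_extent_table
 * disabled and then enabled, and the failure counts of both passes are
 * summed, so every test runs with the extent-table feature both off and on
 * against the calloc'd RAM device behind g_dev_buffer.
 */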