/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk/blob.h"
#include "spdk/string.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "../bs_dev_common.c"
#include "blob/blobstore.c"
#include "blob/request.c"
#include "blob/zeroes.c"
#include "blob/blob_bs_dev.c"

struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob, *g_blob2;
int g_bserrno, g_bserrno2;
struct spdk_xattr_names *g_names;
int g_done;
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;
bool g_use_extent_table = false;

struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t	version;
	uint32_t	length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	uint8_t		reserved[4036];
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");

static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
		struct spdk_blob_opts *blob_opts);
static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
static void suite_blob_setup(void);
static void suite_blob_cleanup(void);

static void
_get_xattr_value(void *arg, const char *name,
		 const void **value, size_t *value_len)
{
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == &g_ctx);

	/* Iterate over the number of array elements, not the array size in bytes */
	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
		if (!strcmp(name, g_xattr_names[i])) {
			*value_len = strlen(g_xattr_values[i]);
			*value = g_xattr_values[i];
			break;
		}
	}
}

static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value_len = 0;
	*value = NULL;
}

static int
_get_snapshots_count(struct spdk_blob_store *bs)
{
	struct spdk_blob_list *snapshot = NULL;
	int count = 0;

	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
		count += 1;
	}

	return count;
}

static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts);
	opts->use_extent_table = g_use_extent_table;
}

static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}

static void
blob_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	if (g_blob == NULL) {
		g_blob = blob;
		g_bserrno = bserrno;
	} else {
		g_blob2 = blob;
		g_bserrno2 = bserrno;
	}
}

static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

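/*
 * Unlike ut_bs_reload(), this helper tears down the in-memory blobstore with
 * bs_free() instead of spdk_bs_unload(), so the on-disk "clean" flag is never
 * written. The subsequent spdk_bs_load() therefore exercises the
 * dirty-shutdown recovery path.
 */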
static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}

static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again. It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference. This
	 * should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again. It should succeed. This tests the case
	 * where the file is opened, closed, then re-opened again.
331 */ 332 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 333 poll_threads(); 334 CU_ASSERT(g_bserrno == 0); 335 CU_ASSERT(g_blob != NULL); 336 blob = g_blob; 337 spdk_blob_close(blob, blob_op_complete, NULL); 338 poll_threads(); 339 CU_ASSERT(g_bserrno == 0); 340 341 /* Try to open file twice in succession. This should return the same 342 * blob object. 343 */ 344 g_blob = NULL; 345 g_blob2 = NULL; 346 g_bserrno = -1; 347 g_bserrno2 = -1; 348 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL); 349 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL); 350 poll_threads(); 351 CU_ASSERT(g_bserrno == 0); 352 CU_ASSERT(g_bserrno2 == 0); 353 CU_ASSERT(g_blob != NULL); 354 CU_ASSERT(g_blob2 != NULL); 355 CU_ASSERT(g_blob == g_blob2); 356 357 g_bserrno = -1; 358 spdk_blob_close(g_blob, blob_op_complete, NULL); 359 poll_threads(); 360 CU_ASSERT(g_bserrno == 0); 361 362 ut_blob_close_and_delete(bs, g_blob); 363 } 364 365 static void 366 blob_create(void) 367 { 368 struct spdk_blob_store *bs = g_bs; 369 struct spdk_blob *blob; 370 struct spdk_blob_opts opts; 371 spdk_blob_id blobid; 372 373 /* Create blob with 10 clusters */ 374 375 ut_spdk_blob_opts_init(&opts); 376 opts.num_clusters = 10; 377 378 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 379 poll_threads(); 380 CU_ASSERT(g_bserrno == 0); 381 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 382 blobid = g_blobid; 383 384 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 385 poll_threads(); 386 CU_ASSERT(g_bserrno == 0); 387 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 388 blob = g_blob; 389 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 390 391 spdk_blob_close(blob, blob_op_complete, NULL); 392 poll_threads(); 393 CU_ASSERT(g_bserrno == 0); 394 395 /* Create blob with 0 clusters */ 396 397 ut_spdk_blob_opts_init(&opts); 398 opts.num_clusters = 0; 399 400 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 401 poll_threads(); 402 CU_ASSERT(g_bserrno == 0); 403 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 404 blobid = g_blobid; 405 406 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 407 poll_threads(); 408 CU_ASSERT(g_bserrno == 0); 409 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 410 blob = g_blob; 411 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 412 413 spdk_blob_close(blob, blob_op_complete, NULL); 414 poll_threads(); 415 CU_ASSERT(g_bserrno == 0); 416 417 /* Create blob with default options (opts == NULL) */ 418 419 spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL); 420 poll_threads(); 421 CU_ASSERT(g_bserrno == 0); 422 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 423 blobid = g_blobid; 424 425 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 426 poll_threads(); 427 CU_ASSERT(g_bserrno == 0); 428 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 429 blob = g_blob; 430 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 431 432 spdk_blob_close(blob, blob_op_complete, NULL); 433 poll_threads(); 434 CU_ASSERT(g_bserrno == 0); 435 436 /* Try to create blob with size larger than blobstore */ 437 438 ut_spdk_blob_opts_init(&opts); 439 opts.num_clusters = bs->total_clusters + 1; 440 441 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 442 poll_threads(); 443 CU_ASSERT(g_bserrno == -ENOSPC); 444 } 445 446 static void 447 blob_create_fail(void) 448 { 449 struct spdk_blob_store *bs = g_bs; 450 struct spdk_blob_opts opts; 451 spdk_blob_id blobid; 452 uint32_t used_blobids_count = 
	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);

	/* NULL callback */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = NULL;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = &g_ctx;

	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	SPDK_CU_ASSERT_FATAL(g_blob == NULL);

	ut_bs_reload(&bs, NULL);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts);
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly. This makes sure that when we load again
	 * and try to recover a valid used_cluster map, the blobstore will
	 * ignore clusters with index 0 since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob *snapshot, *snapshot2;
	struct spdk_blob_bs_dev *blob_bs_dev;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts xattrs;
	spdk_blob_id blobid;
	spdk_blob_id snapshotid;
	spdk_blob_id snapshotid2;
	const void *value;
	size_t value_len;
	int rc;
	spdk_blob_id ids[2];
	size_t count;

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot from blob */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));

	/* Try to create snapshot from clone with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;
	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
	snapshotid2 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);

	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
	CU_ASSERT(snapshot->back_bs_dev == NULL);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);

	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot2);

	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshotid2);

	/* Try to create snapshot from snapshot */
	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);

	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
	ut_blob_close_and_delete(bs, blob);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
	ut_blob_close_and_delete(bs, snapshot2);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
}

static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that. */
	poll_thread_times(0, 3);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk */
	CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0]*SPDK_BS_PAGE_SIZE],
			 SPDK_BS_PAGE_SIZE) == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);


	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from a blob that is not read-only */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}

static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflating a thin blob with no parent should make it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
	}

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_inflate(void)
{
	_blob_inflate(false);
	_blob_inflate(true);
}

static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}

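/*
 * Resize semantics exercised below: growing a blob reserves clusters
 * immediately, while clusters freed by shrinking are only returned to the
 * blobstore once the metadata is synced via spdk_blob_sync_md().
 */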
static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	dev = init_dev();
	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
channel_ops(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster. This allows
	 * us to make sure that the readv/write code correctly accounts for I/O
	 * that cross cluster boundaries. Start by asserting that the allocated
	 * clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	buf = calloc(1, 256 * 4096);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static uint32_t
bs_channel_get_req_count(struct spdk_io_channel *_channel)
{
	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
	struct spdk_bs_request_set *set;
	uint32_t count = 0;

	TAILQ_FOREACH(set, &channel->reqs, link) {
		count++;
	}

	return count;
}

static void
blob_rw_verify_iov_nomem(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_write[10 * 4096];
	struct iovec iov_write[3];
	uint32_t req_count;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	MOCK_SET(calloc, NULL);
	req_count = bs_channel_get_req_count(channel);
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
	MOCK_CLEAR(calloc);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[4096];
	uint8_t payload_write[4096];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev fails if the read_only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if the data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       uint8_t *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, read one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint8_t *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, write one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_operation_split_rw_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	struct iovec iov_read[2];
	struct iovec iov_write[2];

	uint64_t i, j;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	for (i = 0; i < pages_per_payload; i++) {
		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
			uint64_t *tmp;

			tmp = (uint64_t *)payload_pattern;
			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
			*tmp = i + 1;
		}
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 3;
	iov_read[1].iov_base = payload_read + cluster_size * 3;
	iov_read[1].iov_len = cluster_size * 2;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* First of iovs fills whole blob except last page and second of iovs writes last page
	 * with a pattern. */
*/ 1773 iov_write[0].iov_base = payload_pattern; 1774 iov_write[0].iov_len = payload_size - page_size; 1775 iov_write[1].iov_base = payload_pattern; 1776 iov_write[1].iov_len = page_size; 1777 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1778 poll_threads(); 1779 CU_ASSERT(g_bserrno == 0); 1780 1781 /* Read whole blob and check consistency */ 1782 memset(payload_read, 0xFF, payload_size); 1783 iov_read[0].iov_base = payload_read; 1784 iov_read[0].iov_len = cluster_size * 2; 1785 iov_read[1].iov_base = payload_read + cluster_size * 2; 1786 iov_read[1].iov_len = cluster_size * 3; 1787 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1788 poll_threads(); 1789 CU_ASSERT(g_bserrno == 0); 1790 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0); 1791 CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0); 1792 1793 /* First of iovs fills only first page and second of iovs writes whole blob except 1794 * first page with a pattern. */ 1795 iov_write[0].iov_base = payload_pattern; 1796 iov_write[0].iov_len = page_size; 1797 iov_write[1].iov_base = payload_pattern; 1798 iov_write[1].iov_len = payload_size - page_size; 1799 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1800 poll_threads(); 1801 CU_ASSERT(g_bserrno == 0); 1802 1803 /* Read whole blob and check consistency */ 1804 memset(payload_read, 0xFF, payload_size); 1805 iov_read[0].iov_base = payload_read; 1806 iov_read[0].iov_len = cluster_size * 4; 1807 iov_read[1].iov_base = payload_read + cluster_size * 4; 1808 iov_read[1].iov_len = cluster_size; 1809 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1810 poll_threads(); 1811 CU_ASSERT(g_bserrno == 0); 1812 CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0); 1813 CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0); 1814 1815 1816 /* Fill whole blob with a pattern (5 clusters) */ 1817 1818 /* 1. Read test. */ 1819 _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload, 1820 blob_op_complete, NULL); 1821 poll_threads(); 1822 CU_ASSERT(g_bserrno == 0); 1823 1824 memset(payload_read, 0xFF, payload_size); 1825 iov_read[0].iov_base = payload_read; 1826 iov_read[0].iov_len = cluster_size; 1827 iov_read[1].iov_base = payload_read + cluster_size; 1828 iov_read[1].iov_len = cluster_size * 4; 1829 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1830 poll_threads(); 1831 CU_ASSERT(g_bserrno == 0); 1832 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1833 1834 /* 2. Write test. 
*/ 1835 iov_write[0].iov_base = payload_read; 1836 iov_write[0].iov_len = cluster_size * 2; 1837 iov_write[1].iov_base = payload_read + cluster_size * 2; 1838 iov_write[1].iov_len = cluster_size * 3; 1839 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1840 poll_threads(); 1841 CU_ASSERT(g_bserrno == 0); 1842 1843 memset(payload_read, 0xFF, payload_size); 1844 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1845 poll_threads(); 1846 CU_ASSERT(g_bserrno == 0); 1847 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1848 1849 spdk_bs_free_io_channel(channel); 1850 poll_threads(); 1851 1852 g_blob = NULL; 1853 g_blobid = 0; 1854 1855 free(payload_read); 1856 free(payload_write); 1857 free(payload_pattern); 1858 1859 ut_blob_close_and_delete(bs, blob); 1860 } 1861 1862 static void 1863 blob_unmap(void) 1864 { 1865 struct spdk_blob_store *bs = g_bs; 1866 struct spdk_blob *blob; 1867 struct spdk_io_channel *channel; 1868 struct spdk_blob_opts opts; 1869 uint8_t payload[4096]; 1870 int i; 1871 1872 channel = spdk_bs_alloc_io_channel(bs); 1873 CU_ASSERT(channel != NULL); 1874 1875 ut_spdk_blob_opts_init(&opts); 1876 opts.num_clusters = 10; 1877 1878 blob = ut_blob_create_and_open(bs, &opts); 1879 1880 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1881 poll_threads(); 1882 CU_ASSERT(g_bserrno == 0); 1883 1884 memset(payload, 0, sizeof(payload)); 1885 payload[0] = 0xFF; 1886 1887 /* 1888 * Set first byte of every cluster to 0xFF. 1889 * First cluster on device is reserved so let's start from cluster number 1 1890 */ 1891 for (i = 1; i < 11; i++) { 1892 g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF; 1893 } 1894 1895 /* Confirm writes */ 1896 for (i = 0; i < 10; i++) { 1897 payload[0] = 0; 1898 spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1, 1899 blob_op_complete, NULL); 1900 poll_threads(); 1901 CU_ASSERT(g_bserrno == 0); 1902 CU_ASSERT(payload[0] == 0xFF); 1903 } 1904 1905 /* Mark some clusters as unallocated */ 1906 blob->active.clusters[1] = 0; 1907 blob->active.clusters[2] = 0; 1908 blob->active.clusters[3] = 0; 1909 blob->active.clusters[6] = 0; 1910 blob->active.clusters[8] = 0; 1911 1912 /* Unmap clusters by resizing to 0 */ 1913 spdk_blob_resize(blob, 0, blob_op_complete, NULL); 1914 poll_threads(); 1915 CU_ASSERT(g_bserrno == 0); 1916 1917 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1918 poll_threads(); 1919 CU_ASSERT(g_bserrno == 0); 1920 1921 /* Confirm that only 'allocated' clusters were unmapped */ 1922 for (i = 1; i < 11; i++) { 1923 switch (i) { 1924 case 2: 1925 case 3: 1926 case 4: 1927 case 7: 1928 case 9: 1929 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF); 1930 break; 1931 default: 1932 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0); 1933 break; 1934 } 1935 } 1936 1937 spdk_bs_free_io_channel(channel); 1938 poll_threads(); 1939 1940 ut_blob_close_and_delete(bs, blob); 1941 } 1942 1943 static void 1944 blob_iter(void) 1945 { 1946 struct spdk_blob_store *bs = g_bs; 1947 struct spdk_blob *blob; 1948 spdk_blob_id blobid; 1949 struct spdk_blob_opts blob_opts; 1950 1951 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1952 poll_threads(); 1953 CU_ASSERT(g_blob == NULL); 1954 CU_ASSERT(g_bserrno == -ENOENT); 1955 1956 ut_spdk_blob_opts_init(&blob_opts); 1957 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 1958 poll_threads(); 1959 CU_ASSERT(g_bserrno == 0); 
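/* Blob creation succeeded; the new ID is checked and remembered below so the iterator results can be compared against it. */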
1960 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1961 blobid = g_blobid; 1962 1963 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1964 poll_threads(); 1965 CU_ASSERT(g_blob != NULL); 1966 CU_ASSERT(g_bserrno == 0); 1967 blob = g_blob; 1968 CU_ASSERT(spdk_blob_get_id(blob) == blobid); 1969 1970 spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL); 1971 poll_threads(); 1972 CU_ASSERT(g_blob == NULL); 1973 CU_ASSERT(g_bserrno == -ENOENT); 1974 } 1975 1976 static void 1977 blob_xattr(void) 1978 { 1979 struct spdk_blob_store *bs = g_bs; 1980 struct spdk_blob *blob = g_blob; 1981 spdk_blob_id blobid = spdk_blob_get_id(blob); 1982 uint64_t length; 1983 int rc; 1984 const char *name1, *name2; 1985 const void *value; 1986 size_t value_len; 1987 struct spdk_xattr_names *names; 1988 1989 /* Test that set_xattr fails if md_ro flag is set. */ 1990 blob->md_ro = true; 1991 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 1992 CU_ASSERT(rc == -EPERM); 1993 1994 blob->md_ro = false; 1995 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 1996 CU_ASSERT(rc == 0); 1997 1998 length = 2345; 1999 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2000 CU_ASSERT(rc == 0); 2001 2002 /* Overwrite "length" xattr. */ 2003 length = 3456; 2004 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2005 CU_ASSERT(rc == 0); 2006 2007 /* get_xattr should still work even if md_ro flag is set. */ 2008 value = NULL; 2009 blob->md_ro = true; 2010 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2011 CU_ASSERT(rc == 0); 2012 SPDK_CU_ASSERT_FATAL(value != NULL); 2013 CU_ASSERT(*(uint64_t *)value == length); 2014 CU_ASSERT(value_len == 8); 2015 blob->md_ro = false; 2016 2017 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2018 CU_ASSERT(rc == -ENOENT); 2019 2020 names = NULL; 2021 rc = spdk_blob_get_xattr_names(blob, &names); 2022 CU_ASSERT(rc == 0); 2023 SPDK_CU_ASSERT_FATAL(names != NULL); 2024 CU_ASSERT(spdk_xattr_names_get_count(names) == 2); 2025 name1 = spdk_xattr_names_get_name(names, 0); 2026 SPDK_CU_ASSERT_FATAL(name1 != NULL); 2027 CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length")); 2028 name2 = spdk_xattr_names_get_name(names, 1); 2029 SPDK_CU_ASSERT_FATAL(name2 != NULL); 2030 CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length")); 2031 CU_ASSERT(strcmp(name1, name2)); 2032 spdk_xattr_names_free(names); 2033 2034 /* Confirm that remove_xattr fails if md_ro is set to true. 
*/ 2035 blob->md_ro = true; 2036 rc = spdk_blob_remove_xattr(blob, "name"); 2037 CU_ASSERT(rc == -EPERM); 2038 2039 blob->md_ro = false; 2040 rc = spdk_blob_remove_xattr(blob, "name"); 2041 CU_ASSERT(rc == 0); 2042 2043 rc = spdk_blob_remove_xattr(blob, "foobar"); 2044 CU_ASSERT(rc == -ENOENT); 2045 2046 /* Set internal xattr */ 2047 length = 7898; 2048 rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true); 2049 CU_ASSERT(rc == 0); 2050 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2051 CU_ASSERT(rc == 0); 2052 CU_ASSERT(*(uint64_t *)value == length); 2053 /* try to get public xattr with same name */ 2054 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2055 CU_ASSERT(rc != 0); 2056 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false); 2057 CU_ASSERT(rc != 0); 2058 /* Check if SPDK_BLOB_INTERNAL_XATTR is set */ 2059 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 2060 SPDK_BLOB_INTERNAL_XATTR); 2061 2062 spdk_blob_close(blob, blob_op_complete, NULL); 2063 poll_threads(); 2064 2065 /* Check if xattrs are persisted */ 2066 ut_bs_reload(&bs, NULL); 2067 2068 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2069 poll_threads(); 2070 CU_ASSERT(g_bserrno == 0); 2071 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2072 blob = g_blob; 2073 2074 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2075 CU_ASSERT(rc == 0); 2076 CU_ASSERT(*(uint64_t *)value == length); 2077 2078 /* try to get internal xattr trough public call */ 2079 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2080 CU_ASSERT(rc != 0); 2081 2082 rc = blob_remove_xattr(blob, "internal", true); 2083 CU_ASSERT(rc == 0); 2084 2085 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0); 2086 } 2087 2088 static void 2089 bs_load(void) 2090 { 2091 struct spdk_blob_store *bs; 2092 struct spdk_bs_dev *dev; 2093 spdk_blob_id blobid; 2094 struct spdk_blob *blob; 2095 struct spdk_bs_super_block *super_block; 2096 uint64_t length; 2097 int rc; 2098 const void *value; 2099 size_t value_len; 2100 struct spdk_bs_opts opts; 2101 struct spdk_blob_opts blob_opts; 2102 2103 dev = init_dev(); 2104 spdk_bs_opts_init(&opts); 2105 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2106 2107 /* Initialize a new blob store */ 2108 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2109 poll_threads(); 2110 CU_ASSERT(g_bserrno == 0); 2111 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2112 bs = g_bs; 2113 2114 /* Try to open a blobid that does not exist */ 2115 spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL); 2116 poll_threads(); 2117 CU_ASSERT(g_bserrno == -ENOENT); 2118 CU_ASSERT(g_blob == NULL); 2119 2120 /* Create a blob */ 2121 blob = ut_blob_create_and_open(bs, NULL); 2122 blobid = spdk_blob_get_id(blob); 2123 2124 /* Try again to open valid blob but without the upper bit set */ 2125 spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL); 2126 poll_threads(); 2127 CU_ASSERT(g_bserrno == -ENOENT); 2128 CU_ASSERT(g_blob == NULL); 2129 2130 /* Set some xattrs */ 2131 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2132 CU_ASSERT(rc == 0); 2133 2134 length = 2345; 2135 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2136 CU_ASSERT(rc == 0); 2137 2138 /* Resize the blob */ 2139 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2140 poll_threads(); 2141 CU_ASSERT(g_bserrno == 0); 2142 2143 
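/* Close the blob and unload the blobstore so that the xattrs and size set above can be verified from disk after a reload. */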
spdk_blob_close(blob, blob_op_complete, NULL); 2144 poll_threads(); 2145 CU_ASSERT(g_bserrno == 0); 2146 blob = NULL; 2147 g_blob = NULL; 2148 g_blobid = SPDK_BLOBID_INVALID; 2149 2150 /* Unload the blob store */ 2151 spdk_bs_unload(bs, bs_op_complete, NULL); 2152 poll_threads(); 2153 CU_ASSERT(g_bserrno == 0); 2154 g_bs = NULL; 2155 g_blob = NULL; 2156 g_blobid = 0; 2157 2158 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2159 CU_ASSERT(super_block->clean == 1); 2160 2161 /* Load should fail for device with an unsupported blocklen */ 2162 dev = init_dev(); 2163 dev->blocklen = SPDK_BS_PAGE_SIZE * 2; 2164 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2165 poll_threads(); 2166 CU_ASSERT(g_bserrno == -EINVAL); 2167 2168 /* Load should fail when max_md_ops is set to zero */ 2169 dev = init_dev(); 2170 spdk_bs_opts_init(&opts); 2171 opts.max_md_ops = 0; 2172 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2173 poll_threads(); 2174 CU_ASSERT(g_bserrno == -EINVAL); 2175 2176 /* Load should fail when max_channel_ops is set to zero */ 2177 dev = init_dev(); 2178 spdk_bs_opts_init(&opts); 2179 opts.max_channel_ops = 0; 2180 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2181 poll_threads(); 2182 CU_ASSERT(g_bserrno == -EINVAL); 2183 2184 /* Load an existing blob store */ 2185 dev = init_dev(); 2186 spdk_bs_opts_init(&opts); 2187 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2188 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2189 poll_threads(); 2190 CU_ASSERT(g_bserrno == 0); 2191 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2192 bs = g_bs; 2193 2194 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2195 CU_ASSERT(super_block->clean == 1); 2196 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2197 2198 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2199 poll_threads(); 2200 CU_ASSERT(g_bserrno == 0); 2201 CU_ASSERT(g_blob != NULL); 2202 blob = g_blob; 2203 2204 /* Verify that blobstore is marked dirty after first metadata sync */ 2205 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2206 CU_ASSERT(super_block->clean == 1); 2207 2208 /* Get the xattrs */ 2209 value = NULL; 2210 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2211 CU_ASSERT(rc == 0); 2212 SPDK_CU_ASSERT_FATAL(value != NULL); 2213 CU_ASSERT(*(uint64_t *)value == length); 2214 CU_ASSERT(value_len == 8); 2215 2216 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2217 CU_ASSERT(rc == -ENOENT); 2218 2219 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 2220 2221 spdk_blob_close(blob, blob_op_complete, NULL); 2222 poll_threads(); 2223 CU_ASSERT(g_bserrno == 0); 2224 blob = NULL; 2225 g_blob = NULL; 2226 2227 spdk_bs_unload(bs, bs_op_complete, NULL); 2228 poll_threads(); 2229 CU_ASSERT(g_bserrno == 0); 2230 g_bs = NULL; 2231 2232 /* Load should fail: bdev size < saved size */ 2233 dev = init_dev(); 2234 dev->blockcnt /= 2; 2235 2236 spdk_bs_opts_init(&opts); 2237 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2238 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2239 poll_threads(); 2240 2241 CU_ASSERT(g_bserrno == -EILSEQ); 2242 2243 /* Load should succeed: bdev size > saved size */ 2244 dev = init_dev(); 2245 dev->blockcnt *= 4; 2246 2247 spdk_bs_opts_init(&opts); 2248 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2249 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2250 poll_threads(); 2251 CU_ASSERT(g_bserrno == 0);
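/* Load succeeded as expected for a bdev larger than the saved size; the store is unloaded before the compatibility-mode checks below. */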
2252 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2253 bs = g_bs; 2254 2255 CU_ASSERT(g_bserrno == 0); 2256 spdk_bs_unload(bs, bs_op_complete, NULL); 2257 poll_threads(); 2258 2259 2260 /* Test compatibility mode */ 2261 2262 dev = init_dev(); 2263 super_block->size = 0; 2264 super_block->crc = blob_md_page_calc_crc(super_block); 2265 2266 spdk_bs_opts_init(&opts); 2267 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2268 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2269 poll_threads(); 2270 CU_ASSERT(g_bserrno == 0); 2271 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2272 bs = g_bs; 2273 2274 /* Create a blob */ 2275 ut_spdk_blob_opts_init(&blob_opts); 2276 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2277 poll_threads(); 2278 CU_ASSERT(g_bserrno == 0); 2279 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2280 2281 /* Blobstore should update number of blocks in super_block */ 2282 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2283 CU_ASSERT(super_block->clean == 0); 2284 2285 spdk_bs_unload(bs, bs_op_complete, NULL); 2286 poll_threads(); 2287 CU_ASSERT(g_bserrno == 0); 2288 CU_ASSERT(super_block->clean == 1); 2289 g_bs = NULL; 2290 2291 } 2292 2293 static void 2294 bs_load_pending_removal(void) 2295 { 2296 struct spdk_blob_store *bs = g_bs; 2297 struct spdk_blob_opts opts; 2298 struct spdk_blob *blob, *snapshot; 2299 spdk_blob_id blobid, snapshotid; 2300 const void *value; 2301 size_t value_len; 2302 int rc; 2303 2304 /* Create blob */ 2305 ut_spdk_blob_opts_init(&opts); 2306 opts.num_clusters = 10; 2307 2308 blob = ut_blob_create_and_open(bs, &opts); 2309 blobid = spdk_blob_get_id(blob); 2310 2311 /* Create snapshot */ 2312 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 2313 poll_threads(); 2314 CU_ASSERT(g_bserrno == 0); 2315 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2316 snapshotid = g_blobid; 2317 2318 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2319 poll_threads(); 2320 CU_ASSERT(g_bserrno == 0); 2321 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2322 snapshot = g_blob; 2323 2324 /* Set SNAPSHOT_PENDING_REMOVAL xattr */ 2325 snapshot->md_ro = false; 2326 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2327 CU_ASSERT(rc == 0); 2328 snapshot->md_ro = true; 2329 2330 spdk_blob_close(snapshot, blob_op_complete, NULL); 2331 poll_threads(); 2332 CU_ASSERT(g_bserrno == 0); 2333 2334 spdk_blob_close(blob, blob_op_complete, NULL); 2335 poll_threads(); 2336 CU_ASSERT(g_bserrno == 0); 2337 2338 /* Reload blobstore */ 2339 ut_bs_reload(&bs, NULL); 2340 2341 /* Snapshot should not be removed as blob is still pointing to it */ 2342 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2343 poll_threads(); 2344 CU_ASSERT(g_bserrno == 0); 2345 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2346 snapshot = g_blob; 2347 2348 /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ 2349 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 2350 CU_ASSERT(rc != 0); 2351 2352 /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ 2353 snapshot->md_ro = false; 2354 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2355 CU_ASSERT(rc == 0); 2356 snapshot->md_ro = true; 2357 2358 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2359 poll_threads(); 2360 CU_ASSERT(g_bserrno == 0); 2361 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2362 blob = g_blob; 2363 2364 
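/* With the snapshot again marked as pending removal, detach the clone below so that the next load can remove the snapshot for good. */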
/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ 2365 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 2366 2367 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2368 poll_threads(); 2369 CU_ASSERT(g_bserrno == 0); 2370 2371 spdk_blob_close(snapshot, blob_op_complete, NULL); 2372 poll_threads(); 2373 CU_ASSERT(g_bserrno == 0); 2374 2375 spdk_blob_close(blob, blob_op_complete, NULL); 2376 poll_threads(); 2377 CU_ASSERT(g_bserrno == 0); 2378 2379 /* Reload blobstore */ 2380 ut_bs_reload(&bs, NULL); 2381 2382 /* Snapshot should be removed as blob is not pointing to it anymore */ 2383 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2384 poll_threads(); 2385 CU_ASSERT(g_bserrno != 0); 2386 } 2387 2388 static void 2389 bs_load_custom_cluster_size(void) 2390 { 2391 struct spdk_blob_store *bs; 2392 struct spdk_bs_dev *dev; 2393 struct spdk_bs_super_block *super_block; 2394 struct spdk_bs_opts opts; 2395 uint32_t custom_cluster_size = 4194304; /* 4MiB */ 2396 uint32_t cluster_sz; 2397 uint64_t total_clusters; 2398 2399 dev = init_dev(); 2400 spdk_bs_opts_init(&opts); 2401 opts.cluster_sz = custom_cluster_size; 2402 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2403 2404 /* Initialize a new blob store */ 2405 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2406 poll_threads(); 2407 CU_ASSERT(g_bserrno == 0); 2408 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2409 bs = g_bs; 2410 cluster_sz = bs->cluster_sz; 2411 total_clusters = bs->total_clusters; 2412 2413 /* Unload the blob store */ 2414 spdk_bs_unload(bs, bs_op_complete, NULL); 2415 poll_threads(); 2416 CU_ASSERT(g_bserrno == 0); 2417 g_bs = NULL; 2418 g_blob = NULL; 2419 g_blobid = 0; 2420 2421 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2422 CU_ASSERT(super_block->clean == 1); 2423 2424 /* Load an existing blob store */ 2425 dev = init_dev(); 2426 spdk_bs_opts_init(&opts); 2427 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2428 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2429 poll_threads(); 2430 CU_ASSERT(g_bserrno == 0); 2431 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2432 bs = g_bs; 2433 /* Compare cluster size and number to one after initialization */ 2434 CU_ASSERT(cluster_sz == bs->cluster_sz); 2435 CU_ASSERT(total_clusters == bs->total_clusters); 2436 2437 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2438 CU_ASSERT(super_block->clean == 1); 2439 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2440 2441 spdk_bs_unload(bs, bs_op_complete, NULL); 2442 poll_threads(); 2443 CU_ASSERT(g_bserrno == 0); 2444 CU_ASSERT(super_block->clean == 1); 2445 g_bs = NULL; 2446 } 2447 2448 static void 2449 bs_type(void) 2450 { 2451 struct spdk_blob_store *bs; 2452 struct spdk_bs_dev *dev; 2453 struct spdk_bs_opts opts; 2454 2455 dev = init_dev(); 2456 spdk_bs_opts_init(&opts); 2457 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2458 2459 /* Initialize a new blob store */ 2460 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2461 poll_threads(); 2462 CU_ASSERT(g_bserrno == 0); 2463 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2464 bs = g_bs; 2465 2466 /* Unload the blob store */ 2467 spdk_bs_unload(bs, bs_op_complete, NULL); 2468 poll_threads(); 2469 CU_ASSERT(g_bserrno == 0); 2470 g_bs = NULL; 2471 g_blob = NULL; 2472 g_blobid = 0; 2473 2474 /* Load non existing blobstore type */ 2475 dev = init_dev(); 2476 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2477 
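/* Loading with a bstype that does not match the one stored on disk is expected to fail. */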
spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2478 poll_threads(); 2479 CU_ASSERT(g_bserrno != 0); 2480 2481 /* Load with empty blobstore type */ 2482 dev = init_dev(); 2483 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2484 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2485 poll_threads(); 2486 CU_ASSERT(g_bserrno == 0); 2487 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2488 bs = g_bs; 2489 2490 spdk_bs_unload(bs, bs_op_complete, NULL); 2491 poll_threads(); 2492 CU_ASSERT(g_bserrno == 0); 2493 g_bs = NULL; 2494 2495 /* Initialize a new blob store with empty bstype */ 2496 dev = init_dev(); 2497 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2498 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2499 poll_threads(); 2500 CU_ASSERT(g_bserrno == 0); 2501 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2502 bs = g_bs; 2503 2504 spdk_bs_unload(bs, bs_op_complete, NULL); 2505 poll_threads(); 2506 CU_ASSERT(g_bserrno == 0); 2507 g_bs = NULL; 2508 2509 /* Load non existing blobstore type */ 2510 dev = init_dev(); 2511 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2512 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2513 poll_threads(); 2514 CU_ASSERT(g_bserrno != 0); 2515 2516 /* Load with empty blobstore type */ 2517 dev = init_dev(); 2518 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2519 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2520 poll_threads(); 2521 CU_ASSERT(g_bserrno == 0); 2522 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2523 bs = g_bs; 2524 2525 spdk_bs_unload(bs, bs_op_complete, NULL); 2526 poll_threads(); 2527 CU_ASSERT(g_bserrno == 0); 2528 g_bs = NULL; 2529 } 2530 2531 static void 2532 bs_super_block(void) 2533 { 2534 struct spdk_blob_store *bs; 2535 struct spdk_bs_dev *dev; 2536 struct spdk_bs_super_block *super_block; 2537 struct spdk_bs_opts opts; 2538 struct spdk_bs_super_block_ver1 super_block_v1; 2539 2540 dev = init_dev(); 2541 spdk_bs_opts_init(&opts); 2542 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2543 2544 /* Initialize a new blob store */ 2545 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2546 poll_threads(); 2547 CU_ASSERT(g_bserrno == 0); 2548 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2549 bs = g_bs; 2550 2551 /* Unload the blob store */ 2552 spdk_bs_unload(bs, bs_op_complete, NULL); 2553 poll_threads(); 2554 CU_ASSERT(g_bserrno == 0); 2555 g_bs = NULL; 2556 g_blob = NULL; 2557 g_blobid = 0; 2558 2559 /* Load an existing blob store with version newer than supported */ 2560 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2561 super_block->version++; 2562 2563 dev = init_dev(); 2564 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2565 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2566 poll_threads(); 2567 CU_ASSERT(g_bserrno != 0); 2568 2569 /* Create a new blob store with super block version 1 */ 2570 dev = init_dev(); 2571 super_block_v1.version = 1; 2572 memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); 2573 super_block_v1.length = 0x1000; 2574 super_block_v1.clean = 1; 2575 super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; 2576 super_block_v1.cluster_size = 0x100000; 2577 super_block_v1.used_page_mask_start = 0x01; 2578 super_block_v1.used_page_mask_len = 0x01; 2579 super_block_v1.used_cluster_mask_start = 0x02; 2580 super_block_v1.used_cluster_mask_len = 0x01; 2581 super_block_v1.md_start = 0x03; 2582 super_block_v1.md_len = 0x40; 2583 memset(super_block_v1.reserved, 
0, 4036); 2584 super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1); 2585 memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); 2586 2587 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2588 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2589 poll_threads(); 2590 CU_ASSERT(g_bserrno == 0); 2591 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2592 bs = g_bs; 2593 2594 spdk_bs_unload(bs, bs_op_complete, NULL); 2595 poll_threads(); 2596 CU_ASSERT(g_bserrno == 0); 2597 g_bs = NULL; 2598 } 2599 2600 /* 2601 * Create a blobstore and then unload it. 2602 */ 2603 static void 2604 bs_unload(void) 2605 { 2606 struct spdk_blob_store *bs = g_bs; 2607 struct spdk_blob *blob; 2608 2609 /* Create a blob and open it. */ 2610 blob = ut_blob_create_and_open(bs, NULL); 2611 2612 /* Try to unload blobstore, should fail with open blob */ 2613 g_bserrno = -1; 2614 spdk_bs_unload(bs, bs_op_complete, NULL); 2615 poll_threads(); 2616 CU_ASSERT(g_bserrno == -EBUSY); 2617 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2618 2619 /* Close the blob, then successfully unload blobstore */ 2620 g_bserrno = -1; 2621 spdk_blob_close(blob, blob_op_complete, NULL); 2622 poll_threads(); 2623 CU_ASSERT(g_bserrno == 0); 2624 } 2625 2626 /* 2627 * Create a blobstore with a cluster size different than the default, and ensure it is 2628 * persisted. 2629 */ 2630 static void 2631 bs_cluster_sz(void) 2632 { 2633 struct spdk_blob_store *bs; 2634 struct spdk_bs_dev *dev; 2635 struct spdk_bs_opts opts; 2636 uint32_t cluster_sz; 2637 2638 /* Set cluster size to zero */ 2639 dev = init_dev(); 2640 spdk_bs_opts_init(&opts); 2641 opts.cluster_sz = 0; 2642 2643 /* Initialize a new blob store */ 2644 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2645 poll_threads(); 2646 CU_ASSERT(g_bserrno == -EINVAL); 2647 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2648 2649 /* 2650 * Set cluster size to blobstore page size, 2651 * to work it is required to be at least twice the blobstore page size. 2652 */ 2653 dev = init_dev(); 2654 spdk_bs_opts_init(&opts); 2655 opts.cluster_sz = SPDK_BS_PAGE_SIZE; 2656 2657 /* Initialize a new blob store */ 2658 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2659 poll_threads(); 2660 CU_ASSERT(g_bserrno == -ENOMEM); 2661 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2662 2663 /* 2664 * Set cluster size to lower than page size, 2665 * to work it is required to be at least twice the blobstore page size. 
2666 */ 2667 dev = init_dev(); 2668 spdk_bs_opts_init(&opts); 2669 opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; 2670 2671 /* Initialize a new blob store */ 2672 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2673 poll_threads(); 2674 CU_ASSERT(g_bserrno == -EINVAL); 2675 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2676 2677 /* Set cluster size to twice the default */ 2678 dev = init_dev(); 2679 spdk_bs_opts_init(&opts); 2680 opts.cluster_sz *= 2; 2681 cluster_sz = opts.cluster_sz; 2682 2683 /* Initialize a new blob store */ 2684 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2685 poll_threads(); 2686 CU_ASSERT(g_bserrno == 0); 2687 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2688 bs = g_bs; 2689 2690 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2691 2692 ut_bs_reload(&bs, &opts); 2693 2694 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2695 2696 spdk_bs_unload(bs, bs_op_complete, NULL); 2697 poll_threads(); 2698 CU_ASSERT(g_bserrno == 0); 2699 g_bs = NULL; 2700 } 2701 2702 /* 2703 * Create a blobstore, reload it and ensure total usable cluster count 2704 * stays the same. 2705 */ 2706 static void 2707 bs_usable_clusters(void) 2708 { 2709 struct spdk_blob_store *bs = g_bs; 2710 struct spdk_blob *blob; 2711 uint32_t clusters; 2712 int i; 2713 2714 2715 clusters = spdk_bs_total_data_cluster_count(bs); 2716 2717 ut_bs_reload(&bs, NULL); 2718 2719 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2720 2721 /* Create and resize blobs to make sure that useable cluster count won't change */ 2722 for (i = 0; i < 4; i++) { 2723 g_bserrno = -1; 2724 g_blobid = SPDK_BLOBID_INVALID; 2725 blob = ut_blob_create_and_open(bs, NULL); 2726 2727 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2728 poll_threads(); 2729 CU_ASSERT(g_bserrno == 0); 2730 2731 g_bserrno = -1; 2732 spdk_blob_close(blob, blob_op_complete, NULL); 2733 poll_threads(); 2734 CU_ASSERT(g_bserrno == 0); 2735 2736 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2737 } 2738 2739 /* Reload the blob store to make sure that nothing changed */ 2740 ut_bs_reload(&bs, NULL); 2741 2742 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2743 } 2744 2745 /* 2746 * Test resizing of the metadata blob. This requires creating enough blobs 2747 * so that one cluster is not enough to fit the metadata for those blobs. 2748 * To induce this condition to happen more quickly, we reduce the cluster 2749 * size to 16KB, which means only 4 4KB blob metadata pages can fit. 
2750 */ 2751 static void 2752 bs_resize_md(void) 2753 { 2754 struct spdk_blob_store *bs; 2755 const int CLUSTER_PAGE_COUNT = 4; 2756 const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; 2757 struct spdk_bs_dev *dev; 2758 struct spdk_bs_opts opts; 2759 struct spdk_blob *blob; 2760 struct spdk_blob_opts blob_opts; 2761 uint32_t cluster_sz; 2762 spdk_blob_id blobids[NUM_BLOBS]; 2763 int i; 2764 2765 2766 dev = init_dev(); 2767 spdk_bs_opts_init(&opts); 2768 opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; 2769 cluster_sz = opts.cluster_sz; 2770 2771 /* Initialize a new blob store */ 2772 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2773 poll_threads(); 2774 CU_ASSERT(g_bserrno == 0); 2775 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2776 bs = g_bs; 2777 2778 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2779 2780 ut_spdk_blob_opts_init(&blob_opts); 2781 2782 for (i = 0; i < NUM_BLOBS; i++) { 2783 g_bserrno = -1; 2784 g_blobid = SPDK_BLOBID_INVALID; 2785 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2786 poll_threads(); 2787 CU_ASSERT(g_bserrno == 0); 2788 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2789 blobids[i] = g_blobid; 2790 } 2791 2792 ut_bs_reload(&bs, &opts); 2793 2794 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2795 2796 for (i = 0; i < NUM_BLOBS; i++) { 2797 g_bserrno = -1; 2798 g_blob = NULL; 2799 spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); 2800 poll_threads(); 2801 CU_ASSERT(g_bserrno == 0); 2802 CU_ASSERT(g_blob != NULL); 2803 blob = g_blob; 2804 g_bserrno = -1; 2805 spdk_blob_close(blob, blob_op_complete, NULL); 2806 poll_threads(); 2807 CU_ASSERT(g_bserrno == 0); 2808 } 2809 2810 spdk_bs_unload(bs, bs_op_complete, NULL); 2811 poll_threads(); 2812 CU_ASSERT(g_bserrno == 0); 2813 g_bs = NULL; 2814 } 2815 2816 static void 2817 bs_destroy(void) 2818 { 2819 struct spdk_blob_store *bs; 2820 struct spdk_bs_dev *dev; 2821 2822 /* Initialize a new blob store */ 2823 dev = init_dev(); 2824 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2825 poll_threads(); 2826 CU_ASSERT(g_bserrno == 0); 2827 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2828 bs = g_bs; 2829 2830 /* Destroy the blob store */ 2831 g_bserrno = -1; 2832 spdk_bs_destroy(bs, bs_op_complete, NULL); 2833 poll_threads(); 2834 CU_ASSERT(g_bserrno == 0); 2835 2836 /* Loading an non-existent blob store should fail. 
*/ 2837 g_bs = NULL; 2838 dev = init_dev(); 2839 2840 g_bserrno = 0; 2841 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2842 poll_threads(); 2843 CU_ASSERT(g_bserrno != 0); 2844 } 2845 2846 /* Try to hit all of the corner cases associated with serializing 2847 * a blob to disk 2848 */ 2849 static void 2850 blob_serialize_test(void) 2851 { 2852 struct spdk_bs_dev *dev; 2853 struct spdk_bs_opts opts; 2854 struct spdk_blob_store *bs; 2855 spdk_blob_id blobid[2]; 2856 struct spdk_blob *blob[2]; 2857 uint64_t i; 2858 char *value; 2859 int rc; 2860 2861 dev = init_dev(); 2862 2863 /* Initialize a new blobstore with very small clusters */ 2864 spdk_bs_opts_init(&opts); 2865 opts.cluster_sz = dev->blocklen * 8; 2866 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2867 poll_threads(); 2868 CU_ASSERT(g_bserrno == 0); 2869 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2870 bs = g_bs; 2871 2872 /* Create and open two blobs */ 2873 for (i = 0; i < 2; i++) { 2874 blob[i] = ut_blob_create_and_open(bs, NULL); 2875 blobid[i] = spdk_blob_get_id(blob[i]); 2876 2877 /* Set a fairly large xattr on both blobs to eat up 2878 * metadata space 2879 */ 2880 value = calloc(dev->blocklen - 64, sizeof(char)); 2881 SPDK_CU_ASSERT_FATAL(value != NULL); 2882 memset(value, i, dev->blocklen / 2); 2883 rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); 2884 CU_ASSERT(rc == 0); 2885 free(value); 2886 } 2887 2888 /* Resize the blobs, alternating 1 cluster at a time. 2889 * This thwarts run length encoding and will cause spill 2890 * over of the extents. 2891 */ 2892 for (i = 0; i < 6; i++) { 2893 spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); 2894 poll_threads(); 2895 CU_ASSERT(g_bserrno == 0); 2896 } 2897 2898 for (i = 0; i < 2; i++) { 2899 spdk_blob_sync_md(blob[i], blob_op_complete, NULL); 2900 poll_threads(); 2901 CU_ASSERT(g_bserrno == 0); 2902 } 2903 2904 /* Close the blobs */ 2905 for (i = 0; i < 2; i++) { 2906 spdk_blob_close(blob[i], blob_op_complete, NULL); 2907 poll_threads(); 2908 CU_ASSERT(g_bserrno == 0); 2909 } 2910 2911 ut_bs_reload(&bs, &opts); 2912 2913 for (i = 0; i < 2; i++) { 2914 blob[i] = NULL; 2915 2916 spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); 2917 poll_threads(); 2918 CU_ASSERT(g_bserrno == 0); 2919 CU_ASSERT(g_blob != NULL); 2920 blob[i] = g_blob; 2921 2922 CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); 2923 2924 spdk_blob_close(blob[i], blob_op_complete, NULL); 2925 poll_threads(); 2926 CU_ASSERT(g_bserrno == 0); 2927 } 2928 2929 spdk_bs_unload(bs, bs_op_complete, NULL); 2930 poll_threads(); 2931 CU_ASSERT(g_bserrno == 0); 2932 g_bs = NULL; 2933 } 2934 2935 static void 2936 blob_crc(void) 2937 { 2938 struct spdk_blob_store *bs = g_bs; 2939 struct spdk_blob *blob; 2940 spdk_blob_id blobid; 2941 uint32_t page_num; 2942 int index; 2943 struct spdk_blob_md_page *page; 2944 2945 blob = ut_blob_create_and_open(bs, NULL); 2946 blobid = spdk_blob_get_id(blob); 2947 2948 spdk_blob_close(blob, blob_op_complete, NULL); 2949 poll_threads(); 2950 CU_ASSERT(g_bserrno == 0); 2951 2952 page_num = bs_blobid_to_page(blobid); 2953 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 2954 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 2955 page->crc = 0; 2956 2957 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2958 poll_threads(); 2959 CU_ASSERT(g_bserrno == -EINVAL); 2960 CU_ASSERT(g_blob == NULL); 2961 g_bserrno = 0; 2962 2963 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 2964 
poll_threads(); 2965 CU_ASSERT(g_bserrno == -EINVAL); 2966 } 2967 2968 static void 2969 super_block_crc(void) 2970 { 2971 struct spdk_blob_store *bs; 2972 struct spdk_bs_dev *dev; 2973 struct spdk_bs_super_block *super_block; 2974 2975 dev = init_dev(); 2976 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2977 poll_threads(); 2978 CU_ASSERT(g_bserrno == 0); 2979 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2980 bs = g_bs; 2981 2982 spdk_bs_unload(bs, bs_op_complete, NULL); 2983 poll_threads(); 2984 CU_ASSERT(g_bserrno == 0); 2985 g_bs = NULL; 2986 2987 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2988 super_block->crc = 0; 2989 dev = init_dev(); 2990 2991 /* Load an existing blob store */ 2992 g_bserrno = 0; 2993 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2994 poll_threads(); 2995 CU_ASSERT(g_bserrno == -EILSEQ); 2996 } 2997 2998 /* For the blob dirty shutdown test case we do the following sub-test cases: 2999 * 1 Initialize new blob store and create 1 super blob with some xattrs, then we 3000 * dirty shutdown and reload the blob store and verify the xattrs. 3001 * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown, 3002 * reload the blob store and verify the number of clusters. 3003 * 3 Create the second blob and then dirty shutdown, reload the blob store 3004 * and verify the second blob. 3005 * 4 Delete the second blob and then dirty shutdown, reload the blob store 3006 * and verify the second blob is invalid. 3007 * 5 Create the second blob again and also create the third blob, modify the 3008 * md of the second blob, which makes the md invalid, and then dirty shutdown, 3009 * reload the blob store and verify the second blob, which should be invalid, 3010 * and also verify the third blob, which should be correct. 3011 */ 3012 static void 3013 blob_dirty_shutdown(void) 3014 { 3015 int rc; 3016 int index; 3017 struct spdk_blob_store *bs = g_bs; 3018 spdk_blob_id blobid1, blobid2, blobid3; 3019 struct spdk_blob *blob = g_blob; 3020 uint64_t length; 3021 uint64_t free_clusters; 3022 const void *value; 3023 size_t value_len; 3024 uint32_t page_num; 3025 struct spdk_blob_md_page *page; 3026 struct spdk_blob_opts blob_opts; 3027 3028 /* Create first blob */ 3029 blobid1 = spdk_blob_get_id(blob); 3030 3031 /* Set some xattrs */ 3032 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 3033 CU_ASSERT(rc == 0); 3034 3035 length = 2345; 3036 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3037 CU_ASSERT(rc == 0); 3038 3039 /* Put an xattr that fits exactly into a single page. 3040 * This results in adding additional pages to MD. 3041 * First is flags and smaller xattr, second the large xattr, 3042 * third are just the extents. 
3043 */ 3044 size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - 3045 strlen("large_xattr"); 3046 char *xattr = calloc(xattr_length, sizeof(char)); 3047 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3048 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3049 free(xattr); 3050 SPDK_CU_ASSERT_FATAL(rc == 0); 3051 3052 /* Resize the blob */ 3053 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3054 poll_threads(); 3055 CU_ASSERT(g_bserrno == 0); 3056 3057 /* Set the blob as the super blob */ 3058 spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); 3059 poll_threads(); 3060 CU_ASSERT(g_bserrno == 0); 3061 3062 free_clusters = spdk_bs_free_cluster_count(bs); 3063 3064 spdk_blob_close(blob, blob_op_complete, NULL); 3065 poll_threads(); 3066 CU_ASSERT(g_bserrno == 0); 3067 blob = NULL; 3068 g_blob = NULL; 3069 g_blobid = SPDK_BLOBID_INVALID; 3070 3071 ut_bs_dirty_load(&bs, NULL); 3072 3073 /* Get the super blob */ 3074 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 3075 poll_threads(); 3076 CU_ASSERT(g_bserrno == 0); 3077 CU_ASSERT(blobid1 == g_blobid); 3078 3079 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3080 poll_threads(); 3081 CU_ASSERT(g_bserrno == 0); 3082 CU_ASSERT(g_blob != NULL); 3083 blob = g_blob; 3084 3085 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3086 3087 /* Get the xattrs */ 3088 value = NULL; 3089 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3090 CU_ASSERT(rc == 0); 3091 SPDK_CU_ASSERT_FATAL(value != NULL); 3092 CU_ASSERT(*(uint64_t *)value == length); 3093 CU_ASSERT(value_len == 8); 3094 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3095 3096 /* Resize the blob */ 3097 spdk_blob_resize(blob, 20, blob_op_complete, NULL); 3098 poll_threads(); 3099 CU_ASSERT(g_bserrno == 0); 3100 3101 free_clusters = spdk_bs_free_cluster_count(bs); 3102 3103 spdk_blob_close(blob, blob_op_complete, NULL); 3104 poll_threads(); 3105 CU_ASSERT(g_bserrno == 0); 3106 blob = NULL; 3107 g_blob = NULL; 3108 g_blobid = SPDK_BLOBID_INVALID; 3109 3110 ut_bs_dirty_load(&bs, NULL); 3111 3112 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3113 poll_threads(); 3114 CU_ASSERT(g_bserrno == 0); 3115 CU_ASSERT(g_blob != NULL); 3116 blob = g_blob; 3117 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); 3118 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3119 3120 spdk_blob_close(blob, blob_op_complete, NULL); 3121 poll_threads(); 3122 CU_ASSERT(g_bserrno == 0); 3123 blob = NULL; 3124 g_blob = NULL; 3125 g_blobid = SPDK_BLOBID_INVALID; 3126 3127 /* Create second blob */ 3128 blob = ut_blob_create_and_open(bs, NULL); 3129 blobid2 = spdk_blob_get_id(blob); 3130 3131 /* Set some xattrs */ 3132 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3133 CU_ASSERT(rc == 0); 3134 3135 length = 5432; 3136 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3137 CU_ASSERT(rc == 0); 3138 3139 /* Resize the blob */ 3140 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3141 poll_threads(); 3142 CU_ASSERT(g_bserrno == 0); 3143 3144 free_clusters = spdk_bs_free_cluster_count(bs); 3145 3146 spdk_blob_close(blob, blob_op_complete, NULL); 3147 poll_threads(); 3148 CU_ASSERT(g_bserrno == 0); 3149 blob = NULL; 3150 g_blob = NULL; 3151 g_blobid = SPDK_BLOBID_INVALID; 3152 3153 ut_bs_dirty_load(&bs, NULL); 3154 3155 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3156 poll_threads(); 3157 CU_ASSERT(g_bserrno == 0); 3158 
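/* The second blob is expected to survive the dirty shutdown; its xattrs and cluster count are verified below. */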
CU_ASSERT(g_blob != NULL); 3159 blob = g_blob; 3160 3161 /* Get the xattrs */ 3162 value = NULL; 3163 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3164 CU_ASSERT(rc == 0); 3165 SPDK_CU_ASSERT_FATAL(value != NULL); 3166 CU_ASSERT(*(uint64_t *)value == length); 3167 CU_ASSERT(value_len == 8); 3168 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3169 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3170 3171 ut_blob_close_and_delete(bs, blob); 3172 3173 free_clusters = spdk_bs_free_cluster_count(bs); 3174 3175 ut_bs_dirty_load(&bs, NULL); 3176 3177 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3178 poll_threads(); 3179 CU_ASSERT(g_bserrno != 0); 3180 CU_ASSERT(g_blob == NULL); 3181 3182 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3183 poll_threads(); 3184 CU_ASSERT(g_bserrno == 0); 3185 CU_ASSERT(g_blob != NULL); 3186 blob = g_blob; 3187 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3188 spdk_blob_close(blob, blob_op_complete, NULL); 3189 poll_threads(); 3190 CU_ASSERT(g_bserrno == 0); 3191 3192 ut_bs_reload(&bs, NULL); 3193 3194 /* Create second blob */ 3195 ut_spdk_blob_opts_init(&blob_opts); 3196 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3197 poll_threads(); 3198 CU_ASSERT(g_bserrno == 0); 3199 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3200 blobid2 = g_blobid; 3201 3202 /* Create third blob */ 3203 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3204 poll_threads(); 3205 CU_ASSERT(g_bserrno == 0); 3206 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3207 blobid3 = g_blobid; 3208 3209 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3210 poll_threads(); 3211 CU_ASSERT(g_bserrno == 0); 3212 CU_ASSERT(g_blob != NULL); 3213 blob = g_blob; 3214 3215 /* Set some xattrs for second blob */ 3216 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3217 CU_ASSERT(rc == 0); 3218 3219 length = 5432; 3220 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3221 CU_ASSERT(rc == 0); 3222 3223 spdk_blob_close(blob, blob_op_complete, NULL); 3224 poll_threads(); 3225 CU_ASSERT(g_bserrno == 0); 3226 blob = NULL; 3227 g_blob = NULL; 3228 g_blobid = SPDK_BLOBID_INVALID; 3229 3230 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3231 poll_threads(); 3232 CU_ASSERT(g_bserrno == 0); 3233 CU_ASSERT(g_blob != NULL); 3234 blob = g_blob; 3235 3236 /* Set some xattrs for third blob */ 3237 rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); 3238 CU_ASSERT(rc == 0); 3239 3240 length = 5432; 3241 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3242 CU_ASSERT(rc == 0); 3243 3244 spdk_blob_close(blob, blob_op_complete, NULL); 3245 poll_threads(); 3246 CU_ASSERT(g_bserrno == 0); 3247 blob = NULL; 3248 g_blob = NULL; 3249 g_blobid = SPDK_BLOBID_INVALID; 3250 3251 /* Mark second blob as invalid */ 3252 page_num = bs_blobid_to_page(blobid2); 3253 3254 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3255 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3256 page->sequence_num = 1; 3257 page->crc = blob_md_page_calc_crc(page); 3258 3259 free_clusters = spdk_bs_free_cluster_count(bs); 3260 3261 ut_bs_dirty_load(&bs, NULL); 3262 3263 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3264 poll_threads(); 3265 CU_ASSERT(g_bserrno != 0); 3266 CU_ASSERT(g_blob == NULL); 3267 3268 spdk_bs_open_blob(bs, blobid3, 
blob_op_with_handle_complete, NULL); 3269 poll_threads(); 3270 CU_ASSERT(g_bserrno == 0); 3271 CU_ASSERT(g_blob != NULL); 3272 blob = g_blob; 3273 3274 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3275 } 3276 3277 static void 3278 blob_flags(void) 3279 { 3280 struct spdk_blob_store *bs = g_bs; 3281 spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; 3282 struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; 3283 struct spdk_blob_opts blob_opts; 3284 int rc; 3285 3286 /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */ 3287 blob_invalid = ut_blob_create_and_open(bs, NULL); 3288 blobid_invalid = spdk_blob_get_id(blob_invalid); 3289 3290 blob_data_ro = ut_blob_create_and_open(bs, NULL); 3291 blobid_data_ro = spdk_blob_get_id(blob_data_ro); 3292 3293 ut_spdk_blob_opts_init(&blob_opts); 3294 blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; 3295 blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); 3296 blobid_md_ro = spdk_blob_get_id(blob_md_ro); 3297 CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); 3298 3299 /* Change the size of blob_data_ro to check if flags are serialized 3300 * when blob has non zero number of extents */ 3301 spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); 3302 poll_threads(); 3303 CU_ASSERT(g_bserrno == 0); 3304 3305 /* Set the xattr to check if flags are serialized 3306 * when blob has non zero number of xattrs */ 3307 rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); 3308 CU_ASSERT(rc == 0); 3309 3310 blob_invalid->invalid_flags = (1ULL << 63); 3311 blob_invalid->state = SPDK_BLOB_STATE_DIRTY; 3312 blob_data_ro->data_ro_flags = (1ULL << 62); 3313 blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; 3314 blob_md_ro->md_ro_flags = (1ULL << 61); 3315 blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; 3316 3317 g_bserrno = -1; 3318 spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); 3319 poll_threads(); 3320 CU_ASSERT(g_bserrno == 0); 3321 g_bserrno = -1; 3322 spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); 3323 poll_threads(); 3324 CU_ASSERT(g_bserrno == 0); 3325 g_bserrno = -1; 3326 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3327 poll_threads(); 3328 CU_ASSERT(g_bserrno == 0); 3329 3330 g_bserrno = -1; 3331 spdk_blob_close(blob_invalid, blob_op_complete, NULL); 3332 poll_threads(); 3333 CU_ASSERT(g_bserrno == 0); 3334 blob_invalid = NULL; 3335 g_bserrno = -1; 3336 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3337 poll_threads(); 3338 CU_ASSERT(g_bserrno == 0); 3339 blob_data_ro = NULL; 3340 g_bserrno = -1; 3341 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3342 poll_threads(); 3343 CU_ASSERT(g_bserrno == 0); 3344 blob_md_ro = NULL; 3345 3346 g_blob = NULL; 3347 g_blobid = SPDK_BLOBID_INVALID; 3348 3349 ut_bs_reload(&bs, NULL); 3350 3351 g_blob = NULL; 3352 g_bserrno = 0; 3353 spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); 3354 poll_threads(); 3355 CU_ASSERT(g_bserrno != 0); 3356 CU_ASSERT(g_blob == NULL); 3357 3358 g_blob = NULL; 3359 g_bserrno = -1; 3360 spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL); 3361 poll_threads(); 3362 CU_ASSERT(g_bserrno == 0); 3363 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3364 blob_data_ro = g_blob; 3365 /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. 
*/ 3366 CU_ASSERT(blob_data_ro->data_ro == true); 3367 CU_ASSERT(blob_data_ro->md_ro == true); 3368 CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); 3369 3370 g_blob = NULL; 3371 g_bserrno = -1; 3372 spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); 3373 poll_threads(); 3374 CU_ASSERT(g_bserrno == 0); 3375 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3376 blob_md_ro = g_blob; 3377 CU_ASSERT(blob_md_ro->data_ro == false); 3378 CU_ASSERT(blob_md_ro->md_ro == true); 3379 3380 g_bserrno = -1; 3381 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3382 poll_threads(); 3383 CU_ASSERT(g_bserrno == 0); 3384 3385 ut_blob_close_and_delete(bs, blob_data_ro); 3386 ut_blob_close_and_delete(bs, blob_md_ro); 3387 } 3388 3389 static void 3390 bs_version(void) 3391 { 3392 struct spdk_bs_super_block *super; 3393 struct spdk_blob_store *bs = g_bs; 3394 struct spdk_bs_dev *dev; 3395 struct spdk_blob *blob; 3396 struct spdk_blob_opts blob_opts; 3397 spdk_blob_id blobid; 3398 3399 /* Unload the blob store */ 3400 spdk_bs_unload(bs, bs_op_complete, NULL); 3401 poll_threads(); 3402 CU_ASSERT(g_bserrno == 0); 3403 g_bs = NULL; 3404 3405 /* 3406 * Change the bs version on disk. This will allow us to 3407 * test that the version does not get modified automatically 3408 * when loading and unloading the blobstore. 3409 */ 3410 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 3411 CU_ASSERT(super->version == SPDK_BS_VERSION); 3412 CU_ASSERT(super->clean == 1); 3413 super->version = 2; 3414 /* 3415 * Version 2 metadata does not have a used blobid mask, so clear 3416 * those fields in the super block and zero the corresponding 3417 * region on "disk". We will use this to ensure blob IDs are 3418 * correctly reconstructed. 3419 */ 3420 memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, 3421 super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); 3422 super->used_blobid_mask_start = 0; 3423 super->used_blobid_mask_len = 0; 3424 super->crc = blob_md_page_calc_crc(super); 3425 3426 /* Load an existing blob store */ 3427 dev = init_dev(); 3428 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3429 poll_threads(); 3430 CU_ASSERT(g_bserrno == 0); 3431 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3432 CU_ASSERT(super->clean == 1); 3433 bs = g_bs; 3434 3435 /* 3436 * Create a blob - just to make sure that unloading it 3437 * results in writing the super block (since metadata 3438 * pages were allocated). 
3439 */ 3440 ut_spdk_blob_opts_init(&blob_opts); 3441 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3442 poll_threads(); 3443 CU_ASSERT(g_bserrno == 0); 3444 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3445 blobid = g_blobid; 3446 3447 /* Unload the blob store */ 3448 spdk_bs_unload(bs, bs_op_complete, NULL); 3449 poll_threads(); 3450 CU_ASSERT(g_bserrno == 0); 3451 g_bs = NULL; 3452 CU_ASSERT(super->version == 2); 3453 CU_ASSERT(super->used_blobid_mask_start == 0); 3454 CU_ASSERT(super->used_blobid_mask_len == 0); 3455 3456 dev = init_dev(); 3457 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3458 poll_threads(); 3459 CU_ASSERT(g_bserrno == 0); 3460 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3461 bs = g_bs; 3462 3463 g_blob = NULL; 3464 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3465 poll_threads(); 3466 CU_ASSERT(g_bserrno == 0); 3467 CU_ASSERT(g_blob != NULL); 3468 blob = g_blob; 3469 3470 ut_blob_close_and_delete(bs, blob); 3471 3472 CU_ASSERT(super->version == 2); 3473 CU_ASSERT(super->used_blobid_mask_start == 0); 3474 CU_ASSERT(super->used_blobid_mask_len == 0); 3475 } 3476 3477 static void 3478 blob_set_xattrs_test(void) 3479 { 3480 struct spdk_blob_store *bs = g_bs; 3481 struct spdk_blob *blob; 3482 struct spdk_blob_opts opts; 3483 const void *value; 3484 size_t value_len; 3485 char *xattr; 3486 size_t xattr_length; 3487 int rc; 3488 3489 /* Create blob with extra attributes */ 3490 ut_spdk_blob_opts_init(&opts); 3491 3492 opts.xattrs.names = g_xattr_names; 3493 opts.xattrs.get_value = _get_xattr_value; 3494 opts.xattrs.count = 3; 3495 opts.xattrs.ctx = &g_ctx; 3496 3497 blob = ut_blob_create_and_open(bs, &opts); 3498 3499 /* Get the xattrs */ 3500 value = NULL; 3501 3502 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 3503 CU_ASSERT(rc == 0); 3504 SPDK_CU_ASSERT_FATAL(value != NULL); 3505 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 3506 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 3507 3508 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 3509 CU_ASSERT(rc == 0); 3510 SPDK_CU_ASSERT_FATAL(value != NULL); 3511 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 3512 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 3513 3514 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 3515 CU_ASSERT(rc == 0); 3516 SPDK_CU_ASSERT_FATAL(value != NULL); 3517 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 3518 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 3519 3520 /* Try to get non existing attribute */ 3521 3522 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 3523 CU_ASSERT(rc == -ENOENT); 3524 3525 /* Try xattr exceeding maximum length of descriptor in single page */ 3526 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 3527 strlen("large_xattr") + 1; 3528 xattr = calloc(xattr_length, sizeof(char)); 3529 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3530 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3531 free(xattr); 3532 SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); 3533 3534 spdk_blob_close(blob, blob_op_complete, NULL); 3535 poll_threads(); 3536 CU_ASSERT(g_bserrno == 0); 3537 blob = NULL; 3538 g_blob = NULL; 3539 g_blobid = SPDK_BLOBID_INVALID; 3540 3541 /* NULL callback */ 3542 ut_spdk_blob_opts_init(&opts); 3543 opts.xattrs.names = g_xattr_names; 3544 opts.xattrs.get_value = NULL; 3545 opts.xattrs.count = 1; 3546 
opts.xattrs.ctx = &g_ctx; 3547 3548 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3549 poll_threads(); 3550 CU_ASSERT(g_bserrno == -EINVAL); 3551 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3552 3553 /* NULL values */ 3554 ut_spdk_blob_opts_init(&opts); 3555 opts.xattrs.names = g_xattr_names; 3556 opts.xattrs.get_value = _get_xattr_value_null; 3557 opts.xattrs.count = 1; 3558 opts.xattrs.ctx = NULL; 3559 3560 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3561 poll_threads(); 3562 CU_ASSERT(g_bserrno == -EINVAL); 3563 } 3564 3565 static void 3566 blob_thin_prov_alloc(void) 3567 { 3568 struct spdk_blob_store *bs = g_bs; 3569 struct spdk_blob *blob; 3570 struct spdk_blob_opts opts; 3571 spdk_blob_id blobid; 3572 uint64_t free_clusters; 3573 3574 free_clusters = spdk_bs_free_cluster_count(bs); 3575 3576 /* Set blob as thin provisioned */ 3577 ut_spdk_blob_opts_init(&opts); 3578 opts.thin_provision = true; 3579 3580 blob = ut_blob_create_and_open(bs, &opts); 3581 blobid = spdk_blob_get_id(blob); 3582 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3583 3584 CU_ASSERT(blob->active.num_clusters == 0); 3585 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 3586 3587 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3588 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3589 poll_threads(); 3590 CU_ASSERT(g_bserrno == 0); 3591 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3592 CU_ASSERT(blob->active.num_clusters == 5); 3593 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 3594 3595 /* Grow it to 1TB - still unallocated */ 3596 spdk_blob_resize(blob, 262144, blob_op_complete, NULL); 3597 poll_threads(); 3598 CU_ASSERT(g_bserrno == 0); 3599 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3600 CU_ASSERT(blob->active.num_clusters == 262144); 3601 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3602 3603 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3604 poll_threads(); 3605 CU_ASSERT(g_bserrno == 0); 3606 /* Sync must not change anything */ 3607 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3608 CU_ASSERT(blob->active.num_clusters == 262144); 3609 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3610 /* Since clusters are not allocated, 3611 * number of metadata pages is expected to be minimal. 
3612 */ 3613 CU_ASSERT(blob->active.num_pages == 1); 3614 3615 /* Shrink the blob to 3 clusters - still unallocated */ 3616 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 3617 poll_threads(); 3618 CU_ASSERT(g_bserrno == 0); 3619 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3620 CU_ASSERT(blob->active.num_clusters == 3); 3621 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3622 3623 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3624 poll_threads(); 3625 CU_ASSERT(g_bserrno == 0); 3626 /* Sync must not change anything */ 3627 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3628 CU_ASSERT(blob->active.num_clusters == 3); 3629 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3630 3631 spdk_blob_close(blob, blob_op_complete, NULL); 3632 poll_threads(); 3633 CU_ASSERT(g_bserrno == 0); 3634 3635 ut_bs_reload(&bs, NULL); 3636 3637 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3638 poll_threads(); 3639 CU_ASSERT(g_bserrno == 0); 3640 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3641 blob = g_blob; 3642 3643 /* Check that clusters allocation and size is still the same */ 3644 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3645 CU_ASSERT(blob->active.num_clusters == 3); 3646 3647 ut_blob_close_and_delete(bs, blob); 3648 } 3649 3650 static void 3651 blob_insert_cluster_msg_test(void) 3652 { 3653 struct spdk_blob_store *bs = g_bs; 3654 struct spdk_blob *blob; 3655 struct spdk_blob_opts opts; 3656 spdk_blob_id blobid; 3657 uint64_t free_clusters; 3658 uint64_t new_cluster = 0; 3659 uint32_t cluster_num = 3; 3660 uint32_t extent_page = 0; 3661 3662 free_clusters = spdk_bs_free_cluster_count(bs); 3663 3664 /* Set blob as thin provisioned */ 3665 ut_spdk_blob_opts_init(&opts); 3666 opts.thin_provision = true; 3667 opts.num_clusters = 4; 3668 3669 blob = ut_blob_create_and_open(bs, &opts); 3670 blobid = spdk_blob_get_id(blob); 3671 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3672 3673 CU_ASSERT(blob->active.num_clusters == 4); 3674 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); 3675 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3676 3677 /* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread. 3678 * This is to simulate behaviour when cluster is allocated after blob creation. 3679 * Such as _spdk_bs_allocate_and_copy_cluster(). 
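 * The cluster map entry should remain 0 until the insert message is processed on the
 * metadata thread by blob_insert_cluster_on_md_thread() below.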
*/ 3680 bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); 3681 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3682 3683 blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, 3684 blob_op_complete, NULL); 3685 poll_threads(); 3686 3687 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3688 3689 spdk_blob_close(blob, blob_op_complete, NULL); 3690 poll_threads(); 3691 CU_ASSERT(g_bserrno == 0); 3692 3693 ut_bs_reload(&bs, NULL); 3694 3695 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3696 poll_threads(); 3697 CU_ASSERT(g_bserrno == 0); 3698 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3699 blob = g_blob; 3700 3701 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3702 3703 ut_blob_close_and_delete(bs, blob); 3704 } 3705 3706 static void 3707 blob_thin_prov_rw(void) 3708 { 3709 static const uint8_t zero[10 * 4096] = { 0 }; 3710 struct spdk_blob_store *bs = g_bs; 3711 struct spdk_blob *blob; 3712 struct spdk_io_channel *channel, *channel_thread1; 3713 struct spdk_blob_opts opts; 3714 uint64_t free_clusters; 3715 uint64_t page_size; 3716 uint8_t payload_read[10 * 4096]; 3717 uint8_t payload_write[10 * 4096]; 3718 uint64_t write_bytes; 3719 uint64_t read_bytes; 3720 3721 free_clusters = spdk_bs_free_cluster_count(bs); 3722 page_size = spdk_bs_get_page_size(bs); 3723 3724 channel = spdk_bs_alloc_io_channel(bs); 3725 CU_ASSERT(channel != NULL); 3726 3727 ut_spdk_blob_opts_init(&opts); 3728 opts.thin_provision = true; 3729 3730 blob = ut_blob_create_and_open(bs, &opts); 3731 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3732 3733 CU_ASSERT(blob->active.num_clusters == 0); 3734 3735 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3736 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3737 poll_threads(); 3738 CU_ASSERT(g_bserrno == 0); 3739 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3740 CU_ASSERT(blob->active.num_clusters == 5); 3741 3742 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3743 poll_threads(); 3744 CU_ASSERT(g_bserrno == 0); 3745 /* Sync must not change anything */ 3746 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3747 CU_ASSERT(blob->active.num_clusters == 5); 3748 3749 /* Payload should be all zeros from unallocated clusters */ 3750 memset(payload_read, 0xFF, sizeof(payload_read)); 3751 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3752 poll_threads(); 3753 CU_ASSERT(g_bserrno == 0); 3754 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3755 3756 write_bytes = g_dev_write_bytes; 3757 read_bytes = g_dev_read_bytes; 3758 3759 /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ 3760 set_thread(1); 3761 channel_thread1 = spdk_bs_alloc_io_channel(bs); 3762 CU_ASSERT(channel_thread1 != NULL); 3763 memset(payload_write, 0xE5, sizeof(payload_write)); 3764 spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); 3765 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3766 /* Perform write on thread 0. That will try to allocate cluster, 3767 * but fail due to another thread issuing the cluster allocation first. 
*/ 3768 set_thread(0); 3769 memset(payload_write, 0xE5, sizeof(payload_write)); 3770 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 3771 CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); 3772 poll_threads(); 3773 CU_ASSERT(g_bserrno == 0); 3774 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3775 /* For thin-provisioned blob we need to write 20 pages plus one page metadata and 3776 * read 0 bytes */ 3777 if (g_use_extent_table) { 3778 /* Add one more page for EXTENT_PAGE write */ 3779 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); 3780 } else { 3781 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); 3782 } 3783 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3784 3785 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3786 poll_threads(); 3787 CU_ASSERT(g_bserrno == 0); 3788 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3789 3790 ut_blob_close_and_delete(bs, blob); 3791 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3792 3793 set_thread(1); 3794 spdk_bs_free_io_channel(channel_thread1); 3795 set_thread(0); 3796 spdk_bs_free_io_channel(channel); 3797 poll_threads(); 3798 g_blob = NULL; 3799 g_blobid = 0; 3800 } 3801 3802 static void 3803 blob_thin_prov_rle(void) 3804 { 3805 static const uint8_t zero[10 * 4096] = { 0 }; 3806 struct spdk_blob_store *bs = g_bs; 3807 struct spdk_blob *blob; 3808 struct spdk_io_channel *channel; 3809 struct spdk_blob_opts opts; 3810 spdk_blob_id blobid; 3811 uint64_t free_clusters; 3812 uint64_t page_size; 3813 uint8_t payload_read[10 * 4096]; 3814 uint8_t payload_write[10 * 4096]; 3815 uint64_t write_bytes; 3816 uint64_t read_bytes; 3817 uint64_t io_unit; 3818 3819 free_clusters = spdk_bs_free_cluster_count(bs); 3820 page_size = spdk_bs_get_page_size(bs); 3821 3822 ut_spdk_blob_opts_init(&opts); 3823 opts.thin_provision = true; 3824 opts.num_clusters = 5; 3825 3826 blob = ut_blob_create_and_open(bs, &opts); 3827 blobid = spdk_blob_get_id(blob); 3828 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3829 3830 channel = spdk_bs_alloc_io_channel(bs); 3831 CU_ASSERT(channel != NULL); 3832 3833 /* Target specifically second cluster in a blob as first allocation */ 3834 io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs); 3835 3836 /* Payload should be all zeros from unallocated clusters */ 3837 memset(payload_read, 0xFF, sizeof(payload_read)); 3838 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3839 poll_threads(); 3840 CU_ASSERT(g_bserrno == 0); 3841 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3842 3843 write_bytes = g_dev_write_bytes; 3844 read_bytes = g_dev_read_bytes; 3845 3846 /* Issue write to second cluster in a blob */ 3847 memset(payload_write, 0xE5, sizeof(payload_write)); 3848 spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); 3849 poll_threads(); 3850 CU_ASSERT(g_bserrno == 0); 3851 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3852 /* For thin-provisioned blob we need to write 10 pages plus one page metadata and 3853 * read 0 bytes */ 3854 if (g_use_extent_table) { 3855 /* Add one more page for EXTENT_PAGE write */ 3856 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); 3857 } else { 3858 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); 3859 } 3860 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3861 3862 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, 
blob_op_complete, NULL); 3863 poll_threads(); 3864 CU_ASSERT(g_bserrno == 0); 3865 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3866 3867 spdk_bs_free_io_channel(channel); 3868 poll_threads(); 3869 3870 spdk_blob_close(blob, blob_op_complete, NULL); 3871 poll_threads(); 3872 CU_ASSERT(g_bserrno == 0); 3873 3874 ut_bs_reload(&bs, NULL); 3875 3876 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3877 poll_threads(); 3878 CU_ASSERT(g_bserrno == 0); 3879 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3880 blob = g_blob; 3881 3882 channel = spdk_bs_alloc_io_channel(bs); 3883 CU_ASSERT(channel != NULL); 3884 3885 /* Read second cluster after blob reload to confirm data written */ 3886 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3887 poll_threads(); 3888 CU_ASSERT(g_bserrno == 0); 3889 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3890 3891 spdk_bs_free_io_channel(channel); 3892 poll_threads(); 3893 3894 ut_blob_close_and_delete(bs, blob); 3895 } 3896 3897 static void 3898 blob_thin_prov_rw_iov(void) 3899 { 3900 static const uint8_t zero[10 * 4096] = { 0 }; 3901 struct spdk_blob_store *bs = g_bs; 3902 struct spdk_blob *blob; 3903 struct spdk_io_channel *channel; 3904 struct spdk_blob_opts opts; 3905 uint64_t free_clusters; 3906 uint8_t payload_read[10 * 4096]; 3907 uint8_t payload_write[10 * 4096]; 3908 struct iovec iov_read[3]; 3909 struct iovec iov_write[3]; 3910 3911 free_clusters = spdk_bs_free_cluster_count(bs); 3912 3913 channel = spdk_bs_alloc_io_channel(bs); 3914 CU_ASSERT(channel != NULL); 3915 3916 ut_spdk_blob_opts_init(&opts); 3917 opts.thin_provision = true; 3918 3919 blob = ut_blob_create_and_open(bs, &opts); 3920 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3921 3922 CU_ASSERT(blob->active.num_clusters == 0); 3923 3924 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. 
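 * For a thin-provisioned blob, resizing only changes the logical cluster count;
 * no clusters should be allocated until data is actually written.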
*/ 3925 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3926 poll_threads(); 3927 CU_ASSERT(g_bserrno == 0); 3928 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3929 CU_ASSERT(blob->active.num_clusters == 5); 3930 3931 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3932 poll_threads(); 3933 CU_ASSERT(g_bserrno == 0); 3934 /* Sync must not change anything */ 3935 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3936 CU_ASSERT(blob->active.num_clusters == 5); 3937 3938 /* Payload should be all zeros from unallocated clusters */ 3939 memset(payload_read, 0xAA, sizeof(payload_read)); 3940 iov_read[0].iov_base = payload_read; 3941 iov_read[0].iov_len = 3 * 4096; 3942 iov_read[1].iov_base = payload_read + 3 * 4096; 3943 iov_read[1].iov_len = 4 * 4096; 3944 iov_read[2].iov_base = payload_read + 7 * 4096; 3945 iov_read[2].iov_len = 3 * 4096; 3946 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 3947 poll_threads(); 3948 CU_ASSERT(g_bserrno == 0); 3949 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3950 3951 memset(payload_write, 0xE5, sizeof(payload_write)); 3952 iov_write[0].iov_base = payload_write; 3953 iov_write[0].iov_len = 1 * 4096; 3954 iov_write[1].iov_base = payload_write + 1 * 4096; 3955 iov_write[1].iov_len = 5 * 4096; 3956 iov_write[2].iov_base = payload_write + 6 * 4096; 3957 iov_write[2].iov_len = 4 * 4096; 3958 3959 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 3960 poll_threads(); 3961 CU_ASSERT(g_bserrno == 0); 3962 3963 memset(payload_read, 0xAA, sizeof(payload_read)); 3964 iov_read[0].iov_base = payload_read; 3965 iov_read[0].iov_len = 3 * 4096; 3966 iov_read[1].iov_base = payload_read + 3 * 4096; 3967 iov_read[1].iov_len = 4 * 4096; 3968 iov_read[2].iov_base = payload_read + 7 * 4096; 3969 iov_read[2].iov_len = 3 * 4096; 3970 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 3971 poll_threads(); 3972 CU_ASSERT(g_bserrno == 0); 3973 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3974 3975 spdk_bs_free_io_channel(channel); 3976 poll_threads(); 3977 3978 ut_blob_close_and_delete(bs, blob); 3979 } 3980 3981 struct iter_ctx { 3982 int current_iter; 3983 spdk_blob_id blobid[4]; 3984 }; 3985 3986 static void 3987 test_iter(void *arg, struct spdk_blob *blob, int bserrno) 3988 { 3989 struct iter_ctx *iter_ctx = arg; 3990 spdk_blob_id blobid; 3991 3992 CU_ASSERT(bserrno == 0); 3993 blobid = spdk_blob_get_id(blob); 3994 CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); 3995 } 3996 3997 static void 3998 bs_load_iter_test(void) 3999 { 4000 struct spdk_blob_store *bs; 4001 struct spdk_bs_dev *dev; 4002 struct iter_ctx iter_ctx = { 0 }; 4003 struct spdk_blob *blob; 4004 int i, rc; 4005 struct spdk_bs_opts opts; 4006 4007 dev = init_dev(); 4008 spdk_bs_opts_init(&opts); 4009 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4010 4011 /* Initialize a new blob store */ 4012 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 4013 poll_threads(); 4014 CU_ASSERT(g_bserrno == 0); 4015 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4016 bs = g_bs; 4017 4018 for (i = 0; i < 4; i++) { 4019 blob = ut_blob_create_and_open(bs, NULL); 4020 iter_ctx.blobid[i] = spdk_blob_get_id(blob); 4021 4022 /* Just save the blobid as an xattr for testing purposes. 
*/ 4023 rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); 4024 CU_ASSERT(rc == 0); 4025 4026 /* Resize the blob */ 4027 spdk_blob_resize(blob, i, blob_op_complete, NULL); 4028 poll_threads(); 4029 CU_ASSERT(g_bserrno == 0); 4030 4031 spdk_blob_close(blob, blob_op_complete, NULL); 4032 poll_threads(); 4033 CU_ASSERT(g_bserrno == 0); 4034 } 4035 4036 g_bserrno = -1; 4037 spdk_bs_unload(bs, bs_op_complete, NULL); 4038 poll_threads(); 4039 CU_ASSERT(g_bserrno == 0); 4040 4041 dev = init_dev(); 4042 spdk_bs_opts_init(&opts); 4043 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4044 opts.iter_cb_fn = test_iter; 4045 opts.iter_cb_arg = &iter_ctx; 4046 4047 /* Test blob iteration during load after a clean shutdown. */ 4048 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4049 poll_threads(); 4050 CU_ASSERT(g_bserrno == 0); 4051 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4052 bs = g_bs; 4053 4054 /* Dirty shutdown */ 4055 bs_free(bs); 4056 4057 dev = init_dev(); 4058 spdk_bs_opts_init(&opts); 4059 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4060 opts.iter_cb_fn = test_iter; 4061 iter_ctx.current_iter = 0; 4062 opts.iter_cb_arg = &iter_ctx; 4063 4064 /* Test blob iteration during load after a dirty shutdown. */ 4065 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4066 poll_threads(); 4067 CU_ASSERT(g_bserrno == 0); 4068 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4069 bs = g_bs; 4070 4071 spdk_bs_unload(bs, bs_op_complete, NULL); 4072 poll_threads(); 4073 CU_ASSERT(g_bserrno == 0); 4074 g_bs = NULL; 4075 } 4076 4077 static void 4078 blob_snapshot_rw(void) 4079 { 4080 static const uint8_t zero[10 * 4096] = { 0 }; 4081 struct spdk_blob_store *bs = g_bs; 4082 struct spdk_blob *blob, *snapshot; 4083 struct spdk_io_channel *channel; 4084 struct spdk_blob_opts opts; 4085 spdk_blob_id blobid, snapshotid; 4086 uint64_t free_clusters; 4087 uint64_t cluster_size; 4088 uint64_t page_size; 4089 uint8_t payload_read[10 * 4096]; 4090 uint8_t payload_write[10 * 4096]; 4091 uint64_t write_bytes; 4092 uint64_t read_bytes; 4093 4094 free_clusters = spdk_bs_free_cluster_count(bs); 4095 cluster_size = spdk_bs_get_cluster_size(bs); 4096 page_size = spdk_bs_get_page_size(bs); 4097 4098 channel = spdk_bs_alloc_io_channel(bs); 4099 CU_ASSERT(channel != NULL); 4100 4101 ut_spdk_blob_opts_init(&opts); 4102 opts.thin_provision = true; 4103 opts.num_clusters = 5; 4104 4105 blob = ut_blob_create_and_open(bs, &opts); 4106 blobid = spdk_blob_get_id(blob); 4107 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4108 4109 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4110 4111 memset(payload_read, 0xFF, sizeof(payload_read)); 4112 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4113 poll_threads(); 4114 CU_ASSERT(g_bserrno == 0); 4115 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4116 4117 memset(payload_write, 0xE5, sizeof(payload_write)); 4118 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4119 poll_threads(); 4120 CU_ASSERT(g_bserrno == 0); 4121 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4122 4123 /* Create snapshot from blob */ 4124 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4125 poll_threads(); 4126 CU_ASSERT(g_bserrno == 0); 4127 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4128 snapshotid = g_blobid; 4129 4130 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4131 poll_threads(); 
4132 CU_ASSERT(g_bserrno == 0); 4133 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4134 snapshot = g_blob; 4135 CU_ASSERT(snapshot->data_ro == true); 4136 CU_ASSERT(snapshot->md_ro == true); 4137 4138 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4139 4140 write_bytes = g_dev_write_bytes; 4141 read_bytes = g_dev_read_bytes; 4142 4143 memset(payload_write, 0xAA, sizeof(payload_write)); 4144 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4145 poll_threads(); 4146 CU_ASSERT(g_bserrno == 0); 4147 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4148 4149 /* For a clone we need to allocate and copy one cluster, update one page of metadata 4150 * and then write 10 pages of payload. 4151 */ 4152 if (g_use_extent_table) { 4153 /* Add one more page for EXTENT_PAGE write */ 4154 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); 4155 } else { 4156 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); 4157 } 4158 CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); 4159 4160 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4161 poll_threads(); 4162 CU_ASSERT(g_bserrno == 0); 4163 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4164 4165 /* Data on snapshot should not change after write to clone */ 4166 memset(payload_write, 0xE5, sizeof(payload_write)); 4167 spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); 4168 poll_threads(); 4169 CU_ASSERT(g_bserrno == 0); 4170 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4171 4172 ut_blob_close_and_delete(bs, blob); 4173 ut_blob_close_and_delete(bs, snapshot); 4174 4175 spdk_bs_free_io_channel(channel); 4176 poll_threads(); 4177 g_blob = NULL; 4178 g_blobid = 0; 4179 } 4180 4181 static void 4182 blob_snapshot_rw_iov(void) 4183 { 4184 static const uint8_t zero[10 * 4096] = { 0 }; 4185 struct spdk_blob_store *bs = g_bs; 4186 struct spdk_blob *blob, *snapshot; 4187 struct spdk_io_channel *channel; 4188 struct spdk_blob_opts opts; 4189 spdk_blob_id blobid, snapshotid; 4190 uint64_t free_clusters; 4191 uint8_t payload_read[10 * 4096]; 4192 uint8_t payload_write[10 * 4096]; 4193 struct iovec iov_read[3]; 4194 struct iovec iov_write[3]; 4195 4196 free_clusters = spdk_bs_free_cluster_count(bs); 4197 4198 channel = spdk_bs_alloc_io_channel(bs); 4199 CU_ASSERT(channel != NULL); 4200 4201 ut_spdk_blob_opts_init(&opts); 4202 opts.thin_provision = true; 4203 opts.num_clusters = 5; 4204 4205 blob = ut_blob_create_and_open(bs, &opts); 4206 blobid = spdk_blob_get_id(blob); 4207 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4208 4209 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4210 4211 /* Create snapshot from blob */ 4212 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4213 poll_threads(); 4214 CU_ASSERT(g_bserrno == 0); 4215 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4216 snapshotid = g_blobid; 4217 4218 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4219 poll_threads(); 4220 CU_ASSERT(g_bserrno == 0); 4221 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4222 snapshot = g_blob; 4223 CU_ASSERT(snapshot->data_ro == true); 4224 CU_ASSERT(snapshot->md_ro == true); 4225 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4226 4227 /* Payload should be all zeros from unallocated clusters */ 4228 memset(payload_read, 0xAA, sizeof(payload_read)); 4229 iov_read[0].iov_base = payload_read; 4230 iov_read[0].iov_len = 3 * 4096; 4231 
iov_read[1].iov_base = payload_read + 3 * 4096; 4232 iov_read[1].iov_len = 4 * 4096; 4233 iov_read[2].iov_base = payload_read + 7 * 4096; 4234 iov_read[2].iov_len = 3 * 4096; 4235 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4236 poll_threads(); 4237 CU_ASSERT(g_bserrno == 0); 4238 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4239 4240 memset(payload_write, 0xE5, sizeof(payload_write)); 4241 iov_write[0].iov_base = payload_write; 4242 iov_write[0].iov_len = 1 * 4096; 4243 iov_write[1].iov_base = payload_write + 1 * 4096; 4244 iov_write[1].iov_len = 5 * 4096; 4245 iov_write[2].iov_base = payload_write + 6 * 4096; 4246 iov_write[2].iov_len = 4 * 4096; 4247 4248 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4249 poll_threads(); 4250 CU_ASSERT(g_bserrno == 0); 4251 4252 memset(payload_read, 0xAA, sizeof(payload_read)); 4253 iov_read[0].iov_base = payload_read; 4254 iov_read[0].iov_len = 3 * 4096; 4255 iov_read[1].iov_base = payload_read + 3 * 4096; 4256 iov_read[1].iov_len = 4 * 4096; 4257 iov_read[2].iov_base = payload_read + 7 * 4096; 4258 iov_read[2].iov_len = 3 * 4096; 4259 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4260 poll_threads(); 4261 CU_ASSERT(g_bserrno == 0); 4262 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4263 4264 spdk_bs_free_io_channel(channel); 4265 poll_threads(); 4266 4267 ut_blob_close_and_delete(bs, blob); 4268 ut_blob_close_and_delete(bs, snapshot); 4269 } 4270 4271 /** 4272 * Inflate / decouple parent rw unit tests. 4273 * 4274 * -------------- 4275 * original blob: 0 1 2 3 4 4276 * ,---------+---------+---------+---------+---------. 4277 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4278 * +---------+---------+---------+---------+---------+ 4279 * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | 4280 * +---------+---------+---------+---------+---------+ 4281 * blob | - |zzzzzzzzz| - | - | - | 4282 * '---------+---------+---------+---------+---------' 4283 * . . . . . . 4284 * -------- . . . . . . 4285 * inflate: . . . . . . 4286 * ,---------+---------+---------+---------+---------. 4287 * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| 4288 * '---------+---------+---------+---------+---------' 4289 * 4290 * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency 4291 * on snapshot2 and snapshot removed . . . 4292 * . . . . . . 4293 * ---------------- . . . . . . 4294 * decouple parent: . . . . . . 4295 * ,---------+---------+---------+---------+---------. 4296 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4297 * +---------+---------+---------+---------+---------+ 4298 * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | 4299 * '---------+---------+---------+---------+---------' 4300 * 4301 * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency 4302 * on snapshot2 removed and on snapshot still exists. Snapshot2 4303 * should remain a clone of snapshot. 
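 *
 * Both variants are exercised by _blob_inflate_rw() below; blob_inflate_rw() calls it
 * first with decouple_parent == false (inflate) and then with true (decouple parent).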
4304 */ 4305 static void 4306 _blob_inflate_rw(bool decouple_parent) 4307 { 4308 struct spdk_blob_store *bs = g_bs; 4309 struct spdk_blob *blob, *snapshot, *snapshot2; 4310 struct spdk_io_channel *channel; 4311 struct spdk_blob_opts opts; 4312 spdk_blob_id blobid, snapshotid, snapshot2id; 4313 uint64_t free_clusters; 4314 uint64_t cluster_size; 4315 4316 uint64_t payload_size; 4317 uint8_t *payload_read; 4318 uint8_t *payload_write; 4319 uint8_t *payload_clone; 4320 4321 uint64_t pages_per_cluster; 4322 uint64_t pages_per_payload; 4323 4324 int i; 4325 spdk_blob_id ids[2]; 4326 size_t count; 4327 4328 free_clusters = spdk_bs_free_cluster_count(bs); 4329 cluster_size = spdk_bs_get_cluster_size(bs); 4330 pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); 4331 pages_per_payload = pages_per_cluster * 5; 4332 4333 payload_size = cluster_size * 5; 4334 4335 payload_read = malloc(payload_size); 4336 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 4337 4338 payload_write = malloc(payload_size); 4339 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 4340 4341 payload_clone = malloc(payload_size); 4342 SPDK_CU_ASSERT_FATAL(payload_clone != NULL); 4343 4344 channel = spdk_bs_alloc_io_channel(bs); 4345 SPDK_CU_ASSERT_FATAL(channel != NULL); 4346 4347 /* Create blob */ 4348 ut_spdk_blob_opts_init(&opts); 4349 opts.thin_provision = true; 4350 opts.num_clusters = 5; 4351 4352 blob = ut_blob_create_and_open(bs, &opts); 4353 blobid = spdk_blob_get_id(blob); 4354 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4355 4356 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4357 4358 /* 1) Initial read should return zeroed payload */ 4359 memset(payload_read, 0xFF, payload_size); 4360 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4361 blob_op_complete, NULL); 4362 poll_threads(); 4363 CU_ASSERT(g_bserrno == 0); 4364 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 4365 4366 /* Fill whole blob with a pattern, except last cluster (to be sure it 4367 * isn't allocated) */ 4368 memset(payload_write, 0xE5, payload_size - cluster_size); 4369 spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - 4370 pages_per_cluster, blob_op_complete, NULL); 4371 poll_threads(); 4372 CU_ASSERT(g_bserrno == 0); 4373 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4374 4375 /* 2) Create snapshot from blob (first level) */ 4376 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4377 poll_threads(); 4378 CU_ASSERT(g_bserrno == 0); 4379 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4380 snapshotid = g_blobid; 4381 4382 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4383 poll_threads(); 4384 CU_ASSERT(g_bserrno == 0); 4385 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4386 snapshot = g_blob; 4387 CU_ASSERT(snapshot->data_ro == true); 4388 CU_ASSERT(snapshot->md_ro == true); 4389 4390 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4391 4392 /* Write every second cluster with a pattern. 4393 * 4394 * Last cluster shouldn't be written, to be sure that snapshot nor clone 4395 * doesn't allocate it. 4396 * 4397 * payload_clone stores expected result on "blob" read at the time and 4398 * is used only to check data consistency on clone before and after 4399 * inflation. Initially we fill it with a backing snapshots pattern 4400 * used before. 
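 * The last cluster is left zeroed in payload_clone to match the one cluster that is
 * never written anywhere in this test.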
4401 */ 4402 memset(payload_clone, 0xE5, payload_size - cluster_size); 4403 memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); 4404 memset(payload_write, 0xAA, payload_size); 4405 for (i = 1; i < 5; i += 2) { 4406 spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, 4407 pages_per_cluster, blob_op_complete, NULL); 4408 poll_threads(); 4409 CU_ASSERT(g_bserrno == 0); 4410 4411 /* Update expected result */ 4412 memcpy(payload_clone + (cluster_size * i), payload_write, 4413 cluster_size); 4414 } 4415 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4416 4417 /* Check data consistency on clone */ 4418 memset(payload_read, 0xFF, payload_size); 4419 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4420 blob_op_complete, NULL); 4421 poll_threads(); 4422 CU_ASSERT(g_bserrno == 0); 4423 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4424 4425 /* 3) Create second levels snapshot from blob */ 4426 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4427 poll_threads(); 4428 CU_ASSERT(g_bserrno == 0); 4429 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4430 snapshot2id = g_blobid; 4431 4432 spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); 4433 poll_threads(); 4434 CU_ASSERT(g_bserrno == 0); 4435 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4436 snapshot2 = g_blob; 4437 CU_ASSERT(snapshot2->data_ro == true); 4438 CU_ASSERT(snapshot2->md_ro == true); 4439 4440 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); 4441 4442 CU_ASSERT(snapshot2->parent_id == snapshotid); 4443 4444 /* Write one cluster on the top level blob. This cluster (1) covers 4445 * already allocated cluster in the snapshot2, so shouldn't be inflated 4446 * at all */ 4447 spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, 4448 pages_per_cluster, blob_op_complete, NULL); 4449 poll_threads(); 4450 CU_ASSERT(g_bserrno == 0); 4451 4452 /* Update expected result */ 4453 memcpy(payload_clone + cluster_size, payload_write, cluster_size); 4454 4455 /* Check data consistency on clone */ 4456 memset(payload_read, 0xFF, payload_size); 4457 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4458 blob_op_complete, NULL); 4459 poll_threads(); 4460 CU_ASSERT(g_bserrno == 0); 4461 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4462 4463 4464 /* Close all blobs */ 4465 spdk_blob_close(blob, blob_op_complete, NULL); 4466 poll_threads(); 4467 CU_ASSERT(g_bserrno == 0); 4468 4469 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4470 poll_threads(); 4471 CU_ASSERT(g_bserrno == 0); 4472 4473 spdk_blob_close(snapshot, blob_op_complete, NULL); 4474 poll_threads(); 4475 CU_ASSERT(g_bserrno == 0); 4476 4477 /* Check snapshot-clone relations */ 4478 count = 2; 4479 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4480 CU_ASSERT(count == 1); 4481 CU_ASSERT(ids[0] == snapshot2id); 4482 4483 count = 2; 4484 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4485 CU_ASSERT(count == 1); 4486 CU_ASSERT(ids[0] == blobid); 4487 4488 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); 4489 4490 free_clusters = spdk_bs_free_cluster_count(bs); 4491 if (!decouple_parent) { 4492 /* Do full blob inflation */ 4493 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 4494 poll_threads(); 4495 CU_ASSERT(g_bserrno == 0); 4496 4497 /* All clusters should be inflated (except one already allocated 4498 * in a top level blob) */ 4499 
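/* Of the 5 clusters, cluster 1 was already allocated by the earlier write on the top level blob, hence exactly 4 new allocations. */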
CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4); 4500 4501 /* Check if relation tree updated correctly */ 4502 count = 2; 4503 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4504 4505 /* snapshotid have one clone */ 4506 CU_ASSERT(count == 1); 4507 CU_ASSERT(ids[0] == snapshot2id); 4508 4509 /* snapshot2id have no clones */ 4510 count = 2; 4511 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4512 CU_ASSERT(count == 0); 4513 4514 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4515 } else { 4516 /* Decouple parent of blob */ 4517 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 4518 poll_threads(); 4519 CU_ASSERT(g_bserrno == 0); 4520 4521 /* Only one cluster from a parent should be inflated (second one 4522 * is covered by a cluster written on a top level blob, and 4523 * already allocated) */ 4524 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1); 4525 4526 /* Check if relation tree updated correctly */ 4527 count = 2; 4528 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4529 4530 /* snapshotid have two clones now */ 4531 CU_ASSERT(count == 2); 4532 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4533 CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id); 4534 4535 /* snapshot2id have no clones */ 4536 count = 2; 4537 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4538 CU_ASSERT(count == 0); 4539 4540 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4541 } 4542 4543 /* Try to delete snapshot2 (should pass) */ 4544 spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL); 4545 poll_threads(); 4546 CU_ASSERT(g_bserrno == 0); 4547 4548 /* Try to delete base snapshot */ 4549 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4550 poll_threads(); 4551 CU_ASSERT(g_bserrno == 0); 4552 4553 /* Reopen blob after snapshot deletion */ 4554 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4555 poll_threads(); 4556 CU_ASSERT(g_bserrno == 0); 4557 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4558 blob = g_blob; 4559 4560 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4561 4562 /* Check data consistency on inflated blob */ 4563 memset(payload_read, 0xFF, payload_size); 4564 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4565 blob_op_complete, NULL); 4566 poll_threads(); 4567 CU_ASSERT(g_bserrno == 0); 4568 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4569 4570 spdk_bs_free_io_channel(channel); 4571 poll_threads(); 4572 4573 free(payload_read); 4574 free(payload_write); 4575 free(payload_clone); 4576 4577 ut_blob_close_and_delete(bs, blob); 4578 } 4579 4580 static void 4581 blob_inflate_rw(void) 4582 { 4583 _blob_inflate_rw(false); 4584 _blob_inflate_rw(true); 4585 } 4586 4587 /** 4588 * Snapshot-clones relation test 4589 * 4590 * snapshot 4591 * | 4592 * +-----+-----+ 4593 * | | 4594 * blob(ro) snapshot2 4595 * | | 4596 * clone2 clone 4597 */ 4598 static void 4599 blob_relations(void) 4600 { 4601 struct spdk_blob_store *bs; 4602 struct spdk_bs_dev *dev; 4603 struct spdk_bs_opts bs_opts; 4604 struct spdk_blob_opts opts; 4605 struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2; 4606 spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2; 4607 int rc; 4608 size_t count; 4609 spdk_blob_id ids[10] = {}; 4610 4611 dev = init_dev(); 4612 spdk_bs_opts_init(&bs_opts); 4613 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), 
"TESTTYPE"); 4614 4615 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4616 poll_threads(); 4617 CU_ASSERT(g_bserrno == 0); 4618 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4619 bs = g_bs; 4620 4621 /* 1. Create blob with 10 clusters */ 4622 4623 ut_spdk_blob_opts_init(&opts); 4624 opts.num_clusters = 10; 4625 4626 blob = ut_blob_create_and_open(bs, &opts); 4627 blobid = spdk_blob_get_id(blob); 4628 4629 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4630 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4631 CU_ASSERT(!spdk_blob_is_clone(blob)); 4632 CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); 4633 4634 /* blob should not have underlying snapshot nor clones */ 4635 CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); 4636 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4637 count = SPDK_COUNTOF(ids); 4638 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4639 CU_ASSERT(rc == 0); 4640 CU_ASSERT(count == 0); 4641 4642 4643 /* 2. Create snapshot */ 4644 4645 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4646 poll_threads(); 4647 CU_ASSERT(g_bserrno == 0); 4648 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4649 snapshotid = g_blobid; 4650 4651 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4652 poll_threads(); 4653 CU_ASSERT(g_bserrno == 0); 4654 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4655 snapshot = g_blob; 4656 4657 CU_ASSERT(spdk_blob_is_read_only(snapshot)); 4658 CU_ASSERT(spdk_blob_is_snapshot(snapshot)); 4659 CU_ASSERT(!spdk_blob_is_clone(snapshot)); 4660 CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); 4661 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4662 4663 /* Check if original blob is converted to the clone of snapshot */ 4664 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4665 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4666 CU_ASSERT(spdk_blob_is_clone(blob)); 4667 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4668 CU_ASSERT(blob->parent_id == snapshotid); 4669 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4670 4671 count = SPDK_COUNTOF(ids); 4672 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4673 CU_ASSERT(rc == 0); 4674 CU_ASSERT(count == 1); 4675 CU_ASSERT(ids[0] == blobid); 4676 4677 4678 /* 3. Create clone from snapshot */ 4679 4680 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 4681 poll_threads(); 4682 CU_ASSERT(g_bserrno == 0); 4683 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4684 cloneid = g_blobid; 4685 4686 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4687 poll_threads(); 4688 CU_ASSERT(g_bserrno == 0); 4689 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4690 clone = g_blob; 4691 4692 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4693 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4694 CU_ASSERT(spdk_blob_is_clone(clone)); 4695 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4696 CU_ASSERT(clone->parent_id == snapshotid); 4697 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); 4698 4699 count = SPDK_COUNTOF(ids); 4700 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4701 CU_ASSERT(rc == 0); 4702 CU_ASSERT(count == 0); 4703 4704 /* Check if clone is on the snapshot's list */ 4705 count = SPDK_COUNTOF(ids); 4706 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4707 CU_ASSERT(rc == 0); 4708 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4709 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 4710 4711 4712 /* 4. 
Create snapshot of the clone */ 4713 4714 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 4715 poll_threads(); 4716 CU_ASSERT(g_bserrno == 0); 4717 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4718 snapshotid2 = g_blobid; 4719 4720 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4721 poll_threads(); 4722 CU_ASSERT(g_bserrno == 0); 4723 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4724 snapshot2 = g_blob; 4725 4726 CU_ASSERT(spdk_blob_is_read_only(snapshot2)); 4727 CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); 4728 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 4729 CU_ASSERT(snapshot2->parent_id == snapshotid); 4730 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4731 4732 /* Check if clone is converted to the clone of snapshot2 and snapshot2 4733 * is a child of snapshot */ 4734 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4735 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4736 CU_ASSERT(spdk_blob_is_clone(clone)); 4737 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4738 CU_ASSERT(clone->parent_id == snapshotid2); 4739 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4740 4741 count = SPDK_COUNTOF(ids); 4742 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4743 CU_ASSERT(rc == 0); 4744 CU_ASSERT(count == 1); 4745 CU_ASSERT(ids[0] == cloneid); 4746 4747 4748 /* 5. Try to create clone from read only blob */ 4749 4750 /* Mark blob as read only */ 4751 spdk_blob_set_read_only(blob); 4752 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4753 poll_threads(); 4754 CU_ASSERT(g_bserrno == 0); 4755 4756 /* Check if previously created blob is read only clone */ 4757 CU_ASSERT(spdk_blob_is_read_only(blob)); 4758 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4759 CU_ASSERT(spdk_blob_is_clone(blob)); 4760 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4761 4762 /* Create clone from read only blob */ 4763 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4764 poll_threads(); 4765 CU_ASSERT(g_bserrno == 0); 4766 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4767 cloneid2 = g_blobid; 4768 4769 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 4770 poll_threads(); 4771 CU_ASSERT(g_bserrno == 0); 4772 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4773 clone2 = g_blob; 4774 4775 CU_ASSERT(!spdk_blob_is_read_only(clone2)); 4776 CU_ASSERT(!spdk_blob_is_snapshot(clone2)); 4777 CU_ASSERT(spdk_blob_is_clone(clone2)); 4778 CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); 4779 4780 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4781 4782 count = SPDK_COUNTOF(ids); 4783 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4784 CU_ASSERT(rc == 0); 4785 4786 CU_ASSERT(count == 1); 4787 CU_ASSERT(ids[0] == cloneid2); 4788 4789 /* Close blobs */ 4790 4791 spdk_blob_close(clone2, blob_op_complete, NULL); 4792 poll_threads(); 4793 CU_ASSERT(g_bserrno == 0); 4794 4795 spdk_blob_close(blob, blob_op_complete, NULL); 4796 poll_threads(); 4797 CU_ASSERT(g_bserrno == 0); 4798 4799 spdk_blob_close(clone, blob_op_complete, NULL); 4800 poll_threads(); 4801 CU_ASSERT(g_bserrno == 0); 4802 4803 spdk_blob_close(snapshot, blob_op_complete, NULL); 4804 poll_threads(); 4805 CU_ASSERT(g_bserrno == 0); 4806 4807 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4808 poll_threads(); 4809 CU_ASSERT(g_bserrno == 0); 4810 4811 /* Try to delete snapshot with more than 1 clone */ 4812 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4813 poll_threads(); 4814 CU_ASSERT(g_bserrno != 0); 4815 4816 
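/* The delete should be rejected because snapshotid still has two clones (blob and snapshot2); the relations are re-verified after the reload below. */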
ut_bs_reload(&bs, &bs_opts); 4817 4818 /* NULL ids array should return number of clones in count */ 4819 count = SPDK_COUNTOF(ids); 4820 rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); 4821 CU_ASSERT(rc == -ENOMEM); 4822 CU_ASSERT(count == 2); 4823 4824 /* incorrect array size */ 4825 count = 1; 4826 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4827 CU_ASSERT(rc == -ENOMEM); 4828 CU_ASSERT(count == 2); 4829 4830 4831 /* Verify structure of loaded blob store */ 4832 4833 /* snapshot */ 4834 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4835 4836 count = SPDK_COUNTOF(ids); 4837 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4838 CU_ASSERT(rc == 0); 4839 CU_ASSERT(count == 2); 4840 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4841 CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); 4842 4843 /* blob */ 4844 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4845 count = SPDK_COUNTOF(ids); 4846 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4847 CU_ASSERT(rc == 0); 4848 CU_ASSERT(count == 1); 4849 CU_ASSERT(ids[0] == cloneid2); 4850 4851 /* clone */ 4852 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4853 count = SPDK_COUNTOF(ids); 4854 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4855 CU_ASSERT(rc == 0); 4856 CU_ASSERT(count == 0); 4857 4858 /* snapshot2 */ 4859 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4860 count = SPDK_COUNTOF(ids); 4861 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4862 CU_ASSERT(rc == 0); 4863 CU_ASSERT(count == 1); 4864 CU_ASSERT(ids[0] == cloneid); 4865 4866 /* clone2 */ 4867 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4868 count = SPDK_COUNTOF(ids); 4869 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 4870 CU_ASSERT(rc == 0); 4871 CU_ASSERT(count == 0); 4872 4873 /* Try to delete blob that user should not be able to remove */ 4874 4875 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4876 poll_threads(); 4877 CU_ASSERT(g_bserrno != 0); 4878 4879 /* Remove all blobs */ 4880 4881 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 4882 poll_threads(); 4883 CU_ASSERT(g_bserrno == 0); 4884 4885 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 4886 poll_threads(); 4887 CU_ASSERT(g_bserrno == 0); 4888 4889 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 4890 poll_threads(); 4891 CU_ASSERT(g_bserrno == 0); 4892 4893 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 4894 poll_threads(); 4895 CU_ASSERT(g_bserrno == 0); 4896 4897 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4898 poll_threads(); 4899 CU_ASSERT(g_bserrno == 0); 4900 4901 spdk_bs_unload(bs, bs_op_complete, NULL); 4902 poll_threads(); 4903 CU_ASSERT(g_bserrno == 0); 4904 4905 g_bs = NULL; 4906 } 4907 4908 /** 4909 * Snapshot-clones relation test 2 4910 * 4911 * snapshot1 4912 * | 4913 * snapshot2 4914 * | 4915 * +-----+-----+ 4916 * | | 4917 * blob(ro) snapshot3 4918 * | | 4919 * | snapshot4 4920 * | | | 4921 * clone2 clone clone3 4922 */ 4923 static void 4924 blob_relations2(void) 4925 { 4926 struct spdk_blob_store *bs; 4927 struct spdk_bs_dev *dev; 4928 struct spdk_bs_opts bs_opts; 4929 struct spdk_blob_opts opts; 4930 struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; 4931 spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, 4932 cloneid3; 4933 int rc; 4934 size_t count; 4935 
spdk_blob_id ids[10] = {}; 4936 4937 dev = init_dev(); 4938 spdk_bs_opts_init(&bs_opts); 4939 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 4940 4941 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4942 poll_threads(); 4943 CU_ASSERT(g_bserrno == 0); 4944 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4945 bs = g_bs; 4946 4947 /* 1. Create blob with 10 clusters */ 4948 4949 ut_spdk_blob_opts_init(&opts); 4950 opts.num_clusters = 10; 4951 4952 blob = ut_blob_create_and_open(bs, &opts); 4953 blobid = spdk_blob_get_id(blob); 4954 4955 /* 2. Create snapshot1 */ 4956 4957 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4958 poll_threads(); 4959 CU_ASSERT(g_bserrno == 0); 4960 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4961 snapshotid1 = g_blobid; 4962 4963 spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); 4964 poll_threads(); 4965 CU_ASSERT(g_bserrno == 0); 4966 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4967 snapshot1 = g_blob; 4968 4969 CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); 4970 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); 4971 4972 CU_ASSERT(blob->parent_id == snapshotid1); 4973 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 4974 4975 /* Check if blob is the clone of snapshot1 */ 4976 CU_ASSERT(blob->parent_id == snapshotid1); 4977 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 4978 4979 count = SPDK_COUNTOF(ids); 4980 rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); 4981 CU_ASSERT(rc == 0); 4982 CU_ASSERT(count == 1); 4983 CU_ASSERT(ids[0] == blobid); 4984 4985 /* 3. Create another snapshot */ 4986 4987 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4988 poll_threads(); 4989 CU_ASSERT(g_bserrno == 0); 4990 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4991 snapshotid2 = g_blobid; 4992 4993 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4994 poll_threads(); 4995 CU_ASSERT(g_bserrno == 0); 4996 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4997 snapshot2 = g_blob; 4998 4999 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 5000 CU_ASSERT(snapshot2->parent_id == snapshotid1); 5001 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); 5002 5003 /* Check if snapshot2 is the clone of snapshot1 and blob 5004 * is a child of snapshot2 */ 5005 CU_ASSERT(blob->parent_id == snapshotid2); 5006 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5007 5008 count = SPDK_COUNTOF(ids); 5009 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5010 CU_ASSERT(rc == 0); 5011 CU_ASSERT(count == 1); 5012 CU_ASSERT(ids[0] == blobid); 5013 5014 /* 4. 
Create clone from snapshot */ 5015 5016 spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); 5017 poll_threads(); 5018 CU_ASSERT(g_bserrno == 0); 5019 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5020 cloneid = g_blobid; 5021 5022 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 5023 poll_threads(); 5024 CU_ASSERT(g_bserrno == 0); 5025 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5026 clone = g_blob; 5027 5028 CU_ASSERT(clone->parent_id == snapshotid2); 5029 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5030 5031 /* Check if clone is on the snapshot's list */ 5032 count = SPDK_COUNTOF(ids); 5033 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5034 CU_ASSERT(rc == 0); 5035 CU_ASSERT(count == 2); 5036 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5037 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5038 5039 /* 5. Create snapshot of the clone */ 5040 5041 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5042 poll_threads(); 5043 CU_ASSERT(g_bserrno == 0); 5044 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5045 snapshotid3 = g_blobid; 5046 5047 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5048 poll_threads(); 5049 CU_ASSERT(g_bserrno == 0); 5050 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5051 snapshot3 = g_blob; 5052 5053 CU_ASSERT(snapshot3->parent_id == snapshotid2); 5054 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5055 5056 /* Check if clone is converted to the clone of snapshot3 and snapshot3 5057 * is a child of snapshot2 */ 5058 CU_ASSERT(clone->parent_id == snapshotid3); 5059 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5060 5061 count = SPDK_COUNTOF(ids); 5062 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5063 CU_ASSERT(rc == 0); 5064 CU_ASSERT(count == 1); 5065 CU_ASSERT(ids[0] == cloneid); 5066 5067 /* 6. Create another snapshot of the clone */ 5068 5069 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5070 poll_threads(); 5071 CU_ASSERT(g_bserrno == 0); 5072 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5073 snapshotid4 = g_blobid; 5074 5075 spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); 5076 poll_threads(); 5077 CU_ASSERT(g_bserrno == 0); 5078 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5079 snapshot4 = g_blob; 5080 5081 CU_ASSERT(snapshot4->parent_id == snapshotid3); 5082 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); 5083 5084 /* Check if clone is converted to the clone of snapshot4 and snapshot4 5085 * is a child of snapshot3 */ 5086 CU_ASSERT(clone->parent_id == snapshotid4); 5087 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); 5088 5089 count = SPDK_COUNTOF(ids); 5090 rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); 5091 CU_ASSERT(rc == 0); 5092 CU_ASSERT(count == 1); 5093 CU_ASSERT(ids[0] == cloneid); 5094 5095 /* 7. Remove snapshot 4 */ 5096 5097 ut_blob_close_and_delete(bs, snapshot4); 5098 5099 /* Check if relations are back to state from before creating snapshot 4 */ 5100 CU_ASSERT(clone->parent_id == snapshotid3); 5101 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5102 5103 count = SPDK_COUNTOF(ids); 5104 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5105 CU_ASSERT(rc == 0); 5106 CU_ASSERT(count == 1); 5107 CU_ASSERT(ids[0] == cloneid); 5108 5109 /* 8. 
Create second clone of snapshot 3 and try to remove snapshot 3 */ 5110 5111 spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); 5112 poll_threads(); 5113 CU_ASSERT(g_bserrno == 0); 5114 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5115 cloneid3 = g_blobid; 5116 5117 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5118 poll_threads(); 5119 CU_ASSERT(g_bserrno != 0); 5120 5121 /* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */ 5122 5123 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5124 poll_threads(); 5125 CU_ASSERT(g_bserrno == 0); 5126 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5127 snapshot3 = g_blob; 5128 5129 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5130 poll_threads(); 5131 CU_ASSERT(g_bserrno != 0); 5132 5133 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5134 poll_threads(); 5135 CU_ASSERT(g_bserrno == 0); 5136 5137 spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); 5138 poll_threads(); 5139 CU_ASSERT(g_bserrno == 0); 5140 5141 /* 10. Remove snapshot 1 */ 5142 5143 ut_blob_close_and_delete(bs, snapshot1); 5144 5145 /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ 5146 CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); 5147 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5148 5149 count = SPDK_COUNTOF(ids); 5150 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5151 CU_ASSERT(rc == 0); 5152 CU_ASSERT(count == 2); 5153 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5154 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5155 5156 /* 11. Try to create clone from read only blob */ 5157 5158 /* Mark blob as read only */ 5159 spdk_blob_set_read_only(blob); 5160 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5161 poll_threads(); 5162 CU_ASSERT(g_bserrno == 0); 5163 5164 /* Create clone from read only blob */ 5165 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5166 poll_threads(); 5167 CU_ASSERT(g_bserrno == 0); 5168 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5169 cloneid2 = g_blobid; 5170 5171 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5172 poll_threads(); 5173 CU_ASSERT(g_bserrno == 0); 5174 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5175 clone2 = g_blob; 5176 5177 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5178 5179 count = SPDK_COUNTOF(ids); 5180 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5181 CU_ASSERT(rc == 0); 5182 CU_ASSERT(count == 1); 5183 CU_ASSERT(ids[0] == cloneid2); 5184 5185 /* Close blobs */ 5186 5187 spdk_blob_close(clone2, blob_op_complete, NULL); 5188 poll_threads(); 5189 CU_ASSERT(g_bserrno == 0); 5190 5191 spdk_blob_close(blob, blob_op_complete, NULL); 5192 poll_threads(); 5193 CU_ASSERT(g_bserrno == 0); 5194 5195 spdk_blob_close(clone, blob_op_complete, NULL); 5196 poll_threads(); 5197 CU_ASSERT(g_bserrno == 0); 5198 5199 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5200 poll_threads(); 5201 CU_ASSERT(g_bserrno == 0); 5202 5203 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5204 poll_threads(); 5205 CU_ASSERT(g_bserrno == 0); 5206 5207 ut_bs_reload(&bs, &bs_opts); 5208 5209 /* Verify structure of loaded blob store */ 5210 5211 /* snapshot2 */ 5212 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5213 5214 count = SPDK_COUNTOF(ids); 5215 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5216 CU_ASSERT(rc == 0); 5217 
CU_ASSERT(count == 2); 5218 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5219 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5220 5221 /* blob */ 5222 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5223 count = SPDK_COUNTOF(ids); 5224 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5225 CU_ASSERT(rc == 0); 5226 CU_ASSERT(count == 1); 5227 CU_ASSERT(ids[0] == cloneid2); 5228 5229 /* clone */ 5230 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5231 count = SPDK_COUNTOF(ids); 5232 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5233 CU_ASSERT(rc == 0); 5234 CU_ASSERT(count == 0); 5235 5236 /* snapshot3 */ 5237 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5238 count = SPDK_COUNTOF(ids); 5239 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5240 CU_ASSERT(rc == 0); 5241 CU_ASSERT(count == 1); 5242 CU_ASSERT(ids[0] == cloneid); 5243 5244 /* clone2 */ 5245 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5246 count = SPDK_COUNTOF(ids); 5247 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5248 CU_ASSERT(rc == 0); 5249 CU_ASSERT(count == 0); 5250 5251 /* Try to delete all blobs in the worse possible order */ 5252 5253 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5254 poll_threads(); 5255 CU_ASSERT(g_bserrno != 0); 5256 5257 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5258 poll_threads(); 5259 CU_ASSERT(g_bserrno == 0); 5260 5261 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5262 poll_threads(); 5263 CU_ASSERT(g_bserrno != 0); 5264 5265 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5266 poll_threads(); 5267 CU_ASSERT(g_bserrno == 0); 5268 5269 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5270 poll_threads(); 5271 CU_ASSERT(g_bserrno == 0); 5272 5273 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5274 poll_threads(); 5275 CU_ASSERT(g_bserrno == 0); 5276 5277 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5278 poll_threads(); 5279 CU_ASSERT(g_bserrno == 0); 5280 5281 spdk_bs_unload(bs, bs_op_complete, NULL); 5282 poll_threads(); 5283 CU_ASSERT(g_bserrno == 0); 5284 5285 g_bs = NULL; 5286 } 5287 5288 static void 5289 blobstore_clean_power_failure(void) 5290 { 5291 struct spdk_blob_store *bs; 5292 struct spdk_blob *blob; 5293 struct spdk_power_failure_thresholds thresholds = {}; 5294 bool clean = false; 5295 struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 5296 struct spdk_bs_super_block super_copy = {}; 5297 5298 thresholds.general_threshold = 1; 5299 while (!clean) { 5300 /* Create bs and blob */ 5301 suite_blob_setup(); 5302 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5303 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5304 bs = g_bs; 5305 blob = g_blob; 5306 5307 /* Super block should not change for rest of the UT, 5308 * save it and compare later. */ 5309 memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block)); 5310 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5311 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5312 5313 /* Force bs/super block in a clean state. 5314 * Along with marking blob dirty, to cause blob persist. 
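 * If the md sync completes despite the injected power failure, the persist path is
 * expected to have flipped both bs->clean and the on-disk super block back to 0,
 * which is what the checks below verify.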
*/ 5315 blob->state = SPDK_BLOB_STATE_DIRTY; 5316 bs->clean = 1; 5317 super->clean = 1; 5318 super->crc = blob_md_page_calc_crc(super); 5319 5320 g_bserrno = -1; 5321 dev_set_power_failure_thresholds(thresholds); 5322 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5323 poll_threads(); 5324 dev_reset_power_failure_event(); 5325 5326 if (g_bserrno == 0) { 5327 /* After successful md sync, both bs and super block 5328 * should be marked as not clean. */ 5329 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5330 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5331 clean = true; 5332 } 5333 5334 /* Depending on the point of failure, super block was either updated or not. */ 5335 super_copy.clean = super->clean; 5336 super_copy.crc = blob_md_page_calc_crc(&super_copy); 5337 /* Verify that the values in the super block remained unchanged. */ 5338 SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block))); 5339 5340 /* Delete blob and unload bs */ 5341 suite_blob_cleanup(); 5342 5343 thresholds.general_threshold++; 5344 } 5345 } 5346 5347 static void 5348 blob_delete_snapshot_power_failure(void) 5349 { 5350 struct spdk_bs_dev *dev; 5351 struct spdk_blob_store *bs; 5352 struct spdk_blob_opts opts; 5353 struct spdk_blob *blob, *snapshot; 5354 struct spdk_power_failure_thresholds thresholds = {}; 5355 spdk_blob_id blobid, snapshotid; 5356 const void *value; 5357 size_t value_len; 5358 size_t count; 5359 spdk_blob_id ids[3] = {}; 5360 int rc; 5361 bool deleted = false; 5362 int delete_snapshot_bserrno = -1; 5363 5364 thresholds.general_threshold = 1; 5365 while (!deleted) { 5366 dev = init_dev(); 5367 5368 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5369 poll_threads(); 5370 CU_ASSERT(g_bserrno == 0); 5371 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5372 bs = g_bs; 5373 5374 /* Create blob */ 5375 ut_spdk_blob_opts_init(&opts); 5376 opts.num_clusters = 10; 5377 5378 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5379 poll_threads(); 5380 CU_ASSERT(g_bserrno == 0); 5381 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5382 blobid = g_blobid; 5383 5384 /* Create snapshot */ 5385 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5386 poll_threads(); 5387 CU_ASSERT(g_bserrno == 0); 5388 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5389 snapshotid = g_blobid; 5390 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5391 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5392 5393 dev_set_power_failure_thresholds(thresholds); 5394 5395 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5396 poll_threads(); 5397 delete_snapshot_bserrno = g_bserrno; 5398 5399 /* Do not shut down cleanly. The assumption is that after snapshot deletion 5400 * reports success, changes to both blobs should already be persisted.
*/ 5401 dev_reset_power_failure_event(); 5402 ut_bs_dirty_load(&bs, NULL); 5403 5404 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5405 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5406 5407 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5408 poll_threads(); 5409 CU_ASSERT(g_bserrno == 0); 5410 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5411 blob = g_blob; 5412 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5413 5414 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5415 poll_threads(); 5416 5417 if (g_bserrno == 0) { 5418 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5419 snapshot = g_blob; 5420 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5421 count = SPDK_COUNTOF(ids); 5422 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5423 CU_ASSERT(rc == 0); 5424 CU_ASSERT(count == 1); 5425 CU_ASSERT(ids[0] == blobid); 5426 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 5427 CU_ASSERT(rc != 0); 5428 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5429 5430 spdk_blob_close(snapshot, blob_op_complete, NULL); 5431 poll_threads(); 5432 CU_ASSERT(g_bserrno == 0); 5433 } else { 5434 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5435 /* Snapshot might have been left in unrecoverable state, so it does not open. 5436 * Yet delete might perform further changes to the clone after that. 5437 * This UT should test until snapshot is deleted and delete call succeeds. */ 5438 if (delete_snapshot_bserrno == 0) { 5439 deleted = true; 5440 } 5441 } 5442 5443 spdk_blob_close(blob, blob_op_complete, NULL); 5444 poll_threads(); 5445 CU_ASSERT(g_bserrno == 0); 5446 5447 spdk_bs_unload(bs, bs_op_complete, NULL); 5448 poll_threads(); 5449 CU_ASSERT(g_bserrno == 0); 5450 5451 thresholds.general_threshold++; 5452 } 5453 } 5454 5455 static void 5456 blob_create_snapshot_power_failure(void) 5457 { 5458 struct spdk_blob_store *bs = g_bs; 5459 struct spdk_bs_dev *dev; 5460 struct spdk_blob_opts opts; 5461 struct spdk_blob *blob, *snapshot; 5462 struct spdk_power_failure_thresholds thresholds = {}; 5463 spdk_blob_id blobid, snapshotid; 5464 const void *value; 5465 size_t value_len; 5466 size_t count; 5467 spdk_blob_id ids[3] = {}; 5468 int rc; 5469 bool created = false; 5470 int create_snapshot_bserrno = -1; 5471 5472 thresholds.general_threshold = 1; 5473 while (!created) { 5474 dev = init_dev(); 5475 5476 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5477 poll_threads(); 5478 CU_ASSERT(g_bserrno == 0); 5479 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5480 bs = g_bs; 5481 5482 /* Create blob */ 5483 ut_spdk_blob_opts_init(&opts); 5484 opts.num_clusters = 10; 5485 5486 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5487 poll_threads(); 5488 CU_ASSERT(g_bserrno == 0); 5489 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5490 blobid = g_blobid; 5491 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5492 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5493 5494 dev_set_power_failure_thresholds(thresholds); 5495 5496 /* Create snapshot */ 5497 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5498 poll_threads(); 5499 create_snapshot_bserrno = g_bserrno; 5500 snapshotid = g_blobid; 5501 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5502 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5503 5504 /* Do not shut 
down cleanly. Assumption is that after create snapshot 5505 * reports success, both blobs should be power-fail safe. */ 5506 dev_reset_power_failure_event(); 5507 ut_bs_dirty_load(&bs, NULL); 5508 5509 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5510 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5511 5512 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5513 poll_threads(); 5514 CU_ASSERT(g_bserrno == 0); 5515 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5516 blob = g_blob; 5517 5518 if (snapshotid != SPDK_BLOBID_INVALID) { 5519 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5520 poll_threads(); 5521 } 5522 5523 if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { 5524 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5525 snapshot = g_blob; 5526 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5527 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5528 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5529 count = SPDK_COUNTOF(ids); 5530 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5531 CU_ASSERT(rc == 0); 5532 CU_ASSERT(count == 1); 5533 CU_ASSERT(ids[0] == blobid); 5534 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); 5535 CU_ASSERT(rc != 0); 5536 5537 spdk_blob_close(snapshot, blob_op_complete, NULL); 5538 poll_threads(); 5539 CU_ASSERT(g_bserrno == 0); 5540 if (create_snapshot_bserrno == 0) { 5541 created = true; 5542 } 5543 } else { 5544 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5545 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); 5546 } 5547 5548 spdk_blob_close(blob, blob_op_complete, NULL); 5549 poll_threads(); 5550 CU_ASSERT(g_bserrno == 0); 5551 5552 spdk_bs_unload(bs, bs_op_complete, NULL); 5553 poll_threads(); 5554 CU_ASSERT(g_bserrno == 0); 5555 5556 thresholds.general_threshold++; 5557 } 5558 } 5559 5560 static void 5561 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5562 { 5563 uint8_t payload_ff[64 * 512]; 5564 uint8_t payload_aa[64 * 512]; 5565 uint8_t payload_00[64 * 512]; 5566 uint8_t *cluster0, *cluster1; 5567 5568 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5569 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5570 memset(payload_00, 0x00, sizeof(payload_00)); 5571 5572 /* Try to perform I/O with io unit = 512 */ 5573 spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); 5574 poll_threads(); 5575 CU_ASSERT(g_bserrno == 0); 5576 5577 /* If thin provisioned is set cluster should be allocated now */ 5578 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5579 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5580 5581 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5582 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). 
*/ 5583 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5584 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5585 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5586 5587 /* Verify write with offset on first page */ 5588 spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); 5589 poll_threads(); 5590 CU_ASSERT(g_bserrno == 0); 5591 5592 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5593 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5594 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5595 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5596 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5597 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5598 5599 /* Verify write with offset on first page */ 5600 spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); 5601 poll_threads(); 5602 5603 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5604 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5605 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5606 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5607 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5608 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5609 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5610 5611 /* Verify write with offset on second page */ 5612 spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); 5613 poll_threads(); 5614 5615 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5616 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5617 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5618 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5619 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5620 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5621 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5622 5623 /* Verify write across multiple pages */ 5624 spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); 5625 poll_threads(); 5626 5627 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ 5628 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5629 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5630 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5631 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5632 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5633 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5634 5635 /* Verify write across multiple clusters */ 5636 spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); 5637 poll_threads(); 5638 5639 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5640 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5641 5642 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5643 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5644 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5645 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5646 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5647 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5648 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5649 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 
4 * 512) == 0); 5650 5651 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5652 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5653 5654 /* Verify write to second cluster */ 5655 spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); 5656 poll_threads(); 5657 5658 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5659 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5660 5661 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5662 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5663 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5664 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5665 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5666 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5667 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5668 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5669 5670 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5671 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5672 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5673 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5674 } 5675 5676 static void 5677 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5678 { 5679 uint8_t payload_read[64 * 512]; 5680 uint8_t payload_ff[64 * 512]; 5681 uint8_t payload_aa[64 * 512]; 5682 uint8_t payload_00[64 * 512]; 5683 5684 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5685 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5686 memset(payload_00, 0x00, sizeof(payload_00)); 5687 5688 /* Read only first io unit */ 5689 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5690 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5691 * payload_read: F000 0000 | 0000 0000 ... */ 5692 memset(payload_read, 0x00, sizeof(payload_read)); 5693 spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); 5694 poll_threads(); 5695 CU_ASSERT(g_bserrno == 0); 5696 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5697 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5698 5699 /* Read four io_units starting from offset = 2 5700 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5701 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5702 * payload_read: F0AA 0000 | 0000 0000 ... */ 5703 5704 memset(payload_read, 0x00, sizeof(payload_read)); 5705 spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); 5706 poll_threads(); 5707 CU_ASSERT(g_bserrno == 0); 5708 5709 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5710 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5711 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5712 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5713 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5714 5715 /* Read eight io_units across multiple pages 5716 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5717 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5718 * payload_read: AAAA AAAA | 0000 0000 ... 
*/ 5719 memset(payload_read, 0x00, sizeof(payload_read)); 5720 spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); 5721 poll_threads(); 5722 CU_ASSERT(g_bserrno == 0); 5723 5724 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5725 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5726 5727 /* Read eight io_units across multiple clusters 5728 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5729 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5730 * payload_read: FFFF FFFF | 0000 0000 ... */ 5731 memset(payload_read, 0x00, sizeof(payload_read)); 5732 spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); 5733 poll_threads(); 5734 CU_ASSERT(g_bserrno == 0); 5735 5736 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 5737 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5738 5739 /* Read four io_units from second cluster 5740 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5741 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 5742 * payload_read: 00FF 0000 | 0000 0000 ... */ 5743 memset(payload_read, 0x00, sizeof(payload_read)); 5744 spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); 5745 poll_threads(); 5746 CU_ASSERT(g_bserrno == 0); 5747 5748 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 5749 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 5750 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5751 5752 /* Read second cluster 5753 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5754 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 5755 * payload_read: FFFF 0000 | 0000 FF00 ... 
*/ 5756 memset(payload_read, 0x00, sizeof(payload_read)); 5757 spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); 5758 poll_threads(); 5759 CU_ASSERT(g_bserrno == 0); 5760 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 5761 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 5762 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 5763 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 5764 5765 /* Read whole two clusters 5766 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5767 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 5768 memset(payload_read, 0x00, sizeof(payload_read)); 5769 spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); 5770 poll_threads(); 5771 CU_ASSERT(g_bserrno == 0); 5772 5773 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5774 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5775 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 5776 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 5777 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 5778 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 5779 5780 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 5781 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 5782 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 5783 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 5784 } 5785 5786 5787 static void 5788 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5789 { 5790 uint8_t payload_ff[64 * 512]; 5791 uint8_t payload_aa[64 * 512]; 5792 uint8_t payload_00[64 * 512]; 5793 uint8_t *cluster0, *cluster1; 5794 5795 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5796 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5797 memset(payload_00, 0x00, sizeof(payload_00)); 5798 5799 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5800 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5801 5802 /* Unmap */ 5803 spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); 5804 poll_threads(); 5805 5806 CU_ASSERT(g_bserrno == 0); 5807 5808 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5809 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5810 } 5811 5812 static void 5813 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5814 { 5815 uint8_t payload_ff[64 * 512]; 5816 uint8_t payload_aa[64 * 512]; 5817 uint8_t payload_00[64 * 512]; 5818 uint8_t *cluster0, *cluster1; 5819 5820 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5821 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5822 memset(payload_00, 0x00, sizeof(payload_00)); 5823 5824 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5825 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5826 5827 /* Write zeroes */ 5828 spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); 5829 poll_threads(); 5830 5831 CU_ASSERT(g_bserrno == 0); 5832 5833 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5834 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5835 } 5836 5837 5838 static void 5839 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 
5840 { 5841 uint8_t payload_ff[64 * 512]; 5842 uint8_t payload_aa[64 * 512]; 5843 uint8_t payload_00[64 * 512]; 5844 uint8_t *cluster0, *cluster1; 5845 struct iovec iov[4]; 5846 5847 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5848 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5849 memset(payload_00, 0x00, sizeof(payload_00)); 5850 5851 /* Try to perform I/O with io unit = 512 */ 5852 iov[0].iov_base = payload_ff; 5853 iov[0].iov_len = 1 * 512; 5854 spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5855 poll_threads(); 5856 CU_ASSERT(g_bserrno == 0); 5857 5858 /* If thin provisioned is set cluster should be allocated now */ 5859 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5860 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5861 5862 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5863 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ 5864 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5865 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5866 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5867 5868 /* Verify write with offset on first page */ 5869 iov[0].iov_base = payload_ff; 5870 iov[0].iov_len = 1 * 512; 5871 spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); 5872 poll_threads(); 5873 CU_ASSERT(g_bserrno == 0); 5874 5875 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5876 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5877 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5878 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5879 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5880 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5881 5882 /* Verify write with offset on first page */ 5883 iov[0].iov_base = payload_ff; 5884 iov[0].iov_len = 4 * 512; 5885 spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); 5886 poll_threads(); 5887 5888 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5889 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5890 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5891 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5892 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5893 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5894 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5895 5896 /* Verify write with offset on second page */ 5897 iov[0].iov_base = payload_ff; 5898 iov[0].iov_len = 4 * 512; 5899 spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); 5900 poll_threads(); 5901 5902 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5903 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5904 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5905 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5906 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5907 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5908 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5909 5910 /* Verify write across multiple pages */ 5911 iov[0].iov_base = payload_aa; 5912 iov[0].iov_len = 8 * 512; 5913 spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); 5914 poll_threads(); 5915 5916 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 
0000 | 0000 0000 ] */ 5917 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5918 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5919 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5920 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5921 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5922 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5923 5924 /* Verify write across multiple clusters */ 5925 5926 iov[0].iov_base = payload_ff; 5927 iov[0].iov_len = 8 * 512; 5928 spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); 5929 poll_threads(); 5930 5931 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5932 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5933 5934 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5935 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5936 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5937 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5938 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5939 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5940 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5941 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); 5942 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5943 5944 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5945 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5946 5947 /* Verify write to second cluster */ 5948 5949 iov[0].iov_base = payload_ff; 5950 iov[0].iov_len = 2 * 512; 5951 spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); 5952 poll_threads(); 5953 5954 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5955 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5956 5957 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5958 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5959 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5960 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5961 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5962 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5963 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5964 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5965 5966 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5967 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5968 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5969 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5970 } 5971 5972 static void 5973 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5974 { 5975 uint8_t payload_read[64 * 512]; 5976 uint8_t payload_ff[64 * 512]; 5977 uint8_t payload_aa[64 * 512]; 5978 uint8_t payload_00[64 * 512]; 5979 struct iovec iov[4]; 5980 5981 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5982 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5983 memset(payload_00, 0x00, sizeof(payload_00)); 5984 5985 /* Read only first io unit */ 5986 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5987 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5988 * payload_read: F000 0000 | 0000 0000 ... 
*/ 5989 memset(payload_read, 0x00, sizeof(payload_read)); 5990 iov[0].iov_base = payload_read; 5991 iov[0].iov_len = 1 * 512; 5992 spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5993 poll_threads(); 5994 5995 CU_ASSERT(g_bserrno == 0); 5996 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5997 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5998 5999 /* Read four io_units starting from offset = 2 6000 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6001 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6002 * payload_read: F0AA 0000 | 0000 0000 ... */ 6003 6004 memset(payload_read, 0x00, sizeof(payload_read)); 6005 iov[0].iov_base = payload_read; 6006 iov[0].iov_len = 4 * 512; 6007 spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); 6008 poll_threads(); 6009 CU_ASSERT(g_bserrno == 0); 6010 6011 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6012 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6013 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 6014 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 6015 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6016 6017 /* Read eight io_units across multiple pages 6018 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6019 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6020 * payload_read: AAAA AAAA | 0000 0000 ... */ 6021 memset(payload_read, 0x00, sizeof(payload_read)); 6022 iov[0].iov_base = payload_read; 6023 iov[0].iov_len = 4 * 512; 6024 iov[1].iov_base = payload_read + 4 * 512; 6025 iov[1].iov_len = 4 * 512; 6026 spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); 6027 poll_threads(); 6028 CU_ASSERT(g_bserrno == 0); 6029 6030 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 6031 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6032 6033 /* Read eight io_units across multiple clusters 6034 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 6035 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6036 * payload_read: FFFF FFFF | 0000 0000 ... */ 6037 memset(payload_read, 0x00, sizeof(payload_read)); 6038 iov[0].iov_base = payload_read; 6039 iov[0].iov_len = 2 * 512; 6040 iov[1].iov_base = payload_read + 2 * 512; 6041 iov[1].iov_len = 2 * 512; 6042 iov[2].iov_base = payload_read + 4 * 512; 6043 iov[2].iov_len = 2 * 512; 6044 iov[3].iov_base = payload_read + 6 * 512; 6045 iov[3].iov_len = 2 * 512; 6046 spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); 6047 poll_threads(); 6048 CU_ASSERT(g_bserrno == 0); 6049 6050 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 6051 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6052 6053 /* Read four io_units from second cluster 6054 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6055 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 6056 * payload_read: 00FF 0000 | 0000 0000 ... 
*/ 6057 memset(payload_read, 0x00, sizeof(payload_read)); 6058 iov[0].iov_base = payload_read; 6059 iov[0].iov_len = 1 * 512; 6060 iov[1].iov_base = payload_read + 1 * 512; 6061 iov[1].iov_len = 3 * 512; 6062 spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); 6063 poll_threads(); 6064 CU_ASSERT(g_bserrno == 0); 6065 6066 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 6067 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 6068 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6069 6070 /* Read second cluster 6071 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6072 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6073 * payload_read: FFFF 0000 | 0000 FF00 ... */ 6074 memset(payload_read, 0x00, sizeof(payload_read)); 6075 iov[0].iov_base = payload_read; 6076 iov[0].iov_len = 1 * 512; 6077 iov[1].iov_base = payload_read + 1 * 512; 6078 iov[1].iov_len = 2 * 512; 6079 iov[2].iov_base = payload_read + 3 * 512; 6080 iov[2].iov_len = 4 * 512; 6081 iov[3].iov_base = payload_read + 7 * 512; 6082 iov[3].iov_len = 25 * 512; 6083 spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); 6084 poll_threads(); 6085 CU_ASSERT(g_bserrno == 0); 6086 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6087 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6088 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6089 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6090 6091 /* Read whole two clusters 6092 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6093 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6094 memset(payload_read, 0x00, sizeof(payload_read)); 6095 iov[0].iov_base = payload_read; 6096 iov[0].iov_len = 1 * 512; 6097 iov[1].iov_base = payload_read + 1 * 512; 6098 iov[1].iov_len = 8 * 512; 6099 iov[2].iov_base = payload_read + 9 * 512; 6100 iov[2].iov_len = 16 * 512; 6101 iov[3].iov_base = payload_read + 25 * 512; 6102 iov[3].iov_len = 39 * 512; 6103 spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); 6104 poll_threads(); 6105 CU_ASSERT(g_bserrno == 0); 6106 6107 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6108 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6109 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6110 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6111 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6112 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6113 6114 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6115 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6116 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6117 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6118 } 6119 6120 static void 6121 blob_io_unit(void) 6122 { 6123 struct spdk_bs_opts bsopts; 6124 struct spdk_blob_opts opts; 6125 struct spdk_blob_store *bs; 6126 struct spdk_bs_dev *dev; 6127 struct spdk_blob *blob, *snapshot, *clone; 6128 spdk_blob_id blobid; 6129 struct spdk_io_channel *channel; 6130 6131 /* Create dev with 512 bytes io unit size */ 6132 6133 spdk_bs_opts_init(&bsopts); 6134 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6135 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), 
"TESTTYPE"); 6136 6137 /* Try to initialize a new blob store with unsupported io_unit */ 6138 dev = init_dev(); 6139 dev->blocklen = 512; 6140 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6141 6142 /* Initialize a new blob store */ 6143 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6144 poll_threads(); 6145 CU_ASSERT(g_bserrno == 0); 6146 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6147 bs = g_bs; 6148 6149 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6150 channel = spdk_bs_alloc_io_channel(bs); 6151 6152 /* Create thick provisioned blob */ 6153 ut_spdk_blob_opts_init(&opts); 6154 opts.thin_provision = false; 6155 opts.num_clusters = 32; 6156 6157 blob = ut_blob_create_and_open(bs, &opts); 6158 blobid = spdk_blob_get_id(blob); 6159 6160 test_io_write(dev, blob, channel); 6161 test_io_read(dev, blob, channel); 6162 test_io_zeroes(dev, blob, channel); 6163 6164 test_iov_write(dev, blob, channel); 6165 test_iov_read(dev, blob, channel); 6166 6167 test_io_unmap(dev, blob, channel); 6168 6169 spdk_blob_close(blob, blob_op_complete, NULL); 6170 poll_threads(); 6171 CU_ASSERT(g_bserrno == 0); 6172 blob = NULL; 6173 g_blob = NULL; 6174 6175 /* Create thin provisioned blob */ 6176 6177 ut_spdk_blob_opts_init(&opts); 6178 opts.thin_provision = true; 6179 opts.num_clusters = 32; 6180 6181 blob = ut_blob_create_and_open(bs, &opts); 6182 blobid = spdk_blob_get_id(blob); 6183 6184 test_io_write(dev, blob, channel); 6185 test_io_read(dev, blob, channel); 6186 6187 test_io_zeroes(dev, blob, channel); 6188 6189 test_iov_write(dev, blob, channel); 6190 test_iov_read(dev, blob, channel); 6191 6192 /* Create snapshot */ 6193 6194 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6195 poll_threads(); 6196 CU_ASSERT(g_bserrno == 0); 6197 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6198 blobid = g_blobid; 6199 6200 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6201 poll_threads(); 6202 CU_ASSERT(g_bserrno == 0); 6203 CU_ASSERT(g_blob != NULL); 6204 snapshot = g_blob; 6205 6206 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6207 poll_threads(); 6208 CU_ASSERT(g_bserrno == 0); 6209 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6210 blobid = g_blobid; 6211 6212 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6213 poll_threads(); 6214 CU_ASSERT(g_bserrno == 0); 6215 CU_ASSERT(g_blob != NULL); 6216 clone = g_blob; 6217 6218 test_io_read(dev, blob, channel); 6219 test_io_read(dev, snapshot, channel); 6220 test_io_read(dev, clone, channel); 6221 6222 test_iov_read(dev, blob, channel); 6223 test_iov_read(dev, snapshot, channel); 6224 test_iov_read(dev, clone, channel); 6225 6226 /* Inflate clone */ 6227 6228 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6229 poll_threads(); 6230 6231 CU_ASSERT(g_bserrno == 0); 6232 6233 test_io_read(dev, clone, channel); 6234 6235 test_io_unmap(dev, clone, channel); 6236 6237 test_iov_write(dev, clone, channel); 6238 test_iov_read(dev, clone, channel); 6239 6240 spdk_blob_close(blob, blob_op_complete, NULL); 6241 spdk_blob_close(snapshot, blob_op_complete, NULL); 6242 spdk_blob_close(clone, blob_op_complete, NULL); 6243 poll_threads(); 6244 CU_ASSERT(g_bserrno == 0); 6245 blob = NULL; 6246 g_blob = NULL; 6247 6248 spdk_bs_free_io_channel(channel); 6249 poll_threads(); 6250 6251 /* Unload the blob store */ 6252 spdk_bs_unload(bs, bs_op_complete, NULL); 6253 poll_threads(); 6254 CU_ASSERT(g_bserrno == 0); 6255 g_bs = NULL; 6256 g_blob = NULL; 6257 g_blobid = 0; 6258 
} 6259 6260 static void 6261 blob_io_unit_compatiblity(void) 6262 { 6263 struct spdk_bs_opts bsopts; 6264 struct spdk_blob_store *bs; 6265 struct spdk_bs_dev *dev; 6266 struct spdk_bs_super_block *super; 6267 6268 /* Create dev with 512 bytes io unit size */ 6269 6270 spdk_bs_opts_init(&bsopts); 6271 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6272 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE"); 6273 6274 /* Try to initialize a new blob store with unsupported io_unit */ 6275 dev = init_dev(); 6276 dev->blocklen = 512; 6277 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6278 6279 /* Initialize a new blob store */ 6280 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6281 poll_threads(); 6282 CU_ASSERT(g_bserrno == 0); 6283 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6284 bs = g_bs; 6285 6286 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6287 6288 /* Unload the blob store */ 6289 spdk_bs_unload(bs, bs_op_complete, NULL); 6290 poll_threads(); 6291 CU_ASSERT(g_bserrno == 0); 6292 6293 /* Modify the super block to behave like an older version. 6294 * Check if the loaded io unit size equals SPDK_BS_PAGE_SIZE */ 6295 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 6296 super->io_unit_size = 0; 6297 super->crc = blob_md_page_calc_crc(super); 6298 6299 dev = init_dev(); 6300 dev->blocklen = 512; 6301 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6302 6303 spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL); 6304 poll_threads(); 6305 CU_ASSERT(g_bserrno == 0); 6306 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6307 bs = g_bs; 6308 6309 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE); 6310 6311 /* Unload the blob store */ 6312 spdk_bs_unload(bs, bs_op_complete, NULL); 6313 poll_threads(); 6314 CU_ASSERT(g_bserrno == 0); 6315 6316 g_bs = NULL; 6317 g_blob = NULL; 6318 g_blobid = 0; 6319 } 6320 6321 static void 6322 blob_simultaneous_operations(void) 6323 { 6324 struct spdk_blob_store *bs = g_bs; 6325 struct spdk_blob_opts opts; 6326 struct spdk_blob *blob, *snapshot; 6327 spdk_blob_id blobid, snapshotid; 6328 struct spdk_io_channel *channel; 6329 6330 channel = spdk_bs_alloc_io_channel(bs); 6331 SPDK_CU_ASSERT_FATAL(channel != NULL); 6332 6333 ut_spdk_blob_opts_init(&opts); 6334 opts.num_clusters = 10; 6335 6336 blob = ut_blob_create_and_open(bs, &opts); 6337 blobid = spdk_blob_get_id(blob); 6338 6339 /* Create a snapshot and try to remove the blob at the same time: 6340 * - snapshot should be created successfully 6341 * - delete operation should fail with -EBUSY */ 6342 CU_ASSERT(blob->locked_operation_in_progress == false); 6343 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6344 CU_ASSERT(blob->locked_operation_in_progress == true); 6345 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6346 CU_ASSERT(blob->locked_operation_in_progress == true); 6347 /* Deletion failure */ 6348 CU_ASSERT(g_bserrno == -EBUSY); 6349 poll_threads(); 6350 CU_ASSERT(blob->locked_operation_in_progress == false); 6351 /* Snapshot creation success */ 6352 CU_ASSERT(g_bserrno == 0); 6353 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6354 6355 snapshotid = g_blobid; 6356 6357 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 6358 poll_threads(); 6359 CU_ASSERT(g_bserrno == 0); 6360 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6361 snapshot = g_blob; 6362 6363 /* Inflate the blob and try to remove it at the same time: 6364 * - blob should be inflated successfully 6365 * - delete operation should fail with -EBUSY */ 6366
CU_ASSERT(blob->locked_operation_in_progress == false); 6367 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6368 CU_ASSERT(blob->locked_operation_in_progress == true); 6369 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6370 CU_ASSERT(blob->locked_operation_in_progress == true); 6371 /* Deletion failure */ 6372 CU_ASSERT(g_bserrno == -EBUSY); 6373 poll_threads(); 6374 CU_ASSERT(blob->locked_operation_in_progress == false); 6375 /* Inflation success */ 6376 CU_ASSERT(g_bserrno == 0); 6377 6378 /* Clone the snapshot and try to remove the snapshot at the same time: 6379 * - snapshot should be cloned successfully 6380 * - delete operation should fail with -EBUSY */ 6381 CU_ASSERT(blob->locked_operation_in_progress == false); 6382 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 6383 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 6384 /* Deletion failure */ 6385 CU_ASSERT(g_bserrno == -EBUSY); 6386 poll_threads(); 6387 CU_ASSERT(blob->locked_operation_in_progress == false); 6388 /* Clone created */ 6389 CU_ASSERT(g_bserrno == 0); 6390 6391 /* Resize the blob and try to remove it at the same time: 6392 * - blob should be resized successfully 6393 * - delete operation should fail with -EBUSY */ 6394 CU_ASSERT(blob->locked_operation_in_progress == false); 6395 spdk_blob_resize(blob, 50, blob_op_complete, NULL); 6396 CU_ASSERT(blob->locked_operation_in_progress == true); 6397 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6398 CU_ASSERT(blob->locked_operation_in_progress == true); 6399 /* Deletion failure */ 6400 CU_ASSERT(g_bserrno == -EBUSY); 6401 poll_threads(); 6402 CU_ASSERT(blob->locked_operation_in_progress == false); 6403 /* Blob resized successfully */ 6404 CU_ASSERT(g_bserrno == 0); 6405 6406 /* Issue two consecutive blob syncs; neither should fail. 6407 * Force the syncs to actually occur by marking the blob dirty each time. 6408 * Merely issuing the sync is not enough to complete the operation, 6409 * since disk I/O is required to complete it. */ 6410 g_bserrno = -1; 6411 6412 blob->state = SPDK_BLOB_STATE_DIRTY; 6413 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6414 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6415 6416 blob->state = SPDK_BLOB_STATE_DIRTY; 6417 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6418 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6419 6420 uint32_t completions = 0; 6421 while (completions < 2) { 6422 SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1)); 6423 if (g_bserrno == 0) { 6424 g_bserrno = -1; 6425 completions++; 6426 } 6427 /* g_bserrno should never be anything other than -1 here; 6428 * that would mean one of the syncs failed.
*/ 6429 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6430 } 6431 6432 spdk_bs_free_io_channel(channel); 6433 poll_threads(); 6434 6435 ut_blob_close_and_delete(bs, snapshot); 6436 ut_blob_close_and_delete(bs, blob); 6437 } 6438 6439 static void 6440 blob_persist_test(void) 6441 { 6442 struct spdk_blob_store *bs = g_bs; 6443 struct spdk_blob_opts opts; 6444 struct spdk_blob *blob; 6445 spdk_blob_id blobid; 6446 struct spdk_io_channel *channel; 6447 char *xattr; 6448 size_t xattr_length; 6449 int rc; 6450 uint32_t page_count_clear, page_count_xattr; 6451 uint64_t poller_iterations; 6452 bool run_poller; 6453 6454 channel = spdk_bs_alloc_io_channel(bs); 6455 SPDK_CU_ASSERT_FATAL(channel != NULL); 6456 6457 ut_spdk_blob_opts_init(&opts); 6458 opts.num_clusters = 10; 6459 6460 blob = ut_blob_create_and_open(bs, &opts); 6461 blobid = spdk_blob_get_id(blob); 6462 6463 /* Save the number of md pages used after creation of a blob. 6464 * This should be consistent after removing the xattr. */ 6465 page_count_clear = spdk_bit_array_count_set(bs->used_md_pages); 6466 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6467 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6468 6469 /* Add an xattr with the maximum descriptor length to exceed a single metadata page. */ 6470 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 6471 strlen("large_xattr"); 6472 xattr = calloc(xattr_length, sizeof(char)); 6473 SPDK_CU_ASSERT_FATAL(xattr != NULL); 6474 6475 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6476 SPDK_CU_ASSERT_FATAL(rc == 0); 6477 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6478 poll_threads(); 6479 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6480 6481 /* Save the number of md pages used after adding the large xattr */ 6482 page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages); 6483 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6484 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6485 6486 /* Add the xattr to the blob and sync it. While the sync is occurring, remove the xattr and sync again. 6487 * Interrupt the first sync, increasing the number of poller iterations on each pass until it succeeds. 6488 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */ 6489 poller_iterations = 1; 6490 run_poller = true; 6491 while (run_poller) { 6492 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6493 SPDK_CU_ASSERT_FATAL(rc == 0); 6494 g_bserrno = -1; 6495 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6496 poll_thread_times(0, poller_iterations); 6497 if (g_bserrno == 0) { 6498 /* The poller iteration count was high enough for the first sync to complete. 6499 * Verify that the blob takes up enough md_pages to store the xattr.
*/ 6500 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6501 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6502 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr); 6503 run_poller = false; 6504 } 6505 rc = spdk_blob_remove_xattr(blob, "large_xattr"); 6506 SPDK_CU_ASSERT_FATAL(rc == 0); 6507 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6508 poll_threads(); 6509 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6510 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6511 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6512 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear); 6513 6514 /* Reload bs and re-open blob to verify that xattr was not persisted. */ 6515 spdk_blob_close(blob, blob_op_complete, NULL); 6516 poll_threads(); 6517 CU_ASSERT(g_bserrno == 0); 6518 6519 ut_bs_reload(&bs, NULL); 6520 6521 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6522 poll_threads(); 6523 CU_ASSERT(g_bserrno == 0); 6524 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6525 blob = g_blob; 6526 6527 rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length); 6528 SPDK_CU_ASSERT_FATAL(rc == -ENOENT); 6529 6530 poller_iterations++; 6531 /* Stop at high iteration count to prevent infinite loop. 6532 * This value should be enough for first md sync to complete in any case. */ 6533 SPDK_CU_ASSERT_FATAL(poller_iterations < 50); 6534 } 6535 6536 free(xattr); 6537 6538 ut_blob_close_and_delete(bs, blob); 6539 6540 spdk_bs_free_io_channel(channel); 6541 poll_threads(); 6542 } 6543 6544 static void 6545 suite_bs_setup(void) 6546 { 6547 struct spdk_bs_dev *dev; 6548 6549 dev = init_dev(); 6550 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6551 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 6552 poll_threads(); 6553 CU_ASSERT(g_bserrno == 0); 6554 CU_ASSERT(g_bs != NULL); 6555 } 6556 6557 static void 6558 suite_bs_cleanup(void) 6559 { 6560 spdk_bs_unload(g_bs, bs_op_complete, NULL); 6561 poll_threads(); 6562 CU_ASSERT(g_bserrno == 0); 6563 g_bs = NULL; 6564 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6565 } 6566 6567 static struct spdk_blob * 6568 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts) 6569 { 6570 struct spdk_blob *blob; 6571 struct spdk_blob_opts create_blob_opts; 6572 spdk_blob_id blobid; 6573 6574 if (blob_opts == NULL) { 6575 ut_spdk_blob_opts_init(&create_blob_opts); 6576 blob_opts = &create_blob_opts; 6577 } 6578 6579 spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL); 6580 poll_threads(); 6581 CU_ASSERT(g_bserrno == 0); 6582 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6583 blobid = g_blobid; 6584 g_blobid = -1; 6585 6586 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6587 poll_threads(); 6588 CU_ASSERT(g_bserrno == 0); 6589 CU_ASSERT(g_blob != NULL); 6590 blob = g_blob; 6591 6592 g_blob = NULL; 6593 g_bserrno = -1; 6594 6595 return blob; 6596 } 6597 6598 static void 6599 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob) 6600 { 6601 spdk_blob_id blobid = spdk_blob_get_id(blob); 6602 6603 spdk_blob_close(blob, blob_op_complete, NULL); 6604 poll_threads(); 6605 CU_ASSERT(g_bserrno == 0); 6606 g_blob = NULL; 6607 6608 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6609 poll_threads(); 6610 
CU_ASSERT(g_bserrno == 0); 6611 g_bserrno = -1; 6612 } 6613 6614 static void 6615 suite_blob_setup(void) 6616 { 6617 suite_bs_setup(); 6618 CU_ASSERT(g_bs != NULL); 6619 6620 g_blob = ut_blob_create_and_open(g_bs, NULL); 6621 CU_ASSERT(g_blob != NULL); 6622 } 6623 6624 static void 6625 suite_blob_cleanup(void) 6626 { 6627 ut_blob_close_and_delete(g_bs, g_blob); 6628 CU_ASSERT(g_blob == NULL); 6629 6630 suite_bs_cleanup(); 6631 CU_ASSERT(g_bs == NULL); 6632 } 6633 6634 int main(int argc, char **argv) 6635 { 6636 CU_pSuite suite, suite_bs, suite_blob; 6637 unsigned int num_failures; 6638 6639 CU_set_error_action(CUEA_ABORT); 6640 CU_initialize_registry(); 6641 6642 suite = CU_add_suite("blob", NULL, NULL); 6643 suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL, 6644 suite_bs_setup, suite_bs_cleanup); 6645 suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL, 6646 suite_blob_setup, suite_blob_cleanup); 6647 6648 CU_ADD_TEST(suite, blob_init); 6649 CU_ADD_TEST(suite_bs, blob_open); 6650 CU_ADD_TEST(suite_bs, blob_create); 6651 CU_ADD_TEST(suite_bs, blob_create_fail); 6652 CU_ADD_TEST(suite_bs, blob_create_internal); 6653 CU_ADD_TEST(suite, blob_thin_provision); 6654 CU_ADD_TEST(suite_bs, blob_snapshot); 6655 CU_ADD_TEST(suite_bs, blob_clone); 6656 CU_ADD_TEST(suite_bs, blob_inflate); 6657 CU_ADD_TEST(suite_bs, blob_delete); 6658 CU_ADD_TEST(suite_bs, blob_resize_test); 6659 CU_ADD_TEST(suite, blob_read_only); 6660 CU_ADD_TEST(suite_bs, channel_ops); 6661 CU_ADD_TEST(suite_bs, blob_super); 6662 CU_ADD_TEST(suite_blob, blob_write); 6663 CU_ADD_TEST(suite_blob, blob_read); 6664 CU_ADD_TEST(suite_blob, blob_rw_verify); 6665 CU_ADD_TEST(suite_bs, blob_rw_verify_iov); 6666 CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem); 6667 CU_ADD_TEST(suite_blob, blob_rw_iov_read_only); 6668 CU_ADD_TEST(suite_bs, blob_unmap); 6669 CU_ADD_TEST(suite_bs, blob_iter); 6670 CU_ADD_TEST(suite_blob, blob_xattr); 6671 CU_ADD_TEST(suite, bs_load); 6672 CU_ADD_TEST(suite_bs, bs_load_pending_removal); 6673 CU_ADD_TEST(suite, bs_load_custom_cluster_size); 6674 CU_ADD_TEST(suite_bs, bs_unload); 6675 CU_ADD_TEST(suite, bs_cluster_sz); 6676 CU_ADD_TEST(suite_bs, bs_usable_clusters); 6677 CU_ADD_TEST(suite, bs_resize_md); 6678 CU_ADD_TEST(suite, bs_destroy); 6679 CU_ADD_TEST(suite, bs_type); 6680 CU_ADD_TEST(suite, bs_super_block); 6681 CU_ADD_TEST(suite, blob_serialize_test); 6682 CU_ADD_TEST(suite_bs, blob_crc); 6683 CU_ADD_TEST(suite, super_block_crc); 6684 CU_ADD_TEST(suite_blob, blob_dirty_shutdown); 6685 CU_ADD_TEST(suite_bs, blob_flags); 6686 CU_ADD_TEST(suite_bs, bs_version); 6687 CU_ADD_TEST(suite_bs, blob_set_xattrs_test); 6688 CU_ADD_TEST(suite_bs, blob_thin_prov_alloc); 6689 CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test); 6690 CU_ADD_TEST(suite_bs, blob_thin_prov_rw); 6691 CU_ADD_TEST(suite_bs, blob_thin_prov_rle); 6692 CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov); 6693 CU_ADD_TEST(suite, bs_load_iter_test); 6694 CU_ADD_TEST(suite_bs, blob_snapshot_rw); 6695 CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov); 6696 CU_ADD_TEST(suite, blob_relations); 6697 CU_ADD_TEST(suite, blob_relations2); 6698 CU_ADD_TEST(suite, blobstore_clean_power_failure); 6699 CU_ADD_TEST(suite, blob_delete_snapshot_power_failure); 6700 CU_ADD_TEST(suite, blob_create_snapshot_power_failure); 6701 CU_ADD_TEST(suite_bs, blob_inflate_rw); 6702 CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io); 6703 CU_ADD_TEST(suite_bs, blob_operation_split_rw); 6704 CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov); 6705 
CU_ADD_TEST(suite, blob_io_unit); 6706 CU_ADD_TEST(suite, blob_io_unit_compatiblity); 6707 CU_ADD_TEST(suite_bs, blob_simultaneous_operations); 6708 CU_ADD_TEST(suite_bs, blob_persist_test); 6709 6710 allocate_threads(2); 6711 set_thread(0); 6712 6713 g_dev_buffer = calloc(1, DEV_BUFFER_SIZE); 6714 6715 CU_basic_set_mode(CU_BRM_VERBOSE); 6716 g_use_extent_table = false; 6717 CU_basic_run_tests(); 6718 num_failures = CU_get_number_of_failures(); 6719 g_use_extent_table = true; 6720 CU_basic_run_tests(); 6721 num_failures += CU_get_number_of_failures(); 6722 CU_cleanup_registry(); 6723 6724 free(g_dev_buffer); 6725 6726 free_threads(); 6727 6728 return num_failures; 6729 } 6730
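/*
 * Note on the harness above: CU_basic_run_tests() is invoked twice, first with
 * g_use_extent_table set to false and then with it set to true, so every registered
 * test case is exercised both without and with the extent table metadata format.
 * num_failures accumulates the failure counts from both passes and is returned as
 * the process exit status.
 */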