/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk/blob.h"
#include "spdk/string.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "../bs_dev_common.c"
#include "blob/blobstore.c"
#include "blob/request.c"
#include "blob/zeroes.c"
#include "blob/blob_bs_dev.c"

struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob;
int g_bserrno;
struct spdk_xattr_names *g_names;
int g_done;
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;
bool g_use_extent_table = false;

struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t	version;
	uint32_t	length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
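	/*
	 * With 4-byte uint32_t fields and an 8-byte spdk_blob_id, the named
	 * members of this struct add up to 56 bytes, so reserved[4036] plus
	 * the trailing 4-byte crc pads the structure to exactly 4096 bytes
	 * (0x1000), which the SPDK_STATIC_ASSERT below verifies.
	 */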
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	uint8_t		reserved[4036];
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");

static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
		struct spdk_blob_opts *blob_opts);
static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
static void suite_blob_setup(void);
static void suite_blob_cleanup(void);

static void
_get_xattr_value(void *arg, const char *name,
		 const void **value, size_t *value_len)
{
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == &g_ctx);

	/* Iterate over the number of entries, not the byte size of the array */
	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
		if (!strcmp(name, g_xattr_names[i])) {
			*value_len = strlen(g_xattr_values[i]);
			*value = g_xattr_values[i];
			break;
		}
	}
}

static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value_len = 0;
	*value = NULL;
}

static int
_get_snapshots_count(struct spdk_blob_store *bs)
{
	struct spdk_blob_list *snapshot = NULL;
	int count = 0;

	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
		count += 1;
	}

	return count;
}

static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts);
	opts->use_extent_table = g_use_extent_table;
}

static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}

static void
blob_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}

static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	bs_free(*bs);

	dev = init_dev();
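	/*
	 * Because spdk_bs_unload() was skipped above, the super block was
	 * never marked clean, so the load below is expected to go through the
	 * blobstore recovery path and rebuild the used md page / cluster /
	 * blobid masks from on-disk metadata.
	 */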
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}

static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again. It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference. This
	 * should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again. It should succeed. This tests the case
	 * where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}

static void
blob_create_fail(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);

	/* NULL callback */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = NULL;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = &g_ctx;

	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
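
	/*
	 * The failed create above must not leak metadata: the id it would have
	 * used (taken from the first clear bit in used_md_pages) should not be
	 * openable, and the reload below confirms nothing was persisted.
	 */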
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	SPDK_CU_ASSERT_FATAL(g_blob == NULL);

	ut_bs_reload(&bs, NULL);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;
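
	/*
	 * The bstype string set below is stored in the super block, which is
	 * why the same bs_opts are handed to ut_bs_dirty_load() later; a load
	 * with a mismatching bstype is expected to be rejected by the blobstore.
	 */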
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts);
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly. This makes sure that when we load again
	 * and try to recover a valid used_cluster map, that blobstore will
	 * ignore clusters with index 0 since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob *snapshot, *snapshot2;
	struct spdk_blob_bs_dev *blob_bs_dev;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts xattrs;
	spdk_blob_id blobid;
	spdk_blob_id snapshotid;
	spdk_blob_id snapshotid2;
	const void *value;
	size_t value_len;
	int rc;
	spdk_blob_id ids[2];
	size_t count;

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot from blob */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));

	/* Try to create snapshot from clone with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;
	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
	snapshotid2 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);

	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
	CU_ASSERT(snapshot->back_bs_dev == NULL);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);

	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot2);

	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshotid2);

	/* Try to create snapshot from snapshot */
	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);

	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
	ut_blob_close_and_delete(bs, blob);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
	ut_blob_close_and_delete(bs, snapshot2);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
}

static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));
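
	/*
	 * While a snapshot is being created the blobstore raises the blob's
	 * frozen_refcnt; as long as it is non-zero, new I/O submitted to the
	 * blob is parked on the channel's queued_io list instead of reaching
	 * the disk, which is what the checks below rely on.
	 */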
	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that. */
	poll_thread_times(0, 3);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk */
	CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0] * SPDK_BS_PAGE_SIZE],
			 SPDK_BS_PAGE_SIZE) == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;
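
	/*
	 * Unlike the internal xattrs exercised in blob_create_internal(), the
	 * xattrs passed to spdk_bs_create_clone() are regular ones, so they are
	 * expected to be readable through spdk_blob_get_xattr_value() below.
	 */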
	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from a blob that is not read-only */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}

static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflating a thin blob with no parent should make it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
	}

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_inflate(void)
{
	_blob_inflate(false);
	_blob_inflate(true);
}

static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	dev = init_dev();
	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
channel_ops(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster. This allows
	 * us to make sure that the readv/write code correctly accounts for I/O
	 * that cross cluster boundaries. Start by asserting that the allocated
	 * clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
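	/*
	 * With the 1 MiB cluster size used by these tests and 4 KiB pages, a
	 * cluster holds 256 pages. A 10-page I/O at page offset 250 therefore
	 * touches pages 250-255 of the first cluster (6 pages) and pages
	 * 256-259, i.e. the first 4 pages of the second cluster.
	 */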
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	buf = calloc(1, 256 * 4096);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static uint32_t
bs_channel_get_req_count(struct spdk_io_channel *_channel)
{
	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
	struct spdk_bs_request_set *set;
	uint32_t count = 0;

	TAILQ_FOREACH(set, &channel->reqs, link) {
		count++;
	}

	return count;
}

static void
blob_rw_verify_iov_nomem(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_write[10 * 4096];
	struct iovec iov_write[3];
	uint32_t req_count;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	MOCK_SET(calloc, NULL);
	req_count = bs_channel_get_req_count(channel);
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
	MOCK_CLEAR(calloc);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[4096];
	uint8_t payload_write[4096];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev fails if the read_only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if the data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       uint8_t *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, read one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint8_t *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, write one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
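	/*
	 * The payload spans five clusters on purpose: a single
	 * spdk_blob_io_write()/read() of that size has to be split into
	 * per-cluster requests internally, while the _no_split helpers above
	 * provide a page-at-a-time reference result to compare against.
	 */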
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_operation_split_rw_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	struct iovec iov_read[2];
	struct iovec iov_write[2];

	uint64_t i, j;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	for (i = 0; i < pages_per_payload; i++) {
		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
			uint64_t *tmp;

			tmp = (uint64_t *)payload_pattern;
			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
			*tmp = i + 1;
		}
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroes payload */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 3;
	iov_read[1].iov_base = payload_read + cluster_size * 3;
	iov_read[1].iov_len = cluster_size * 2;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* First of iovs fills whole blob except last page and second of iovs writes last page
	 * with a pattern. */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = payload_size - page_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = page_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 2;
	iov_read[1].iov_base = payload_read + cluster_size * 2;
	iov_read[1].iov_len = cluster_size * 3;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* First of iovs fills only first page and second of iovs writes whole blob except
	 * first page with a pattern. */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = page_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = payload_size - page_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 4;
	iov_read[1].iov_base = payload_read + cluster_size * 4;
	iov_read[1].iov_len = cluster_size;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size;
	iov_read[1].iov_base = payload_read + cluster_size;
	iov_read[1].iov_len = cluster_size * 4;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
*/ 1799 iov_write[0].iov_base = payload_read; 1800 iov_write[0].iov_len = cluster_size * 2; 1801 iov_write[1].iov_base = payload_read + cluster_size * 2; 1802 iov_write[1].iov_len = cluster_size * 3; 1803 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1804 poll_threads(); 1805 CU_ASSERT(g_bserrno == 0); 1806 1807 memset(payload_read, 0xFF, payload_size); 1808 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1809 poll_threads(); 1810 CU_ASSERT(g_bserrno == 0); 1811 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1812 1813 spdk_bs_free_io_channel(channel); 1814 poll_threads(); 1815 1816 g_blob = NULL; 1817 g_blobid = 0; 1818 1819 free(payload_read); 1820 free(payload_write); 1821 free(payload_pattern); 1822 1823 ut_blob_close_and_delete(bs, blob); 1824 } 1825 1826 static void 1827 blob_unmap(void) 1828 { 1829 struct spdk_blob_store *bs = g_bs; 1830 struct spdk_blob *blob; 1831 struct spdk_io_channel *channel; 1832 struct spdk_blob_opts opts; 1833 uint8_t payload[4096]; 1834 int i; 1835 1836 channel = spdk_bs_alloc_io_channel(bs); 1837 CU_ASSERT(channel != NULL); 1838 1839 ut_spdk_blob_opts_init(&opts); 1840 opts.num_clusters = 10; 1841 1842 blob = ut_blob_create_and_open(bs, &opts); 1843 1844 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1845 poll_threads(); 1846 CU_ASSERT(g_bserrno == 0); 1847 1848 memset(payload, 0, sizeof(payload)); 1849 payload[0] = 0xFF; 1850 1851 /* 1852 * Set first byte of every cluster to 0xFF. 1853 * First cluster on device is reserved so let's start from cluster number 1 1854 */ 1855 for (i = 1; i < 11; i++) { 1856 g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF; 1857 } 1858 1859 /* Confirm writes */ 1860 for (i = 0; i < 10; i++) { 1861 payload[0] = 0; 1862 spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1, 1863 blob_op_complete, NULL); 1864 poll_threads(); 1865 CU_ASSERT(g_bserrno == 0); 1866 CU_ASSERT(payload[0] == 0xFF); 1867 } 1868 1869 /* Mark some clusters as unallocated */ 1870 blob->active.clusters[1] = 0; 1871 blob->active.clusters[2] = 0; 1872 blob->active.clusters[3] = 0; 1873 blob->active.clusters[6] = 0; 1874 blob->active.clusters[8] = 0; 1875 1876 /* Unmap clusters by resizing to 0 */ 1877 spdk_blob_resize(blob, 0, blob_op_complete, NULL); 1878 poll_threads(); 1879 CU_ASSERT(g_bserrno == 0); 1880 1881 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1882 poll_threads(); 1883 CU_ASSERT(g_bserrno == 0); 1884 1885 /* Confirm that only 'allocated' clusters were unmapped */ 1886 for (i = 1; i < 11; i++) { 1887 switch (i) { 1888 case 2: 1889 case 3: 1890 case 4: 1891 case 7: 1892 case 9: 1893 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF); 1894 break; 1895 default: 1896 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0); 1897 break; 1898 } 1899 } 1900 1901 spdk_bs_free_io_channel(channel); 1902 poll_threads(); 1903 1904 ut_blob_close_and_delete(bs, blob); 1905 } 1906 1907 static void 1908 blob_iter(void) 1909 { 1910 struct spdk_blob_store *bs = g_bs; 1911 struct spdk_blob *blob; 1912 spdk_blob_id blobid; 1913 struct spdk_blob_opts blob_opts; 1914 1915 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1916 poll_threads(); 1917 CU_ASSERT(g_blob == NULL); 1918 CU_ASSERT(g_bserrno == -ENOENT); 1919 1920 ut_spdk_blob_opts_init(&blob_opts); 1921 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 1922 poll_threads(); 1923 CU_ASSERT(g_bserrno == 0); 
1924 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1925 blobid = g_blobid; 1926 1927 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1928 poll_threads(); 1929 CU_ASSERT(g_blob != NULL); 1930 CU_ASSERT(g_bserrno == 0); 1931 blob = g_blob; 1932 CU_ASSERT(spdk_blob_get_id(blob) == blobid); 1933 1934 spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL); 1935 poll_threads(); 1936 CU_ASSERT(g_blob == NULL); 1937 CU_ASSERT(g_bserrno == -ENOENT); 1938 } 1939 1940 static void 1941 blob_xattr(void) 1942 { 1943 struct spdk_blob_store *bs = g_bs; 1944 struct spdk_blob *blob = g_blob; 1945 spdk_blob_id blobid = spdk_blob_get_id(blob); 1946 uint64_t length; 1947 int rc; 1948 const char *name1, *name2; 1949 const void *value; 1950 size_t value_len; 1951 struct spdk_xattr_names *names; 1952 1953 /* Test that set_xattr fails if md_ro flag is set. */ 1954 blob->md_ro = true; 1955 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 1956 CU_ASSERT(rc == -EPERM); 1957 1958 blob->md_ro = false; 1959 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 1960 CU_ASSERT(rc == 0); 1961 1962 length = 2345; 1963 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 1964 CU_ASSERT(rc == 0); 1965 1966 /* Overwrite "length" xattr. */ 1967 length = 3456; 1968 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 1969 CU_ASSERT(rc == 0); 1970 1971 /* get_xattr should still work even if md_ro flag is set. */ 1972 value = NULL; 1973 blob->md_ro = true; 1974 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 1975 CU_ASSERT(rc == 0); 1976 SPDK_CU_ASSERT_FATAL(value != NULL); 1977 CU_ASSERT(*(uint64_t *)value == length); 1978 CU_ASSERT(value_len == 8); 1979 blob->md_ro = false; 1980 1981 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 1982 CU_ASSERT(rc == -ENOENT); 1983 1984 names = NULL; 1985 rc = spdk_blob_get_xattr_names(blob, &names); 1986 CU_ASSERT(rc == 0); 1987 SPDK_CU_ASSERT_FATAL(names != NULL); 1988 CU_ASSERT(spdk_xattr_names_get_count(names) == 2); 1989 name1 = spdk_xattr_names_get_name(names, 0); 1990 SPDK_CU_ASSERT_FATAL(name1 != NULL); 1991 CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length")); 1992 name2 = spdk_xattr_names_get_name(names, 1); 1993 SPDK_CU_ASSERT_FATAL(name2 != NULL); 1994 CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length")); 1995 CU_ASSERT(strcmp(name1, name2)); 1996 spdk_xattr_names_free(names); 1997 1998 /* Confirm that remove_xattr fails if md_ro is set to true. 
 */
	blob->md_ro = true;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == 0);

	rc = spdk_blob_remove_xattr(blob, "foobar");
	CU_ASSERT(rc == -ENOENT);

	/* Set internal xattr */
	length = 7898;
	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
	CU_ASSERT(rc == 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);
	/* Try to get public xattr with same name */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
	CU_ASSERT(rc != 0);
	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
		  SPDK_BLOB_INTERNAL_XATTR);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();

	/* Check if xattrs are persisted */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);

	/* Try to get internal xattr through public call */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = blob_remove_xattr(blob, "internal", true);
	CU_ASSERT(rc == 0);

	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
}

static void
bs_load(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	spdk_blob_id blobid;
	struct spdk_blob *blob;
	struct spdk_bs_super_block *super_block;
	uint64_t length;
	int rc;
	const void *value;
	size_t value_len;
	struct spdk_bs_opts opts;
	struct spdk_blob_opts blob_opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Try to open a blobid that does not exist */
	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	/* Try again to open valid blob but without the upper bit set */
	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Load should fail for device with an unsupported blocklen */
	dev = init_dev();
	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_md_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts);
	opts.max_md_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_channel_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts);
	opts.max_channel_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Verify that blobstore is marked dirty after first metadata sync */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	CU_ASSERT(super_block->clean == 1);

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);

	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Load should fail: bdev size < saved size */
	dev = init_dev();
	dev->blockcnt /= 2;

	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == -EILSEQ);

	/* Load should succeed: bdev size > saved size */
	dev = init_dev();
	dev->blockcnt *= 4;

	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
2216 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2217 bs = g_bs; 2218 2219 CU_ASSERT(g_bserrno == 0); 2220 spdk_bs_unload(bs, bs_op_complete, NULL); 2221 poll_threads(); 2222 2223 2224 /* Test compatibility mode */ 2225 2226 dev = init_dev(); 2227 super_block->size = 0; 2228 super_block->crc = blob_md_page_calc_crc(super_block); 2229 2230 spdk_bs_opts_init(&opts); 2231 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2232 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2233 poll_threads(); 2234 CU_ASSERT(g_bserrno == 0); 2235 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2236 bs = g_bs; 2237 2238 /* Create a blob */ 2239 ut_spdk_blob_opts_init(&blob_opts); 2240 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2241 poll_threads(); 2242 CU_ASSERT(g_bserrno == 0); 2243 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2244 2245 /* Blobstore should update number of blocks in super_block */ 2246 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2247 CU_ASSERT(super_block->clean == 0); 2248 2249 spdk_bs_unload(bs, bs_op_complete, NULL); 2250 poll_threads(); 2251 CU_ASSERT(g_bserrno == 0); 2252 CU_ASSERT(super_block->clean == 1); 2253 g_bs = NULL; 2254 2255 } 2256 2257 static void 2258 bs_load_pending_removal(void) 2259 { 2260 struct spdk_blob_store *bs = g_bs; 2261 struct spdk_blob_opts opts; 2262 struct spdk_blob *blob, *snapshot; 2263 spdk_blob_id blobid, snapshotid; 2264 const void *value; 2265 size_t value_len; 2266 int rc; 2267 2268 /* Create blob */ 2269 ut_spdk_blob_opts_init(&opts); 2270 opts.num_clusters = 10; 2271 2272 blob = ut_blob_create_and_open(bs, &opts); 2273 blobid = spdk_blob_get_id(blob); 2274 2275 /* Create snapshot */ 2276 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 2277 poll_threads(); 2278 CU_ASSERT(g_bserrno == 0); 2279 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2280 snapshotid = g_blobid; 2281 2282 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2283 poll_threads(); 2284 CU_ASSERT(g_bserrno == 0); 2285 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2286 snapshot = g_blob; 2287 2288 /* Set SNAPSHOT_PENDING_REMOVAL xattr */ 2289 snapshot->md_ro = false; 2290 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2291 CU_ASSERT(rc == 0); 2292 snapshot->md_ro = true; 2293 2294 spdk_blob_close(snapshot, blob_op_complete, NULL); 2295 poll_threads(); 2296 CU_ASSERT(g_bserrno == 0); 2297 2298 spdk_blob_close(blob, blob_op_complete, NULL); 2299 poll_threads(); 2300 CU_ASSERT(g_bserrno == 0); 2301 2302 /* Reload blobstore */ 2303 ut_bs_reload(&bs, NULL); 2304 2305 /* Snapshot should not be removed as blob is still pointing to it */ 2306 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2307 poll_threads(); 2308 CU_ASSERT(g_bserrno == 0); 2309 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2310 snapshot = g_blob; 2311 2312 /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ 2313 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 2314 CU_ASSERT(rc != 0); 2315 2316 /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ 2317 snapshot->md_ro = false; 2318 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2319 CU_ASSERT(rc == 0); 2320 snapshot->md_ro = true; 2321 2322 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2323 poll_threads(); 2324 CU_ASSERT(g_bserrno == 0); 2325 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2326 blob = g_blob; 2327 2328 
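	/* Extra sanity check (not required by the scenario itself): the handle opened
	 * above refers to the original blob, not to the snapshot. */
	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
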
/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ 2329 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 2330 2331 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2332 poll_threads(); 2333 CU_ASSERT(g_bserrno == 0); 2334 2335 spdk_blob_close(snapshot, blob_op_complete, NULL); 2336 poll_threads(); 2337 CU_ASSERT(g_bserrno == 0); 2338 2339 spdk_blob_close(blob, blob_op_complete, NULL); 2340 poll_threads(); 2341 CU_ASSERT(g_bserrno == 0); 2342 2343 /* Reload blobstore */ 2344 ut_bs_reload(&bs, NULL); 2345 2346 /* Snapshot should be removed as blob is not pointing to it anymore */ 2347 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2348 poll_threads(); 2349 CU_ASSERT(g_bserrno != 0); 2350 } 2351 2352 static void 2353 bs_load_custom_cluster_size(void) 2354 { 2355 struct spdk_blob_store *bs; 2356 struct spdk_bs_dev *dev; 2357 struct spdk_bs_super_block *super_block; 2358 struct spdk_bs_opts opts; 2359 uint32_t custom_cluster_size = 4194304; /* 4MiB */ 2360 uint32_t cluster_sz; 2361 uint64_t total_clusters; 2362 2363 dev = init_dev(); 2364 spdk_bs_opts_init(&opts); 2365 opts.cluster_sz = custom_cluster_size; 2366 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2367 2368 /* Initialize a new blob store */ 2369 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2370 poll_threads(); 2371 CU_ASSERT(g_bserrno == 0); 2372 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2373 bs = g_bs; 2374 cluster_sz = bs->cluster_sz; 2375 total_clusters = bs->total_clusters; 2376 2377 /* Unload the blob store */ 2378 spdk_bs_unload(bs, bs_op_complete, NULL); 2379 poll_threads(); 2380 CU_ASSERT(g_bserrno == 0); 2381 g_bs = NULL; 2382 g_blob = NULL; 2383 g_blobid = 0; 2384 2385 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2386 CU_ASSERT(super_block->clean == 1); 2387 2388 /* Load an existing blob store */ 2389 dev = init_dev(); 2390 spdk_bs_opts_init(&opts); 2391 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2392 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2393 poll_threads(); 2394 CU_ASSERT(g_bserrno == 0); 2395 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2396 bs = g_bs; 2397 /* Compare cluster size and number to one after initialization */ 2398 CU_ASSERT(cluster_sz == bs->cluster_sz); 2399 CU_ASSERT(total_clusters == bs->total_clusters); 2400 2401 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2402 CU_ASSERT(super_block->clean == 1); 2403 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2404 2405 spdk_bs_unload(bs, bs_op_complete, NULL); 2406 poll_threads(); 2407 CU_ASSERT(g_bserrno == 0); 2408 CU_ASSERT(super_block->clean == 1); 2409 g_bs = NULL; 2410 } 2411 2412 static void 2413 bs_type(void) 2414 { 2415 struct spdk_blob_store *bs; 2416 struct spdk_bs_dev *dev; 2417 struct spdk_bs_opts opts; 2418 2419 dev = init_dev(); 2420 spdk_bs_opts_init(&opts); 2421 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2422 2423 /* Initialize a new blob store */ 2424 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2425 poll_threads(); 2426 CU_ASSERT(g_bserrno == 0); 2427 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2428 bs = g_bs; 2429 2430 /* Unload the blob store */ 2431 spdk_bs_unload(bs, bs_op_complete, NULL); 2432 poll_threads(); 2433 CU_ASSERT(g_bserrno == 0); 2434 g_bs = NULL; 2435 g_blob = NULL; 2436 g_blobid = 0; 2437 2438 /* Load non existing blobstore type */ 2439 dev = init_dev(); 2440 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2441 
spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2442 poll_threads(); 2443 CU_ASSERT(g_bserrno != 0); 2444 2445 /* Load with empty blobstore type */ 2446 dev = init_dev(); 2447 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2448 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2449 poll_threads(); 2450 CU_ASSERT(g_bserrno == 0); 2451 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2452 bs = g_bs; 2453 2454 spdk_bs_unload(bs, bs_op_complete, NULL); 2455 poll_threads(); 2456 CU_ASSERT(g_bserrno == 0); 2457 g_bs = NULL; 2458 2459 /* Initialize a new blob store with empty bstype */ 2460 dev = init_dev(); 2461 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2462 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2463 poll_threads(); 2464 CU_ASSERT(g_bserrno == 0); 2465 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2466 bs = g_bs; 2467 2468 spdk_bs_unload(bs, bs_op_complete, NULL); 2469 poll_threads(); 2470 CU_ASSERT(g_bserrno == 0); 2471 g_bs = NULL; 2472 2473 /* Load non existing blobstore type */ 2474 dev = init_dev(); 2475 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2476 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2477 poll_threads(); 2478 CU_ASSERT(g_bserrno != 0); 2479 2480 /* Load with empty blobstore type */ 2481 dev = init_dev(); 2482 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2483 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2484 poll_threads(); 2485 CU_ASSERT(g_bserrno == 0); 2486 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2487 bs = g_bs; 2488 2489 spdk_bs_unload(bs, bs_op_complete, NULL); 2490 poll_threads(); 2491 CU_ASSERT(g_bserrno == 0); 2492 g_bs = NULL; 2493 } 2494 2495 static void 2496 bs_super_block(void) 2497 { 2498 struct spdk_blob_store *bs; 2499 struct spdk_bs_dev *dev; 2500 struct spdk_bs_super_block *super_block; 2501 struct spdk_bs_opts opts; 2502 struct spdk_bs_super_block_ver1 super_block_v1; 2503 2504 dev = init_dev(); 2505 spdk_bs_opts_init(&opts); 2506 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2507 2508 /* Initialize a new blob store */ 2509 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2510 poll_threads(); 2511 CU_ASSERT(g_bserrno == 0); 2512 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2513 bs = g_bs; 2514 2515 /* Unload the blob store */ 2516 spdk_bs_unload(bs, bs_op_complete, NULL); 2517 poll_threads(); 2518 CU_ASSERT(g_bserrno == 0); 2519 g_bs = NULL; 2520 g_blob = NULL; 2521 g_blobid = 0; 2522 2523 /* Load an existing blob store with version newer than supported */ 2524 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2525 super_block->version++; 2526 2527 dev = init_dev(); 2528 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2529 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2530 poll_threads(); 2531 CU_ASSERT(g_bserrno != 0); 2532 2533 /* Create a new blob store with super block version 1 */ 2534 dev = init_dev(); 2535 super_block_v1.version = 1; 2536 memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); 2537 super_block_v1.length = 0x1000; 2538 super_block_v1.clean = 1; 2539 super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; 2540 super_block_v1.cluster_size = 0x100000; 2541 super_block_v1.used_page_mask_start = 0x01; 2542 super_block_v1.used_page_mask_len = 0x01; 2543 super_block_v1.used_cluster_mask_start = 0x02; 2544 super_block_v1.used_cluster_mask_len = 0x01; 2545 super_block_v1.md_start = 0x03; 2546 super_block_v1.md_len = 0x40; 2547 memset(super_block_v1.reserved, 
0, 4036); 2548 super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1); 2549 memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); 2550 2551 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2552 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2553 poll_threads(); 2554 CU_ASSERT(g_bserrno == 0); 2555 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2556 bs = g_bs; 2557 2558 spdk_bs_unload(bs, bs_op_complete, NULL); 2559 poll_threads(); 2560 CU_ASSERT(g_bserrno == 0); 2561 g_bs = NULL; 2562 } 2563 2564 /* 2565 * Create a blobstore and then unload it. 2566 */ 2567 static void 2568 bs_unload(void) 2569 { 2570 struct spdk_blob_store *bs = g_bs; 2571 struct spdk_blob *blob; 2572 2573 /* Create a blob and open it. */ 2574 blob = ut_blob_create_and_open(bs, NULL); 2575 2576 /* Try to unload blobstore, should fail with open blob */ 2577 g_bserrno = -1; 2578 spdk_bs_unload(bs, bs_op_complete, NULL); 2579 poll_threads(); 2580 CU_ASSERT(g_bserrno == -EBUSY); 2581 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2582 2583 /* Close the blob, then successfully unload blobstore */ 2584 g_bserrno = -1; 2585 spdk_blob_close(blob, blob_op_complete, NULL); 2586 poll_threads(); 2587 CU_ASSERT(g_bserrno == 0); 2588 } 2589 2590 /* 2591 * Create a blobstore with a cluster size different than the default, and ensure it is 2592 * persisted. 2593 */ 2594 static void 2595 bs_cluster_sz(void) 2596 { 2597 struct spdk_blob_store *bs; 2598 struct spdk_bs_dev *dev; 2599 struct spdk_bs_opts opts; 2600 uint32_t cluster_sz; 2601 2602 /* Set cluster size to zero */ 2603 dev = init_dev(); 2604 spdk_bs_opts_init(&opts); 2605 opts.cluster_sz = 0; 2606 2607 /* Initialize a new blob store */ 2608 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2609 poll_threads(); 2610 CU_ASSERT(g_bserrno == -EINVAL); 2611 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2612 2613 /* 2614 * Set cluster size to blobstore page size, 2615 * to work it is required to be at least twice the blobstore page size. 2616 */ 2617 dev = init_dev(); 2618 spdk_bs_opts_init(&opts); 2619 opts.cluster_sz = SPDK_BS_PAGE_SIZE; 2620 2621 /* Initialize a new blob store */ 2622 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2623 poll_threads(); 2624 CU_ASSERT(g_bserrno == -ENOMEM); 2625 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2626 2627 /* 2628 * Set cluster size to lower than page size, 2629 * to work it is required to be at least twice the blobstore page size. 
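 * The assertion below expects -EINVAL here, in contrast to the previous case
 * (cluster size equal to the page size), which fails with -ENOMEM instead.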
2630 */ 2631 dev = init_dev(); 2632 spdk_bs_opts_init(&opts); 2633 opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; 2634 2635 /* Initialize a new blob store */ 2636 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2637 poll_threads(); 2638 CU_ASSERT(g_bserrno == -EINVAL); 2639 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2640 2641 /* Set cluster size to twice the default */ 2642 dev = init_dev(); 2643 spdk_bs_opts_init(&opts); 2644 opts.cluster_sz *= 2; 2645 cluster_sz = opts.cluster_sz; 2646 2647 /* Initialize a new blob store */ 2648 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2649 poll_threads(); 2650 CU_ASSERT(g_bserrno == 0); 2651 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2652 bs = g_bs; 2653 2654 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2655 2656 ut_bs_reload(&bs, &opts); 2657 2658 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2659 2660 spdk_bs_unload(bs, bs_op_complete, NULL); 2661 poll_threads(); 2662 CU_ASSERT(g_bserrno == 0); 2663 g_bs = NULL; 2664 } 2665 2666 /* 2667 * Create a blobstore, reload it and ensure total usable cluster count 2668 * stays the same. 2669 */ 2670 static void 2671 bs_usable_clusters(void) 2672 { 2673 struct spdk_blob_store *bs = g_bs; 2674 struct spdk_blob *blob; 2675 uint32_t clusters; 2676 int i; 2677 2678 2679 clusters = spdk_bs_total_data_cluster_count(bs); 2680 2681 ut_bs_reload(&bs, NULL); 2682 2683 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2684 2685 /* Create and resize blobs to make sure that useable cluster count won't change */ 2686 for (i = 0; i < 4; i++) { 2687 g_bserrno = -1; 2688 g_blobid = SPDK_BLOBID_INVALID; 2689 blob = ut_blob_create_and_open(bs, NULL); 2690 2691 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2692 poll_threads(); 2693 CU_ASSERT(g_bserrno == 0); 2694 2695 g_bserrno = -1; 2696 spdk_blob_close(blob, blob_op_complete, NULL); 2697 poll_threads(); 2698 CU_ASSERT(g_bserrno == 0); 2699 2700 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2701 } 2702 2703 /* Reload the blob store to make sure that nothing changed */ 2704 ut_bs_reload(&bs, NULL); 2705 2706 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2707 } 2708 2709 /* 2710 * Test resizing of the metadata blob. This requires creating enough blobs 2711 * so that one cluster is not enough to fit the metadata for those blobs. 2712 * To induce this condition to happen more quickly, we reduce the cluster 2713 * size to 16KB, which means only 4 4KB blob metadata pages can fit. 
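 * Concretely: cluster_sz = CLUSTER_PAGE_COUNT * 4096 = 16 KiB and NUM_BLOBS = 16, so
 * with at least one 4 KiB metadata page per blob the metadata region has to grow
 * beyond its initial cluster.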
2714 */ 2715 static void 2716 bs_resize_md(void) 2717 { 2718 struct spdk_blob_store *bs; 2719 const int CLUSTER_PAGE_COUNT = 4; 2720 const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; 2721 struct spdk_bs_dev *dev; 2722 struct spdk_bs_opts opts; 2723 struct spdk_blob *blob; 2724 struct spdk_blob_opts blob_opts; 2725 uint32_t cluster_sz; 2726 spdk_blob_id blobids[NUM_BLOBS]; 2727 int i; 2728 2729 2730 dev = init_dev(); 2731 spdk_bs_opts_init(&opts); 2732 opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; 2733 cluster_sz = opts.cluster_sz; 2734 2735 /* Initialize a new blob store */ 2736 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2737 poll_threads(); 2738 CU_ASSERT(g_bserrno == 0); 2739 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2740 bs = g_bs; 2741 2742 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2743 2744 ut_spdk_blob_opts_init(&blob_opts); 2745 2746 for (i = 0; i < NUM_BLOBS; i++) { 2747 g_bserrno = -1; 2748 g_blobid = SPDK_BLOBID_INVALID; 2749 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2750 poll_threads(); 2751 CU_ASSERT(g_bserrno == 0); 2752 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2753 blobids[i] = g_blobid; 2754 } 2755 2756 ut_bs_reload(&bs, &opts); 2757 2758 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2759 2760 for (i = 0; i < NUM_BLOBS; i++) { 2761 g_bserrno = -1; 2762 g_blob = NULL; 2763 spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); 2764 poll_threads(); 2765 CU_ASSERT(g_bserrno == 0); 2766 CU_ASSERT(g_blob != NULL); 2767 blob = g_blob; 2768 g_bserrno = -1; 2769 spdk_blob_close(blob, blob_op_complete, NULL); 2770 poll_threads(); 2771 CU_ASSERT(g_bserrno == 0); 2772 } 2773 2774 spdk_bs_unload(bs, bs_op_complete, NULL); 2775 poll_threads(); 2776 CU_ASSERT(g_bserrno == 0); 2777 g_bs = NULL; 2778 } 2779 2780 static void 2781 bs_destroy(void) 2782 { 2783 struct spdk_blob_store *bs; 2784 struct spdk_bs_dev *dev; 2785 2786 /* Initialize a new blob store */ 2787 dev = init_dev(); 2788 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2789 poll_threads(); 2790 CU_ASSERT(g_bserrno == 0); 2791 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2792 bs = g_bs; 2793 2794 /* Destroy the blob store */ 2795 g_bserrno = -1; 2796 spdk_bs_destroy(bs, bs_op_complete, NULL); 2797 poll_threads(); 2798 CU_ASSERT(g_bserrno == 0); 2799 2800 /* Loading an non-existent blob store should fail. 
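 * (spdk_bs_destroy() above is expected to have wiped the on-disk super block, so the
 * load attempted below must report an error.)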
*/ 2801 g_bs = NULL; 2802 dev = init_dev(); 2803 2804 g_bserrno = 0; 2805 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2806 poll_threads(); 2807 CU_ASSERT(g_bserrno != 0); 2808 } 2809 2810 /* Try to hit all of the corner cases associated with serializing 2811 * a blob to disk 2812 */ 2813 static void 2814 blob_serialize_test(void) 2815 { 2816 struct spdk_bs_dev *dev; 2817 struct spdk_bs_opts opts; 2818 struct spdk_blob_store *bs; 2819 spdk_blob_id blobid[2]; 2820 struct spdk_blob *blob[2]; 2821 uint64_t i; 2822 char *value; 2823 int rc; 2824 2825 dev = init_dev(); 2826 2827 /* Initialize a new blobstore with very small clusters */ 2828 spdk_bs_opts_init(&opts); 2829 opts.cluster_sz = dev->blocklen * 8; 2830 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2831 poll_threads(); 2832 CU_ASSERT(g_bserrno == 0); 2833 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2834 bs = g_bs; 2835 2836 /* Create and open two blobs */ 2837 for (i = 0; i < 2; i++) { 2838 blob[i] = ut_blob_create_and_open(bs, NULL); 2839 blobid[i] = spdk_blob_get_id(blob[i]); 2840 2841 /* Set a fairly large xattr on both blobs to eat up 2842 * metadata space 2843 */ 2844 value = calloc(dev->blocklen - 64, sizeof(char)); 2845 SPDK_CU_ASSERT_FATAL(value != NULL); 2846 memset(value, i, dev->blocklen / 2); 2847 rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); 2848 CU_ASSERT(rc == 0); 2849 free(value); 2850 } 2851 2852 /* Resize the blobs, alternating 1 cluster at a time. 2853 * This thwarts run length encoding and will cause spill 2854 * over of the extents. 2855 */ 2856 for (i = 0; i < 6; i++) { 2857 spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); 2858 poll_threads(); 2859 CU_ASSERT(g_bserrno == 0); 2860 } 2861 2862 for (i = 0; i < 2; i++) { 2863 spdk_blob_sync_md(blob[i], blob_op_complete, NULL); 2864 poll_threads(); 2865 CU_ASSERT(g_bserrno == 0); 2866 } 2867 2868 /* Close the blobs */ 2869 for (i = 0; i < 2; i++) { 2870 spdk_blob_close(blob[i], blob_op_complete, NULL); 2871 poll_threads(); 2872 CU_ASSERT(g_bserrno == 0); 2873 } 2874 2875 ut_bs_reload(&bs, &opts); 2876 2877 for (i = 0; i < 2; i++) { 2878 blob[i] = NULL; 2879 2880 spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); 2881 poll_threads(); 2882 CU_ASSERT(g_bserrno == 0); 2883 CU_ASSERT(g_blob != NULL); 2884 blob[i] = g_blob; 2885 2886 CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); 2887 2888 spdk_blob_close(blob[i], blob_op_complete, NULL); 2889 poll_threads(); 2890 CU_ASSERT(g_bserrno == 0); 2891 } 2892 2893 spdk_bs_unload(bs, bs_op_complete, NULL); 2894 poll_threads(); 2895 CU_ASSERT(g_bserrno == 0); 2896 g_bs = NULL; 2897 } 2898 2899 static void 2900 blob_crc(void) 2901 { 2902 struct spdk_blob_store *bs = g_bs; 2903 struct spdk_blob *blob; 2904 spdk_blob_id blobid; 2905 uint32_t page_num; 2906 int index; 2907 struct spdk_blob_md_page *page; 2908 2909 blob = ut_blob_create_and_open(bs, NULL); 2910 blobid = spdk_blob_get_id(blob); 2911 2912 spdk_blob_close(blob, blob_op_complete, NULL); 2913 poll_threads(); 2914 CU_ASSERT(g_bserrno == 0); 2915 2916 page_num = bs_blobid_to_page(blobid); 2917 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 2918 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 2919 page->crc = 0; 2920 2921 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2922 poll_threads(); 2923 CU_ASSERT(g_bserrno == -EINVAL); 2924 CU_ASSERT(g_blob == NULL); 2925 g_bserrno = 0; 2926 2927 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 2928 
poll_threads(); 2929 CU_ASSERT(g_bserrno == -EINVAL); 2930 } 2931 2932 static void 2933 super_block_crc(void) 2934 { 2935 struct spdk_blob_store *bs; 2936 struct spdk_bs_dev *dev; 2937 struct spdk_bs_super_block *super_block; 2938 2939 dev = init_dev(); 2940 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2941 poll_threads(); 2942 CU_ASSERT(g_bserrno == 0); 2943 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2944 bs = g_bs; 2945 2946 spdk_bs_unload(bs, bs_op_complete, NULL); 2947 poll_threads(); 2948 CU_ASSERT(g_bserrno == 0); 2949 g_bs = NULL; 2950 2951 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2952 super_block->crc = 0; 2953 dev = init_dev(); 2954 2955 /* Load an existing blob store */ 2956 g_bserrno = 0; 2957 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2958 poll_threads(); 2959 CU_ASSERT(g_bserrno == -EILSEQ); 2960 } 2961 2962 /* For blob dirty shutdown test case we do the following sub-test cases: 2963 * 1 Initialize new blob store and create 1 super blob with some xattrs, then we 2964 * dirty shutdown and reload the blob store and verify the xattrs. 2965 * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown, 2966 * reload the blob store and verify the clusters number. 2967 * 3 Create the second blob and then dirty shutdown, reload the blob store 2968 * and verify the second blob. 2969 * 4 Delete the second blob and then dirty shutdown, reload the blob store 2970 * and verify the second blob is invalid. 2971 * 5 Create the second blob again and also create the third blob, modify the 2972 * md of second blob which makes the md invalid, and then dirty shutdown, 2973 * reload the blob store verify the second blob, it should invalid and also 2974 * verify the third blob, it should correct. 2975 */ 2976 static void 2977 blob_dirty_shutdown(void) 2978 { 2979 int rc; 2980 int index; 2981 struct spdk_blob_store *bs = g_bs; 2982 spdk_blob_id blobid1, blobid2, blobid3; 2983 struct spdk_blob *blob = g_blob; 2984 uint64_t length; 2985 uint64_t free_clusters; 2986 const void *value; 2987 size_t value_len; 2988 uint32_t page_num; 2989 struct spdk_blob_md_page *page; 2990 struct spdk_blob_opts blob_opts; 2991 2992 /* Create first blob */ 2993 blobid1 = spdk_blob_get_id(blob); 2994 2995 /* Set some xattrs */ 2996 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2997 CU_ASSERT(rc == 0); 2998 2999 length = 2345; 3000 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3001 CU_ASSERT(rc == 0); 3002 3003 /* Put xattr that fits exactly single page. 3004 * This results in adding additional pages to MD. 3005 * First is flags and smaller xattr, second the large xattr, 3006 * third are just the extents. 
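 * The 4072 here appears to be SPDK_BS_MAX_DESC_SIZE, i.e. the descriptor area of a
 * 4 KiB metadata page, so descriptor header + name + value fill one page exactly
 * (compare blob_set_xattrs_test(), which uses the same formula plus one byte in order
 * to overflow it).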
3007 */ 3008 size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - 3009 strlen("large_xattr"); 3010 char *xattr = calloc(xattr_length, sizeof(char)); 3011 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3012 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3013 free(xattr); 3014 SPDK_CU_ASSERT_FATAL(rc == 0); 3015 3016 /* Resize the blob */ 3017 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3018 poll_threads(); 3019 CU_ASSERT(g_bserrno == 0); 3020 3021 /* Set the blob as the super blob */ 3022 spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); 3023 poll_threads(); 3024 CU_ASSERT(g_bserrno == 0); 3025 3026 free_clusters = spdk_bs_free_cluster_count(bs); 3027 3028 spdk_blob_close(blob, blob_op_complete, NULL); 3029 poll_threads(); 3030 CU_ASSERT(g_bserrno == 0); 3031 blob = NULL; 3032 g_blob = NULL; 3033 g_blobid = SPDK_BLOBID_INVALID; 3034 3035 ut_bs_dirty_load(&bs, NULL); 3036 3037 /* Get the super blob */ 3038 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 3039 poll_threads(); 3040 CU_ASSERT(g_bserrno == 0); 3041 CU_ASSERT(blobid1 == g_blobid); 3042 3043 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3044 poll_threads(); 3045 CU_ASSERT(g_bserrno == 0); 3046 CU_ASSERT(g_blob != NULL); 3047 blob = g_blob; 3048 3049 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3050 3051 /* Get the xattrs */ 3052 value = NULL; 3053 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3054 CU_ASSERT(rc == 0); 3055 SPDK_CU_ASSERT_FATAL(value != NULL); 3056 CU_ASSERT(*(uint64_t *)value == length); 3057 CU_ASSERT(value_len == 8); 3058 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3059 3060 /* Resize the blob */ 3061 spdk_blob_resize(blob, 20, blob_op_complete, NULL); 3062 poll_threads(); 3063 CU_ASSERT(g_bserrno == 0); 3064 3065 free_clusters = spdk_bs_free_cluster_count(bs); 3066 3067 spdk_blob_close(blob, blob_op_complete, NULL); 3068 poll_threads(); 3069 CU_ASSERT(g_bserrno == 0); 3070 blob = NULL; 3071 g_blob = NULL; 3072 g_blobid = SPDK_BLOBID_INVALID; 3073 3074 ut_bs_dirty_load(&bs, NULL); 3075 3076 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3077 poll_threads(); 3078 CU_ASSERT(g_bserrno == 0); 3079 CU_ASSERT(g_blob != NULL); 3080 blob = g_blob; 3081 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); 3082 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3083 3084 spdk_blob_close(blob, blob_op_complete, NULL); 3085 poll_threads(); 3086 CU_ASSERT(g_bserrno == 0); 3087 blob = NULL; 3088 g_blob = NULL; 3089 g_blobid = SPDK_BLOBID_INVALID; 3090 3091 /* Create second blob */ 3092 blob = ut_blob_create_and_open(bs, NULL); 3093 blobid2 = spdk_blob_get_id(blob); 3094 3095 /* Set some xattrs */ 3096 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3097 CU_ASSERT(rc == 0); 3098 3099 length = 5432; 3100 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3101 CU_ASSERT(rc == 0); 3102 3103 /* Resize the blob */ 3104 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3105 poll_threads(); 3106 CU_ASSERT(g_bserrno == 0); 3107 3108 free_clusters = spdk_bs_free_cluster_count(bs); 3109 3110 spdk_blob_close(blob, blob_op_complete, NULL); 3111 poll_threads(); 3112 CU_ASSERT(g_bserrno == 0); 3113 blob = NULL; 3114 g_blob = NULL; 3115 g_blobid = SPDK_BLOBID_INVALID; 3116 3117 ut_bs_dirty_load(&bs, NULL); 3118 3119 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3120 poll_threads(); 3121 CU_ASSERT(g_bserrno == 0); 3122 
CU_ASSERT(g_blob != NULL); 3123 blob = g_blob; 3124 3125 /* Get the xattrs */ 3126 value = NULL; 3127 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3128 CU_ASSERT(rc == 0); 3129 SPDK_CU_ASSERT_FATAL(value != NULL); 3130 CU_ASSERT(*(uint64_t *)value == length); 3131 CU_ASSERT(value_len == 8); 3132 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3133 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3134 3135 ut_blob_close_and_delete(bs, blob); 3136 3137 free_clusters = spdk_bs_free_cluster_count(bs); 3138 3139 ut_bs_dirty_load(&bs, NULL); 3140 3141 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3142 poll_threads(); 3143 CU_ASSERT(g_bserrno != 0); 3144 CU_ASSERT(g_blob == NULL); 3145 3146 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3147 poll_threads(); 3148 CU_ASSERT(g_bserrno == 0); 3149 CU_ASSERT(g_blob != NULL); 3150 blob = g_blob; 3151 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3152 spdk_blob_close(blob, blob_op_complete, NULL); 3153 poll_threads(); 3154 CU_ASSERT(g_bserrno == 0); 3155 3156 ut_bs_reload(&bs, NULL); 3157 3158 /* Create second blob */ 3159 ut_spdk_blob_opts_init(&blob_opts); 3160 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3161 poll_threads(); 3162 CU_ASSERT(g_bserrno == 0); 3163 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3164 blobid2 = g_blobid; 3165 3166 /* Create third blob */ 3167 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3168 poll_threads(); 3169 CU_ASSERT(g_bserrno == 0); 3170 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3171 blobid3 = g_blobid; 3172 3173 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3174 poll_threads(); 3175 CU_ASSERT(g_bserrno == 0); 3176 CU_ASSERT(g_blob != NULL); 3177 blob = g_blob; 3178 3179 /* Set some xattrs for second blob */ 3180 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3181 CU_ASSERT(rc == 0); 3182 3183 length = 5432; 3184 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3185 CU_ASSERT(rc == 0); 3186 3187 spdk_blob_close(blob, blob_op_complete, NULL); 3188 poll_threads(); 3189 CU_ASSERT(g_bserrno == 0); 3190 blob = NULL; 3191 g_blob = NULL; 3192 g_blobid = SPDK_BLOBID_INVALID; 3193 3194 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3195 poll_threads(); 3196 CU_ASSERT(g_bserrno == 0); 3197 CU_ASSERT(g_blob != NULL); 3198 blob = g_blob; 3199 3200 /* Set some xattrs for third blob */ 3201 rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); 3202 CU_ASSERT(rc == 0); 3203 3204 length = 5432; 3205 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3206 CU_ASSERT(rc == 0); 3207 3208 spdk_blob_close(blob, blob_op_complete, NULL); 3209 poll_threads(); 3210 CU_ASSERT(g_bserrno == 0); 3211 blob = NULL; 3212 g_blob = NULL; 3213 g_blobid = SPDK_BLOBID_INVALID; 3214 3215 /* Mark second blob as invalid */ 3216 page_num = bs_blobid_to_page(blobid2); 3217 3218 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3219 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3220 page->sequence_num = 1; 3221 page->crc = blob_md_page_calc_crc(page); 3222 3223 free_clusters = spdk_bs_free_cluster_count(bs); 3224 3225 ut_bs_dirty_load(&bs, NULL); 3226 3227 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3228 poll_threads(); 3229 CU_ASSERT(g_bserrno != 0); 3230 CU_ASSERT(g_blob == NULL); 3231 3232 spdk_bs_open_blob(bs, blobid3, 
blob_op_with_handle_complete, NULL); 3233 poll_threads(); 3234 CU_ASSERT(g_bserrno == 0); 3235 CU_ASSERT(g_blob != NULL); 3236 blob = g_blob; 3237 3238 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3239 } 3240 3241 static void 3242 blob_flags(void) 3243 { 3244 struct spdk_blob_store *bs = g_bs; 3245 spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; 3246 struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; 3247 struct spdk_blob_opts blob_opts; 3248 int rc; 3249 3250 /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */ 3251 blob_invalid = ut_blob_create_and_open(bs, NULL); 3252 blobid_invalid = spdk_blob_get_id(blob_invalid); 3253 3254 blob_data_ro = ut_blob_create_and_open(bs, NULL); 3255 blobid_data_ro = spdk_blob_get_id(blob_data_ro); 3256 3257 ut_spdk_blob_opts_init(&blob_opts); 3258 blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; 3259 blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); 3260 blobid_md_ro = spdk_blob_get_id(blob_md_ro); 3261 CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); 3262 3263 /* Change the size of blob_data_ro to check if flags are serialized 3264 * when blob has non zero number of extents */ 3265 spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); 3266 poll_threads(); 3267 CU_ASSERT(g_bserrno == 0); 3268 3269 /* Set the xattr to check if flags are serialized 3270 * when blob has non zero number of xattrs */ 3271 rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); 3272 CU_ASSERT(rc == 0); 3273 3274 blob_invalid->invalid_flags = (1ULL << 63); 3275 blob_invalid->state = SPDK_BLOB_STATE_DIRTY; 3276 blob_data_ro->data_ro_flags = (1ULL << 62); 3277 blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; 3278 blob_md_ro->md_ro_flags = (1ULL << 61); 3279 blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; 3280 3281 g_bserrno = -1; 3282 spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); 3283 poll_threads(); 3284 CU_ASSERT(g_bserrno == 0); 3285 g_bserrno = -1; 3286 spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); 3287 poll_threads(); 3288 CU_ASSERT(g_bserrno == 0); 3289 g_bserrno = -1; 3290 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3291 poll_threads(); 3292 CU_ASSERT(g_bserrno == 0); 3293 3294 g_bserrno = -1; 3295 spdk_blob_close(blob_invalid, blob_op_complete, NULL); 3296 poll_threads(); 3297 CU_ASSERT(g_bserrno == 0); 3298 blob_invalid = NULL; 3299 g_bserrno = -1; 3300 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3301 poll_threads(); 3302 CU_ASSERT(g_bserrno == 0); 3303 blob_data_ro = NULL; 3304 g_bserrno = -1; 3305 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3306 poll_threads(); 3307 CU_ASSERT(g_bserrno == 0); 3308 blob_md_ro = NULL; 3309 3310 g_blob = NULL; 3311 g_blobid = SPDK_BLOBID_INVALID; 3312 3313 ut_bs_reload(&bs, NULL); 3314 3315 g_blob = NULL; 3316 g_bserrno = 0; 3317 spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); 3318 poll_threads(); 3319 CU_ASSERT(g_bserrno != 0); 3320 CU_ASSERT(g_blob == NULL); 3321 3322 g_blob = NULL; 3323 g_bserrno = -1; 3324 spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL); 3325 poll_threads(); 3326 CU_ASSERT(g_bserrno == 0); 3327 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3328 blob_data_ro = g_blob; 3329 /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. 
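 * Unknown flag bits are handled with decreasing severity: an unknown invalid_flags bit
 * makes the open fail outright (checked for blobid_invalid above), an unknown data_ro
 * flag forces both data_ro and md_ro (checked here), and an unknown md_ro flag only
 * forces md_ro (checked next).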
*/ 3330 CU_ASSERT(blob_data_ro->data_ro == true); 3331 CU_ASSERT(blob_data_ro->md_ro == true); 3332 CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); 3333 3334 g_blob = NULL; 3335 g_bserrno = -1; 3336 spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); 3337 poll_threads(); 3338 CU_ASSERT(g_bserrno == 0); 3339 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3340 blob_md_ro = g_blob; 3341 CU_ASSERT(blob_md_ro->data_ro == false); 3342 CU_ASSERT(blob_md_ro->md_ro == true); 3343 3344 g_bserrno = -1; 3345 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3346 poll_threads(); 3347 CU_ASSERT(g_bserrno == 0); 3348 3349 ut_blob_close_and_delete(bs, blob_data_ro); 3350 ut_blob_close_and_delete(bs, blob_md_ro); 3351 } 3352 3353 static void 3354 bs_version(void) 3355 { 3356 struct spdk_bs_super_block *super; 3357 struct spdk_blob_store *bs = g_bs; 3358 struct spdk_bs_dev *dev; 3359 struct spdk_blob *blob; 3360 struct spdk_blob_opts blob_opts; 3361 spdk_blob_id blobid; 3362 3363 /* Unload the blob store */ 3364 spdk_bs_unload(bs, bs_op_complete, NULL); 3365 poll_threads(); 3366 CU_ASSERT(g_bserrno == 0); 3367 g_bs = NULL; 3368 3369 /* 3370 * Change the bs version on disk. This will allow us to 3371 * test that the version does not get modified automatically 3372 * when loading and unloading the blobstore. 3373 */ 3374 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 3375 CU_ASSERT(super->version == SPDK_BS_VERSION); 3376 CU_ASSERT(super->clean == 1); 3377 super->version = 2; 3378 /* 3379 * Version 2 metadata does not have a used blobid mask, so clear 3380 * those fields in the super block and zero the corresponding 3381 * region on "disk". We will use this to ensure blob IDs are 3382 * correctly reconstructed. 3383 */ 3384 memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, 3385 super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); 3386 super->used_blobid_mask_start = 0; 3387 super->used_blobid_mask_len = 0; 3388 super->crc = blob_md_page_calc_crc(super); 3389 3390 /* Load an existing blob store */ 3391 dev = init_dev(); 3392 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3393 poll_threads(); 3394 CU_ASSERT(g_bserrno == 0); 3395 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3396 CU_ASSERT(super->clean == 1); 3397 bs = g_bs; 3398 3399 /* 3400 * Create a blob - just to make sure that when we unload it 3401 * results in writing the super block (since metadata pages 3402 * were allocated. 
3403 */ 3404 ut_spdk_blob_opts_init(&blob_opts); 3405 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3406 poll_threads(); 3407 CU_ASSERT(g_bserrno == 0); 3408 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3409 blobid = g_blobid; 3410 3411 /* Unload the blob store */ 3412 spdk_bs_unload(bs, bs_op_complete, NULL); 3413 poll_threads(); 3414 CU_ASSERT(g_bserrno == 0); 3415 g_bs = NULL; 3416 CU_ASSERT(super->version == 2); 3417 CU_ASSERT(super->used_blobid_mask_start == 0); 3418 CU_ASSERT(super->used_blobid_mask_len == 0); 3419 3420 dev = init_dev(); 3421 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3422 poll_threads(); 3423 CU_ASSERT(g_bserrno == 0); 3424 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3425 bs = g_bs; 3426 3427 g_blob = NULL; 3428 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3429 poll_threads(); 3430 CU_ASSERT(g_bserrno == 0); 3431 CU_ASSERT(g_blob != NULL); 3432 blob = g_blob; 3433 3434 ut_blob_close_and_delete(bs, blob); 3435 3436 CU_ASSERT(super->version == 2); 3437 CU_ASSERT(super->used_blobid_mask_start == 0); 3438 CU_ASSERT(super->used_blobid_mask_len == 0); 3439 } 3440 3441 static void 3442 blob_set_xattrs_test(void) 3443 { 3444 struct spdk_blob_store *bs = g_bs; 3445 struct spdk_blob *blob; 3446 struct spdk_blob_opts opts; 3447 const void *value; 3448 size_t value_len; 3449 char *xattr; 3450 size_t xattr_length; 3451 int rc; 3452 3453 /* Create blob with extra attributes */ 3454 ut_spdk_blob_opts_init(&opts); 3455 3456 opts.xattrs.names = g_xattr_names; 3457 opts.xattrs.get_value = _get_xattr_value; 3458 opts.xattrs.count = 3; 3459 opts.xattrs.ctx = &g_ctx; 3460 3461 blob = ut_blob_create_and_open(bs, &opts); 3462 3463 /* Get the xattrs */ 3464 value = NULL; 3465 3466 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 3467 CU_ASSERT(rc == 0); 3468 SPDK_CU_ASSERT_FATAL(value != NULL); 3469 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 3470 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 3471 3472 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 3473 CU_ASSERT(rc == 0); 3474 SPDK_CU_ASSERT_FATAL(value != NULL); 3475 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 3476 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 3477 3478 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 3479 CU_ASSERT(rc == 0); 3480 SPDK_CU_ASSERT_FATAL(value != NULL); 3481 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 3482 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 3483 3484 /* Try to get non existing attribute */ 3485 3486 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 3487 CU_ASSERT(rc == -ENOENT); 3488 3489 /* Try xattr exceeding maximum length of descriptor in single page */ 3490 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 3491 strlen("large_xattr") + 1; 3492 xattr = calloc(xattr_length, sizeof(char)); 3493 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3494 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3495 free(xattr); 3496 SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); 3497 3498 spdk_blob_close(blob, blob_op_complete, NULL); 3499 poll_threads(); 3500 CU_ASSERT(g_bserrno == 0); 3501 blob = NULL; 3502 g_blob = NULL; 3503 g_blobid = SPDK_BLOBID_INVALID; 3504 3505 /* NULL callback */ 3506 ut_spdk_blob_opts_init(&opts); 3507 opts.xattrs.names = g_xattr_names; 3508 opts.xattrs.get_value = NULL; 3509 opts.xattrs.count = 1; 3510 
opts.xattrs.ctx = &g_ctx; 3511 3512 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3513 poll_threads(); 3514 CU_ASSERT(g_bserrno == -EINVAL); 3515 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3516 3517 /* NULL values */ 3518 ut_spdk_blob_opts_init(&opts); 3519 opts.xattrs.names = g_xattr_names; 3520 opts.xattrs.get_value = _get_xattr_value_null; 3521 opts.xattrs.count = 1; 3522 opts.xattrs.ctx = NULL; 3523 3524 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3525 poll_threads(); 3526 CU_ASSERT(g_bserrno == -EINVAL); 3527 } 3528 3529 static void 3530 blob_thin_prov_alloc(void) 3531 { 3532 struct spdk_blob_store *bs = g_bs; 3533 struct spdk_blob *blob; 3534 struct spdk_blob_opts opts; 3535 spdk_blob_id blobid; 3536 uint64_t free_clusters; 3537 3538 free_clusters = spdk_bs_free_cluster_count(bs); 3539 3540 /* Set blob as thin provisioned */ 3541 ut_spdk_blob_opts_init(&opts); 3542 opts.thin_provision = true; 3543 3544 blob = ut_blob_create_and_open(bs, &opts); 3545 blobid = spdk_blob_get_id(blob); 3546 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3547 3548 CU_ASSERT(blob->active.num_clusters == 0); 3549 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 3550 3551 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3552 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3553 poll_threads(); 3554 CU_ASSERT(g_bserrno == 0); 3555 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3556 CU_ASSERT(blob->active.num_clusters == 5); 3557 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 3558 3559 /* Grow it to 1TB - still unallocated */ 3560 spdk_blob_resize(blob, 262144, blob_op_complete, NULL); 3561 poll_threads(); 3562 CU_ASSERT(g_bserrno == 0); 3563 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3564 CU_ASSERT(blob->active.num_clusters == 262144); 3565 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3566 3567 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3568 poll_threads(); 3569 CU_ASSERT(g_bserrno == 0); 3570 /* Sync must not change anything */ 3571 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3572 CU_ASSERT(blob->active.num_clusters == 262144); 3573 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3574 /* Since clusters are not allocated, 3575 * number of metadata pages is expected to be minimal. 
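 * (Even at the 1TB / 262144-cluster size set above, the unallocated cluster map
 * compresses to almost nothing, so the assertion below expects a single page.)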
3576 */ 3577 CU_ASSERT(blob->active.num_pages == 1); 3578 3579 /* Shrink the blob to 3 clusters - still unallocated */ 3580 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 3581 poll_threads(); 3582 CU_ASSERT(g_bserrno == 0); 3583 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3584 CU_ASSERT(blob->active.num_clusters == 3); 3585 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3586 3587 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3588 poll_threads(); 3589 CU_ASSERT(g_bserrno == 0); 3590 /* Sync must not change anything */ 3591 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3592 CU_ASSERT(blob->active.num_clusters == 3); 3593 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3594 3595 spdk_blob_close(blob, blob_op_complete, NULL); 3596 poll_threads(); 3597 CU_ASSERT(g_bserrno == 0); 3598 3599 ut_bs_reload(&bs, NULL); 3600 3601 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3602 poll_threads(); 3603 CU_ASSERT(g_bserrno == 0); 3604 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3605 blob = g_blob; 3606 3607 /* Check that clusters allocation and size is still the same */ 3608 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3609 CU_ASSERT(blob->active.num_clusters == 3); 3610 3611 ut_blob_close_and_delete(bs, blob); 3612 } 3613 3614 static void 3615 blob_insert_cluster_msg_test(void) 3616 { 3617 struct spdk_blob_store *bs = g_bs; 3618 struct spdk_blob *blob; 3619 struct spdk_blob_opts opts; 3620 spdk_blob_id blobid; 3621 uint64_t free_clusters; 3622 uint64_t new_cluster = 0; 3623 uint32_t cluster_num = 3; 3624 uint32_t extent_page = 0; 3625 3626 free_clusters = spdk_bs_free_cluster_count(bs); 3627 3628 /* Set blob as thin provisioned */ 3629 ut_spdk_blob_opts_init(&opts); 3630 opts.thin_provision = true; 3631 opts.num_clusters = 4; 3632 3633 blob = ut_blob_create_and_open(bs, &opts); 3634 blobid = spdk_blob_get_id(blob); 3635 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3636 3637 CU_ASSERT(blob->active.num_clusters == 4); 3638 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); 3639 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3640 3641 /* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread. 3642 * This is to simulate behaviour when cluster is allocated after blob creation. 3643 * Such as _spdk_bs_allocate_and_copy_cluster(). 
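 * bs_allocate_cluster() below only claims the cluster in the blobstore's bitmaps;
 * the blob's own cluster array is not updated until blob_insert_cluster_on_md_thread()
 * runs, which is why clusters[cluster_num] is still expected to be 0 right after the
 * allocation call.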
*/ 3644 bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); 3645 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3646 3647 blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, 3648 blob_op_complete, NULL); 3649 poll_threads(); 3650 3651 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3652 3653 spdk_blob_close(blob, blob_op_complete, NULL); 3654 poll_threads(); 3655 CU_ASSERT(g_bserrno == 0); 3656 3657 ut_bs_reload(&bs, NULL); 3658 3659 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3660 poll_threads(); 3661 CU_ASSERT(g_bserrno == 0); 3662 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3663 blob = g_blob; 3664 3665 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3666 3667 ut_blob_close_and_delete(bs, blob); 3668 } 3669 3670 static void 3671 blob_thin_prov_rw(void) 3672 { 3673 static const uint8_t zero[10 * 4096] = { 0 }; 3674 struct spdk_blob_store *bs = g_bs; 3675 struct spdk_blob *blob; 3676 struct spdk_io_channel *channel, *channel_thread1; 3677 struct spdk_blob_opts opts; 3678 uint64_t free_clusters; 3679 uint64_t page_size; 3680 uint8_t payload_read[10 * 4096]; 3681 uint8_t payload_write[10 * 4096]; 3682 uint64_t write_bytes; 3683 uint64_t read_bytes; 3684 3685 free_clusters = spdk_bs_free_cluster_count(bs); 3686 page_size = spdk_bs_get_page_size(bs); 3687 3688 channel = spdk_bs_alloc_io_channel(bs); 3689 CU_ASSERT(channel != NULL); 3690 3691 ut_spdk_blob_opts_init(&opts); 3692 opts.thin_provision = true; 3693 3694 blob = ut_blob_create_and_open(bs, &opts); 3695 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3696 3697 CU_ASSERT(blob->active.num_clusters == 0); 3698 3699 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3700 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3701 poll_threads(); 3702 CU_ASSERT(g_bserrno == 0); 3703 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3704 CU_ASSERT(blob->active.num_clusters == 5); 3705 3706 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3707 poll_threads(); 3708 CU_ASSERT(g_bserrno == 0); 3709 /* Sync must not change anything */ 3710 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3711 CU_ASSERT(blob->active.num_clusters == 5); 3712 3713 /* Payload should be all zeros from unallocated clusters */ 3714 memset(payload_read, 0xFF, sizeof(payload_read)); 3715 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3716 poll_threads(); 3717 CU_ASSERT(g_bserrno == 0); 3718 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3719 3720 write_bytes = g_dev_write_bytes; 3721 read_bytes = g_dev_read_bytes; 3722 3723 /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ 3724 set_thread(1); 3725 channel_thread1 = spdk_bs_alloc_io_channel(bs); 3726 CU_ASSERT(channel_thread1 != NULL); 3727 memset(payload_write, 0xE5, sizeof(payload_write)); 3728 spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); 3729 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3730 /* Perform write on thread 0. That will try to allocate cluster, 3731 * but fail due to another thread issuing the cluster allocation first. 
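	 * The cluster claimed by the losing thread is released again, so the free count dips to
	 * free_clusters - 2 while both writes are in flight and settles at free_clusters - 1 once
	 * polling completes.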
*/ 3732 set_thread(0); 3733 memset(payload_write, 0xE5, sizeof(payload_write)); 3734 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 3735 CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); 3736 poll_threads(); 3737 CU_ASSERT(g_bserrno == 0); 3738 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3739 /* For thin-provisioned blob we need to write 20 pages plus one page metadata and 3740 * read 0 bytes */ 3741 if (g_use_extent_table) { 3742 /* Add one more page for EXTENT_PAGE write */ 3743 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); 3744 } else { 3745 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); 3746 } 3747 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3748 3749 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3750 poll_threads(); 3751 CU_ASSERT(g_bserrno == 0); 3752 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3753 3754 ut_blob_close_and_delete(bs, blob); 3755 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3756 3757 set_thread(1); 3758 spdk_bs_free_io_channel(channel_thread1); 3759 set_thread(0); 3760 spdk_bs_free_io_channel(channel); 3761 poll_threads(); 3762 g_blob = NULL; 3763 g_blobid = 0; 3764 } 3765 3766 static void 3767 blob_thin_prov_rle(void) 3768 { 3769 static const uint8_t zero[10 * 4096] = { 0 }; 3770 struct spdk_blob_store *bs = g_bs; 3771 struct spdk_blob *blob; 3772 struct spdk_io_channel *channel; 3773 struct spdk_blob_opts opts; 3774 spdk_blob_id blobid; 3775 uint64_t free_clusters; 3776 uint64_t page_size; 3777 uint8_t payload_read[10 * 4096]; 3778 uint8_t payload_write[10 * 4096]; 3779 uint64_t write_bytes; 3780 uint64_t read_bytes; 3781 uint64_t io_unit; 3782 3783 free_clusters = spdk_bs_free_cluster_count(bs); 3784 page_size = spdk_bs_get_page_size(bs); 3785 3786 ut_spdk_blob_opts_init(&opts); 3787 opts.thin_provision = true; 3788 opts.num_clusters = 5; 3789 3790 blob = ut_blob_create_and_open(bs, &opts); 3791 blobid = spdk_blob_get_id(blob); 3792 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3793 3794 channel = spdk_bs_alloc_io_channel(bs); 3795 CU_ASSERT(channel != NULL); 3796 3797 /* Target specifically second cluster in a blob as first allocation */ 3798 io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs); 3799 3800 /* Payload should be all zeros from unallocated clusters */ 3801 memset(payload_read, 0xFF, sizeof(payload_read)); 3802 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3803 poll_threads(); 3804 CU_ASSERT(g_bserrno == 0); 3805 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3806 3807 write_bytes = g_dev_write_bytes; 3808 read_bytes = g_dev_read_bytes; 3809 3810 /* Issue write to second cluster in a blob */ 3811 memset(payload_write, 0xE5, sizeof(payload_write)); 3812 spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); 3813 poll_threads(); 3814 CU_ASSERT(g_bserrno == 0); 3815 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3816 /* For thin-provisioned blob we need to write 10 pages plus one page metadata and 3817 * read 0 bytes */ 3818 if (g_use_extent_table) { 3819 /* Add one more page for EXTENT_PAGE write */ 3820 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); 3821 } else { 3822 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); 3823 } 3824 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3825 3826 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, 
blob_op_complete, NULL); 3827 poll_threads(); 3828 CU_ASSERT(g_bserrno == 0); 3829 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3830 3831 spdk_bs_free_io_channel(channel); 3832 poll_threads(); 3833 3834 spdk_blob_close(blob, blob_op_complete, NULL); 3835 poll_threads(); 3836 CU_ASSERT(g_bserrno == 0); 3837 3838 ut_bs_reload(&bs, NULL); 3839 3840 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3841 poll_threads(); 3842 CU_ASSERT(g_bserrno == 0); 3843 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3844 blob = g_blob; 3845 3846 channel = spdk_bs_alloc_io_channel(bs); 3847 CU_ASSERT(channel != NULL); 3848 3849 /* Read second cluster after blob reload to confirm data written */ 3850 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3851 poll_threads(); 3852 CU_ASSERT(g_bserrno == 0); 3853 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3854 3855 spdk_bs_free_io_channel(channel); 3856 poll_threads(); 3857 3858 ut_blob_close_and_delete(bs, blob); 3859 } 3860 3861 static void 3862 blob_thin_prov_rw_iov(void) 3863 { 3864 static const uint8_t zero[10 * 4096] = { 0 }; 3865 struct spdk_blob_store *bs = g_bs; 3866 struct spdk_blob *blob; 3867 struct spdk_io_channel *channel; 3868 struct spdk_blob_opts opts; 3869 uint64_t free_clusters; 3870 uint8_t payload_read[10 * 4096]; 3871 uint8_t payload_write[10 * 4096]; 3872 struct iovec iov_read[3]; 3873 struct iovec iov_write[3]; 3874 3875 free_clusters = spdk_bs_free_cluster_count(bs); 3876 3877 channel = spdk_bs_alloc_io_channel(bs); 3878 CU_ASSERT(channel != NULL); 3879 3880 ut_spdk_blob_opts_init(&opts); 3881 opts.thin_provision = true; 3882 3883 blob = ut_blob_create_and_open(bs, &opts); 3884 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3885 3886 CU_ASSERT(blob->active.num_clusters == 0); 3887 3888 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. 
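	 * Resizing a thin-provisioned blob only updates metadata; no clusters are allocated
	 * until the first write, as the free cluster count asserts below confirm.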
*/ 3889 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3890 poll_threads(); 3891 CU_ASSERT(g_bserrno == 0); 3892 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3893 CU_ASSERT(blob->active.num_clusters == 5); 3894 3895 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3896 poll_threads(); 3897 CU_ASSERT(g_bserrno == 0); 3898 /* Sync must not change anything */ 3899 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3900 CU_ASSERT(blob->active.num_clusters == 5); 3901 3902 /* Payload should be all zeros from unallocated clusters */ 3903 memset(payload_read, 0xAA, sizeof(payload_read)); 3904 iov_read[0].iov_base = payload_read; 3905 iov_read[0].iov_len = 3 * 4096; 3906 iov_read[1].iov_base = payload_read + 3 * 4096; 3907 iov_read[1].iov_len = 4 * 4096; 3908 iov_read[2].iov_base = payload_read + 7 * 4096; 3909 iov_read[2].iov_len = 3 * 4096; 3910 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 3911 poll_threads(); 3912 CU_ASSERT(g_bserrno == 0); 3913 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3914 3915 memset(payload_write, 0xE5, sizeof(payload_write)); 3916 iov_write[0].iov_base = payload_write; 3917 iov_write[0].iov_len = 1 * 4096; 3918 iov_write[1].iov_base = payload_write + 1 * 4096; 3919 iov_write[1].iov_len = 5 * 4096; 3920 iov_write[2].iov_base = payload_write + 6 * 4096; 3921 iov_write[2].iov_len = 4 * 4096; 3922 3923 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 3924 poll_threads(); 3925 CU_ASSERT(g_bserrno == 0); 3926 3927 memset(payload_read, 0xAA, sizeof(payload_read)); 3928 iov_read[0].iov_base = payload_read; 3929 iov_read[0].iov_len = 3 * 4096; 3930 iov_read[1].iov_base = payload_read + 3 * 4096; 3931 iov_read[1].iov_len = 4 * 4096; 3932 iov_read[2].iov_base = payload_read + 7 * 4096; 3933 iov_read[2].iov_len = 3 * 4096; 3934 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 3935 poll_threads(); 3936 CU_ASSERT(g_bserrno == 0); 3937 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3938 3939 spdk_bs_free_io_channel(channel); 3940 poll_threads(); 3941 3942 ut_blob_close_and_delete(bs, blob); 3943 } 3944 3945 struct iter_ctx { 3946 int current_iter; 3947 spdk_blob_id blobid[4]; 3948 }; 3949 3950 static void 3951 test_iter(void *arg, struct spdk_blob *blob, int bserrno) 3952 { 3953 struct iter_ctx *iter_ctx = arg; 3954 spdk_blob_id blobid; 3955 3956 CU_ASSERT(bserrno == 0); 3957 blobid = spdk_blob_get_id(blob); 3958 CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); 3959 } 3960 3961 static void 3962 bs_load_iter_test(void) 3963 { 3964 struct spdk_blob_store *bs; 3965 struct spdk_bs_dev *dev; 3966 struct iter_ctx iter_ctx = { 0 }; 3967 struct spdk_blob *blob; 3968 int i, rc; 3969 struct spdk_bs_opts opts; 3970 3971 dev = init_dev(); 3972 spdk_bs_opts_init(&opts); 3973 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 3974 3975 /* Initialize a new blob store */ 3976 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 3977 poll_threads(); 3978 CU_ASSERT(g_bserrno == 0); 3979 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3980 bs = g_bs; 3981 3982 for (i = 0; i < 4; i++) { 3983 blob = ut_blob_create_and_open(bs, NULL); 3984 iter_ctx.blobid[i] = spdk_blob_get_id(blob); 3985 3986 /* Just save the blobid as an xattr for testing purposes. 
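	 * The iteration callback matches blobs by id only; the xattr just gives each blob some
	 * metadata to persist.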
*/ 3987 rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); 3988 CU_ASSERT(rc == 0); 3989 3990 /* Resize the blob */ 3991 spdk_blob_resize(blob, i, blob_op_complete, NULL); 3992 poll_threads(); 3993 CU_ASSERT(g_bserrno == 0); 3994 3995 spdk_blob_close(blob, blob_op_complete, NULL); 3996 poll_threads(); 3997 CU_ASSERT(g_bserrno == 0); 3998 } 3999 4000 g_bserrno = -1; 4001 spdk_bs_unload(bs, bs_op_complete, NULL); 4002 poll_threads(); 4003 CU_ASSERT(g_bserrno == 0); 4004 4005 dev = init_dev(); 4006 spdk_bs_opts_init(&opts); 4007 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4008 opts.iter_cb_fn = test_iter; 4009 opts.iter_cb_arg = &iter_ctx; 4010 4011 /* Test blob iteration during load after a clean shutdown. */ 4012 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4013 poll_threads(); 4014 CU_ASSERT(g_bserrno == 0); 4015 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4016 bs = g_bs; 4017 4018 /* Dirty shutdown */ 4019 bs_free(bs); 4020 4021 dev = init_dev(); 4022 spdk_bs_opts_init(&opts); 4023 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4024 opts.iter_cb_fn = test_iter; 4025 iter_ctx.current_iter = 0; 4026 opts.iter_cb_arg = &iter_ctx; 4027 4028 /* Test blob iteration during load after a dirty shutdown. */ 4029 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4030 poll_threads(); 4031 CU_ASSERT(g_bserrno == 0); 4032 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4033 bs = g_bs; 4034 4035 spdk_bs_unload(bs, bs_op_complete, NULL); 4036 poll_threads(); 4037 CU_ASSERT(g_bserrno == 0); 4038 g_bs = NULL; 4039 } 4040 4041 static void 4042 blob_snapshot_rw(void) 4043 { 4044 static const uint8_t zero[10 * 4096] = { 0 }; 4045 struct spdk_blob_store *bs = g_bs; 4046 struct spdk_blob *blob, *snapshot; 4047 struct spdk_io_channel *channel; 4048 struct spdk_blob_opts opts; 4049 spdk_blob_id blobid, snapshotid; 4050 uint64_t free_clusters; 4051 uint64_t cluster_size; 4052 uint64_t page_size; 4053 uint8_t payload_read[10 * 4096]; 4054 uint8_t payload_write[10 * 4096]; 4055 uint64_t write_bytes; 4056 uint64_t read_bytes; 4057 4058 free_clusters = spdk_bs_free_cluster_count(bs); 4059 cluster_size = spdk_bs_get_cluster_size(bs); 4060 page_size = spdk_bs_get_page_size(bs); 4061 4062 channel = spdk_bs_alloc_io_channel(bs); 4063 CU_ASSERT(channel != NULL); 4064 4065 ut_spdk_blob_opts_init(&opts); 4066 opts.thin_provision = true; 4067 opts.num_clusters = 5; 4068 4069 blob = ut_blob_create_and_open(bs, &opts); 4070 blobid = spdk_blob_get_id(blob); 4071 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4072 4073 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4074 4075 memset(payload_read, 0xFF, sizeof(payload_read)); 4076 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4077 poll_threads(); 4078 CU_ASSERT(g_bserrno == 0); 4079 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4080 4081 memset(payload_write, 0xE5, sizeof(payload_write)); 4082 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4083 poll_threads(); 4084 CU_ASSERT(g_bserrno == 0); 4085 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4086 4087 /* Create snapshot from blob */ 4088 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4089 poll_threads(); 4090 CU_ASSERT(g_bserrno == 0); 4091 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4092 snapshotid = g_blobid; 4093 4094 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4095 poll_threads(); 
4096 CU_ASSERT(g_bserrno == 0); 4097 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4098 snapshot = g_blob; 4099 CU_ASSERT(snapshot->data_ro == true); 4100 CU_ASSERT(snapshot->md_ro == true); 4101 4102 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4103 4104 write_bytes = g_dev_write_bytes; 4105 read_bytes = g_dev_read_bytes; 4106 4107 memset(payload_write, 0xAA, sizeof(payload_write)); 4108 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4109 poll_threads(); 4110 CU_ASSERT(g_bserrno == 0); 4111 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4112 4113 /* For a clone we need to allocate and copy one cluster, update one page of metadata 4114 * and then write 10 pages of payload. 4115 */ 4116 if (g_use_extent_table) { 4117 /* Add one more page for EXTENT_PAGE write */ 4118 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); 4119 } else { 4120 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); 4121 } 4122 CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); 4123 4124 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4125 poll_threads(); 4126 CU_ASSERT(g_bserrno == 0); 4127 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4128 4129 /* Data on snapshot should not change after write to clone */ 4130 memset(payload_write, 0xE5, sizeof(payload_write)); 4131 spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); 4132 poll_threads(); 4133 CU_ASSERT(g_bserrno == 0); 4134 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4135 4136 ut_blob_close_and_delete(bs, blob); 4137 ut_blob_close_and_delete(bs, snapshot); 4138 4139 spdk_bs_free_io_channel(channel); 4140 poll_threads(); 4141 g_blob = NULL; 4142 g_blobid = 0; 4143 } 4144 4145 static void 4146 blob_snapshot_rw_iov(void) 4147 { 4148 static const uint8_t zero[10 * 4096] = { 0 }; 4149 struct spdk_blob_store *bs = g_bs; 4150 struct spdk_blob *blob, *snapshot; 4151 struct spdk_io_channel *channel; 4152 struct spdk_blob_opts opts; 4153 spdk_blob_id blobid, snapshotid; 4154 uint64_t free_clusters; 4155 uint8_t payload_read[10 * 4096]; 4156 uint8_t payload_write[10 * 4096]; 4157 struct iovec iov_read[3]; 4158 struct iovec iov_write[3]; 4159 4160 free_clusters = spdk_bs_free_cluster_count(bs); 4161 4162 channel = spdk_bs_alloc_io_channel(bs); 4163 CU_ASSERT(channel != NULL); 4164 4165 ut_spdk_blob_opts_init(&opts); 4166 opts.thin_provision = true; 4167 opts.num_clusters = 5; 4168 4169 blob = ut_blob_create_and_open(bs, &opts); 4170 blobid = spdk_blob_get_id(blob); 4171 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4172 4173 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4174 4175 /* Create snapshot from blob */ 4176 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4177 poll_threads(); 4178 CU_ASSERT(g_bserrno == 0); 4179 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4180 snapshotid = g_blobid; 4181 4182 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4183 poll_threads(); 4184 CU_ASSERT(g_bserrno == 0); 4185 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4186 snapshot = g_blob; 4187 CU_ASSERT(snapshot->data_ro == true); 4188 CU_ASSERT(snapshot->md_ro == true); 4189 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4190 4191 /* Payload should be all zeros from unallocated clusters */ 4192 memset(payload_read, 0xAA, sizeof(payload_read)); 4193 iov_read[0].iov_base = payload_read; 4194 iov_read[0].iov_len = 3 * 4096; 4195 
iov_read[1].iov_base = payload_read + 3 * 4096; 4196 iov_read[1].iov_len = 4 * 4096; 4197 iov_read[2].iov_base = payload_read + 7 * 4096; 4198 iov_read[2].iov_len = 3 * 4096; 4199 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4200 poll_threads(); 4201 CU_ASSERT(g_bserrno == 0); 4202 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4203 4204 memset(payload_write, 0xE5, sizeof(payload_write)); 4205 iov_write[0].iov_base = payload_write; 4206 iov_write[0].iov_len = 1 * 4096; 4207 iov_write[1].iov_base = payload_write + 1 * 4096; 4208 iov_write[1].iov_len = 5 * 4096; 4209 iov_write[2].iov_base = payload_write + 6 * 4096; 4210 iov_write[2].iov_len = 4 * 4096; 4211 4212 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4213 poll_threads(); 4214 CU_ASSERT(g_bserrno == 0); 4215 4216 memset(payload_read, 0xAA, sizeof(payload_read)); 4217 iov_read[0].iov_base = payload_read; 4218 iov_read[0].iov_len = 3 * 4096; 4219 iov_read[1].iov_base = payload_read + 3 * 4096; 4220 iov_read[1].iov_len = 4 * 4096; 4221 iov_read[2].iov_base = payload_read + 7 * 4096; 4222 iov_read[2].iov_len = 3 * 4096; 4223 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4224 poll_threads(); 4225 CU_ASSERT(g_bserrno == 0); 4226 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4227 4228 spdk_bs_free_io_channel(channel); 4229 poll_threads(); 4230 4231 ut_blob_close_and_delete(bs, blob); 4232 ut_blob_close_and_delete(bs, snapshot); 4233 } 4234 4235 /** 4236 * Inflate / decouple parent rw unit tests. 4237 * 4238 * -------------- 4239 * original blob: 0 1 2 3 4 4240 * ,---------+---------+---------+---------+---------. 4241 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4242 * +---------+---------+---------+---------+---------+ 4243 * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | 4244 * +---------+---------+---------+---------+---------+ 4245 * blob | - |zzzzzzzzz| - | - | - | 4246 * '---------+---------+---------+---------+---------' 4247 * . . . . . . 4248 * -------- . . . . . . 4249 * inflate: . . . . . . 4250 * ,---------+---------+---------+---------+---------. 4251 * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| 4252 * '---------+---------+---------+---------+---------' 4253 * 4254 * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency 4255 * on snapshot2 and snapshot removed . . . 4256 * . . . . . . 4257 * ---------------- . . . . . . 4258 * decouple parent: . . . . . . 4259 * ,---------+---------+---------+---------+---------. 4260 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4261 * +---------+---------+---------+---------+---------+ 4262 * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | 4263 * '---------+---------+---------+---------+---------' 4264 * 4265 * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency 4266 * on snapshot2 removed and on snapshot still exists. Snapshot2 4267 * should remain a clone of snapshot. 
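 * blob_inflate_rw() below runs this scenario twice: once with full inflation and once
 * with decouple parent.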
4268 */ 4269 static void 4270 _blob_inflate_rw(bool decouple_parent) 4271 { 4272 struct spdk_blob_store *bs = g_bs; 4273 struct spdk_blob *blob, *snapshot, *snapshot2; 4274 struct spdk_io_channel *channel; 4275 struct spdk_blob_opts opts; 4276 spdk_blob_id blobid, snapshotid, snapshot2id; 4277 uint64_t free_clusters; 4278 uint64_t cluster_size; 4279 4280 uint64_t payload_size; 4281 uint8_t *payload_read; 4282 uint8_t *payload_write; 4283 uint8_t *payload_clone; 4284 4285 uint64_t pages_per_cluster; 4286 uint64_t pages_per_payload; 4287 4288 int i; 4289 spdk_blob_id ids[2]; 4290 size_t count; 4291 4292 free_clusters = spdk_bs_free_cluster_count(bs); 4293 cluster_size = spdk_bs_get_cluster_size(bs); 4294 pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); 4295 pages_per_payload = pages_per_cluster * 5; 4296 4297 payload_size = cluster_size * 5; 4298 4299 payload_read = malloc(payload_size); 4300 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 4301 4302 payload_write = malloc(payload_size); 4303 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 4304 4305 payload_clone = malloc(payload_size); 4306 SPDK_CU_ASSERT_FATAL(payload_clone != NULL); 4307 4308 channel = spdk_bs_alloc_io_channel(bs); 4309 SPDK_CU_ASSERT_FATAL(channel != NULL); 4310 4311 /* Create blob */ 4312 ut_spdk_blob_opts_init(&opts); 4313 opts.thin_provision = true; 4314 opts.num_clusters = 5; 4315 4316 blob = ut_blob_create_and_open(bs, &opts); 4317 blobid = spdk_blob_get_id(blob); 4318 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4319 4320 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4321 4322 /* 1) Initial read should return zeroed payload */ 4323 memset(payload_read, 0xFF, payload_size); 4324 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4325 blob_op_complete, NULL); 4326 poll_threads(); 4327 CU_ASSERT(g_bserrno == 0); 4328 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 4329 4330 /* Fill whole blob with a pattern, except last cluster (to be sure it 4331 * isn't allocated) */ 4332 memset(payload_write, 0xE5, payload_size - cluster_size); 4333 spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - 4334 pages_per_cluster, blob_op_complete, NULL); 4335 poll_threads(); 4336 CU_ASSERT(g_bserrno == 0); 4337 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4338 4339 /* 2) Create snapshot from blob (first level) */ 4340 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4341 poll_threads(); 4342 CU_ASSERT(g_bserrno == 0); 4343 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4344 snapshotid = g_blobid; 4345 4346 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4347 poll_threads(); 4348 CU_ASSERT(g_bserrno == 0); 4349 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4350 snapshot = g_blob; 4351 CU_ASSERT(snapshot->data_ro == true); 4352 CU_ASSERT(snapshot->md_ro == true); 4353 4354 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4355 4356 /* Write every second cluster with a pattern. 4357 * 4358 * Last cluster shouldn't be written, to be sure that snapshot nor clone 4359 * doesn't allocate it. 4360 * 4361 * payload_clone stores expected result on "blob" read at the time and 4362 * is used only to check data consistency on clone before and after 4363 * inflation. Initially we fill it with a backing snapshots pattern 4364 * used before. 
4365 */ 4366 memset(payload_clone, 0xE5, payload_size - cluster_size); 4367 memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); 4368 memset(payload_write, 0xAA, payload_size); 4369 for (i = 1; i < 5; i += 2) { 4370 spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, 4371 pages_per_cluster, blob_op_complete, NULL); 4372 poll_threads(); 4373 CU_ASSERT(g_bserrno == 0); 4374 4375 /* Update expected result */ 4376 memcpy(payload_clone + (cluster_size * i), payload_write, 4377 cluster_size); 4378 } 4379 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4380 4381 /* Check data consistency on clone */ 4382 memset(payload_read, 0xFF, payload_size); 4383 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4384 blob_op_complete, NULL); 4385 poll_threads(); 4386 CU_ASSERT(g_bserrno == 0); 4387 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4388 4389 /* 3) Create second levels snapshot from blob */ 4390 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4391 poll_threads(); 4392 CU_ASSERT(g_bserrno == 0); 4393 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4394 snapshot2id = g_blobid; 4395 4396 spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); 4397 poll_threads(); 4398 CU_ASSERT(g_bserrno == 0); 4399 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4400 snapshot2 = g_blob; 4401 CU_ASSERT(snapshot2->data_ro == true); 4402 CU_ASSERT(snapshot2->md_ro == true); 4403 4404 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); 4405 4406 CU_ASSERT(snapshot2->parent_id == snapshotid); 4407 4408 /* Write one cluster on the top level blob. This cluster (1) covers 4409 * already allocated cluster in the snapshot2, so shouldn't be inflated 4410 * at all */ 4411 spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, 4412 pages_per_cluster, blob_op_complete, NULL); 4413 poll_threads(); 4414 CU_ASSERT(g_bserrno == 0); 4415 4416 /* Update expected result */ 4417 memcpy(payload_clone + cluster_size, payload_write, cluster_size); 4418 4419 /* Check data consistency on clone */ 4420 memset(payload_read, 0xFF, payload_size); 4421 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4422 blob_op_complete, NULL); 4423 poll_threads(); 4424 CU_ASSERT(g_bserrno == 0); 4425 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4426 4427 4428 /* Close all blobs */ 4429 spdk_blob_close(blob, blob_op_complete, NULL); 4430 poll_threads(); 4431 CU_ASSERT(g_bserrno == 0); 4432 4433 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4434 poll_threads(); 4435 CU_ASSERT(g_bserrno == 0); 4436 4437 spdk_blob_close(snapshot, blob_op_complete, NULL); 4438 poll_threads(); 4439 CU_ASSERT(g_bserrno == 0); 4440 4441 /* Check snapshot-clone relations */ 4442 count = 2; 4443 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4444 CU_ASSERT(count == 1); 4445 CU_ASSERT(ids[0] == snapshot2id); 4446 4447 count = 2; 4448 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4449 CU_ASSERT(count == 1); 4450 CU_ASSERT(ids[0] == blobid); 4451 4452 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); 4453 4454 free_clusters = spdk_bs_free_cluster_count(bs); 4455 if (!decouple_parent) { 4456 /* Do full blob inflation */ 4457 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 4458 poll_threads(); 4459 CU_ASSERT(g_bserrno == 0); 4460 4461 /* All clusters should be inflated (except one already allocated 4462 * in a top level blob) */ 4463 
CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4); 4464 4465 /* Check if relation tree updated correctly */ 4466 count = 2; 4467 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4468 4469 /* snapshotid has one clone */ 4470 CU_ASSERT(count == 1); 4471 CU_ASSERT(ids[0] == snapshot2id); 4472 4473 /* snapshot2id has no clones */ 4474 count = 2; 4475 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4476 CU_ASSERT(count == 0); 4477 4478 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4479 } else { 4480 /* Decouple parent of blob */ 4481 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 4482 poll_threads(); 4483 CU_ASSERT(g_bserrno == 0); 4484 4485 /* Only one cluster from a parent should be inflated (the second one 4486 * is covered by a cluster written on a top level blob, and 4487 * already allocated) */ 4488 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1); 4489 4490 /* Check if relation tree updated correctly */ 4491 count = 2; 4492 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4493 4494 /* snapshotid has two clones now */ 4495 CU_ASSERT(count == 2); 4496 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4497 CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id); 4498 4499 /* snapshot2id has no clones */ 4500 count = 2; 4501 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4502 CU_ASSERT(count == 0); 4503 4504 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4505 } 4506 4507 /* Try to delete snapshot2 (should pass) */ 4508 spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL); 4509 poll_threads(); 4510 CU_ASSERT(g_bserrno == 0); 4511 4512 /* Try to delete base snapshot */ 4513 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4514 poll_threads(); 4515 CU_ASSERT(g_bserrno == 0); 4516 4517 /* Reopen blob after snapshot deletion */ 4518 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4519 poll_threads(); 4520 CU_ASSERT(g_bserrno == 0); 4521 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4522 blob = g_blob; 4523 4524 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4525 4526 /* Check data consistency on inflated blob */ 4527 memset(payload_read, 0xFF, payload_size); 4528 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4529 blob_op_complete, NULL); 4530 poll_threads(); 4531 CU_ASSERT(g_bserrno == 0); 4532 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4533 4534 spdk_bs_free_io_channel(channel); 4535 poll_threads(); 4536 4537 free(payload_read); 4538 free(payload_write); 4539 free(payload_clone); 4540 4541 ut_blob_close_and_delete(bs, blob); 4542 } 4543 4544 static void 4545 blob_inflate_rw(void) 4546 { 4547 _blob_inflate_rw(false); 4548 _blob_inflate_rw(true); 4549 } 4550 4551 /** 4552 * Snapshot-clones relation test 4553 * 4554 * snapshot 4555 * | 4556 * +-----+-----+ 4557 * | | 4558 * blob(ro) snapshot2 4559 * | | 4560 * clone2 clone 4561 */ 4562 static void 4563 blob_relations(void) 4564 { 4565 struct spdk_blob_store *bs; 4566 struct spdk_bs_dev *dev; 4567 struct spdk_bs_opts bs_opts; 4568 struct spdk_blob_opts opts; 4569 struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2; 4570 spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2; 4571 int rc; 4572 size_t count; 4573 spdk_blob_id ids[10] = {}; 4574 4575 dev = init_dev(); 4576 spdk_bs_opts_init(&bs_opts); 4577 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype),
"TESTTYPE"); 4578 4579 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4580 poll_threads(); 4581 CU_ASSERT(g_bserrno == 0); 4582 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4583 bs = g_bs; 4584 4585 /* 1. Create blob with 10 clusters */ 4586 4587 ut_spdk_blob_opts_init(&opts); 4588 opts.num_clusters = 10; 4589 4590 blob = ut_blob_create_and_open(bs, &opts); 4591 blobid = spdk_blob_get_id(blob); 4592 4593 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4594 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4595 CU_ASSERT(!spdk_blob_is_clone(blob)); 4596 CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); 4597 4598 /* blob should not have underlying snapshot nor clones */ 4599 CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); 4600 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4601 count = SPDK_COUNTOF(ids); 4602 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4603 CU_ASSERT(rc == 0); 4604 CU_ASSERT(count == 0); 4605 4606 4607 /* 2. Create snapshot */ 4608 4609 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4610 poll_threads(); 4611 CU_ASSERT(g_bserrno == 0); 4612 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4613 snapshotid = g_blobid; 4614 4615 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4616 poll_threads(); 4617 CU_ASSERT(g_bserrno == 0); 4618 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4619 snapshot = g_blob; 4620 4621 CU_ASSERT(spdk_blob_is_read_only(snapshot)); 4622 CU_ASSERT(spdk_blob_is_snapshot(snapshot)); 4623 CU_ASSERT(!spdk_blob_is_clone(snapshot)); 4624 CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); 4625 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4626 4627 /* Check if original blob is converted to the clone of snapshot */ 4628 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4629 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4630 CU_ASSERT(spdk_blob_is_clone(blob)); 4631 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4632 CU_ASSERT(blob->parent_id == snapshotid); 4633 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4634 4635 count = SPDK_COUNTOF(ids); 4636 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4637 CU_ASSERT(rc == 0); 4638 CU_ASSERT(count == 1); 4639 CU_ASSERT(ids[0] == blobid); 4640 4641 4642 /* 3. Create clone from snapshot */ 4643 4644 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 4645 poll_threads(); 4646 CU_ASSERT(g_bserrno == 0); 4647 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4648 cloneid = g_blobid; 4649 4650 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4651 poll_threads(); 4652 CU_ASSERT(g_bserrno == 0); 4653 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4654 clone = g_blob; 4655 4656 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4657 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4658 CU_ASSERT(spdk_blob_is_clone(clone)); 4659 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4660 CU_ASSERT(clone->parent_id == snapshotid); 4661 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); 4662 4663 count = SPDK_COUNTOF(ids); 4664 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4665 CU_ASSERT(rc == 0); 4666 CU_ASSERT(count == 0); 4667 4668 /* Check if clone is on the snapshot's list */ 4669 count = SPDK_COUNTOF(ids); 4670 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4671 CU_ASSERT(rc == 0); 4672 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4673 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 4674 4675 4676 /* 4. 
Create snapshot of the clone */ 4677 4678 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 4679 poll_threads(); 4680 CU_ASSERT(g_bserrno == 0); 4681 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4682 snapshotid2 = g_blobid; 4683 4684 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4685 poll_threads(); 4686 CU_ASSERT(g_bserrno == 0); 4687 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4688 snapshot2 = g_blob; 4689 4690 CU_ASSERT(spdk_blob_is_read_only(snapshot2)); 4691 CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); 4692 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 4693 CU_ASSERT(snapshot2->parent_id == snapshotid); 4694 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4695 4696 /* Check if clone is converted to the clone of snapshot2 and snapshot2 4697 * is a child of snapshot */ 4698 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4699 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4700 CU_ASSERT(spdk_blob_is_clone(clone)); 4701 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4702 CU_ASSERT(clone->parent_id == snapshotid2); 4703 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4704 4705 count = SPDK_COUNTOF(ids); 4706 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4707 CU_ASSERT(rc == 0); 4708 CU_ASSERT(count == 1); 4709 CU_ASSERT(ids[0] == cloneid); 4710 4711 4712 /* 5. Try to create clone from read only blob */ 4713 4714 /* Mark blob as read only */ 4715 spdk_blob_set_read_only(blob); 4716 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4717 poll_threads(); 4718 CU_ASSERT(g_bserrno == 0); 4719 4720 /* Check if previously created blob is read only clone */ 4721 CU_ASSERT(spdk_blob_is_read_only(blob)); 4722 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4723 CU_ASSERT(spdk_blob_is_clone(blob)); 4724 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4725 4726 /* Create clone from read only blob */ 4727 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4728 poll_threads(); 4729 CU_ASSERT(g_bserrno == 0); 4730 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4731 cloneid2 = g_blobid; 4732 4733 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 4734 poll_threads(); 4735 CU_ASSERT(g_bserrno == 0); 4736 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4737 clone2 = g_blob; 4738 4739 CU_ASSERT(!spdk_blob_is_read_only(clone2)); 4740 CU_ASSERT(!spdk_blob_is_snapshot(clone2)); 4741 CU_ASSERT(spdk_blob_is_clone(clone2)); 4742 CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); 4743 4744 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4745 4746 count = SPDK_COUNTOF(ids); 4747 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4748 CU_ASSERT(rc == 0); 4749 4750 CU_ASSERT(count == 1); 4751 CU_ASSERT(ids[0] == cloneid2); 4752 4753 /* Close blobs */ 4754 4755 spdk_blob_close(clone2, blob_op_complete, NULL); 4756 poll_threads(); 4757 CU_ASSERT(g_bserrno == 0); 4758 4759 spdk_blob_close(blob, blob_op_complete, NULL); 4760 poll_threads(); 4761 CU_ASSERT(g_bserrno == 0); 4762 4763 spdk_blob_close(clone, blob_op_complete, NULL); 4764 poll_threads(); 4765 CU_ASSERT(g_bserrno == 0); 4766 4767 spdk_blob_close(snapshot, blob_op_complete, NULL); 4768 poll_threads(); 4769 CU_ASSERT(g_bserrno == 0); 4770 4771 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4772 poll_threads(); 4773 CU_ASSERT(g_bserrno == 0); 4774 4775 /* Try to delete snapshot with more than 1 clone */ 4776 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4777 poll_threads(); 4778 CU_ASSERT(g_bserrno != 0); 4779 4780 
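	/* Reload the blobstore so that the snapshot/clone relations verified below are
	 * rebuilt purely from on-disk metadata. */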
ut_bs_reload(&bs, &bs_opts); 4781 4782 /* NULL ids array should return number of clones in count */ 4783 count = SPDK_COUNTOF(ids); 4784 rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); 4785 CU_ASSERT(rc == -ENOMEM); 4786 CU_ASSERT(count == 2); 4787 4788 /* incorrect array size */ 4789 count = 1; 4790 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4791 CU_ASSERT(rc == -ENOMEM); 4792 CU_ASSERT(count == 2); 4793 4794 4795 /* Verify structure of loaded blob store */ 4796 4797 /* snapshot */ 4798 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4799 4800 count = SPDK_COUNTOF(ids); 4801 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4802 CU_ASSERT(rc == 0); 4803 CU_ASSERT(count == 2); 4804 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4805 CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); 4806 4807 /* blob */ 4808 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4809 count = SPDK_COUNTOF(ids); 4810 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4811 CU_ASSERT(rc == 0); 4812 CU_ASSERT(count == 1); 4813 CU_ASSERT(ids[0] == cloneid2); 4814 4815 /* clone */ 4816 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4817 count = SPDK_COUNTOF(ids); 4818 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4819 CU_ASSERT(rc == 0); 4820 CU_ASSERT(count == 0); 4821 4822 /* snapshot2 */ 4823 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4824 count = SPDK_COUNTOF(ids); 4825 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4826 CU_ASSERT(rc == 0); 4827 CU_ASSERT(count == 1); 4828 CU_ASSERT(ids[0] == cloneid); 4829 4830 /* clone2 */ 4831 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4832 count = SPDK_COUNTOF(ids); 4833 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 4834 CU_ASSERT(rc == 0); 4835 CU_ASSERT(count == 0); 4836 4837 /* Try to delete blob that user should not be able to remove */ 4838 4839 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4840 poll_threads(); 4841 CU_ASSERT(g_bserrno != 0); 4842 4843 /* Remove all blobs */ 4844 4845 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 4846 poll_threads(); 4847 CU_ASSERT(g_bserrno == 0); 4848 4849 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 4850 poll_threads(); 4851 CU_ASSERT(g_bserrno == 0); 4852 4853 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 4854 poll_threads(); 4855 CU_ASSERT(g_bserrno == 0); 4856 4857 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 4858 poll_threads(); 4859 CU_ASSERT(g_bserrno == 0); 4860 4861 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4862 poll_threads(); 4863 CU_ASSERT(g_bserrno == 0); 4864 4865 spdk_bs_unload(bs, bs_op_complete, NULL); 4866 poll_threads(); 4867 CU_ASSERT(g_bserrno == 0); 4868 4869 g_bs = NULL; 4870 } 4871 4872 /** 4873 * Snapshot-clones relation test 2 4874 * 4875 * snapshot1 4876 * | 4877 * snapshot2 4878 * | 4879 * +-----+-----+ 4880 * | | 4881 * blob(ro) snapshot3 4882 * | | 4883 * | snapshot4 4884 * | | | 4885 * clone2 clone clone3 4886 */ 4887 static void 4888 blob_relations2(void) 4889 { 4890 struct spdk_blob_store *bs; 4891 struct spdk_bs_dev *dev; 4892 struct spdk_bs_opts bs_opts; 4893 struct spdk_blob_opts opts; 4894 struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; 4895 spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, 4896 cloneid3; 4897 int rc; 4898 size_t count; 4899 
spdk_blob_id ids[10] = {}; 4900 4901 dev = init_dev(); 4902 spdk_bs_opts_init(&bs_opts); 4903 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 4904 4905 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4906 poll_threads(); 4907 CU_ASSERT(g_bserrno == 0); 4908 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4909 bs = g_bs; 4910 4911 /* 1. Create blob with 10 clusters */ 4912 4913 ut_spdk_blob_opts_init(&opts); 4914 opts.num_clusters = 10; 4915 4916 blob = ut_blob_create_and_open(bs, &opts); 4917 blobid = spdk_blob_get_id(blob); 4918 4919 /* 2. Create snapshot1 */ 4920 4921 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4922 poll_threads(); 4923 CU_ASSERT(g_bserrno == 0); 4924 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4925 snapshotid1 = g_blobid; 4926 4927 spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); 4928 poll_threads(); 4929 CU_ASSERT(g_bserrno == 0); 4930 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4931 snapshot1 = g_blob; 4932 4933 CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); 4934 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); 4935 4936 CU_ASSERT(blob->parent_id == snapshotid1); 4937 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 4938 4939 /* Check if blob is the clone of snapshot1 */ 4940 CU_ASSERT(blob->parent_id == snapshotid1); 4941 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 4942 4943 count = SPDK_COUNTOF(ids); 4944 rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); 4945 CU_ASSERT(rc == 0); 4946 CU_ASSERT(count == 1); 4947 CU_ASSERT(ids[0] == blobid); 4948 4949 /* 3. Create another snapshot */ 4950 4951 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4952 poll_threads(); 4953 CU_ASSERT(g_bserrno == 0); 4954 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4955 snapshotid2 = g_blobid; 4956 4957 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4958 poll_threads(); 4959 CU_ASSERT(g_bserrno == 0); 4960 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4961 snapshot2 = g_blob; 4962 4963 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 4964 CU_ASSERT(snapshot2->parent_id == snapshotid1); 4965 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); 4966 4967 /* Check if snapshot2 is the clone of snapshot1 and blob 4968 * is a child of snapshot2 */ 4969 CU_ASSERT(blob->parent_id == snapshotid2); 4970 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 4971 4972 count = SPDK_COUNTOF(ids); 4973 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4974 CU_ASSERT(rc == 0); 4975 CU_ASSERT(count == 1); 4976 CU_ASSERT(ids[0] == blobid); 4977 4978 /* 4. 
Create clone from snapshot */ 4979 4980 spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); 4981 poll_threads(); 4982 CU_ASSERT(g_bserrno == 0); 4983 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4984 cloneid = g_blobid; 4985 4986 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4987 poll_threads(); 4988 CU_ASSERT(g_bserrno == 0); 4989 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4990 clone = g_blob; 4991 4992 CU_ASSERT(clone->parent_id == snapshotid2); 4993 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4994 4995 /* Check if clone is on the snapshot's list */ 4996 count = SPDK_COUNTOF(ids); 4997 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4998 CU_ASSERT(rc == 0); 4999 CU_ASSERT(count == 2); 5000 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5001 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5002 5003 /* 5. Create snapshot of the clone */ 5004 5005 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5006 poll_threads(); 5007 CU_ASSERT(g_bserrno == 0); 5008 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5009 snapshotid3 = g_blobid; 5010 5011 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5012 poll_threads(); 5013 CU_ASSERT(g_bserrno == 0); 5014 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5015 snapshot3 = g_blob; 5016 5017 CU_ASSERT(snapshot3->parent_id == snapshotid2); 5018 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5019 5020 /* Check if clone is converted to the clone of snapshot3 and snapshot3 5021 * is a child of snapshot2 */ 5022 CU_ASSERT(clone->parent_id == snapshotid3); 5023 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5024 5025 count = SPDK_COUNTOF(ids); 5026 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5027 CU_ASSERT(rc == 0); 5028 CU_ASSERT(count == 1); 5029 CU_ASSERT(ids[0] == cloneid); 5030 5031 /* 6. Create another snapshot of the clone */ 5032 5033 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5034 poll_threads(); 5035 CU_ASSERT(g_bserrno == 0); 5036 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5037 snapshotid4 = g_blobid; 5038 5039 spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); 5040 poll_threads(); 5041 CU_ASSERT(g_bserrno == 0); 5042 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5043 snapshot4 = g_blob; 5044 5045 CU_ASSERT(snapshot4->parent_id == snapshotid3); 5046 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); 5047 5048 /* Check if clone is converted to the clone of snapshot4 and snapshot4 5049 * is a child of snapshot3 */ 5050 CU_ASSERT(clone->parent_id == snapshotid4); 5051 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); 5052 5053 count = SPDK_COUNTOF(ids); 5054 rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); 5055 CU_ASSERT(rc == 0); 5056 CU_ASSERT(count == 1); 5057 CU_ASSERT(ids[0] == cloneid); 5058 5059 /* 7. Remove snapshot 4 */ 5060 5061 ut_blob_close_and_delete(bs, snapshot4); 5062 5063 /* Check if relations are back to state from before creating snapshot 4 */ 5064 CU_ASSERT(clone->parent_id == snapshotid3); 5065 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5066 5067 count = SPDK_COUNTOF(ids); 5068 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5069 CU_ASSERT(rc == 0); 5070 CU_ASSERT(count == 1); 5071 CU_ASSERT(ids[0] == cloneid); 5072 5073 /* 8. 
Create second clone of snapshot 3 and try to remove snapshot 3 */ 5074 5075 spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); 5076 poll_threads(); 5077 CU_ASSERT(g_bserrno == 0); 5078 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5079 cloneid3 = g_blobid; 5080 5081 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5082 poll_threads(); 5083 CU_ASSERT(g_bserrno != 0); 5084 5085 /* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */ 5086 5087 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5088 poll_threads(); 5089 CU_ASSERT(g_bserrno == 0); 5090 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5091 snapshot3 = g_blob; 5092 5093 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5094 poll_threads(); 5095 CU_ASSERT(g_bserrno != 0); 5096 5097 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5098 poll_threads(); 5099 CU_ASSERT(g_bserrno == 0); 5100 5101 spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); 5102 poll_threads(); 5103 CU_ASSERT(g_bserrno == 0); 5104 5105 /* 10. Remove snapshot 1 */ 5106 5107 ut_blob_close_and_delete(bs, snapshot1); 5108 5109 /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ 5110 CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); 5111 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5112 5113 count = SPDK_COUNTOF(ids); 5114 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5115 CU_ASSERT(rc == 0); 5116 CU_ASSERT(count == 2); 5117 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5118 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5119 5120 /* 11. Try to create clone from read only blob */ 5121 5122 /* Mark blob as read only */ 5123 spdk_blob_set_read_only(blob); 5124 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5125 poll_threads(); 5126 CU_ASSERT(g_bserrno == 0); 5127 5128 /* Create clone from read only blob */ 5129 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5130 poll_threads(); 5131 CU_ASSERT(g_bserrno == 0); 5132 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5133 cloneid2 = g_blobid; 5134 5135 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5136 poll_threads(); 5137 CU_ASSERT(g_bserrno == 0); 5138 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5139 clone2 = g_blob; 5140 5141 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5142 5143 count = SPDK_COUNTOF(ids); 5144 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5145 CU_ASSERT(rc == 0); 5146 CU_ASSERT(count == 1); 5147 CU_ASSERT(ids[0] == cloneid2); 5148 5149 /* Close blobs */ 5150 5151 spdk_blob_close(clone2, blob_op_complete, NULL); 5152 poll_threads(); 5153 CU_ASSERT(g_bserrno == 0); 5154 5155 spdk_blob_close(blob, blob_op_complete, NULL); 5156 poll_threads(); 5157 CU_ASSERT(g_bserrno == 0); 5158 5159 spdk_blob_close(clone, blob_op_complete, NULL); 5160 poll_threads(); 5161 CU_ASSERT(g_bserrno == 0); 5162 5163 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5164 poll_threads(); 5165 CU_ASSERT(g_bserrno == 0); 5166 5167 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5168 poll_threads(); 5169 CU_ASSERT(g_bserrno == 0); 5170 5171 ut_bs_reload(&bs, &bs_opts); 5172 5173 /* Verify structure of loaded blob store */ 5174 5175 /* snapshot2 */ 5176 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5177 5178 count = SPDK_COUNTOF(ids); 5179 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5180 CU_ASSERT(rc == 0); 5181 
CU_ASSERT(count == 2); 5182 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5183 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5184 5185 /* blob */ 5186 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5187 count = SPDK_COUNTOF(ids); 5188 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5189 CU_ASSERT(rc == 0); 5190 CU_ASSERT(count == 1); 5191 CU_ASSERT(ids[0] == cloneid2); 5192 5193 /* clone */ 5194 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5195 count = SPDK_COUNTOF(ids); 5196 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5197 CU_ASSERT(rc == 0); 5198 CU_ASSERT(count == 0); 5199 5200 /* snapshot3 */ 5201 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5202 count = SPDK_COUNTOF(ids); 5203 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5204 CU_ASSERT(rc == 0); 5205 CU_ASSERT(count == 1); 5206 CU_ASSERT(ids[0] == cloneid); 5207 5208 /* clone2 */ 5209 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5210 count = SPDK_COUNTOF(ids); 5211 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5212 CU_ASSERT(rc == 0); 5213 CU_ASSERT(count == 0); 5214 5215 /* Try to delete all blobs in the worse possible order */ 5216 5217 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5218 poll_threads(); 5219 CU_ASSERT(g_bserrno != 0); 5220 5221 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5222 poll_threads(); 5223 CU_ASSERT(g_bserrno == 0); 5224 5225 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5226 poll_threads(); 5227 CU_ASSERT(g_bserrno != 0); 5228 5229 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5230 poll_threads(); 5231 CU_ASSERT(g_bserrno == 0); 5232 5233 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5234 poll_threads(); 5235 CU_ASSERT(g_bserrno == 0); 5236 5237 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5238 poll_threads(); 5239 CU_ASSERT(g_bserrno == 0); 5240 5241 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5242 poll_threads(); 5243 CU_ASSERT(g_bserrno == 0); 5244 5245 spdk_bs_unload(bs, bs_op_complete, NULL); 5246 poll_threads(); 5247 CU_ASSERT(g_bserrno == 0); 5248 5249 g_bs = NULL; 5250 } 5251 5252 static void 5253 blobstore_clean_power_failure(void) 5254 { 5255 struct spdk_blob_store *bs; 5256 struct spdk_blob *blob; 5257 struct spdk_power_failure_thresholds thresholds = {}; 5258 bool clean = false; 5259 struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 5260 struct spdk_bs_super_block super_copy = {}; 5261 5262 thresholds.general_threshold = 1; 5263 while (!clean) { 5264 /* Create bs and blob */ 5265 suite_blob_setup(); 5266 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5267 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5268 bs = g_bs; 5269 blob = g_blob; 5270 5271 /* Super block should not change for rest of the UT, 5272 * save it and compare later. */ 5273 memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block)); 5274 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5275 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5276 5277 /* Force bs/super block in a clean state. 5278 * Along with marking blob dirty, to cause blob persist. 
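		 * The md sync then has to flip clean back to 0, so whether super->clean ends up 0
		 * tells the test if the injected failure hit before or after the super block update.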
*/ 5279 blob->state = SPDK_BLOB_STATE_DIRTY; 5280 bs->clean = 1; 5281 super->clean = 1; 5282 super->crc = blob_md_page_calc_crc(super); 5283 5284 g_bserrno = -1; 5285 dev_set_power_failure_thresholds(thresholds); 5286 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5287 poll_threads(); 5288 dev_reset_power_failure_event(); 5289 5290 if (g_bserrno == 0) { 5291 /* After successful md sync, both bs and super block 5292 * should be marked as not clean. */ 5293 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5294 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5295 clean = true; 5296 } 5297 5298 /* Depending on the point of failure, super block was either updated or not. */ 5299 super_copy.clean = super->clean; 5300 super_copy.crc = blob_md_page_calc_crc(&super_copy); 5301 /* Compare that the values in super block remained unchanged. */ 5302 SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block))); 5303 5304 /* Delete blob and unload bs */ 5305 suite_blob_cleanup(); 5306 5307 thresholds.general_threshold++; 5308 } 5309 } 5310 5311 static void 5312 blob_delete_snapshot_power_failure(void) 5313 { 5314 struct spdk_bs_dev *dev; 5315 struct spdk_blob_store *bs; 5316 struct spdk_blob_opts opts; 5317 struct spdk_blob *blob, *snapshot; 5318 struct spdk_power_failure_thresholds thresholds = {}; 5319 spdk_blob_id blobid, snapshotid; 5320 const void *value; 5321 size_t value_len; 5322 size_t count; 5323 spdk_blob_id ids[3] = {}; 5324 int rc; 5325 bool deleted = false; 5326 int delete_snapshot_bserrno = -1; 5327 5328 thresholds.general_threshold = 1; 5329 while (!deleted) { 5330 dev = init_dev(); 5331 5332 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5333 poll_threads(); 5334 CU_ASSERT(g_bserrno == 0); 5335 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5336 bs = g_bs; 5337 5338 /* Create blob */ 5339 ut_spdk_blob_opts_init(&opts); 5340 opts.num_clusters = 10; 5341 5342 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5343 poll_threads(); 5344 CU_ASSERT(g_bserrno == 0); 5345 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5346 blobid = g_blobid; 5347 5348 /* Create snapshot */ 5349 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5350 poll_threads(); 5351 CU_ASSERT(g_bserrno == 0); 5352 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5353 snapshotid = g_blobid; 5354 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5355 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5356 5357 dev_set_power_failure_thresholds(thresholds); 5358 5359 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5360 poll_threads(); 5361 delete_snapshot_bserrno = g_bserrno; 5362 5363 /* Do not shut down cleanly. Assumption is that after snapshot deletion 5364 * reports success, changes to both blobs should already persisted. 
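		 * A dirty load below must therefore bring back a consistent state no matter
		 * where the failure was injected.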
*/ 5365 dev_reset_power_failure_event(); 5366 ut_bs_dirty_load(&bs, NULL); 5367 5368 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5369 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5370 5371 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5372 poll_threads(); 5373 CU_ASSERT(g_bserrno == 0); 5374 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5375 blob = g_blob; 5376 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5377 5378 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5379 poll_threads(); 5380 5381 if (g_bserrno == 0) { 5382 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5383 snapshot = g_blob; 5384 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5385 count = SPDK_COUNTOF(ids); 5386 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5387 CU_ASSERT(rc == 0); 5388 CU_ASSERT(count == 1); 5389 CU_ASSERT(ids[0] == blobid); 5390 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 5391 CU_ASSERT(rc != 0); 5392 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5393 5394 spdk_blob_close(snapshot, blob_op_complete, NULL); 5395 poll_threads(); 5396 CU_ASSERT(g_bserrno == 0); 5397 } else { 5398 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5399 /* Snapshot might have been left in unrecoverable state, so it does not open. 5400 * Yet delete might perform further changes to the clone after that. 5401 * This UT should test until snapshot is deleted and delete call succeeds. */ 5402 if (delete_snapshot_bserrno == 0) { 5403 deleted = true; 5404 } 5405 } 5406 5407 spdk_blob_close(blob, blob_op_complete, NULL); 5408 poll_threads(); 5409 CU_ASSERT(g_bserrno == 0); 5410 5411 spdk_bs_unload(bs, bs_op_complete, NULL); 5412 poll_threads(); 5413 CU_ASSERT(g_bserrno == 0); 5414 5415 thresholds.general_threshold++; 5416 } 5417 } 5418 5419 static void 5420 blob_create_snapshot_power_failure(void) 5421 { 5422 struct spdk_blob_store *bs = g_bs; 5423 struct spdk_bs_dev *dev; 5424 struct spdk_blob_opts opts; 5425 struct spdk_blob *blob, *snapshot; 5426 struct spdk_power_failure_thresholds thresholds = {}; 5427 spdk_blob_id blobid, snapshotid; 5428 const void *value; 5429 size_t value_len; 5430 size_t count; 5431 spdk_blob_id ids[3] = {}; 5432 int rc; 5433 bool created = false; 5434 int create_snapshot_bserrno = -1; 5435 5436 thresholds.general_threshold = 1; 5437 while (!created) { 5438 dev = init_dev(); 5439 5440 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5441 poll_threads(); 5442 CU_ASSERT(g_bserrno == 0); 5443 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5444 bs = g_bs; 5445 5446 /* Create blob */ 5447 ut_spdk_blob_opts_init(&opts); 5448 opts.num_clusters = 10; 5449 5450 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5451 poll_threads(); 5452 CU_ASSERT(g_bserrno == 0); 5453 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5454 blobid = g_blobid; 5455 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5456 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5457 5458 dev_set_power_failure_thresholds(thresholds); 5459 5460 /* Create snapshot */ 5461 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5462 poll_threads(); 5463 create_snapshot_bserrno = g_bserrno; 5464 snapshotid = g_blobid; 5465 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5466 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5467 5468 /* Do not shut 
down cleanly. Assumption is that after create snapshot 5469 * reports success, both blobs should be power-fail safe. */ 5470 dev_reset_power_failure_event(); 5471 ut_bs_dirty_load(&bs, NULL); 5472 5473 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5474 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5475 5476 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5477 poll_threads(); 5478 CU_ASSERT(g_bserrno == 0); 5479 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5480 blob = g_blob; 5481 5482 if (snapshotid != SPDK_BLOBID_INVALID) { 5483 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5484 poll_threads(); 5485 } 5486 5487 if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { 5488 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5489 snapshot = g_blob; 5490 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5491 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5492 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5493 count = SPDK_COUNTOF(ids); 5494 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5495 CU_ASSERT(rc == 0); 5496 CU_ASSERT(count == 1); 5497 CU_ASSERT(ids[0] == blobid); 5498 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); 5499 CU_ASSERT(rc != 0); 5500 5501 spdk_blob_close(snapshot, blob_op_complete, NULL); 5502 poll_threads(); 5503 CU_ASSERT(g_bserrno == 0); 5504 if (create_snapshot_bserrno == 0) { 5505 created = true; 5506 } 5507 } else { 5508 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5509 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); 5510 } 5511 5512 spdk_blob_close(blob, blob_op_complete, NULL); 5513 poll_threads(); 5514 CU_ASSERT(g_bserrno == 0); 5515 5516 spdk_bs_unload(bs, bs_op_complete, NULL); 5517 poll_threads(); 5518 CU_ASSERT(g_bserrno == 0); 5519 5520 thresholds.general_threshold++; 5521 } 5522 } 5523 5524 static void 5525 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5526 { 5527 uint8_t payload_ff[64 * 512]; 5528 uint8_t payload_aa[64 * 512]; 5529 uint8_t payload_00[64 * 512]; 5530 uint8_t *cluster0, *cluster1; 5531 5532 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5533 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5534 memset(payload_00, 0x00, sizeof(payload_00)); 5535 5536 /* Try to perform I/O with io unit = 512 */ 5537 spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); 5538 poll_threads(); 5539 CU_ASSERT(g_bserrno == 0); 5540 5541 /* If thin provisioned is set cluster should be allocated now */ 5542 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5543 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5544 5545 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5546 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). 
*/ 5547 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5548 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5549 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5550 5551 /* Verify write with offset on first page */ 5552 spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); 5553 poll_threads(); 5554 CU_ASSERT(g_bserrno == 0); 5555 5556 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5557 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5558 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5559 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5560 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5561 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5562 5563 /* Verify write with offset on first page */ 5564 spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); 5565 poll_threads(); 5566 5567 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5568 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5569 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5570 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5571 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5572 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5573 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5574 5575 /* Verify write with offset on second page */ 5576 spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); 5577 poll_threads(); 5578 5579 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5580 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5581 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5582 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5583 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5584 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5585 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5586 5587 /* Verify write across multiple pages */ 5588 spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); 5589 poll_threads(); 5590 5591 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ 5592 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5593 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5594 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5595 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5596 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5597 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5598 5599 /* Verify write across multiple clusters */ 5600 spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); 5601 poll_threads(); 5602 5603 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5604 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5605 5606 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5607 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5608 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5609 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5610 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5611 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5612 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5613 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 
4 * 512) == 0); 5614 5615 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5616 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5617 5618 /* Verify write to second cluster */ 5619 spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); 5620 poll_threads(); 5621 5622 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5623 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5624 5625 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5626 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5627 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5628 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5629 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5630 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5631 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5632 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5633 5634 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5635 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5636 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5637 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5638 } 5639 5640 static void 5641 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5642 { 5643 uint8_t payload_read[64 * 512]; 5644 uint8_t payload_ff[64 * 512]; 5645 uint8_t payload_aa[64 * 512]; 5646 uint8_t payload_00[64 * 512]; 5647 5648 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5649 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5650 memset(payload_00, 0x00, sizeof(payload_00)); 5651 5652 /* Read only first io unit */ 5653 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5654 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5655 * payload_read: F000 0000 | 0000 0000 ... */ 5656 memset(payload_read, 0x00, sizeof(payload_read)); 5657 spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); 5658 poll_threads(); 5659 CU_ASSERT(g_bserrno == 0); 5660 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5661 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5662 5663 /* Read four io_units starting from offset = 2 5664 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5665 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5666 * payload_read: F0AA 0000 | 0000 0000 ... */ 5667 5668 memset(payload_read, 0x00, sizeof(payload_read)); 5669 spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); 5670 poll_threads(); 5671 CU_ASSERT(g_bserrno == 0); 5672 5673 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5674 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5675 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5676 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5677 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5678 5679 /* Read eight io_units across multiple pages 5680 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5681 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5682 * payload_read: AAAA AAAA | 0000 0000 ... 
*/ 5683 memset(payload_read, 0x00, sizeof(payload_read)); 5684 spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); 5685 poll_threads(); 5686 CU_ASSERT(g_bserrno == 0); 5687 5688 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5689 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5690 5691 /* Read eight io_units across multiple clusters 5692 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5693 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5694 * payload_read: FFFF FFFF | 0000 0000 ... */ 5695 memset(payload_read, 0x00, sizeof(payload_read)); 5696 spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); 5697 poll_threads(); 5698 CU_ASSERT(g_bserrno == 0); 5699 5700 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 5701 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5702 5703 /* Read four io_units from second cluster 5704 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5705 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 5706 * payload_read: 00FF 0000 | 0000 0000 ... */ 5707 memset(payload_read, 0x00, sizeof(payload_read)); 5708 spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); 5709 poll_threads(); 5710 CU_ASSERT(g_bserrno == 0); 5711 5712 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 5713 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 5714 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5715 5716 /* Read second cluster 5717 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5718 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 5719 * payload_read: FFFF 0000 | 0000 FF00 ... 
*/ 5720 memset(payload_read, 0x00, sizeof(payload_read)); 5721 spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); 5722 poll_threads(); 5723 CU_ASSERT(g_bserrno == 0); 5724 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 5725 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 5726 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 5727 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 5728 5729 /* Read whole two clusters 5730 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5731 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 5732 memset(payload_read, 0x00, sizeof(payload_read)); 5733 spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); 5734 poll_threads(); 5735 CU_ASSERT(g_bserrno == 0); 5736 5737 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5738 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5739 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 5740 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 5741 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 5742 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 5743 5744 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 5745 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 5746 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 5747 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 5748 } 5749 5750 5751 static void 5752 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5753 { 5754 uint8_t payload_ff[64 * 512]; 5755 uint8_t payload_aa[64 * 512]; 5756 uint8_t payload_00[64 * 512]; 5757 uint8_t *cluster0, *cluster1; 5758 5759 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5760 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5761 memset(payload_00, 0x00, sizeof(payload_00)); 5762 5763 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5764 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5765 5766 /* Unmap */ 5767 spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); 5768 poll_threads(); 5769 5770 CU_ASSERT(g_bserrno == 0); 5771 5772 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5773 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5774 } 5775 5776 static void 5777 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5778 { 5779 uint8_t payload_ff[64 * 512]; 5780 uint8_t payload_aa[64 * 512]; 5781 uint8_t payload_00[64 * 512]; 5782 uint8_t *cluster0, *cluster1; 5783 5784 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5785 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5786 memset(payload_00, 0x00, sizeof(payload_00)); 5787 5788 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5789 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5790 5791 /* Write zeroes */ 5792 spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); 5793 poll_threads(); 5794 5795 CU_ASSERT(g_bserrno == 0); 5796 5797 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5798 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5799 } 5800 5801 5802 static void 5803 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 
5804 { 5805 uint8_t payload_ff[64 * 512]; 5806 uint8_t payload_aa[64 * 512]; 5807 uint8_t payload_00[64 * 512]; 5808 uint8_t *cluster0, *cluster1; 5809 struct iovec iov[4]; 5810 5811 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5812 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5813 memset(payload_00, 0x00, sizeof(payload_00)); 5814 5815 /* Try to perform I/O with io unit = 512 */ 5816 iov[0].iov_base = payload_ff; 5817 iov[0].iov_len = 1 * 512; 5818 spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5819 poll_threads(); 5820 CU_ASSERT(g_bserrno == 0); 5821 5822 /* If thin provisioned is set cluster should be allocated now */ 5823 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5824 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5825 5826 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5827 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ 5828 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5829 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5830 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5831 5832 /* Verify write with offset on first page */ 5833 iov[0].iov_base = payload_ff; 5834 iov[0].iov_len = 1 * 512; 5835 spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); 5836 poll_threads(); 5837 CU_ASSERT(g_bserrno == 0); 5838 5839 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5840 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5841 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5842 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5843 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5844 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5845 5846 /* Verify write with offset on first page */ 5847 iov[0].iov_base = payload_ff; 5848 iov[0].iov_len = 4 * 512; 5849 spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); 5850 poll_threads(); 5851 5852 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5853 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5854 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5855 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5856 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5857 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5858 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5859 5860 /* Verify write with offset on second page */ 5861 iov[0].iov_base = payload_ff; 5862 iov[0].iov_len = 4 * 512; 5863 spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); 5864 poll_threads(); 5865 5866 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5867 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5868 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5869 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5870 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5871 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5872 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5873 5874 /* Verify write across multiple pages */ 5875 iov[0].iov_base = payload_aa; 5876 iov[0].iov_len = 8 * 512; 5877 spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); 5878 poll_threads(); 5879 5880 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 
0000 | 0000 0000 ] */ 5881 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5882 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5883 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5884 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5885 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5886 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5887 5888 /* Verify write across multiple clusters */ 5889 5890 iov[0].iov_base = payload_ff; 5891 iov[0].iov_len = 8 * 512; 5892 spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); 5893 poll_threads(); 5894 5895 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5896 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5897 5898 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5899 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5900 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5901 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5902 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5903 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5904 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5905 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); 5906 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5907 5908 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5909 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5910 5911 /* Verify write to second cluster */ 5912 5913 iov[0].iov_base = payload_ff; 5914 iov[0].iov_len = 2 * 512; 5915 spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); 5916 poll_threads(); 5917 5918 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5919 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5920 5921 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5922 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5923 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5924 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5925 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5926 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5927 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5928 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5929 5930 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5931 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5932 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5933 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5934 } 5935 5936 static void 5937 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5938 { 5939 uint8_t payload_read[64 * 512]; 5940 uint8_t payload_ff[64 * 512]; 5941 uint8_t payload_aa[64 * 512]; 5942 uint8_t payload_00[64 * 512]; 5943 struct iovec iov[4]; 5944 5945 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5946 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5947 memset(payload_00, 0x00, sizeof(payload_00)); 5948 5949 /* Read only first io unit */ 5950 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5951 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5952 * payload_read: F000 0000 | 0000 0000 ... 
*/ 5953 memset(payload_read, 0x00, sizeof(payload_read)); 5954 iov[0].iov_base = payload_read; 5955 iov[0].iov_len = 1 * 512; 5956 spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5957 poll_threads(); 5958 5959 CU_ASSERT(g_bserrno == 0); 5960 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5961 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5962 5963 /* Read four io_units starting from offset = 2 5964 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5965 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5966 * payload_read: F0AA 0000 | 0000 0000 ... */ 5967 5968 memset(payload_read, 0x00, sizeof(payload_read)); 5969 iov[0].iov_base = payload_read; 5970 iov[0].iov_len = 4 * 512; 5971 spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); 5972 poll_threads(); 5973 CU_ASSERT(g_bserrno == 0); 5974 5975 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5976 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5977 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5978 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5979 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5980 5981 /* Read eight io_units across multiple pages 5982 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5983 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5984 * payload_read: AAAA AAAA | 0000 0000 ... */ 5985 memset(payload_read, 0x00, sizeof(payload_read)); 5986 iov[0].iov_base = payload_read; 5987 iov[0].iov_len = 4 * 512; 5988 iov[1].iov_base = payload_read + 4 * 512; 5989 iov[1].iov_len = 4 * 512; 5990 spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); 5991 poll_threads(); 5992 CU_ASSERT(g_bserrno == 0); 5993 5994 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5995 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5996 5997 /* Read eight io_units across multiple clusters 5998 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5999 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6000 * payload_read: FFFF FFFF | 0000 0000 ... */ 6001 memset(payload_read, 0x00, sizeof(payload_read)); 6002 iov[0].iov_base = payload_read; 6003 iov[0].iov_len = 2 * 512; 6004 iov[1].iov_base = payload_read + 2 * 512; 6005 iov[1].iov_len = 2 * 512; 6006 iov[2].iov_base = payload_read + 4 * 512; 6007 iov[2].iov_len = 2 * 512; 6008 iov[3].iov_base = payload_read + 6 * 512; 6009 iov[3].iov_len = 2 * 512; 6010 spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); 6011 poll_threads(); 6012 CU_ASSERT(g_bserrno == 0); 6013 6014 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 6015 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6016 6017 /* Read four io_units from second cluster 6018 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6019 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 6020 * payload_read: 00FF 0000 | 0000 0000 ... 
*/ 6021 memset(payload_read, 0x00, sizeof(payload_read)); 6022 iov[0].iov_base = payload_read; 6023 iov[0].iov_len = 1 * 512; 6024 iov[1].iov_base = payload_read + 1 * 512; 6025 iov[1].iov_len = 3 * 512; 6026 spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); 6027 poll_threads(); 6028 CU_ASSERT(g_bserrno == 0); 6029 6030 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 6031 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 6032 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6033 6034 /* Read second cluster 6035 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6036 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6037 * payload_read: FFFF 0000 | 0000 FF00 ... */ 6038 memset(payload_read, 0x00, sizeof(payload_read)); 6039 iov[0].iov_base = payload_read; 6040 iov[0].iov_len = 1 * 512; 6041 iov[1].iov_base = payload_read + 1 * 512; 6042 iov[1].iov_len = 2 * 512; 6043 iov[2].iov_base = payload_read + 3 * 512; 6044 iov[2].iov_len = 4 * 512; 6045 iov[3].iov_base = payload_read + 7 * 512; 6046 iov[3].iov_len = 25 * 512; 6047 spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); 6048 poll_threads(); 6049 CU_ASSERT(g_bserrno == 0); 6050 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6051 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6052 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6053 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6054 6055 /* Read whole two clusters 6056 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6057 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6058 memset(payload_read, 0x00, sizeof(payload_read)); 6059 iov[0].iov_base = payload_read; 6060 iov[0].iov_len = 1 * 512; 6061 iov[1].iov_base = payload_read + 1 * 512; 6062 iov[1].iov_len = 8 * 512; 6063 iov[2].iov_base = payload_read + 9 * 512; 6064 iov[2].iov_len = 16 * 512; 6065 iov[3].iov_base = payload_read + 25 * 512; 6066 iov[3].iov_len = 39 * 512; 6067 spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); 6068 poll_threads(); 6069 CU_ASSERT(g_bserrno == 0); 6070 6071 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6072 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6073 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6074 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6075 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6076 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6077 6078 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6079 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6080 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6081 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6082 } 6083 6084 static void 6085 blob_io_unit(void) 6086 { 6087 struct spdk_bs_opts bsopts; 6088 struct spdk_blob_opts opts; 6089 struct spdk_blob_store *bs; 6090 struct spdk_bs_dev *dev; 6091 struct spdk_blob *blob, *snapshot, *clone; 6092 spdk_blob_id blobid; 6093 struct spdk_io_channel *channel; 6094 6095 /* Create dev with 512 bytes io unit size */ 6096 6097 spdk_bs_opts_init(&bsopts); 6098 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6099 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), 
"TESTTYPE"); 6100 6101 /* Try to initialize a new blob store with unsupported io_unit */ 6102 dev = init_dev(); 6103 dev->blocklen = 512; 6104 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6105 6106 /* Initialize a new blob store */ 6107 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6108 poll_threads(); 6109 CU_ASSERT(g_bserrno == 0); 6110 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6111 bs = g_bs; 6112 6113 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6114 channel = spdk_bs_alloc_io_channel(bs); 6115 6116 /* Create thick provisioned blob */ 6117 ut_spdk_blob_opts_init(&opts); 6118 opts.thin_provision = false; 6119 opts.num_clusters = 32; 6120 6121 blob = ut_blob_create_and_open(bs, &opts); 6122 blobid = spdk_blob_get_id(blob); 6123 6124 test_io_write(dev, blob, channel); 6125 test_io_read(dev, blob, channel); 6126 test_io_zeroes(dev, blob, channel); 6127 6128 test_iov_write(dev, blob, channel); 6129 test_iov_read(dev, blob, channel); 6130 6131 test_io_unmap(dev, blob, channel); 6132 6133 spdk_blob_close(blob, blob_op_complete, NULL); 6134 poll_threads(); 6135 CU_ASSERT(g_bserrno == 0); 6136 blob = NULL; 6137 g_blob = NULL; 6138 6139 /* Create thin provisioned blob */ 6140 6141 ut_spdk_blob_opts_init(&opts); 6142 opts.thin_provision = true; 6143 opts.num_clusters = 32; 6144 6145 blob = ut_blob_create_and_open(bs, &opts); 6146 blobid = spdk_blob_get_id(blob); 6147 6148 test_io_write(dev, blob, channel); 6149 test_io_read(dev, blob, channel); 6150 6151 test_io_zeroes(dev, blob, channel); 6152 6153 test_iov_write(dev, blob, channel); 6154 test_iov_read(dev, blob, channel); 6155 6156 /* Create snapshot */ 6157 6158 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6159 poll_threads(); 6160 CU_ASSERT(g_bserrno == 0); 6161 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6162 blobid = g_blobid; 6163 6164 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6165 poll_threads(); 6166 CU_ASSERT(g_bserrno == 0); 6167 CU_ASSERT(g_blob != NULL); 6168 snapshot = g_blob; 6169 6170 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6171 poll_threads(); 6172 CU_ASSERT(g_bserrno == 0); 6173 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6174 blobid = g_blobid; 6175 6176 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6177 poll_threads(); 6178 CU_ASSERT(g_bserrno == 0); 6179 CU_ASSERT(g_blob != NULL); 6180 clone = g_blob; 6181 6182 test_io_read(dev, blob, channel); 6183 test_io_read(dev, snapshot, channel); 6184 test_io_read(dev, clone, channel); 6185 6186 test_iov_read(dev, blob, channel); 6187 test_iov_read(dev, snapshot, channel); 6188 test_iov_read(dev, clone, channel); 6189 6190 /* Inflate clone */ 6191 6192 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6193 poll_threads(); 6194 6195 CU_ASSERT(g_bserrno == 0); 6196 6197 test_io_read(dev, clone, channel); 6198 6199 test_io_unmap(dev, clone, channel); 6200 6201 test_iov_write(dev, clone, channel); 6202 test_iov_read(dev, clone, channel); 6203 6204 spdk_blob_close(blob, blob_op_complete, NULL); 6205 spdk_blob_close(snapshot, blob_op_complete, NULL); 6206 spdk_blob_close(clone, blob_op_complete, NULL); 6207 poll_threads(); 6208 CU_ASSERT(g_bserrno == 0); 6209 blob = NULL; 6210 g_blob = NULL; 6211 6212 spdk_bs_free_io_channel(channel); 6213 poll_threads(); 6214 6215 /* Unload the blob store */ 6216 spdk_bs_unload(bs, bs_op_complete, NULL); 6217 poll_threads(); 6218 CU_ASSERT(g_bserrno == 0); 6219 g_bs = NULL; 6220 g_blob = NULL; 6221 g_blobid = 0; 6222 
}

static void
blob_io_unit_compatibility(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts);
	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Try to initialize a new blob store with unsupported io_unit */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Modify the super block to mimic an older version.
	 * Check that the loaded io unit size then equals SPDK_BS_PAGE_SIZE. */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	super->io_unit_size = 0;
	super->crc = blob_md_page_calc_crc(super);

	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}

static void
blob_simultaneous_operations(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create a snapshot and try to remove the blob at the same time:
	 * - snapshot should be created successfully
	 * - delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Snapshot creation success */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Inflate the blob and try to remove it at the same time:
	 * - blob should be inflated successfully
	 * - delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Inflation success */
	CU_ASSERT(g_bserrno == 0);

	/* Clone the snapshot and try to remove the snapshot at the same time:
	 * - snapshot should be cloned successfully
	 * - delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Clone created */
	CU_ASSERT(g_bserrno == 0);

	/* Resize the blob and try to remove it at the same time:
	 * - blob should be resized successfully
	 * - delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Blob resized successfully */
	CU_ASSERT(g_bserrno == 0);

	/* Issue two consecutive blob syncs; neither should fail.
	 * Force each sync to actually occur by marking the blob dirty first.
	 * Issuing the sync call by itself should not be enough to complete the operation,
	 * since disk I/O is required to complete it. */
	g_bserrno = -1;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	SPDK_CU_ASSERT_FATAL(g_bserrno == -1);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	SPDK_CU_ASSERT_FATAL(g_bserrno == -1);

	uint32_t completions = 0;
	while (completions < 2) {
		SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1));
		if (g_bserrno == 0) {
			g_bserrno = -1;
			completions++;
		}
		/* g_bserrno should never be anything other than -1 here;
		 * that would mean one of the syncs failed.
		 */
		SPDK_CU_ASSERT_FATAL(g_bserrno == -1);
	}

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, snapshot);
	ut_blob_close_and_delete(bs, blob);
}

static void
blob_persist_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;
	char *xattr;
	size_t xattr_length;
	int rc;
	uint32_t page_count_clear, page_count_xattr;
	uint64_t poller_iterations;
	bool run_poller;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Save the number of md pages used after creation of a blob.
	 * The count should return to this value once the xattr is removed. */
	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);

	/* Add an xattr with the maximum descriptor length, so that the blob metadata exceeds a single metadata page. */
	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
		       strlen("large_xattr");
	xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);

	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);

	/* Save the number of md pages used after adding the large xattr */
	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);

	/* Add the xattr to the blob and sync it. While the sync is occurring, remove the xattr and sync again.
	 * Interrupt the first sync after an increasing number of poller iterations, until it eventually succeeds.
	 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
	poller_iterations = 1;
	run_poller = true;
	while (run_poller) {
		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == 0);
		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_thread_times(0, poller_iterations);
		if (g_bserrno == 0) {
			/* The poller iteration count was high enough for the first sync to complete.
			 * Verify that the blob takes up enough md_pages to store the xattr.
*/ 6464 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6465 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6466 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr); 6467 run_poller = false; 6468 } 6469 rc = spdk_blob_remove_xattr(blob, "large_xattr"); 6470 SPDK_CU_ASSERT_FATAL(rc == 0); 6471 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6472 poll_threads(); 6473 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6474 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6475 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6476 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear); 6477 6478 /* Reload bs and re-open blob to verify that xattr was not persisted. */ 6479 spdk_blob_close(blob, blob_op_complete, NULL); 6480 poll_threads(); 6481 CU_ASSERT(g_bserrno == 0); 6482 6483 ut_bs_reload(&bs, NULL); 6484 6485 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6486 poll_threads(); 6487 CU_ASSERT(g_bserrno == 0); 6488 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6489 blob = g_blob; 6490 6491 rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length); 6492 SPDK_CU_ASSERT_FATAL(rc == -ENOENT); 6493 6494 poller_iterations++; 6495 /* Stop at high iteration count to prevent infinite loop. 6496 * This value should be enough for first md sync to complete in any case. */ 6497 SPDK_CU_ASSERT_FATAL(poller_iterations < 50); 6498 } 6499 6500 free(xattr); 6501 6502 ut_blob_close_and_delete(bs, blob); 6503 6504 spdk_bs_free_io_channel(channel); 6505 poll_threads(); 6506 } 6507 6508 static void 6509 suite_bs_setup(void) 6510 { 6511 struct spdk_bs_dev *dev; 6512 6513 dev = init_dev(); 6514 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6515 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 6516 poll_threads(); 6517 CU_ASSERT(g_bserrno == 0); 6518 CU_ASSERT(g_bs != NULL); 6519 } 6520 6521 static void 6522 suite_bs_cleanup(void) 6523 { 6524 spdk_bs_unload(g_bs, bs_op_complete, NULL); 6525 poll_threads(); 6526 CU_ASSERT(g_bserrno == 0); 6527 g_bs = NULL; 6528 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6529 } 6530 6531 static struct spdk_blob * 6532 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts) 6533 { 6534 struct spdk_blob *blob; 6535 struct spdk_blob_opts create_blob_opts; 6536 spdk_blob_id blobid; 6537 6538 if (blob_opts == NULL) { 6539 ut_spdk_blob_opts_init(&create_blob_opts); 6540 blob_opts = &create_blob_opts; 6541 } 6542 6543 spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL); 6544 poll_threads(); 6545 CU_ASSERT(g_bserrno == 0); 6546 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6547 blobid = g_blobid; 6548 g_blobid = -1; 6549 6550 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6551 poll_threads(); 6552 CU_ASSERT(g_bserrno == 0); 6553 CU_ASSERT(g_blob != NULL); 6554 blob = g_blob; 6555 6556 g_blob = NULL; 6557 g_bserrno = -1; 6558 6559 return blob; 6560 } 6561 6562 static void 6563 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob) 6564 { 6565 spdk_blob_id blobid = spdk_blob_get_id(blob); 6566 6567 spdk_blob_close(blob, blob_op_complete, NULL); 6568 poll_threads(); 6569 CU_ASSERT(g_bserrno == 0); 6570 g_blob = NULL; 6571 6572 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6573 poll_threads(); 6574 
CU_ASSERT(g_bserrno == 0); 6575 g_bserrno = -1; 6576 } 6577 6578 static void 6579 suite_blob_setup(void) 6580 { 6581 suite_bs_setup(); 6582 CU_ASSERT(g_bs != NULL); 6583 6584 g_blob = ut_blob_create_and_open(g_bs, NULL); 6585 CU_ASSERT(g_blob != NULL); 6586 } 6587 6588 static void 6589 suite_blob_cleanup(void) 6590 { 6591 ut_blob_close_and_delete(g_bs, g_blob); 6592 CU_ASSERT(g_blob == NULL); 6593 6594 suite_bs_cleanup(); 6595 CU_ASSERT(g_bs == NULL); 6596 } 6597 6598 int main(int argc, char **argv) 6599 { 6600 CU_pSuite suite, suite_bs, suite_blob; 6601 unsigned int num_failures; 6602 6603 CU_set_error_action(CUEA_ABORT); 6604 CU_initialize_registry(); 6605 6606 suite = CU_add_suite("blob", NULL, NULL); 6607 suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL, 6608 suite_bs_setup, suite_bs_cleanup); 6609 suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL, 6610 suite_blob_setup, suite_blob_cleanup); 6611 6612 CU_ADD_TEST(suite, blob_init); 6613 CU_ADD_TEST(suite_bs, blob_open); 6614 CU_ADD_TEST(suite_bs, blob_create); 6615 CU_ADD_TEST(suite_bs, blob_create_fail); 6616 CU_ADD_TEST(suite_bs, blob_create_internal); 6617 CU_ADD_TEST(suite, blob_thin_provision); 6618 CU_ADD_TEST(suite_bs, blob_snapshot); 6619 CU_ADD_TEST(suite_bs, blob_clone); 6620 CU_ADD_TEST(suite_bs, blob_inflate); 6621 CU_ADD_TEST(suite_bs, blob_delete); 6622 CU_ADD_TEST(suite_bs, blob_resize_test); 6623 CU_ADD_TEST(suite, blob_read_only); 6624 CU_ADD_TEST(suite_bs, channel_ops); 6625 CU_ADD_TEST(suite_bs, blob_super); 6626 CU_ADD_TEST(suite_blob, blob_write); 6627 CU_ADD_TEST(suite_blob, blob_read); 6628 CU_ADD_TEST(suite_blob, blob_rw_verify); 6629 CU_ADD_TEST(suite_bs, blob_rw_verify_iov); 6630 CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem); 6631 CU_ADD_TEST(suite_blob, blob_rw_iov_read_only); 6632 CU_ADD_TEST(suite_bs, blob_unmap); 6633 CU_ADD_TEST(suite_bs, blob_iter); 6634 CU_ADD_TEST(suite_blob, blob_xattr); 6635 CU_ADD_TEST(suite, bs_load); 6636 CU_ADD_TEST(suite_bs, bs_load_pending_removal); 6637 CU_ADD_TEST(suite, bs_load_custom_cluster_size); 6638 CU_ADD_TEST(suite_bs, bs_unload); 6639 CU_ADD_TEST(suite, bs_cluster_sz); 6640 CU_ADD_TEST(suite_bs, bs_usable_clusters); 6641 CU_ADD_TEST(suite, bs_resize_md); 6642 CU_ADD_TEST(suite, bs_destroy); 6643 CU_ADD_TEST(suite, bs_type); 6644 CU_ADD_TEST(suite, bs_super_block); 6645 CU_ADD_TEST(suite, blob_serialize_test); 6646 CU_ADD_TEST(suite_bs, blob_crc); 6647 CU_ADD_TEST(suite, super_block_crc); 6648 CU_ADD_TEST(suite_blob, blob_dirty_shutdown); 6649 CU_ADD_TEST(suite_bs, blob_flags); 6650 CU_ADD_TEST(suite_bs, bs_version); 6651 CU_ADD_TEST(suite_bs, blob_set_xattrs_test); 6652 CU_ADD_TEST(suite_bs, blob_thin_prov_alloc); 6653 CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test); 6654 CU_ADD_TEST(suite_bs, blob_thin_prov_rw); 6655 CU_ADD_TEST(suite_bs, blob_thin_prov_rle); 6656 CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov); 6657 CU_ADD_TEST(suite, bs_load_iter_test); 6658 CU_ADD_TEST(suite_bs, blob_snapshot_rw); 6659 CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov); 6660 CU_ADD_TEST(suite, blob_relations); 6661 CU_ADD_TEST(suite, blob_relations2); 6662 CU_ADD_TEST(suite, blobstore_clean_power_failure); 6663 CU_ADD_TEST(suite, blob_delete_snapshot_power_failure); 6664 CU_ADD_TEST(suite, blob_create_snapshot_power_failure); 6665 CU_ADD_TEST(suite_bs, blob_inflate_rw); 6666 CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io); 6667 CU_ADD_TEST(suite_bs, blob_operation_split_rw); 6668 CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov); 6669 
	CU_ADD_TEST(suite, blob_io_unit);
	CU_ADD_TEST(suite, blob_io_unit_compatibility);
	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
	CU_ADD_TEST(suite_bs, blob_persist_test);

	allocate_threads(2);
	set_thread(0);

	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	g_use_extent_table = false;
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	g_use_extent_table = true;
	CU_basic_run_tests();
	num_failures += CU_get_number_of_failures();
	CU_cleanup_registry();

	free(g_dev_buffer);

	free_threads();

	return num_failures;
}
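/*
 * Note: main() runs the full CUnit registry twice, once with g_use_extent_table
 * set to false and once with it set to true, so that every test case above is
 * exercised both with and without the extent table metadata format. The failure
 * counts from both passes are summed into the process exit status.
 */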