/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk/blob.h"
#include "spdk/string.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "../bs_dev_common.c"
#include "blob/blobstore.c"
#include "blob/request.c"
#include "blob/zeroes.c"
#include "blob/blob_bs_dev.c"

struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob;
int g_bserrno;
struct spdk_xattr_names *g_names;
int g_done;
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;
bool g_use_extent_table = false;

struct spdk_bs_super_block_ver1 {
	uint8_t signature[8];
	uint32_t version;
	uint32_t length;
	uint32_t clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id super_blob;

	uint32_t cluster_size; /* In bytes */

	uint32_t used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t used_page_mask_len; /* Count, in pages */

	uint32_t used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t used_cluster_mask_len; /* Count, in pages */

	uint32_t md_start; /* Offset from beginning of disk, in pages */
	uint32_t md_len; /* Count, in pages */

	uint8_t reserved[4036];
	uint32_t crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");

static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
		struct spdk_blob_opts *blob_opts);
static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
static void suite_blob_setup(void);
static void suite_blob_cleanup(void);

static void
_get_xattr_value(void *arg, const char *name,
		 const void **value, size_t *value_len)
{
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == &g_ctx);

	/* Iterate over the number of entries, not the byte size of the array. */
	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
		if (!strcmp(name, g_xattr_names[i])) {
			*value_len = strlen(g_xattr_values[i]);
			*value = g_xattr_values[i];
			break;
		}
	}
}

static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value_len = 0;
	*value = NULL;
}

static int
_get_snapshots_count(struct spdk_blob_store *bs)
{
	struct spdk_blob_list *snapshot = NULL;
	int count = 0;

	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
		count += 1;
	}

	return count;
}

static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts);
	opts->use_extent_table = g_use_extent_table;
}

static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}

static void
blob_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}
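
/*
 * The tests below all follow the same pattern built on the callbacks above:
 * issue an asynchronous blobstore call, drive its completion on the unit test
 * threads with poll_threads(), then check the result captured in the globals.
 * For example (as used in blob_super() and blob_open() below):
 *
 *	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
 *	poll_threads();
 *	CU_ASSERT(g_bserrno == 0);
 *	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
 */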

static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	_spdk_bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}

static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again. It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference. This
	 * should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again. It should succeed. This tests the case
	 * where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}

static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	_spdk_blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	_spdk_bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options */

	_spdk_bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
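
/*
 * Note on blob_create_internal() above: the underscore-prefixed helpers it
 * calls (_spdk_bs_create_blob, _spdk_blob_xattrs_init, _spdk_blob_get_xattr_value)
 * are blobstore internals rather than public API. They are reachable here only
 * because blob/blobstore.c is #include'd directly into this unit test, which
 * lets the test attach and read internal xattrs that the public
 * spdk_blob_get_xattr_value() deliberately refuses to return.
 */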

static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts);
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly. This makes sure that when we load again
	 * and try to recover a valid used_cluster map, that blobstore will
	 * ignore clusters with index 0 since these are unallocated clusters.
533 */ 534 ut_bs_dirty_load(&bs, &bs_opts); 535 536 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 537 poll_threads(); 538 CU_ASSERT(g_bserrno == 0); 539 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 540 blob = g_blob; 541 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 542 543 ut_blob_close_and_delete(bs, blob); 544 545 spdk_bs_unload(bs, bs_op_complete, NULL); 546 poll_threads(); 547 CU_ASSERT(g_bserrno == 0); 548 g_bs = NULL; 549 } 550 551 static void 552 blob_snapshot(void) 553 { 554 struct spdk_blob_store *bs = g_bs; 555 struct spdk_blob *blob; 556 struct spdk_blob *snapshot, *snapshot2; 557 struct spdk_blob_bs_dev *blob_bs_dev; 558 struct spdk_blob_opts opts; 559 struct spdk_blob_xattr_opts xattrs; 560 spdk_blob_id blobid; 561 spdk_blob_id snapshotid; 562 spdk_blob_id snapshotid2; 563 const void *value; 564 size_t value_len; 565 int rc; 566 spdk_blob_id ids[2]; 567 size_t count; 568 569 /* Create blob with 10 clusters */ 570 ut_spdk_blob_opts_init(&opts); 571 opts.num_clusters = 10; 572 573 blob = ut_blob_create_and_open(bs, &opts); 574 blobid = spdk_blob_get_id(blob); 575 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 576 577 /* Create snapshot from blob */ 578 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0); 579 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 580 poll_threads(); 581 CU_ASSERT(g_bserrno == 0); 582 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 583 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1); 584 snapshotid = g_blobid; 585 586 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 587 poll_threads(); 588 CU_ASSERT(g_bserrno == 0); 589 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 590 snapshot = g_blob; 591 CU_ASSERT(snapshot->data_ro == true); 592 CU_ASSERT(snapshot->md_ro == true); 593 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); 594 595 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 596 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 597 CU_ASSERT(spdk_mem_all_zero(blob->active.clusters, 598 blob->active.num_clusters * sizeof(blob->active.clusters[0]))); 599 600 /* Try to create snapshot from clone with xattrs */ 601 xattrs.names = g_xattr_names; 602 xattrs.get_value = _get_xattr_value; 603 xattrs.count = 3; 604 xattrs.ctx = &g_ctx; 605 spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL); 606 poll_threads(); 607 CU_ASSERT(g_bserrno == 0); 608 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 609 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2); 610 snapshotid2 = g_blobid; 611 612 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 613 CU_ASSERT(g_bserrno == 0); 614 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 615 snapshot2 = g_blob; 616 CU_ASSERT(snapshot2->data_ro == true); 617 CU_ASSERT(snapshot2->md_ro == true); 618 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10); 619 620 /* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */ 621 CU_ASSERT(snapshot->back_bs_dev == NULL); 622 SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL); 623 SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL); 624 625 blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 626 CU_ASSERT(blob_bs_dev->blob == snapshot2); 627 628 blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev; 629 CU_ASSERT(blob_bs_dev->blob == snapshot); 630 631 rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len); 632 CU_ASSERT(rc == 0); 633 SPDK_CU_ASSERT_FATAL(value != NULL); 634 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 635 
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshotid2);

	/* Try to create snapshot from snapshot */
	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);

	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
	ut_blob_close_and_delete(bs, blob);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
	ut_blob_close_and_delete(bs, snapshot2);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
}

static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that.
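	 *
	 * poll_thread_times() is used here instead of poll_threads() so that only
	 * a limited number of completions run: the snapshot operation is still in
	 * flight at this point, which lets the test observe the frozen state
	 * (frozen_refcnt and queued I/O) before the snapshot finishes.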
	 */
	poll_thread_times(0, 3);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk */
	CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0] * SPDK_BS_PAGE_SIZE],
			 SPDK_BS_PAGE_SIZE) == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);


	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from a blob that is not read only */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}

static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflating a thin blob with no parent should make it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
	}

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_inflate(void)
{
	_blob_inflate(false);
	_blob_inflate(true);
}

static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_resize(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	dev = init_dev();
	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
channel_ops(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster. This allows
	 * us to make sure that the readv/write code correctly accounts for I/O
	 * that cross cluster boundaries. Start by asserting that the allocated
	 * clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
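	 *
	 * With the unit test device there are 256 4KiB pages per cluster (see the
	 * cluster offset asserts above), so a 10-page write at page offset 250
	 * covers pages 250-255 of the first cluster and pages 0-3 of the
	 * (relocated) second cluster.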
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	buf = calloc(1, 256 * 4096);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static uint32_t
bs_channel_get_req_count(struct spdk_io_channel *_channel)
{
	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
	struct spdk_bs_request_set *set;
	uint32_t count = 0;

	TAILQ_FOREACH(set, &channel->reqs, link) {
		count++;
	}

	return count;
}

static void
blob_rw_verify_iov_nomem(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_write[10 * 4096];
	struct iovec iov_write[3];
	uint32_t req_count;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	MOCK_SET(calloc, NULL);
	req_count = bs_channel_get_req_count(channel);
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
	MOCK_CLEAR(calloc);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[4096];
	uint8_t payload_write[4096];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev fails if read_only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       uint8_t *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, read one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint8_t *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, write one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}
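
/*
 * The two helpers above act as a reference I/O path: single-page requests can
 * never cross a cluster boundary, so they are never split internally. The
 * split tests below write with one path and read back with the other, then
 * memcmp() the payloads, so any error in the request-splitting logic shows up
 * as a data mismatch.
 */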

static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_operation_split_rw_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	struct iovec iov_read[2];
	struct iovec iov_write[2];

	uint64_t i, j;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	for (i = 0; i < pages_per_payload; i++) {
		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
			uint64_t *tmp;

			tmp = (uint64_t *)payload_pattern;
			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
			*tmp = i + 1;
		}
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 3;
	iov_read[1].iov_base = payload_read + cluster_size * 3;
	iov_read[1].iov_len = cluster_size * 2;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* First of iovs fills whole blob except last page and second of iovs writes last page
	 * with a pattern.
	 */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = payload_size - page_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = page_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 2;
	iov_read[1].iov_base = payload_read + cluster_size * 2;
	iov_read[1].iov_len = cluster_size * 3;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* First of iovs fills only first page and second of iovs writes whole blob except
	 * first page with a pattern. */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = page_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = payload_size - page_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 4;
	iov_read[1].iov_base = payload_read + cluster_size * 4;
	iov_read[1].iov_len = cluster_size;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size;
	iov_read[1].iov_base = payload_read + cluster_size;
	iov_read[1].iov_len = cluster_size * 4;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
*/ 1760 iov_write[0].iov_base = payload_read; 1761 iov_write[0].iov_len = cluster_size * 2; 1762 iov_write[1].iov_base = payload_read + cluster_size * 2; 1763 iov_write[1].iov_len = cluster_size * 3; 1764 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1765 poll_threads(); 1766 CU_ASSERT(g_bserrno == 0); 1767 1768 memset(payload_read, 0xFF, payload_size); 1769 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1770 poll_threads(); 1771 CU_ASSERT(g_bserrno == 0); 1772 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1773 1774 spdk_bs_free_io_channel(channel); 1775 poll_threads(); 1776 1777 g_blob = NULL; 1778 g_blobid = 0; 1779 1780 free(payload_read); 1781 free(payload_write); 1782 free(payload_pattern); 1783 1784 ut_blob_close_and_delete(bs, blob); 1785 } 1786 1787 static void 1788 blob_unmap(void) 1789 { 1790 struct spdk_blob_store *bs = g_bs; 1791 struct spdk_blob *blob; 1792 struct spdk_io_channel *channel; 1793 struct spdk_blob_opts opts; 1794 uint8_t payload[4096]; 1795 int i; 1796 1797 channel = spdk_bs_alloc_io_channel(bs); 1798 CU_ASSERT(channel != NULL); 1799 1800 ut_spdk_blob_opts_init(&opts); 1801 opts.num_clusters = 10; 1802 1803 blob = ut_blob_create_and_open(bs, &opts); 1804 1805 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1806 poll_threads(); 1807 CU_ASSERT(g_bserrno == 0); 1808 1809 memset(payload, 0, sizeof(payload)); 1810 payload[0] = 0xFF; 1811 1812 /* 1813 * Set first byte of every cluster to 0xFF. 1814 * First cluster on device is reserved so let's start from cluster number 1 1815 */ 1816 for (i = 1; i < 11; i++) { 1817 g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF; 1818 } 1819 1820 /* Confirm writes */ 1821 for (i = 0; i < 10; i++) { 1822 payload[0] = 0; 1823 spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1, 1824 blob_op_complete, NULL); 1825 poll_threads(); 1826 CU_ASSERT(g_bserrno == 0); 1827 CU_ASSERT(payload[0] == 0xFF); 1828 } 1829 1830 /* Mark some clusters as unallocated */ 1831 blob->active.clusters[1] = 0; 1832 blob->active.clusters[2] = 0; 1833 blob->active.clusters[3] = 0; 1834 blob->active.clusters[6] = 0; 1835 blob->active.clusters[8] = 0; 1836 1837 /* Unmap clusters by resizing to 0 */ 1838 spdk_blob_resize(blob, 0, blob_op_complete, NULL); 1839 poll_threads(); 1840 CU_ASSERT(g_bserrno == 0); 1841 1842 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1843 poll_threads(); 1844 CU_ASSERT(g_bserrno == 0); 1845 1846 /* Confirm that only 'allocated' clusters were unmapped */ 1847 for (i = 1; i < 11; i++) { 1848 switch (i) { 1849 case 2: 1850 case 3: 1851 case 4: 1852 case 7: 1853 case 9: 1854 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF); 1855 break; 1856 default: 1857 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0); 1858 break; 1859 } 1860 } 1861 1862 spdk_bs_free_io_channel(channel); 1863 poll_threads(); 1864 1865 ut_blob_close_and_delete(bs, blob); 1866 } 1867 1868 static void 1869 blob_iter(void) 1870 { 1871 struct spdk_blob_store *bs = g_bs; 1872 struct spdk_blob *blob; 1873 spdk_blob_id blobid; 1874 struct spdk_blob_opts blob_opts; 1875 1876 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1877 poll_threads(); 1878 CU_ASSERT(g_blob == NULL); 1879 CU_ASSERT(g_bserrno == -ENOENT); 1880 1881 ut_spdk_blob_opts_init(&blob_opts); 1882 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 1883 poll_threads(); 1884 CU_ASSERT(g_bserrno == 0); 
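/* Creation succeeded; with exactly one blob in the store, the iterator calls below
 * should return it once and then terminate with -ENOENT. */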
1885 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1886 blobid = g_blobid; 1887 1888 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1889 poll_threads(); 1890 CU_ASSERT(g_blob != NULL); 1891 CU_ASSERT(g_bserrno == 0); 1892 blob = g_blob; 1893 CU_ASSERT(spdk_blob_get_id(blob) == blobid); 1894 1895 spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL); 1896 poll_threads(); 1897 CU_ASSERT(g_blob == NULL); 1898 CU_ASSERT(g_bserrno == -ENOENT); 1899 } 1900 1901 static void 1902 blob_xattr(void) 1903 { 1904 struct spdk_blob_store *bs = g_bs; 1905 struct spdk_blob *blob = g_blob; 1906 spdk_blob_id blobid = spdk_blob_get_id(blob); 1907 uint64_t length; 1908 int rc; 1909 const char *name1, *name2; 1910 const void *value; 1911 size_t value_len; 1912 struct spdk_xattr_names *names; 1913 1914 /* Test that set_xattr fails if md_ro flag is set. */ 1915 blob->md_ro = true; 1916 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 1917 CU_ASSERT(rc == -EPERM); 1918 1919 blob->md_ro = false; 1920 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 1921 CU_ASSERT(rc == 0); 1922 1923 length = 2345; 1924 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 1925 CU_ASSERT(rc == 0); 1926 1927 /* Overwrite "length" xattr. */ 1928 length = 3456; 1929 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 1930 CU_ASSERT(rc == 0); 1931 1932 /* get_xattr should still work even if md_ro flag is set. */ 1933 value = NULL; 1934 blob->md_ro = true; 1935 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 1936 CU_ASSERT(rc == 0); 1937 SPDK_CU_ASSERT_FATAL(value != NULL); 1938 CU_ASSERT(*(uint64_t *)value == length); 1939 CU_ASSERT(value_len == 8); 1940 blob->md_ro = false; 1941 1942 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 1943 CU_ASSERT(rc == -ENOENT); 1944 1945 names = NULL; 1946 rc = spdk_blob_get_xattr_names(blob, &names); 1947 CU_ASSERT(rc == 0); 1948 SPDK_CU_ASSERT_FATAL(names != NULL); 1949 CU_ASSERT(spdk_xattr_names_get_count(names) == 2); 1950 name1 = spdk_xattr_names_get_name(names, 0); 1951 SPDK_CU_ASSERT_FATAL(name1 != NULL); 1952 CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length")); 1953 name2 = spdk_xattr_names_get_name(names, 1); 1954 SPDK_CU_ASSERT_FATAL(name2 != NULL); 1955 CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length")); 1956 CU_ASSERT(strcmp(name1, name2)); 1957 spdk_xattr_names_free(names); 1958 1959 /* Confirm that remove_xattr fails if md_ro is set to true. 
*/ 1960 blob->md_ro = true; 1961 rc = spdk_blob_remove_xattr(blob, "name"); 1962 CU_ASSERT(rc == -EPERM); 1963 1964 blob->md_ro = false; 1965 rc = spdk_blob_remove_xattr(blob, "name"); 1966 CU_ASSERT(rc == 0); 1967 1968 rc = spdk_blob_remove_xattr(blob, "foobar"); 1969 CU_ASSERT(rc == -ENOENT); 1970 1971 /* Set internal xattr */ 1972 length = 7898; 1973 rc = _spdk_blob_set_xattr(blob, "internal", &length, sizeof(length), true); 1974 CU_ASSERT(rc == 0); 1975 rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, true); 1976 CU_ASSERT(rc == 0); 1977 CU_ASSERT(*(uint64_t *)value == length); 1978 /* try to get public xattr with same name */ 1979 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 1980 CU_ASSERT(rc != 0); 1981 rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, false); 1982 CU_ASSERT(rc != 0); 1983 /* Check if SPDK_BLOB_INTERNAL_XATTR is set */ 1984 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 1985 SPDK_BLOB_INTERNAL_XATTR); 1986 1987 spdk_blob_close(blob, blob_op_complete, NULL); 1988 poll_threads(); 1989 1990 /* Check if xattrs are persisted */ 1991 ut_bs_reload(&bs, NULL); 1992 1993 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 1994 poll_threads(); 1995 CU_ASSERT(g_bserrno == 0); 1996 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 1997 blob = g_blob; 1998 1999 rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2000 CU_ASSERT(rc == 0); 2001 CU_ASSERT(*(uint64_t *)value == length); 2002 2003 /* try to get internal xattr trough public call */ 2004 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2005 CU_ASSERT(rc != 0); 2006 2007 rc = _spdk_blob_remove_xattr(blob, "internal", true); 2008 CU_ASSERT(rc == 0); 2009 2010 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0); 2011 } 2012 2013 static void 2014 bs_load(void) 2015 { 2016 struct spdk_blob_store *bs; 2017 struct spdk_bs_dev *dev; 2018 spdk_blob_id blobid; 2019 struct spdk_blob *blob; 2020 struct spdk_bs_super_block *super_block; 2021 uint64_t length; 2022 int rc; 2023 const void *value; 2024 size_t value_len; 2025 struct spdk_bs_opts opts; 2026 struct spdk_blob_opts blob_opts; 2027 2028 dev = init_dev(); 2029 spdk_bs_opts_init(&opts); 2030 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2031 2032 /* Initialize a new blob store */ 2033 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2034 poll_threads(); 2035 CU_ASSERT(g_bserrno == 0); 2036 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2037 bs = g_bs; 2038 2039 /* Try to open a blobid that does not exist */ 2040 spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL); 2041 poll_threads(); 2042 CU_ASSERT(g_bserrno == -ENOENT); 2043 CU_ASSERT(g_blob == NULL); 2044 2045 /* Create a blob */ 2046 blob = ut_blob_create_and_open(bs, NULL); 2047 blobid = spdk_blob_get_id(blob); 2048 2049 /* Try again to open valid blob but without the upper bit set */ 2050 spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL); 2051 poll_threads(); 2052 CU_ASSERT(g_bserrno == -ENOENT); 2053 CU_ASSERT(g_blob == NULL); 2054 2055 /* Set some xattrs */ 2056 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2057 CU_ASSERT(rc == 0); 2058 2059 length = 2345; 2060 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2061 CU_ASSERT(rc == 0); 2062 2063 /* Resize the blob */ 2064 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2065 poll_threads(); 2066 CU_ASSERT(g_bserrno == 0); 
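/* Close the blob and unload the store so that a clean super block (clean == 1) is on
 * disk before exercising the failing load paths below. */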
2067
2068 spdk_blob_close(blob, blob_op_complete, NULL);
2069 poll_threads();
2070 CU_ASSERT(g_bserrno == 0);
2071 blob = NULL;
2072 g_blob = NULL;
2073 g_blobid = SPDK_BLOBID_INVALID;
2074
2075 /* Unload the blob store */
2076 spdk_bs_unload(bs, bs_op_complete, NULL);
2077 poll_threads();
2078 CU_ASSERT(g_bserrno == 0);
2079 g_bs = NULL;
2080 g_blob = NULL;
2081 g_blobid = 0;
2082
2083 super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2084 CU_ASSERT(super_block->clean == 1);
2085
2086 /* Load should fail for device with an unsupported blocklen */
2087 dev = init_dev();
2088 dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2089 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2090 poll_threads();
2091 CU_ASSERT(g_bserrno == -EINVAL);
2092
2093 /* Load should fail when max_md_ops is set to zero */
2094 dev = init_dev();
2095 spdk_bs_opts_init(&opts);
2096 opts.max_md_ops = 0;
2097 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2098 poll_threads();
2099 CU_ASSERT(g_bserrno == -EINVAL);
2100
2101 /* Load should fail when max_channel_ops is set to zero */
2102 dev = init_dev();
2103 spdk_bs_opts_init(&opts);
2104 opts.max_channel_ops = 0;
2105 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2106 poll_threads();
2107 CU_ASSERT(g_bserrno == -EINVAL);
2108
2109 /* Load an existing blob store */
2110 dev = init_dev();
2111 spdk_bs_opts_init(&opts);
2112 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2113 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2114 poll_threads();
2115 CU_ASSERT(g_bserrno == 0);
2116 SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2117 bs = g_bs;
2118
2119 super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2120 CU_ASSERT(super_block->clean == 1);
2121 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2122
2123 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2124 poll_threads();
2125 CU_ASSERT(g_bserrno == 0);
2126 CU_ASSERT(g_blob != NULL);
2127 blob = g_blob;
2128
2129 /* Verify that blobstore is marked dirty after first metadata sync */
2130 spdk_blob_sync_md(blob, blob_op_complete, NULL);
2131 CU_ASSERT(super_block->clean == 1);
2132
2133 /* Get the xattrs */
2134 value = NULL;
2135 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2136 CU_ASSERT(rc == 0);
2137 SPDK_CU_ASSERT_FATAL(value != NULL);
2138 CU_ASSERT(*(uint64_t *)value == length);
2139 CU_ASSERT(value_len == 8);
2140
2141 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2142 CU_ASSERT(rc == -ENOENT);
2143
2144 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2145
2146 spdk_blob_close(blob, blob_op_complete, NULL);
2147 poll_threads();
2148 CU_ASSERT(g_bserrno == 0);
2149 blob = NULL;
2150 g_blob = NULL;
2151
2152 spdk_bs_unload(bs, bs_op_complete, NULL);
2153 poll_threads();
2154 CU_ASSERT(g_bserrno == 0);
2155 g_bs = NULL;
2156
2157 /* Load should fail: bdev size < saved size */
2158 dev = init_dev();
2159 dev->blockcnt /= 2;
2160
2161 spdk_bs_opts_init(&opts);
2162 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2163 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2164 poll_threads();
2165
2166 CU_ASSERT(g_bserrno == -EILSEQ);
2167
2168 /* Load should succeed: bdev size > saved size */
2169 dev = init_dev();
2170 dev->blockcnt *= 4;
2171
2172 spdk_bs_opts_init(&opts);
2173 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2174 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2175 poll_threads();
2176 CU_ASSERT(g_bserrno
== 0); 2177 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2178 bs = g_bs; 2179 2180 CU_ASSERT(g_bserrno == 0); 2181 spdk_bs_unload(bs, bs_op_complete, NULL); 2182 poll_threads(); 2183 2184 2185 /* Test compatibility mode */ 2186 2187 dev = init_dev(); 2188 super_block->size = 0; 2189 super_block->crc = _spdk_blob_md_page_calc_crc(super_block); 2190 2191 spdk_bs_opts_init(&opts); 2192 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2193 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2194 poll_threads(); 2195 CU_ASSERT(g_bserrno == 0); 2196 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2197 bs = g_bs; 2198 2199 /* Create a blob */ 2200 ut_spdk_blob_opts_init(&blob_opts); 2201 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2202 poll_threads(); 2203 CU_ASSERT(g_bserrno == 0); 2204 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2205 2206 /* Blobstore should update number of blocks in super_block */ 2207 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2208 CU_ASSERT(super_block->clean == 0); 2209 2210 spdk_bs_unload(bs, bs_op_complete, NULL); 2211 poll_threads(); 2212 CU_ASSERT(g_bserrno == 0); 2213 CU_ASSERT(super_block->clean == 1); 2214 g_bs = NULL; 2215 2216 } 2217 2218 static void 2219 bs_load_pending_removal(void) 2220 { 2221 struct spdk_blob_store *bs = g_bs; 2222 struct spdk_blob_opts opts; 2223 struct spdk_blob *blob, *snapshot; 2224 spdk_blob_id blobid, snapshotid; 2225 const void *value; 2226 size_t value_len; 2227 int rc; 2228 2229 /* Create blob */ 2230 ut_spdk_blob_opts_init(&opts); 2231 opts.num_clusters = 10; 2232 2233 blob = ut_blob_create_and_open(bs, &opts); 2234 blobid = spdk_blob_get_id(blob); 2235 2236 /* Create snapshot */ 2237 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 2238 poll_threads(); 2239 CU_ASSERT(g_bserrno == 0); 2240 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2241 snapshotid = g_blobid; 2242 2243 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2244 poll_threads(); 2245 CU_ASSERT(g_bserrno == 0); 2246 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2247 snapshot = g_blob; 2248 2249 /* Set SNAPSHOT_PENDING_REMOVAL xattr */ 2250 snapshot->md_ro = false; 2251 rc = _spdk_blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2252 CU_ASSERT(rc == 0); 2253 snapshot->md_ro = true; 2254 2255 spdk_blob_close(snapshot, blob_op_complete, NULL); 2256 poll_threads(); 2257 CU_ASSERT(g_bserrno == 0); 2258 2259 spdk_blob_close(blob, blob_op_complete, NULL); 2260 poll_threads(); 2261 CU_ASSERT(g_bserrno == 0); 2262 2263 /* Reload blobstore */ 2264 ut_bs_reload(&bs, NULL); 2265 2266 /* Snapshot should not be removed as blob is still pointing to it */ 2267 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2268 poll_threads(); 2269 CU_ASSERT(g_bserrno == 0); 2270 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2271 snapshot = g_blob; 2272 2273 /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ 2274 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 2275 CU_ASSERT(rc != 0); 2276 2277 /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ 2278 snapshot->md_ro = false; 2279 rc = _spdk_blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2280 CU_ASSERT(rc == 0); 2281 snapshot->md_ro = true; 2282 2283 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2284 poll_threads(); 2285 CU_ASSERT(g_bserrno == 0); 2286 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2287 
blob = g_blob; 2288 2289 /* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ 2290 _spdk_blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 2291 2292 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2293 poll_threads(); 2294 CU_ASSERT(g_bserrno == 0); 2295 2296 spdk_blob_close(snapshot, blob_op_complete, NULL); 2297 poll_threads(); 2298 CU_ASSERT(g_bserrno == 0); 2299 2300 spdk_blob_close(blob, blob_op_complete, NULL); 2301 poll_threads(); 2302 CU_ASSERT(g_bserrno == 0); 2303 2304 /* Reload blobstore */ 2305 ut_bs_reload(&bs, NULL); 2306 2307 /* Snapshot should be removed as blob is not pointing to it anymore */ 2308 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2309 poll_threads(); 2310 CU_ASSERT(g_bserrno != 0); 2311 } 2312 2313 static void 2314 bs_load_custom_cluster_size(void) 2315 { 2316 struct spdk_blob_store *bs; 2317 struct spdk_bs_dev *dev; 2318 struct spdk_bs_super_block *super_block; 2319 struct spdk_bs_opts opts; 2320 uint32_t custom_cluster_size = 4194304; /* 4MiB */ 2321 uint32_t cluster_sz; 2322 uint64_t total_clusters; 2323 2324 dev = init_dev(); 2325 spdk_bs_opts_init(&opts); 2326 opts.cluster_sz = custom_cluster_size; 2327 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2328 2329 /* Initialize a new blob store */ 2330 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2331 poll_threads(); 2332 CU_ASSERT(g_bserrno == 0); 2333 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2334 bs = g_bs; 2335 cluster_sz = bs->cluster_sz; 2336 total_clusters = bs->total_clusters; 2337 2338 /* Unload the blob store */ 2339 spdk_bs_unload(bs, bs_op_complete, NULL); 2340 poll_threads(); 2341 CU_ASSERT(g_bserrno == 0); 2342 g_bs = NULL; 2343 g_blob = NULL; 2344 g_blobid = 0; 2345 2346 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2347 CU_ASSERT(super_block->clean == 1); 2348 2349 /* Load an existing blob store */ 2350 dev = init_dev(); 2351 spdk_bs_opts_init(&opts); 2352 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2353 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2354 poll_threads(); 2355 CU_ASSERT(g_bserrno == 0); 2356 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2357 bs = g_bs; 2358 /* Compare cluster size and number to one after initialization */ 2359 CU_ASSERT(cluster_sz == bs->cluster_sz); 2360 CU_ASSERT(total_clusters == bs->total_clusters); 2361 2362 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2363 CU_ASSERT(super_block->clean == 1); 2364 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2365 2366 spdk_bs_unload(bs, bs_op_complete, NULL); 2367 poll_threads(); 2368 CU_ASSERT(g_bserrno == 0); 2369 CU_ASSERT(super_block->clean == 1); 2370 g_bs = NULL; 2371 } 2372 2373 static void 2374 bs_type(void) 2375 { 2376 struct spdk_blob_store *bs; 2377 struct spdk_bs_dev *dev; 2378 struct spdk_bs_opts opts; 2379 2380 dev = init_dev(); 2381 spdk_bs_opts_init(&opts); 2382 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2383 2384 /* Initialize a new blob store */ 2385 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2386 poll_threads(); 2387 CU_ASSERT(g_bserrno == 0); 2388 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2389 bs = g_bs; 2390 2391 /* Unload the blob store */ 2392 spdk_bs_unload(bs, bs_op_complete, NULL); 2393 poll_threads(); 2394 CU_ASSERT(g_bserrno == 0); 2395 g_bs = NULL; 2396 g_blob = NULL; 2397 g_blobid = 0; 2398 2399 /* Load non existing blobstore type */ 2400 dev = init_dev(); 2401 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), 
"NONEXISTING"); 2402 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2403 poll_threads(); 2404 CU_ASSERT(g_bserrno != 0); 2405 2406 /* Load with empty blobstore type */ 2407 dev = init_dev(); 2408 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2409 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2410 poll_threads(); 2411 CU_ASSERT(g_bserrno == 0); 2412 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2413 bs = g_bs; 2414 2415 spdk_bs_unload(bs, bs_op_complete, NULL); 2416 poll_threads(); 2417 CU_ASSERT(g_bserrno == 0); 2418 g_bs = NULL; 2419 2420 /* Initialize a new blob store with empty bstype */ 2421 dev = init_dev(); 2422 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2423 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2424 poll_threads(); 2425 CU_ASSERT(g_bserrno == 0); 2426 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2427 bs = g_bs; 2428 2429 spdk_bs_unload(bs, bs_op_complete, NULL); 2430 poll_threads(); 2431 CU_ASSERT(g_bserrno == 0); 2432 g_bs = NULL; 2433 2434 /* Load non existing blobstore type */ 2435 dev = init_dev(); 2436 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2437 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2438 poll_threads(); 2439 CU_ASSERT(g_bserrno != 0); 2440 2441 /* Load with empty blobstore type */ 2442 dev = init_dev(); 2443 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2444 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2445 poll_threads(); 2446 CU_ASSERT(g_bserrno == 0); 2447 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2448 bs = g_bs; 2449 2450 spdk_bs_unload(bs, bs_op_complete, NULL); 2451 poll_threads(); 2452 CU_ASSERT(g_bserrno == 0); 2453 g_bs = NULL; 2454 } 2455 2456 static void 2457 bs_super_block(void) 2458 { 2459 struct spdk_blob_store *bs; 2460 struct spdk_bs_dev *dev; 2461 struct spdk_bs_super_block *super_block; 2462 struct spdk_bs_opts opts; 2463 struct spdk_bs_super_block_ver1 super_block_v1; 2464 2465 dev = init_dev(); 2466 spdk_bs_opts_init(&opts); 2467 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2468 2469 /* Initialize a new blob store */ 2470 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2471 poll_threads(); 2472 CU_ASSERT(g_bserrno == 0); 2473 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2474 bs = g_bs; 2475 2476 /* Unload the blob store */ 2477 spdk_bs_unload(bs, bs_op_complete, NULL); 2478 poll_threads(); 2479 CU_ASSERT(g_bserrno == 0); 2480 g_bs = NULL; 2481 g_blob = NULL; 2482 g_blobid = 0; 2483 2484 /* Load an existing blob store with version newer than supported */ 2485 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2486 super_block->version++; 2487 2488 dev = init_dev(); 2489 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2490 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2491 poll_threads(); 2492 CU_ASSERT(g_bserrno != 0); 2493 2494 /* Create a new blob store with super block version 1 */ 2495 dev = init_dev(); 2496 super_block_v1.version = 1; 2497 memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); 2498 super_block_v1.length = 0x1000; 2499 super_block_v1.clean = 1; 2500 super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; 2501 super_block_v1.cluster_size = 0x100000; 2502 super_block_v1.used_page_mask_start = 0x01; 2503 super_block_v1.used_page_mask_len = 0x01; 2504 super_block_v1.used_cluster_mask_start = 0x02; 2505 super_block_v1.used_cluster_mask_len = 0x01; 2506 super_block_v1.md_start = 0x03; 2507 super_block_v1.md_len = 0x40; 2508 
memset(super_block_v1.reserved, 0, 4036); 2509 super_block_v1.crc = _spdk_blob_md_page_calc_crc(&super_block_v1); 2510 memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); 2511 2512 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2513 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2514 poll_threads(); 2515 CU_ASSERT(g_bserrno == 0); 2516 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2517 bs = g_bs; 2518 2519 spdk_bs_unload(bs, bs_op_complete, NULL); 2520 poll_threads(); 2521 CU_ASSERT(g_bserrno == 0); 2522 g_bs = NULL; 2523 } 2524 2525 /* 2526 * Create a blobstore and then unload it. 2527 */ 2528 static void 2529 bs_unload(void) 2530 { 2531 struct spdk_blob_store *bs = g_bs; 2532 struct spdk_blob *blob; 2533 2534 /* Create a blob and open it. */ 2535 blob = ut_blob_create_and_open(bs, NULL); 2536 2537 /* Try to unload blobstore, should fail with open blob */ 2538 g_bserrno = -1; 2539 spdk_bs_unload(bs, bs_op_complete, NULL); 2540 poll_threads(); 2541 CU_ASSERT(g_bserrno == -EBUSY); 2542 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2543 2544 /* Close the blob, then successfully unload blobstore */ 2545 g_bserrno = -1; 2546 spdk_blob_close(blob, blob_op_complete, NULL); 2547 poll_threads(); 2548 CU_ASSERT(g_bserrno == 0); 2549 } 2550 2551 /* 2552 * Create a blobstore with a cluster size different than the default, and ensure it is 2553 * persisted. 2554 */ 2555 static void 2556 bs_cluster_sz(void) 2557 { 2558 struct spdk_blob_store *bs; 2559 struct spdk_bs_dev *dev; 2560 struct spdk_bs_opts opts; 2561 uint32_t cluster_sz; 2562 2563 /* Set cluster size to zero */ 2564 dev = init_dev(); 2565 spdk_bs_opts_init(&opts); 2566 opts.cluster_sz = 0; 2567 2568 /* Initialize a new blob store */ 2569 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2570 poll_threads(); 2571 CU_ASSERT(g_bserrno == -EINVAL); 2572 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2573 2574 /* 2575 * Set cluster size to blobstore page size, 2576 * to work it is required to be at least twice the blobstore page size. 2577 */ 2578 dev = init_dev(); 2579 spdk_bs_opts_init(&opts); 2580 opts.cluster_sz = SPDK_BS_PAGE_SIZE; 2581 2582 /* Initialize a new blob store */ 2583 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2584 poll_threads(); 2585 CU_ASSERT(g_bserrno == -ENOMEM); 2586 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2587 2588 /* 2589 * Set cluster size to lower than page size, 2590 * to work it is required to be at least twice the blobstore page size. 
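 * Unlike the equal-to-page-size case above, which fails with -ENOMEM, a cluster
 * size below the page size is rejected outright with -EINVAL.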
2591 */ 2592 dev = init_dev(); 2593 spdk_bs_opts_init(&opts); 2594 opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; 2595 2596 /* Initialize a new blob store */ 2597 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2598 poll_threads(); 2599 CU_ASSERT(g_bserrno == -EINVAL); 2600 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2601 2602 /* Set cluster size to twice the default */ 2603 dev = init_dev(); 2604 spdk_bs_opts_init(&opts); 2605 opts.cluster_sz *= 2; 2606 cluster_sz = opts.cluster_sz; 2607 2608 /* Initialize a new blob store */ 2609 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2610 poll_threads(); 2611 CU_ASSERT(g_bserrno == 0); 2612 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2613 bs = g_bs; 2614 2615 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2616 2617 ut_bs_reload(&bs, &opts); 2618 2619 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2620 2621 spdk_bs_unload(bs, bs_op_complete, NULL); 2622 poll_threads(); 2623 CU_ASSERT(g_bserrno == 0); 2624 g_bs = NULL; 2625 } 2626 2627 /* 2628 * Create a blobstore, reload it and ensure total usable cluster count 2629 * stays the same. 2630 */ 2631 static void 2632 bs_usable_clusters(void) 2633 { 2634 struct spdk_blob_store *bs = g_bs; 2635 struct spdk_blob *blob; 2636 uint32_t clusters; 2637 int i; 2638 2639 2640 clusters = spdk_bs_total_data_cluster_count(bs); 2641 2642 ut_bs_reload(&bs, NULL); 2643 2644 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2645 2646 /* Create and resize blobs to make sure that useable cluster count won't change */ 2647 for (i = 0; i < 4; i++) { 2648 g_bserrno = -1; 2649 g_blobid = SPDK_BLOBID_INVALID; 2650 blob = ut_blob_create_and_open(bs, NULL); 2651 2652 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2653 poll_threads(); 2654 CU_ASSERT(g_bserrno == 0); 2655 2656 g_bserrno = -1; 2657 spdk_blob_close(blob, blob_op_complete, NULL); 2658 poll_threads(); 2659 CU_ASSERT(g_bserrno == 0); 2660 2661 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2662 } 2663 2664 /* Reload the blob store to make sure that nothing changed */ 2665 ut_bs_reload(&bs, NULL); 2666 2667 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2668 } 2669 2670 /* 2671 * Test resizing of the metadata blob. This requires creating enough blobs 2672 * so that one cluster is not enough to fit the metadata for those blobs. 2673 * To induce this condition to happen more quickly, we reduce the cluster 2674 * size to 16KB, which means only 4 4KB blob metadata pages can fit. 
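 * With CLUSTER_PAGE_COUNT = 4 and NUM_BLOBS = 4 * 4 = 16, at least 16 metadata
 * pages (one per blob) are needed, so the metadata blob must grow beyond its
 * initial 4-page cluster.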
2675 */ 2676 static void 2677 bs_resize_md(void) 2678 { 2679 struct spdk_blob_store *bs; 2680 const int CLUSTER_PAGE_COUNT = 4; 2681 const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; 2682 struct spdk_bs_dev *dev; 2683 struct spdk_bs_opts opts; 2684 struct spdk_blob *blob; 2685 struct spdk_blob_opts blob_opts; 2686 uint32_t cluster_sz; 2687 spdk_blob_id blobids[NUM_BLOBS]; 2688 int i; 2689 2690 2691 dev = init_dev(); 2692 spdk_bs_opts_init(&opts); 2693 opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; 2694 cluster_sz = opts.cluster_sz; 2695 2696 /* Initialize a new blob store */ 2697 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2698 poll_threads(); 2699 CU_ASSERT(g_bserrno == 0); 2700 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2701 bs = g_bs; 2702 2703 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2704 2705 ut_spdk_blob_opts_init(&blob_opts); 2706 2707 for (i = 0; i < NUM_BLOBS; i++) { 2708 g_bserrno = -1; 2709 g_blobid = SPDK_BLOBID_INVALID; 2710 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2711 poll_threads(); 2712 CU_ASSERT(g_bserrno == 0); 2713 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2714 blobids[i] = g_blobid; 2715 } 2716 2717 ut_bs_reload(&bs, &opts); 2718 2719 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2720 2721 for (i = 0; i < NUM_BLOBS; i++) { 2722 g_bserrno = -1; 2723 g_blob = NULL; 2724 spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); 2725 poll_threads(); 2726 CU_ASSERT(g_bserrno == 0); 2727 CU_ASSERT(g_blob != NULL); 2728 blob = g_blob; 2729 g_bserrno = -1; 2730 spdk_blob_close(blob, blob_op_complete, NULL); 2731 poll_threads(); 2732 CU_ASSERT(g_bserrno == 0); 2733 } 2734 2735 spdk_bs_unload(bs, bs_op_complete, NULL); 2736 poll_threads(); 2737 CU_ASSERT(g_bserrno == 0); 2738 g_bs = NULL; 2739 } 2740 2741 static void 2742 bs_destroy(void) 2743 { 2744 struct spdk_blob_store *bs; 2745 struct spdk_bs_dev *dev; 2746 2747 /* Initialize a new blob store */ 2748 dev = init_dev(); 2749 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2750 poll_threads(); 2751 CU_ASSERT(g_bserrno == 0); 2752 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2753 bs = g_bs; 2754 2755 /* Destroy the blob store */ 2756 g_bserrno = -1; 2757 spdk_bs_destroy(bs, bs_op_complete, NULL); 2758 poll_threads(); 2759 CU_ASSERT(g_bserrno == 0); 2760 2761 /* Loading an non-existent blob store should fail. 
*/ 2762 g_bs = NULL; 2763 dev = init_dev(); 2764 2765 g_bserrno = 0; 2766 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2767 poll_threads(); 2768 CU_ASSERT(g_bserrno != 0); 2769 } 2770 2771 /* Try to hit all of the corner cases associated with serializing 2772 * a blob to disk 2773 */ 2774 static void 2775 blob_serialize(void) 2776 { 2777 struct spdk_bs_dev *dev; 2778 struct spdk_bs_opts opts; 2779 struct spdk_blob_store *bs; 2780 spdk_blob_id blobid[2]; 2781 struct spdk_blob *blob[2]; 2782 uint64_t i; 2783 char *value; 2784 int rc; 2785 2786 dev = init_dev(); 2787 2788 /* Initialize a new blobstore with very small clusters */ 2789 spdk_bs_opts_init(&opts); 2790 opts.cluster_sz = dev->blocklen * 8; 2791 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2792 poll_threads(); 2793 CU_ASSERT(g_bserrno == 0); 2794 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2795 bs = g_bs; 2796 2797 /* Create and open two blobs */ 2798 for (i = 0; i < 2; i++) { 2799 blob[i] = ut_blob_create_and_open(bs, NULL); 2800 blobid[i] = spdk_blob_get_id(blob[i]); 2801 2802 /* Set a fairly large xattr on both blobs to eat up 2803 * metadata space 2804 */ 2805 value = calloc(dev->blocklen - 64, sizeof(char)); 2806 SPDK_CU_ASSERT_FATAL(value != NULL); 2807 memset(value, i, dev->blocklen / 2); 2808 rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); 2809 CU_ASSERT(rc == 0); 2810 free(value); 2811 } 2812 2813 /* Resize the blobs, alternating 1 cluster at a time. 2814 * This thwarts run length encoding and will cause spill 2815 * over of the extents. 2816 */ 2817 for (i = 0; i < 6; i++) { 2818 spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); 2819 poll_threads(); 2820 CU_ASSERT(g_bserrno == 0); 2821 } 2822 2823 for (i = 0; i < 2; i++) { 2824 spdk_blob_sync_md(blob[i], blob_op_complete, NULL); 2825 poll_threads(); 2826 CU_ASSERT(g_bserrno == 0); 2827 } 2828 2829 /* Close the blobs */ 2830 for (i = 0; i < 2; i++) { 2831 spdk_blob_close(blob[i], blob_op_complete, NULL); 2832 poll_threads(); 2833 CU_ASSERT(g_bserrno == 0); 2834 } 2835 2836 ut_bs_reload(&bs, &opts); 2837 2838 for (i = 0; i < 2; i++) { 2839 blob[i] = NULL; 2840 2841 spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); 2842 poll_threads(); 2843 CU_ASSERT(g_bserrno == 0); 2844 CU_ASSERT(g_blob != NULL); 2845 blob[i] = g_blob; 2846 2847 CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); 2848 2849 spdk_blob_close(blob[i], blob_op_complete, NULL); 2850 poll_threads(); 2851 CU_ASSERT(g_bserrno == 0); 2852 } 2853 2854 spdk_bs_unload(bs, bs_op_complete, NULL); 2855 poll_threads(); 2856 CU_ASSERT(g_bserrno == 0); 2857 g_bs = NULL; 2858 } 2859 2860 static void 2861 blob_crc(void) 2862 { 2863 struct spdk_blob_store *bs = g_bs; 2864 struct spdk_blob *blob; 2865 spdk_blob_id blobid; 2866 uint32_t page_num; 2867 int index; 2868 struct spdk_blob_md_page *page; 2869 2870 blob = ut_blob_create_and_open(bs, NULL); 2871 blobid = spdk_blob_get_id(blob); 2872 2873 spdk_blob_close(blob, blob_op_complete, NULL); 2874 poll_threads(); 2875 CU_ASSERT(g_bserrno == 0); 2876 2877 page_num = _spdk_bs_blobid_to_page(blobid); 2878 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 2879 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 2880 page->crc = 0; 2881 2882 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2883 poll_threads(); 2884 CU_ASSERT(g_bserrno == -EINVAL); 2885 CU_ASSERT(g_blob == NULL); 2886 g_bserrno = 0; 2887 2888 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 2889 
poll_threads();
2890 CU_ASSERT(g_bserrno == -EINVAL);
2891 }
2892
2893 static void
2894 super_block_crc(void)
2895 {
2896 struct spdk_blob_store *bs;
2897 struct spdk_bs_dev *dev;
2898 struct spdk_bs_super_block *super_block;
2899
2900 dev = init_dev();
2901 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2902 poll_threads();
2903 CU_ASSERT(g_bserrno == 0);
2904 SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2905 bs = g_bs;
2906
2907 spdk_bs_unload(bs, bs_op_complete, NULL);
2908 poll_threads();
2909 CU_ASSERT(g_bserrno == 0);
2910 g_bs = NULL;
2911
2912 super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2913 super_block->crc = 0;
2914 dev = init_dev();
2915
2916 /* Load an existing blob store */
2917 g_bserrno = 0;
2918 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2919 poll_threads();
2920 CU_ASSERT(g_bserrno == -EILSEQ);
2921 }
2922
2923 /* For the blob dirty shutdown test we run the following sub-test cases:
2924 * 1 Initialize a new blob store and create one super blob with some xattrs, then
2925 * dirty shutdown, reload the blob store and verify the xattrs.
2926 * 2 Resize the blob from 10 clusters to 20 clusters, then dirty shutdown,
2927 * reload the blob store and verify the cluster count.
2928 * 3 Create a second blob, then dirty shutdown, reload the blob store
2929 * and verify the second blob.
2930 * 4 Delete the second blob, then dirty shutdown, reload the blob store
2931 * and verify that the second blob is invalid.
2932 * 5 Create the second blob again and also create a third blob, corrupt the
2933 * md of the second blob so that it becomes invalid, then dirty shutdown,
2934 * reload the blob store and verify that the second blob is invalid and
2935 * the third blob is correct.
2936 */
2937 static void
2938 blob_dirty_shutdown(void)
2939 {
2940 int rc;
2941 int index;
2942 struct spdk_blob_store *bs = g_bs;
2943 spdk_blob_id blobid1, blobid2, blobid3;
2944 struct spdk_blob *blob = g_blob;
2945 uint64_t length;
2946 uint64_t free_clusters;
2947 const void *value;
2948 size_t value_len;
2949 uint32_t page_num;
2950 struct spdk_blob_md_page *page;
2951 struct spdk_blob_opts blob_opts;
2952
2953 /* Create first blob */
2954 blobid1 = spdk_blob_get_id(blob);
2955
2956 /* Set some xattrs */
2957 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2958 CU_ASSERT(rc == 0);
2959
2960 length = 2345;
2961 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2962 CU_ASSERT(rc == 0);
2963
2964 /* Put an xattr that fits exactly in a single page.
2965 * This results in additional pages being added to the MD.
2966 * The first holds the flags and the smaller xattrs, the second the large xattr,
2967 * and the third just the extents.
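 * (The 4072-byte budget below matches the descriptors[] area of struct
 * spdk_blob_md_page: a 4 KiB page minus its id/sequence header and the
 * next/crc trailer.)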
2968 */ 2969 size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - 2970 strlen("large_xattr"); 2971 char *xattr = calloc(xattr_length, sizeof(char)); 2972 SPDK_CU_ASSERT_FATAL(xattr != NULL); 2973 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 2974 free(xattr); 2975 SPDK_CU_ASSERT_FATAL(rc == 0); 2976 2977 /* Resize the blob */ 2978 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2979 poll_threads(); 2980 CU_ASSERT(g_bserrno == 0); 2981 2982 /* Set the blob as the super blob */ 2983 spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); 2984 poll_threads(); 2985 CU_ASSERT(g_bserrno == 0); 2986 2987 free_clusters = spdk_bs_free_cluster_count(bs); 2988 2989 spdk_blob_close(blob, blob_op_complete, NULL); 2990 poll_threads(); 2991 CU_ASSERT(g_bserrno == 0); 2992 blob = NULL; 2993 g_blob = NULL; 2994 g_blobid = SPDK_BLOBID_INVALID; 2995 2996 ut_bs_dirty_load(&bs, NULL); 2997 2998 /* Get the super blob */ 2999 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 3000 poll_threads(); 3001 CU_ASSERT(g_bserrno == 0); 3002 CU_ASSERT(blobid1 == g_blobid); 3003 3004 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3005 poll_threads(); 3006 CU_ASSERT(g_bserrno == 0); 3007 CU_ASSERT(g_blob != NULL); 3008 blob = g_blob; 3009 3010 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3011 3012 /* Get the xattrs */ 3013 value = NULL; 3014 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3015 CU_ASSERT(rc == 0); 3016 SPDK_CU_ASSERT_FATAL(value != NULL); 3017 CU_ASSERT(*(uint64_t *)value == length); 3018 CU_ASSERT(value_len == 8); 3019 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3020 3021 /* Resize the blob */ 3022 spdk_blob_resize(blob, 20, blob_op_complete, NULL); 3023 poll_threads(); 3024 CU_ASSERT(g_bserrno == 0); 3025 3026 free_clusters = spdk_bs_free_cluster_count(bs); 3027 3028 spdk_blob_close(blob, blob_op_complete, NULL); 3029 poll_threads(); 3030 CU_ASSERT(g_bserrno == 0); 3031 blob = NULL; 3032 g_blob = NULL; 3033 g_blobid = SPDK_BLOBID_INVALID; 3034 3035 ut_bs_dirty_load(&bs, NULL); 3036 3037 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3038 poll_threads(); 3039 CU_ASSERT(g_bserrno == 0); 3040 CU_ASSERT(g_blob != NULL); 3041 blob = g_blob; 3042 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); 3043 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3044 3045 spdk_blob_close(blob, blob_op_complete, NULL); 3046 poll_threads(); 3047 CU_ASSERT(g_bserrno == 0); 3048 blob = NULL; 3049 g_blob = NULL; 3050 g_blobid = SPDK_BLOBID_INVALID; 3051 3052 /* Create second blob */ 3053 blob = ut_blob_create_and_open(bs, NULL); 3054 blobid2 = spdk_blob_get_id(blob); 3055 3056 /* Set some xattrs */ 3057 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3058 CU_ASSERT(rc == 0); 3059 3060 length = 5432; 3061 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3062 CU_ASSERT(rc == 0); 3063 3064 /* Resize the blob */ 3065 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3066 poll_threads(); 3067 CU_ASSERT(g_bserrno == 0); 3068 3069 free_clusters = spdk_bs_free_cluster_count(bs); 3070 3071 spdk_blob_close(blob, blob_op_complete, NULL); 3072 poll_threads(); 3073 CU_ASSERT(g_bserrno == 0); 3074 blob = NULL; 3075 g_blob = NULL; 3076 g_blobid = SPDK_BLOBID_INVALID; 3077 3078 ut_bs_dirty_load(&bs, NULL); 3079 3080 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3081 poll_threads(); 3082 CU_ASSERT(g_bserrno == 0); 3083 
CU_ASSERT(g_blob != NULL); 3084 blob = g_blob; 3085 3086 /* Get the xattrs */ 3087 value = NULL; 3088 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3089 CU_ASSERT(rc == 0); 3090 SPDK_CU_ASSERT_FATAL(value != NULL); 3091 CU_ASSERT(*(uint64_t *)value == length); 3092 CU_ASSERT(value_len == 8); 3093 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3094 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3095 3096 ut_blob_close_and_delete(bs, blob); 3097 3098 free_clusters = spdk_bs_free_cluster_count(bs); 3099 3100 ut_bs_dirty_load(&bs, NULL); 3101 3102 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3103 poll_threads(); 3104 CU_ASSERT(g_bserrno != 0); 3105 CU_ASSERT(g_blob == NULL); 3106 3107 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3108 poll_threads(); 3109 CU_ASSERT(g_bserrno == 0); 3110 CU_ASSERT(g_blob != NULL); 3111 blob = g_blob; 3112 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3113 spdk_blob_close(blob, blob_op_complete, NULL); 3114 poll_threads(); 3115 CU_ASSERT(g_bserrno == 0); 3116 3117 ut_bs_reload(&bs, NULL); 3118 3119 /* Create second blob */ 3120 ut_spdk_blob_opts_init(&blob_opts); 3121 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3122 poll_threads(); 3123 CU_ASSERT(g_bserrno == 0); 3124 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3125 blobid2 = g_blobid; 3126 3127 /* Create third blob */ 3128 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3129 poll_threads(); 3130 CU_ASSERT(g_bserrno == 0); 3131 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3132 blobid3 = g_blobid; 3133 3134 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3135 poll_threads(); 3136 CU_ASSERT(g_bserrno == 0); 3137 CU_ASSERT(g_blob != NULL); 3138 blob = g_blob; 3139 3140 /* Set some xattrs for second blob */ 3141 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3142 CU_ASSERT(rc == 0); 3143 3144 length = 5432; 3145 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3146 CU_ASSERT(rc == 0); 3147 3148 spdk_blob_close(blob, blob_op_complete, NULL); 3149 poll_threads(); 3150 CU_ASSERT(g_bserrno == 0); 3151 blob = NULL; 3152 g_blob = NULL; 3153 g_blobid = SPDK_BLOBID_INVALID; 3154 3155 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3156 poll_threads(); 3157 CU_ASSERT(g_bserrno == 0); 3158 CU_ASSERT(g_blob != NULL); 3159 blob = g_blob; 3160 3161 /* Set some xattrs for third blob */ 3162 rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); 3163 CU_ASSERT(rc == 0); 3164 3165 length = 5432; 3166 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3167 CU_ASSERT(rc == 0); 3168 3169 spdk_blob_close(blob, blob_op_complete, NULL); 3170 poll_threads(); 3171 CU_ASSERT(g_bserrno == 0); 3172 blob = NULL; 3173 g_blob = NULL; 3174 g_blobid = SPDK_BLOBID_INVALID; 3175 3176 /* Mark second blob as invalid */ 3177 page_num = _spdk_bs_blobid_to_page(blobid2); 3178 3179 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3180 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3181 page->sequence_num = 1; 3182 page->crc = _spdk_blob_md_page_calc_crc(page); 3183 3184 free_clusters = spdk_bs_free_cluster_count(bs); 3185 3186 ut_bs_dirty_load(&bs, NULL); 3187 3188 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3189 poll_threads(); 3190 CU_ASSERT(g_bserrno != 0); 3191 CU_ASSERT(g_blob == NULL); 3192 3193 spdk_bs_open_blob(bs, blobid3, 
blob_op_with_handle_complete, NULL); 3194 poll_threads(); 3195 CU_ASSERT(g_bserrno == 0); 3196 CU_ASSERT(g_blob != NULL); 3197 blob = g_blob; 3198 3199 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3200 } 3201 3202 static void 3203 blob_flags(void) 3204 { 3205 struct spdk_blob_store *bs = g_bs; 3206 spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; 3207 struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; 3208 struct spdk_blob_opts blob_opts; 3209 int rc; 3210 3211 /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */ 3212 blob_invalid = ut_blob_create_and_open(bs, NULL); 3213 blobid_invalid = spdk_blob_get_id(blob_invalid); 3214 3215 blob_data_ro = ut_blob_create_and_open(bs, NULL); 3216 blobid_data_ro = spdk_blob_get_id(blob_data_ro); 3217 3218 ut_spdk_blob_opts_init(&blob_opts); 3219 blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; 3220 blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); 3221 blobid_md_ro = spdk_blob_get_id(blob_md_ro); 3222 CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); 3223 3224 /* Change the size of blob_data_ro to check if flags are serialized 3225 * when blob has non zero number of extents */ 3226 spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); 3227 poll_threads(); 3228 CU_ASSERT(g_bserrno == 0); 3229 3230 /* Set the xattr to check if flags are serialized 3231 * when blob has non zero number of xattrs */ 3232 rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); 3233 CU_ASSERT(rc == 0); 3234 3235 blob_invalid->invalid_flags = (1ULL << 63); 3236 blob_invalid->state = SPDK_BLOB_STATE_DIRTY; 3237 blob_data_ro->data_ro_flags = (1ULL << 62); 3238 blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; 3239 blob_md_ro->md_ro_flags = (1ULL << 61); 3240 blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; 3241 3242 g_bserrno = -1; 3243 spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); 3244 poll_threads(); 3245 CU_ASSERT(g_bserrno == 0); 3246 g_bserrno = -1; 3247 spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); 3248 poll_threads(); 3249 CU_ASSERT(g_bserrno == 0); 3250 g_bserrno = -1; 3251 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3252 poll_threads(); 3253 CU_ASSERT(g_bserrno == 0); 3254 3255 g_bserrno = -1; 3256 spdk_blob_close(blob_invalid, blob_op_complete, NULL); 3257 poll_threads(); 3258 CU_ASSERT(g_bserrno == 0); 3259 blob_invalid = NULL; 3260 g_bserrno = -1; 3261 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3262 poll_threads(); 3263 CU_ASSERT(g_bserrno == 0); 3264 blob_data_ro = NULL; 3265 g_bserrno = -1; 3266 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3267 poll_threads(); 3268 CU_ASSERT(g_bserrno == 0); 3269 blob_md_ro = NULL; 3270 3271 g_blob = NULL; 3272 g_blobid = SPDK_BLOBID_INVALID; 3273 3274 ut_bs_reload(&bs, NULL); 3275 3276 g_blob = NULL; 3277 g_bserrno = 0; 3278 spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); 3279 poll_threads(); 3280 CU_ASSERT(g_bserrno != 0); 3281 CU_ASSERT(g_blob == NULL); 3282 3283 g_blob = NULL; 3284 g_bserrno = -1; 3285 spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL); 3286 poll_threads(); 3287 CU_ASSERT(g_bserrno == 0); 3288 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3289 blob_data_ro = g_blob; 3290 /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. 
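 * An unknown md_ro flag, by contrast, only forces md_ro; blob_md_ro is checked
 * for that further below.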
*/ 3291 CU_ASSERT(blob_data_ro->data_ro == true); 3292 CU_ASSERT(blob_data_ro->md_ro == true); 3293 CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); 3294 3295 g_blob = NULL; 3296 g_bserrno = -1; 3297 spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); 3298 poll_threads(); 3299 CU_ASSERT(g_bserrno == 0); 3300 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3301 blob_md_ro = g_blob; 3302 CU_ASSERT(blob_md_ro->data_ro == false); 3303 CU_ASSERT(blob_md_ro->md_ro == true); 3304 3305 g_bserrno = -1; 3306 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3307 poll_threads(); 3308 CU_ASSERT(g_bserrno == 0); 3309 3310 ut_blob_close_and_delete(bs, blob_data_ro); 3311 ut_blob_close_and_delete(bs, blob_md_ro); 3312 } 3313 3314 static void 3315 bs_version(void) 3316 { 3317 struct spdk_bs_super_block *super; 3318 struct spdk_blob_store *bs = g_bs; 3319 struct spdk_bs_dev *dev; 3320 struct spdk_blob *blob; 3321 struct spdk_blob_opts blob_opts; 3322 spdk_blob_id blobid; 3323 3324 /* Unload the blob store */ 3325 spdk_bs_unload(bs, bs_op_complete, NULL); 3326 poll_threads(); 3327 CU_ASSERT(g_bserrno == 0); 3328 g_bs = NULL; 3329 3330 /* 3331 * Change the bs version on disk. This will allow us to 3332 * test that the version does not get modified automatically 3333 * when loading and unloading the blobstore. 3334 */ 3335 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 3336 CU_ASSERT(super->version == SPDK_BS_VERSION); 3337 CU_ASSERT(super->clean == 1); 3338 super->version = 2; 3339 /* 3340 * Version 2 metadata does not have a used blobid mask, so clear 3341 * those fields in the super block and zero the corresponding 3342 * region on "disk". We will use this to ensure blob IDs are 3343 * correctly reconstructed. 3344 */ 3345 memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, 3346 super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); 3347 super->used_blobid_mask_start = 0; 3348 super->used_blobid_mask_len = 0; 3349 super->crc = _spdk_blob_md_page_calc_crc(super); 3350 3351 /* Load an existing blob store */ 3352 dev = init_dev(); 3353 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3354 poll_threads(); 3355 CU_ASSERT(g_bserrno == 0); 3356 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3357 CU_ASSERT(super->clean == 1); 3358 bs = g_bs; 3359 3360 /* 3361 * Create a blob - just to make sure that when we unload it 3362 * results in writing the super block (since metadata pages 3363 * were allocated. 
3364 */ 3365 ut_spdk_blob_opts_init(&blob_opts); 3366 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3367 poll_threads(); 3368 CU_ASSERT(g_bserrno == 0); 3369 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3370 blobid = g_blobid; 3371 3372 /* Unload the blob store */ 3373 spdk_bs_unload(bs, bs_op_complete, NULL); 3374 poll_threads(); 3375 CU_ASSERT(g_bserrno == 0); 3376 g_bs = NULL; 3377 CU_ASSERT(super->version == 2); 3378 CU_ASSERT(super->used_blobid_mask_start == 0); 3379 CU_ASSERT(super->used_blobid_mask_len == 0); 3380 3381 dev = init_dev(); 3382 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3383 poll_threads(); 3384 CU_ASSERT(g_bserrno == 0); 3385 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3386 bs = g_bs; 3387 3388 g_blob = NULL; 3389 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3390 poll_threads(); 3391 CU_ASSERT(g_bserrno == 0); 3392 CU_ASSERT(g_blob != NULL); 3393 blob = g_blob; 3394 3395 ut_blob_close_and_delete(bs, blob); 3396 3397 CU_ASSERT(super->version == 2); 3398 CU_ASSERT(super->used_blobid_mask_start == 0); 3399 CU_ASSERT(super->used_blobid_mask_len == 0); 3400 } 3401 3402 static void 3403 blob_set_xattrs(void) 3404 { 3405 struct spdk_blob_store *bs = g_bs; 3406 struct spdk_blob *blob; 3407 struct spdk_blob_opts opts; 3408 const void *value; 3409 size_t value_len; 3410 char *xattr; 3411 size_t xattr_length; 3412 int rc; 3413 3414 /* Create blob with extra attributes */ 3415 ut_spdk_blob_opts_init(&opts); 3416 3417 opts.xattrs.names = g_xattr_names; 3418 opts.xattrs.get_value = _get_xattr_value; 3419 opts.xattrs.count = 3; 3420 opts.xattrs.ctx = &g_ctx; 3421 3422 blob = ut_blob_create_and_open(bs, &opts); 3423 3424 /* Get the xattrs */ 3425 value = NULL; 3426 3427 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 3428 CU_ASSERT(rc == 0); 3429 SPDK_CU_ASSERT_FATAL(value != NULL); 3430 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 3431 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 3432 3433 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 3434 CU_ASSERT(rc == 0); 3435 SPDK_CU_ASSERT_FATAL(value != NULL); 3436 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 3437 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 3438 3439 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 3440 CU_ASSERT(rc == 0); 3441 SPDK_CU_ASSERT_FATAL(value != NULL); 3442 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 3443 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 3444 3445 /* Try to get non existing attribute */ 3446 3447 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 3448 CU_ASSERT(rc == -ENOENT); 3449 3450 /* Try xattr exceeding maximum length of descriptor in single page */ 3451 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 3452 strlen("large_xattr") + 1; 3453 xattr = calloc(xattr_length, sizeof(char)); 3454 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3455 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3456 free(xattr); 3457 SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); 3458 3459 spdk_blob_close(blob, blob_op_complete, NULL); 3460 poll_threads(); 3461 CU_ASSERT(g_bserrno == 0); 3462 blob = NULL; 3463 g_blob = NULL; 3464 g_blobid = SPDK_BLOBID_INVALID; 3465 3466 /* NULL callback */ 3467 ut_spdk_blob_opts_init(&opts); 3468 opts.xattrs.names = g_xattr_names; 3469 opts.xattrs.get_value = NULL; 3470 opts.xattrs.count = 1; 3471 
opts.xattrs.ctx = &g_ctx; 3472 3473 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3474 poll_threads(); 3475 CU_ASSERT(g_bserrno == -EINVAL); 3476 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3477 3478 /* NULL values */ 3479 ut_spdk_blob_opts_init(&opts); 3480 opts.xattrs.names = g_xattr_names; 3481 opts.xattrs.get_value = _get_xattr_value_null; 3482 opts.xattrs.count = 1; 3483 opts.xattrs.ctx = NULL; 3484 3485 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3486 poll_threads(); 3487 CU_ASSERT(g_bserrno == -EINVAL); 3488 } 3489 3490 static void 3491 blob_thin_prov_alloc(void) 3492 { 3493 struct spdk_blob_store *bs = g_bs; 3494 struct spdk_blob *blob; 3495 struct spdk_blob_opts opts; 3496 spdk_blob_id blobid; 3497 uint64_t free_clusters; 3498 3499 free_clusters = spdk_bs_free_cluster_count(bs); 3500 3501 /* Set blob as thin provisioned */ 3502 ut_spdk_blob_opts_init(&opts); 3503 opts.thin_provision = true; 3504 3505 blob = ut_blob_create_and_open(bs, &opts); 3506 blobid = spdk_blob_get_id(blob); 3507 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3508 3509 CU_ASSERT(blob->active.num_clusters == 0); 3510 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 3511 3512 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3513 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3514 poll_threads(); 3515 CU_ASSERT(g_bserrno == 0); 3516 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3517 CU_ASSERT(blob->active.num_clusters == 5); 3518 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 3519 3520 /* Grow it to 1TB - still unallocated */ 3521 spdk_blob_resize(blob, 262144, blob_op_complete, NULL); 3522 poll_threads(); 3523 CU_ASSERT(g_bserrno == 0); 3524 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3525 CU_ASSERT(blob->active.num_clusters == 262144); 3526 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3527 3528 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3529 poll_threads(); 3530 CU_ASSERT(g_bserrno == 0); 3531 /* Sync must not change anything */ 3532 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3533 CU_ASSERT(blob->active.num_clusters == 262144); 3534 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3535 /* Since clusters are not allocated, 3536 * number of metadata pages is expected to be minimal. 
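 * Even at 262144 clusters, a thin-provisioned blob with nothing allocated is
 * described by the single metadata page asserted below.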
3537 */ 3538 CU_ASSERT(blob->active.num_pages == 1); 3539 3540 /* Shrink the blob to 3 clusters - still unallocated */ 3541 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 3542 poll_threads(); 3543 CU_ASSERT(g_bserrno == 0); 3544 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3545 CU_ASSERT(blob->active.num_clusters == 3); 3546 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3547 3548 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3549 poll_threads(); 3550 CU_ASSERT(g_bserrno == 0); 3551 /* Sync must not change anything */ 3552 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3553 CU_ASSERT(blob->active.num_clusters == 3); 3554 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3555 3556 spdk_blob_close(blob, blob_op_complete, NULL); 3557 poll_threads(); 3558 CU_ASSERT(g_bserrno == 0); 3559 3560 ut_bs_reload(&bs, NULL); 3561 3562 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3563 poll_threads(); 3564 CU_ASSERT(g_bserrno == 0); 3565 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3566 blob = g_blob; 3567 3568 /* Check that clusters allocation and size is still the same */ 3569 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3570 CU_ASSERT(blob->active.num_clusters == 3); 3571 3572 ut_blob_close_and_delete(bs, blob); 3573 } 3574 3575 static void 3576 blob_insert_cluster_msg(void) 3577 { 3578 struct spdk_blob_store *bs = g_bs; 3579 struct spdk_blob *blob; 3580 struct spdk_blob_opts opts; 3581 spdk_blob_id blobid; 3582 uint64_t free_clusters; 3583 uint64_t new_cluster = 0; 3584 uint32_t cluster_num = 3; 3585 uint32_t extent_page = 0; 3586 3587 free_clusters = spdk_bs_free_cluster_count(bs); 3588 3589 /* Set blob as thin provisioned */ 3590 ut_spdk_blob_opts_init(&opts); 3591 opts.thin_provision = true; 3592 opts.num_clusters = 4; 3593 3594 blob = ut_blob_create_and_open(bs, &opts); 3595 blobid = spdk_blob_get_id(blob); 3596 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3597 3598 CU_ASSERT(blob->active.num_clusters == 4); 3599 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); 3600 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3601 3602 /* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread. 3603 * This is to simulate behaviour when cluster is allocated after blob creation. 3604 * Such as _spdk_bs_allocate_and_copy_cluster(). 
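 * The cluster map entry stays zero until _spdk_blob_insert_cluster_on_md_thread()
 * runs on the metadata thread; the asserts around that call below verify this.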
*/ 3605 _spdk_bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); 3606 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3607 3608 _spdk_blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, 3609 blob_op_complete, NULL); 3610 poll_threads(); 3611 3612 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3613 3614 spdk_blob_close(blob, blob_op_complete, NULL); 3615 poll_threads(); 3616 CU_ASSERT(g_bserrno == 0); 3617 3618 ut_bs_reload(&bs, NULL); 3619 3620 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3621 poll_threads(); 3622 CU_ASSERT(g_bserrno == 0); 3623 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3624 blob = g_blob; 3625 3626 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3627 3628 ut_blob_close_and_delete(bs, blob); 3629 } 3630 3631 static void 3632 blob_thin_prov_rw(void) 3633 { 3634 static const uint8_t zero[10 * 4096] = { 0 }; 3635 struct spdk_blob_store *bs = g_bs; 3636 struct spdk_blob *blob; 3637 struct spdk_io_channel *channel, *channel_thread1; 3638 struct spdk_blob_opts opts; 3639 uint64_t free_clusters; 3640 uint64_t page_size; 3641 uint8_t payload_read[10 * 4096]; 3642 uint8_t payload_write[10 * 4096]; 3643 uint64_t write_bytes; 3644 uint64_t read_bytes; 3645 3646 free_clusters = spdk_bs_free_cluster_count(bs); 3647 page_size = spdk_bs_get_page_size(bs); 3648 3649 channel = spdk_bs_alloc_io_channel(bs); 3650 CU_ASSERT(channel != NULL); 3651 3652 ut_spdk_blob_opts_init(&opts); 3653 opts.thin_provision = true; 3654 3655 blob = ut_blob_create_and_open(bs, &opts); 3656 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3657 3658 CU_ASSERT(blob->active.num_clusters == 0); 3659 3660 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3661 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3662 poll_threads(); 3663 CU_ASSERT(g_bserrno == 0); 3664 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3665 CU_ASSERT(blob->active.num_clusters == 5); 3666 3667 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3668 poll_threads(); 3669 CU_ASSERT(g_bserrno == 0); 3670 /* Sync must not change anything */ 3671 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3672 CU_ASSERT(blob->active.num_clusters == 5); 3673 3674 /* Payload should be all zeros from unallocated clusters */ 3675 memset(payload_read, 0xFF, sizeof(payload_read)); 3676 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3677 poll_threads(); 3678 CU_ASSERT(g_bserrno == 0); 3679 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3680 3681 write_bytes = g_dev_write_bytes; 3682 read_bytes = g_dev_read_bytes; 3683 3684 /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ 3685 set_thread(1); 3686 channel_thread1 = spdk_bs_alloc_io_channel(bs); 3687 CU_ASSERT(channel_thread1 != NULL); 3688 memset(payload_write, 0xE5, sizeof(payload_write)); 3689 spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); 3690 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3691 /* Perform write on thread 0. That will try to allocate cluster, 3692 * but fail due to another thread issuing the cluster allocation first. 
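	 * The losing allocation is rolled back, which is why the free cluster count
	 * settles back at free_clusters - 1 after poll_threads() has processed both writes.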
*/ 3693 set_thread(0); 3694 memset(payload_write, 0xE5, sizeof(payload_write)); 3695 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 3696 CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); 3697 poll_threads(); 3698 CU_ASSERT(g_bserrno == 0); 3699 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3700 /* For thin-provisioned blob we need to write 20 pages plus one page metadata and 3701 * read 0 bytes */ 3702 if (g_use_extent_table) { 3703 /* Add one more page for EXTENT_PAGE write */ 3704 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); 3705 } else { 3706 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); 3707 } 3708 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3709 3710 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3711 poll_threads(); 3712 CU_ASSERT(g_bserrno == 0); 3713 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3714 3715 ut_blob_close_and_delete(bs, blob); 3716 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3717 3718 set_thread(1); 3719 spdk_bs_free_io_channel(channel_thread1); 3720 set_thread(0); 3721 spdk_bs_free_io_channel(channel); 3722 poll_threads(); 3723 g_blob = NULL; 3724 g_blobid = 0; 3725 } 3726 3727 static void 3728 blob_thin_prov_rle(void) 3729 { 3730 static const uint8_t zero[10 * 4096] = { 0 }; 3731 struct spdk_blob_store *bs = g_bs; 3732 struct spdk_blob *blob; 3733 struct spdk_io_channel *channel; 3734 struct spdk_blob_opts opts; 3735 spdk_blob_id blobid; 3736 uint64_t free_clusters; 3737 uint64_t page_size; 3738 uint8_t payload_read[10 * 4096]; 3739 uint8_t payload_write[10 * 4096]; 3740 uint64_t write_bytes; 3741 uint64_t read_bytes; 3742 uint64_t io_unit; 3743 3744 free_clusters = spdk_bs_free_cluster_count(bs); 3745 page_size = spdk_bs_get_page_size(bs); 3746 3747 ut_spdk_blob_opts_init(&opts); 3748 opts.thin_provision = true; 3749 opts.num_clusters = 5; 3750 3751 blob = ut_blob_create_and_open(bs, &opts); 3752 blobid = spdk_blob_get_id(blob); 3753 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3754 3755 channel = spdk_bs_alloc_io_channel(bs); 3756 CU_ASSERT(channel != NULL); 3757 3758 /* Target specifically second cluster in a blob as first allocation */ 3759 io_unit = _spdk_bs_cluster_to_page(bs, 1) * _spdk_bs_io_unit_per_page(bs); 3760 3761 /* Payload should be all zeros from unallocated clusters */ 3762 memset(payload_read, 0xFF, sizeof(payload_read)); 3763 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3764 poll_threads(); 3765 CU_ASSERT(g_bserrno == 0); 3766 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3767 3768 write_bytes = g_dev_write_bytes; 3769 read_bytes = g_dev_read_bytes; 3770 3771 /* Issue write to second cluster in a blob */ 3772 memset(payload_write, 0xE5, sizeof(payload_write)); 3773 spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); 3774 poll_threads(); 3775 CU_ASSERT(g_bserrno == 0); 3776 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3777 /* For thin-provisioned blob we need to write 10 pages plus one page metadata and 3778 * read 0 bytes */ 3779 if (g_use_extent_table) { 3780 /* Add one more page for EXTENT_PAGE write */ 3781 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); 3782 } else { 3783 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); 3784 } 3785 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3786 3787 spdk_blob_io_read(blob, channel, payload_read, 
io_unit, 10, blob_op_complete, NULL); 3788 poll_threads(); 3789 CU_ASSERT(g_bserrno == 0); 3790 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3791 3792 spdk_bs_free_io_channel(channel); 3793 poll_threads(); 3794 3795 spdk_blob_close(blob, blob_op_complete, NULL); 3796 poll_threads(); 3797 CU_ASSERT(g_bserrno == 0); 3798 3799 ut_bs_reload(&bs, NULL); 3800 3801 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3802 poll_threads(); 3803 CU_ASSERT(g_bserrno == 0); 3804 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3805 blob = g_blob; 3806 3807 channel = spdk_bs_alloc_io_channel(bs); 3808 CU_ASSERT(channel != NULL); 3809 3810 /* Read second cluster after blob reload to confirm data written */ 3811 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3812 poll_threads(); 3813 CU_ASSERT(g_bserrno == 0); 3814 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3815 3816 spdk_bs_free_io_channel(channel); 3817 poll_threads(); 3818 3819 ut_blob_close_and_delete(bs, blob); 3820 } 3821 3822 static void 3823 blob_thin_prov_rw_iov(void) 3824 { 3825 static const uint8_t zero[10 * 4096] = { 0 }; 3826 struct spdk_blob_store *bs = g_bs; 3827 struct spdk_blob *blob; 3828 struct spdk_io_channel *channel; 3829 struct spdk_blob_opts opts; 3830 uint64_t free_clusters; 3831 uint8_t payload_read[10 * 4096]; 3832 uint8_t payload_write[10 * 4096]; 3833 struct iovec iov_read[3]; 3834 struct iovec iov_write[3]; 3835 3836 free_clusters = spdk_bs_free_cluster_count(bs); 3837 3838 channel = spdk_bs_alloc_io_channel(bs); 3839 CU_ASSERT(channel != NULL); 3840 3841 ut_spdk_blob_opts_init(&opts); 3842 opts.thin_provision = true; 3843 3844 blob = ut_blob_create_and_open(bs, &opts); 3845 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3846 3847 CU_ASSERT(blob->active.num_clusters == 0); 3848 3849 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. 
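	 * For a thin-provisioned blob the resize only grows the logical cluster count;
	 * nothing is allocated until the first write, so the free cluster count must not change.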
*/ 3850 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3851 poll_threads(); 3852 CU_ASSERT(g_bserrno == 0); 3853 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3854 CU_ASSERT(blob->active.num_clusters == 5); 3855 3856 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3857 poll_threads(); 3858 CU_ASSERT(g_bserrno == 0); 3859 /* Sync must not change anything */ 3860 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3861 CU_ASSERT(blob->active.num_clusters == 5); 3862 3863 /* Payload should be all zeros from unallocated clusters */ 3864 memset(payload_read, 0xAA, sizeof(payload_read)); 3865 iov_read[0].iov_base = payload_read; 3866 iov_read[0].iov_len = 3 * 4096; 3867 iov_read[1].iov_base = payload_read + 3 * 4096; 3868 iov_read[1].iov_len = 4 * 4096; 3869 iov_read[2].iov_base = payload_read + 7 * 4096; 3870 iov_read[2].iov_len = 3 * 4096; 3871 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 3872 poll_threads(); 3873 CU_ASSERT(g_bserrno == 0); 3874 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3875 3876 memset(payload_write, 0xE5, sizeof(payload_write)); 3877 iov_write[0].iov_base = payload_write; 3878 iov_write[0].iov_len = 1 * 4096; 3879 iov_write[1].iov_base = payload_write + 1 * 4096; 3880 iov_write[1].iov_len = 5 * 4096; 3881 iov_write[2].iov_base = payload_write + 6 * 4096; 3882 iov_write[2].iov_len = 4 * 4096; 3883 3884 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 3885 poll_threads(); 3886 CU_ASSERT(g_bserrno == 0); 3887 3888 memset(payload_read, 0xAA, sizeof(payload_read)); 3889 iov_read[0].iov_base = payload_read; 3890 iov_read[0].iov_len = 3 * 4096; 3891 iov_read[1].iov_base = payload_read + 3 * 4096; 3892 iov_read[1].iov_len = 4 * 4096; 3893 iov_read[2].iov_base = payload_read + 7 * 4096; 3894 iov_read[2].iov_len = 3 * 4096; 3895 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 3896 poll_threads(); 3897 CU_ASSERT(g_bserrno == 0); 3898 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3899 3900 spdk_bs_free_io_channel(channel); 3901 poll_threads(); 3902 3903 ut_blob_close_and_delete(bs, blob); 3904 } 3905 3906 struct iter_ctx { 3907 int current_iter; 3908 spdk_blob_id blobid[4]; 3909 }; 3910 3911 static void 3912 test_iter(void *arg, struct spdk_blob *blob, int bserrno) 3913 { 3914 struct iter_ctx *iter_ctx = arg; 3915 spdk_blob_id blobid; 3916 3917 CU_ASSERT(bserrno == 0); 3918 blobid = spdk_blob_get_id(blob); 3919 CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); 3920 } 3921 3922 static void 3923 bs_load_iter(void) 3924 { 3925 struct spdk_blob_store *bs; 3926 struct spdk_bs_dev *dev; 3927 struct iter_ctx iter_ctx = { 0 }; 3928 struct spdk_blob *blob; 3929 int i, rc; 3930 struct spdk_bs_opts opts; 3931 3932 dev = init_dev(); 3933 spdk_bs_opts_init(&opts); 3934 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 3935 3936 /* Initialize a new blob store */ 3937 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 3938 poll_threads(); 3939 CU_ASSERT(g_bserrno == 0); 3940 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3941 bs = g_bs; 3942 3943 for (i = 0; i < 4; i++) { 3944 blob = ut_blob_create_and_open(bs, NULL); 3945 iter_ctx.blobid[i] = spdk_blob_get_id(blob); 3946 3947 /* Just save the blobid as an xattr for testing purposes. 
*/ 3948 rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); 3949 CU_ASSERT(rc == 0); 3950 3951 /* Resize the blob */ 3952 spdk_blob_resize(blob, i, blob_op_complete, NULL); 3953 poll_threads(); 3954 CU_ASSERT(g_bserrno == 0); 3955 3956 spdk_blob_close(blob, blob_op_complete, NULL); 3957 poll_threads(); 3958 CU_ASSERT(g_bserrno == 0); 3959 } 3960 3961 g_bserrno = -1; 3962 spdk_bs_unload(bs, bs_op_complete, NULL); 3963 poll_threads(); 3964 CU_ASSERT(g_bserrno == 0); 3965 3966 dev = init_dev(); 3967 spdk_bs_opts_init(&opts); 3968 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 3969 opts.iter_cb_fn = test_iter; 3970 opts.iter_cb_arg = &iter_ctx; 3971 3972 /* Test blob iteration during load after a clean shutdown. */ 3973 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 3974 poll_threads(); 3975 CU_ASSERT(g_bserrno == 0); 3976 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3977 bs = g_bs; 3978 3979 /* Dirty shutdown */ 3980 _spdk_bs_free(bs); 3981 3982 dev = init_dev(); 3983 spdk_bs_opts_init(&opts); 3984 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 3985 opts.iter_cb_fn = test_iter; 3986 iter_ctx.current_iter = 0; 3987 opts.iter_cb_arg = &iter_ctx; 3988 3989 /* Test blob iteration during load after a dirty shutdown. */ 3990 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 3991 poll_threads(); 3992 CU_ASSERT(g_bserrno == 0); 3993 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3994 bs = g_bs; 3995 3996 spdk_bs_unload(bs, bs_op_complete, NULL); 3997 poll_threads(); 3998 CU_ASSERT(g_bserrno == 0); 3999 g_bs = NULL; 4000 } 4001 4002 static void 4003 blob_snapshot_rw(void) 4004 { 4005 static const uint8_t zero[10 * 4096] = { 0 }; 4006 struct spdk_blob_store *bs = g_bs; 4007 struct spdk_blob *blob, *snapshot; 4008 struct spdk_io_channel *channel; 4009 struct spdk_blob_opts opts; 4010 spdk_blob_id blobid, snapshotid; 4011 uint64_t free_clusters; 4012 uint64_t cluster_size; 4013 uint64_t page_size; 4014 uint8_t payload_read[10 * 4096]; 4015 uint8_t payload_write[10 * 4096]; 4016 uint64_t write_bytes; 4017 uint64_t read_bytes; 4018 4019 free_clusters = spdk_bs_free_cluster_count(bs); 4020 cluster_size = spdk_bs_get_cluster_size(bs); 4021 page_size = spdk_bs_get_page_size(bs); 4022 4023 channel = spdk_bs_alloc_io_channel(bs); 4024 CU_ASSERT(channel != NULL); 4025 4026 ut_spdk_blob_opts_init(&opts); 4027 opts.thin_provision = true; 4028 opts.num_clusters = 5; 4029 4030 blob = ut_blob_create_and_open(bs, &opts); 4031 blobid = spdk_blob_get_id(blob); 4032 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4033 4034 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4035 4036 memset(payload_read, 0xFF, sizeof(payload_read)); 4037 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4038 poll_threads(); 4039 CU_ASSERT(g_bserrno == 0); 4040 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4041 4042 memset(payload_write, 0xE5, sizeof(payload_write)); 4043 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4044 poll_threads(); 4045 CU_ASSERT(g_bserrno == 0); 4046 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4047 4048 /* Create snapshot from blob */ 4049 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4050 poll_threads(); 4051 CU_ASSERT(g_bserrno == 0); 4052 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4053 snapshotid = g_blobid; 4054 4055 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4056 
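	/* The open is asynchronous: poll_threads() drives it to completion and the
	 * callback leaves the snapshot handle in g_blob and the result in g_bserrno. */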
poll_threads(); 4057 CU_ASSERT(g_bserrno == 0); 4058 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4059 snapshot = g_blob; 4060 CU_ASSERT(snapshot->data_ro == true); 4061 CU_ASSERT(snapshot->md_ro == true); 4062 4063 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4064 4065 write_bytes = g_dev_write_bytes; 4066 read_bytes = g_dev_read_bytes; 4067 4068 memset(payload_write, 0xAA, sizeof(payload_write)); 4069 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4070 poll_threads(); 4071 CU_ASSERT(g_bserrno == 0); 4072 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4073 4074 /* For a clone we need to allocate and copy one cluster, update one page of metadata 4075 * and then write 10 pages of payload. 4076 */ 4077 if (g_use_extent_table) { 4078 /* Add one more page for EXTENT_PAGE write */ 4079 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); 4080 } else { 4081 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); 4082 } 4083 CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); 4084 4085 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4086 poll_threads(); 4087 CU_ASSERT(g_bserrno == 0); 4088 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4089 4090 /* Data on snapshot should not change after write to clone */ 4091 memset(payload_write, 0xE5, sizeof(payload_write)); 4092 spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); 4093 poll_threads(); 4094 CU_ASSERT(g_bserrno == 0); 4095 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4096 4097 ut_blob_close_and_delete(bs, blob); 4098 ut_blob_close_and_delete(bs, snapshot); 4099 4100 spdk_bs_free_io_channel(channel); 4101 poll_threads(); 4102 g_blob = NULL; 4103 g_blobid = 0; 4104 } 4105 4106 static void 4107 blob_snapshot_rw_iov(void) 4108 { 4109 static const uint8_t zero[10 * 4096] = { 0 }; 4110 struct spdk_blob_store *bs = g_bs; 4111 struct spdk_blob *blob, *snapshot; 4112 struct spdk_io_channel *channel; 4113 struct spdk_blob_opts opts; 4114 spdk_blob_id blobid, snapshotid; 4115 uint64_t free_clusters; 4116 uint8_t payload_read[10 * 4096]; 4117 uint8_t payload_write[10 * 4096]; 4118 struct iovec iov_read[3]; 4119 struct iovec iov_write[3]; 4120 4121 free_clusters = spdk_bs_free_cluster_count(bs); 4122 4123 channel = spdk_bs_alloc_io_channel(bs); 4124 CU_ASSERT(channel != NULL); 4125 4126 ut_spdk_blob_opts_init(&opts); 4127 opts.thin_provision = true; 4128 opts.num_clusters = 5; 4129 4130 blob = ut_blob_create_and_open(bs, &opts); 4131 blobid = spdk_blob_get_id(blob); 4132 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4133 4134 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4135 4136 /* Create snapshot from blob */ 4137 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4138 poll_threads(); 4139 CU_ASSERT(g_bserrno == 0); 4140 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4141 snapshotid = g_blobid; 4142 4143 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4144 poll_threads(); 4145 CU_ASSERT(g_bserrno == 0); 4146 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4147 snapshot = g_blob; 4148 CU_ASSERT(snapshot->data_ro == true); 4149 CU_ASSERT(snapshot->md_ro == true); 4150 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4151 4152 /* Payload should be all zeros from unallocated clusters */ 4153 memset(payload_read, 0xAA, sizeof(payload_read)); 4154 iov_read[0].iov_base = payload_read; 4155 iov_read[0].iov_len = 
3 * 4096; 4156 iov_read[1].iov_base = payload_read + 3 * 4096; 4157 iov_read[1].iov_len = 4 * 4096; 4158 iov_read[2].iov_base = payload_read + 7 * 4096; 4159 iov_read[2].iov_len = 3 * 4096; 4160 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4161 poll_threads(); 4162 CU_ASSERT(g_bserrno == 0); 4163 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4164 4165 memset(payload_write, 0xE5, sizeof(payload_write)); 4166 iov_write[0].iov_base = payload_write; 4167 iov_write[0].iov_len = 1 * 4096; 4168 iov_write[1].iov_base = payload_write + 1 * 4096; 4169 iov_write[1].iov_len = 5 * 4096; 4170 iov_write[2].iov_base = payload_write + 6 * 4096; 4171 iov_write[2].iov_len = 4 * 4096; 4172 4173 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4174 poll_threads(); 4175 CU_ASSERT(g_bserrno == 0); 4176 4177 memset(payload_read, 0xAA, sizeof(payload_read)); 4178 iov_read[0].iov_base = payload_read; 4179 iov_read[0].iov_len = 3 * 4096; 4180 iov_read[1].iov_base = payload_read + 3 * 4096; 4181 iov_read[1].iov_len = 4 * 4096; 4182 iov_read[2].iov_base = payload_read + 7 * 4096; 4183 iov_read[2].iov_len = 3 * 4096; 4184 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4185 poll_threads(); 4186 CU_ASSERT(g_bserrno == 0); 4187 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4188 4189 spdk_bs_free_io_channel(channel); 4190 poll_threads(); 4191 4192 ut_blob_close_and_delete(bs, blob); 4193 ut_blob_close_and_delete(bs, snapshot); 4194 } 4195 4196 /** 4197 * Inflate / decouple parent rw unit tests. 4198 * 4199 * -------------- 4200 * original blob: 0 1 2 3 4 4201 * ,---------+---------+---------+---------+---------. 4202 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4203 * +---------+---------+---------+---------+---------+ 4204 * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | 4205 * +---------+---------+---------+---------+---------+ 4206 * blob | - |zzzzzzzzz| - | - | - | 4207 * '---------+---------+---------+---------+---------' 4208 * . . . . . . 4209 * -------- . . . . . . 4210 * inflate: . . . . . . 4211 * ,---------+---------+---------+---------+---------. 4212 * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| 4213 * '---------+---------+---------+---------+---------' 4214 * 4215 * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency 4216 * on snapshot2 and snapshot removed . . . 4217 * . . . . . . 4218 * ---------------- . . . . . . 4219 * decouple parent: . . . . . . 4220 * ,---------+---------+---------+---------+---------. 4221 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4222 * +---------+---------+---------+---------+---------+ 4223 * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | 4224 * '---------+---------+---------+---------+---------' 4225 * 4226 * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency 4227 * on snapshot2 removed and on snapshot still exists. Snapshot2 4228 * should remain a clone of snapshot. 
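 *
 * The helper below runs both variants: _blob_inflate_rw(false) performs full
 * inflation and _blob_inflate_rw(true) only decouples the parent; each path
 * checks the allocated cluster count, the clone lists and data consistency.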
4229 */ 4230 static void 4231 _blob_inflate_rw(bool decouple_parent) 4232 { 4233 struct spdk_blob_store *bs = g_bs; 4234 struct spdk_blob *blob, *snapshot, *snapshot2; 4235 struct spdk_io_channel *channel; 4236 struct spdk_blob_opts opts; 4237 spdk_blob_id blobid, snapshotid, snapshot2id; 4238 uint64_t free_clusters; 4239 uint64_t cluster_size; 4240 4241 uint64_t payload_size; 4242 uint8_t *payload_read; 4243 uint8_t *payload_write; 4244 uint8_t *payload_clone; 4245 4246 uint64_t pages_per_cluster; 4247 uint64_t pages_per_payload; 4248 4249 int i; 4250 spdk_blob_id ids[2]; 4251 size_t count; 4252 4253 free_clusters = spdk_bs_free_cluster_count(bs); 4254 cluster_size = spdk_bs_get_cluster_size(bs); 4255 pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); 4256 pages_per_payload = pages_per_cluster * 5; 4257 4258 payload_size = cluster_size * 5; 4259 4260 payload_read = malloc(payload_size); 4261 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 4262 4263 payload_write = malloc(payload_size); 4264 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 4265 4266 payload_clone = malloc(payload_size); 4267 SPDK_CU_ASSERT_FATAL(payload_clone != NULL); 4268 4269 channel = spdk_bs_alloc_io_channel(bs); 4270 SPDK_CU_ASSERT_FATAL(channel != NULL); 4271 4272 /* Create blob */ 4273 ut_spdk_blob_opts_init(&opts); 4274 opts.thin_provision = true; 4275 opts.num_clusters = 5; 4276 4277 blob = ut_blob_create_and_open(bs, &opts); 4278 blobid = spdk_blob_get_id(blob); 4279 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4280 4281 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4282 4283 /* 1) Initial read should return zeroed payload */ 4284 memset(payload_read, 0xFF, payload_size); 4285 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4286 blob_op_complete, NULL); 4287 poll_threads(); 4288 CU_ASSERT(g_bserrno == 0); 4289 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 4290 4291 /* Fill whole blob with a pattern, except last cluster (to be sure it 4292 * isn't allocated) */ 4293 memset(payload_write, 0xE5, payload_size - cluster_size); 4294 spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - 4295 pages_per_cluster, blob_op_complete, NULL); 4296 poll_threads(); 4297 CU_ASSERT(g_bserrno == 0); 4298 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4299 4300 /* 2) Create snapshot from blob (first level) */ 4301 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4302 poll_threads(); 4303 CU_ASSERT(g_bserrno == 0); 4304 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4305 snapshotid = g_blobid; 4306 4307 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4308 poll_threads(); 4309 CU_ASSERT(g_bserrno == 0); 4310 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4311 snapshot = g_blob; 4312 CU_ASSERT(snapshot->data_ro == true); 4313 CU_ASSERT(snapshot->md_ro == true); 4314 4315 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4316 4317 /* Write every second cluster with a pattern. 4318 * 4319 * Last cluster shouldn't be written, to be sure that snapshot nor clone 4320 * doesn't allocate it. 4321 * 4322 * payload_clone stores expected result on "blob" read at the time and 4323 * is used only to check data consistency on clone before and after 4324 * inflation. Initially we fill it with a backing snapshots pattern 4325 * used before. 
4326 */ 4327 memset(payload_clone, 0xE5, payload_size - cluster_size); 4328 memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); 4329 memset(payload_write, 0xAA, payload_size); 4330 for (i = 1; i < 5; i += 2) { 4331 spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, 4332 pages_per_cluster, blob_op_complete, NULL); 4333 poll_threads(); 4334 CU_ASSERT(g_bserrno == 0); 4335 4336 /* Update expected result */ 4337 memcpy(payload_clone + (cluster_size * i), payload_write, 4338 cluster_size); 4339 } 4340 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4341 4342 /* Check data consistency on clone */ 4343 memset(payload_read, 0xFF, payload_size); 4344 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4345 blob_op_complete, NULL); 4346 poll_threads(); 4347 CU_ASSERT(g_bserrno == 0); 4348 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4349 4350 /* 3) Create second levels snapshot from blob */ 4351 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4352 poll_threads(); 4353 CU_ASSERT(g_bserrno == 0); 4354 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4355 snapshot2id = g_blobid; 4356 4357 spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); 4358 poll_threads(); 4359 CU_ASSERT(g_bserrno == 0); 4360 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4361 snapshot2 = g_blob; 4362 CU_ASSERT(snapshot2->data_ro == true); 4363 CU_ASSERT(snapshot2->md_ro == true); 4364 4365 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); 4366 4367 CU_ASSERT(snapshot2->parent_id == snapshotid); 4368 4369 /* Write one cluster on the top level blob. This cluster (1) covers 4370 * already allocated cluster in the snapshot2, so shouldn't be inflated 4371 * at all */ 4372 spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, 4373 pages_per_cluster, blob_op_complete, NULL); 4374 poll_threads(); 4375 CU_ASSERT(g_bserrno == 0); 4376 4377 /* Update expected result */ 4378 memcpy(payload_clone + cluster_size, payload_write, cluster_size); 4379 4380 /* Check data consistency on clone */ 4381 memset(payload_read, 0xFF, payload_size); 4382 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4383 blob_op_complete, NULL); 4384 poll_threads(); 4385 CU_ASSERT(g_bserrno == 0); 4386 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4387 4388 4389 /* Close all blobs */ 4390 spdk_blob_close(blob, blob_op_complete, NULL); 4391 poll_threads(); 4392 CU_ASSERT(g_bserrno == 0); 4393 4394 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4395 poll_threads(); 4396 CU_ASSERT(g_bserrno == 0); 4397 4398 spdk_blob_close(snapshot, blob_op_complete, NULL); 4399 poll_threads(); 4400 CU_ASSERT(g_bserrno == 0); 4401 4402 /* Check snapshot-clone relations */ 4403 count = 2; 4404 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4405 CU_ASSERT(count == 1); 4406 CU_ASSERT(ids[0] == snapshot2id); 4407 4408 count = 2; 4409 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4410 CU_ASSERT(count == 1); 4411 CU_ASSERT(ids[0] == blobid); 4412 4413 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); 4414 4415 free_clusters = spdk_bs_free_cluster_count(bs); 4416 if (!decouple_parent) { 4417 /* Do full blob inflation */ 4418 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 4419 poll_threads(); 4420 CU_ASSERT(g_bserrno == 0); 4421 4422 /* All clusters should be inflated (except one already allocated 4423 * in a top level blob) */ 4424 
CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4); 4425 4426 /* Check if relation tree updated correctly */ 4427 count = 2; 4428 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4429 4430 /* snapshotid have one clone */ 4431 CU_ASSERT(count == 1); 4432 CU_ASSERT(ids[0] == snapshot2id); 4433 4434 /* snapshot2id have no clones */ 4435 count = 2; 4436 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4437 CU_ASSERT(count == 0); 4438 4439 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4440 } else { 4441 /* Decouple parent of blob */ 4442 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 4443 poll_threads(); 4444 CU_ASSERT(g_bserrno == 0); 4445 4446 /* Only one cluster from a parent should be inflated (second one 4447 * is covered by a cluster written on a top level blob, and 4448 * already allocated) */ 4449 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1); 4450 4451 /* Check if relation tree updated correctly */ 4452 count = 2; 4453 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4454 4455 /* snapshotid have two clones now */ 4456 CU_ASSERT(count == 2); 4457 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4458 CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id); 4459 4460 /* snapshot2id have no clones */ 4461 count = 2; 4462 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4463 CU_ASSERT(count == 0); 4464 4465 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4466 } 4467 4468 /* Try to delete snapshot2 (should pass) */ 4469 spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL); 4470 poll_threads(); 4471 CU_ASSERT(g_bserrno == 0); 4472 4473 /* Try to delete base snapshot */ 4474 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4475 poll_threads(); 4476 CU_ASSERT(g_bserrno == 0); 4477 4478 /* Reopen blob after snapshot deletion */ 4479 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4480 poll_threads(); 4481 CU_ASSERT(g_bserrno == 0); 4482 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4483 blob = g_blob; 4484 4485 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4486 4487 /* Check data consistency on inflated blob */ 4488 memset(payload_read, 0xFF, payload_size); 4489 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4490 blob_op_complete, NULL); 4491 poll_threads(); 4492 CU_ASSERT(g_bserrno == 0); 4493 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4494 4495 spdk_bs_free_io_channel(channel); 4496 poll_threads(); 4497 4498 free(payload_read); 4499 free(payload_write); 4500 free(payload_clone); 4501 4502 ut_blob_close_and_delete(bs, blob); 4503 } 4504 4505 static void 4506 blob_inflate_rw(void) 4507 { 4508 _blob_inflate_rw(false); 4509 _blob_inflate_rw(true); 4510 } 4511 4512 /** 4513 * Snapshot-clones relation test 4514 * 4515 * snapshot 4516 * | 4517 * +-----+-----+ 4518 * | | 4519 * blob(ro) snapshot2 4520 * | | 4521 * clone2 clone 4522 */ 4523 static void 4524 blob_relations(void) 4525 { 4526 struct spdk_blob_store *bs; 4527 struct spdk_bs_dev *dev; 4528 struct spdk_bs_opts bs_opts; 4529 struct spdk_blob_opts opts; 4530 struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2; 4531 spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2; 4532 int rc; 4533 size_t count; 4534 spdk_blob_id ids[10] = {}; 4535 4536 dev = init_dev(); 4537 spdk_bs_opts_init(&bs_opts); 4538 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), 
"TESTTYPE"); 4539 4540 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4541 poll_threads(); 4542 CU_ASSERT(g_bserrno == 0); 4543 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4544 bs = g_bs; 4545 4546 /* 1. Create blob with 10 clusters */ 4547 4548 ut_spdk_blob_opts_init(&opts); 4549 opts.num_clusters = 10; 4550 4551 blob = ut_blob_create_and_open(bs, &opts); 4552 blobid = spdk_blob_get_id(blob); 4553 4554 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4555 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4556 CU_ASSERT(!spdk_blob_is_clone(blob)); 4557 CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); 4558 4559 /* blob should not have underlying snapshot nor clones */ 4560 CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); 4561 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4562 count = SPDK_COUNTOF(ids); 4563 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4564 CU_ASSERT(rc == 0); 4565 CU_ASSERT(count == 0); 4566 4567 4568 /* 2. Create snapshot */ 4569 4570 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4571 poll_threads(); 4572 CU_ASSERT(g_bserrno == 0); 4573 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4574 snapshotid = g_blobid; 4575 4576 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4577 poll_threads(); 4578 CU_ASSERT(g_bserrno == 0); 4579 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4580 snapshot = g_blob; 4581 4582 CU_ASSERT(spdk_blob_is_read_only(snapshot)); 4583 CU_ASSERT(spdk_blob_is_snapshot(snapshot)); 4584 CU_ASSERT(!spdk_blob_is_clone(snapshot)); 4585 CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); 4586 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4587 4588 /* Check if original blob is converted to the clone of snapshot */ 4589 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4590 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4591 CU_ASSERT(spdk_blob_is_clone(blob)); 4592 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4593 CU_ASSERT(blob->parent_id == snapshotid); 4594 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4595 4596 count = SPDK_COUNTOF(ids); 4597 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4598 CU_ASSERT(rc == 0); 4599 CU_ASSERT(count == 1); 4600 CU_ASSERT(ids[0] == blobid); 4601 4602 4603 /* 3. Create clone from snapshot */ 4604 4605 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 4606 poll_threads(); 4607 CU_ASSERT(g_bserrno == 0); 4608 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4609 cloneid = g_blobid; 4610 4611 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4612 poll_threads(); 4613 CU_ASSERT(g_bserrno == 0); 4614 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4615 clone = g_blob; 4616 4617 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4618 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4619 CU_ASSERT(spdk_blob_is_clone(clone)); 4620 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4621 CU_ASSERT(clone->parent_id == snapshotid); 4622 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); 4623 4624 count = SPDK_COUNTOF(ids); 4625 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4626 CU_ASSERT(rc == 0); 4627 CU_ASSERT(count == 0); 4628 4629 /* Check if clone is on the snapshot's list */ 4630 count = SPDK_COUNTOF(ids); 4631 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4632 CU_ASSERT(rc == 0); 4633 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4634 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 4635 4636 4637 /* 4. 
Create snapshot of the clone */ 4638 4639 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 4640 poll_threads(); 4641 CU_ASSERT(g_bserrno == 0); 4642 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4643 snapshotid2 = g_blobid; 4644 4645 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4646 poll_threads(); 4647 CU_ASSERT(g_bserrno == 0); 4648 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4649 snapshot2 = g_blob; 4650 4651 CU_ASSERT(spdk_blob_is_read_only(snapshot2)); 4652 CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); 4653 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 4654 CU_ASSERT(snapshot2->parent_id == snapshotid); 4655 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4656 4657 /* Check if clone is converted to the clone of snapshot2 and snapshot2 4658 * is a child of snapshot */ 4659 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4660 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4661 CU_ASSERT(spdk_blob_is_clone(clone)); 4662 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4663 CU_ASSERT(clone->parent_id == snapshotid2); 4664 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4665 4666 count = SPDK_COUNTOF(ids); 4667 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4668 CU_ASSERT(rc == 0); 4669 CU_ASSERT(count == 1); 4670 CU_ASSERT(ids[0] == cloneid); 4671 4672 4673 /* 5. Try to create clone from read only blob */ 4674 4675 /* Mark blob as read only */ 4676 spdk_blob_set_read_only(blob); 4677 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4678 poll_threads(); 4679 CU_ASSERT(g_bserrno == 0); 4680 4681 /* Check if previously created blob is read only clone */ 4682 CU_ASSERT(spdk_blob_is_read_only(blob)); 4683 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4684 CU_ASSERT(spdk_blob_is_clone(blob)); 4685 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4686 4687 /* Create clone from read only blob */ 4688 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4689 poll_threads(); 4690 CU_ASSERT(g_bserrno == 0); 4691 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4692 cloneid2 = g_blobid; 4693 4694 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 4695 poll_threads(); 4696 CU_ASSERT(g_bserrno == 0); 4697 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4698 clone2 = g_blob; 4699 4700 CU_ASSERT(!spdk_blob_is_read_only(clone2)); 4701 CU_ASSERT(!spdk_blob_is_snapshot(clone2)); 4702 CU_ASSERT(spdk_blob_is_clone(clone2)); 4703 CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); 4704 4705 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4706 4707 count = SPDK_COUNTOF(ids); 4708 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4709 CU_ASSERT(rc == 0); 4710 4711 CU_ASSERT(count == 1); 4712 CU_ASSERT(ids[0] == cloneid2); 4713 4714 /* Close blobs */ 4715 4716 spdk_blob_close(clone2, blob_op_complete, NULL); 4717 poll_threads(); 4718 CU_ASSERT(g_bserrno == 0); 4719 4720 spdk_blob_close(blob, blob_op_complete, NULL); 4721 poll_threads(); 4722 CU_ASSERT(g_bserrno == 0); 4723 4724 spdk_blob_close(clone, blob_op_complete, NULL); 4725 poll_threads(); 4726 CU_ASSERT(g_bserrno == 0); 4727 4728 spdk_blob_close(snapshot, blob_op_complete, NULL); 4729 poll_threads(); 4730 CU_ASSERT(g_bserrno == 0); 4731 4732 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4733 poll_threads(); 4734 CU_ASSERT(g_bserrno == 0); 4735 4736 /* Try to delete snapshot with more than 1 clone */ 4737 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4738 poll_threads(); 4739 CU_ASSERT(g_bserrno != 0); 4740 4741 
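	/* Reload the blobstore so that the relations verified below are rebuilt from
	 * the metadata persisted on disk rather than taken from in-memory state. */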
ut_bs_reload(&bs, &bs_opts); 4742 4743 /* NULL ids array should return number of clones in count */ 4744 count = SPDK_COUNTOF(ids); 4745 rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); 4746 CU_ASSERT(rc == -ENOMEM); 4747 CU_ASSERT(count == 2); 4748 4749 /* incorrect array size */ 4750 count = 1; 4751 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4752 CU_ASSERT(rc == -ENOMEM); 4753 CU_ASSERT(count == 2); 4754 4755 4756 /* Verify structure of loaded blob store */ 4757 4758 /* snapshot */ 4759 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4760 4761 count = SPDK_COUNTOF(ids); 4762 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4763 CU_ASSERT(rc == 0); 4764 CU_ASSERT(count == 2); 4765 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4766 CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); 4767 4768 /* blob */ 4769 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4770 count = SPDK_COUNTOF(ids); 4771 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4772 CU_ASSERT(rc == 0); 4773 CU_ASSERT(count == 1); 4774 CU_ASSERT(ids[0] == cloneid2); 4775 4776 /* clone */ 4777 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4778 count = SPDK_COUNTOF(ids); 4779 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4780 CU_ASSERT(rc == 0); 4781 CU_ASSERT(count == 0); 4782 4783 /* snapshot2 */ 4784 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4785 count = SPDK_COUNTOF(ids); 4786 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4787 CU_ASSERT(rc == 0); 4788 CU_ASSERT(count == 1); 4789 CU_ASSERT(ids[0] == cloneid); 4790 4791 /* clone2 */ 4792 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4793 count = SPDK_COUNTOF(ids); 4794 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 4795 CU_ASSERT(rc == 0); 4796 CU_ASSERT(count == 0); 4797 4798 /* Try to delete blob that user should not be able to remove */ 4799 4800 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4801 poll_threads(); 4802 CU_ASSERT(g_bserrno != 0); 4803 4804 /* Remove all blobs */ 4805 4806 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 4807 poll_threads(); 4808 CU_ASSERT(g_bserrno == 0); 4809 4810 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 4811 poll_threads(); 4812 CU_ASSERT(g_bserrno == 0); 4813 4814 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 4815 poll_threads(); 4816 CU_ASSERT(g_bserrno == 0); 4817 4818 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 4819 poll_threads(); 4820 CU_ASSERT(g_bserrno == 0); 4821 4822 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4823 poll_threads(); 4824 CU_ASSERT(g_bserrno == 0); 4825 4826 spdk_bs_unload(bs, bs_op_complete, NULL); 4827 poll_threads(); 4828 CU_ASSERT(g_bserrno == 0); 4829 4830 g_bs = NULL; 4831 } 4832 4833 /** 4834 * Snapshot-clones relation test 2 4835 * 4836 * snapshot1 4837 * | 4838 * snapshot2 4839 * | 4840 * +-----+-----+ 4841 * | | 4842 * blob(ro) snapshot3 4843 * | | 4844 * | snapshot4 4845 * | | | 4846 * clone2 clone clone3 4847 */ 4848 static void 4849 blob_relations2(void) 4850 { 4851 struct spdk_blob_store *bs; 4852 struct spdk_bs_dev *dev; 4853 struct spdk_bs_opts bs_opts; 4854 struct spdk_blob_opts opts; 4855 struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; 4856 spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, 4857 cloneid3; 4858 int rc; 4859 size_t count; 4860 
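	/* ids[] and count are reused for every spdk_blob_get_clones() query below:
	 * count is set to the array capacity before each call and updated by the
	 * call with the number of clones reported. */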
spdk_blob_id ids[10] = {}; 4861 4862 dev = init_dev(); 4863 spdk_bs_opts_init(&bs_opts); 4864 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 4865 4866 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4867 poll_threads(); 4868 CU_ASSERT(g_bserrno == 0); 4869 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4870 bs = g_bs; 4871 4872 /* 1. Create blob with 10 clusters */ 4873 4874 ut_spdk_blob_opts_init(&opts); 4875 opts.num_clusters = 10; 4876 4877 blob = ut_blob_create_and_open(bs, &opts); 4878 blobid = spdk_blob_get_id(blob); 4879 4880 /* 2. Create snapshot1 */ 4881 4882 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4883 poll_threads(); 4884 CU_ASSERT(g_bserrno == 0); 4885 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4886 snapshotid1 = g_blobid; 4887 4888 spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); 4889 poll_threads(); 4890 CU_ASSERT(g_bserrno == 0); 4891 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4892 snapshot1 = g_blob; 4893 4894 CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); 4895 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); 4896 4897 CU_ASSERT(blob->parent_id == snapshotid1); 4898 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 4899 4900 /* Check if blob is the clone of snapshot1 */ 4901 CU_ASSERT(blob->parent_id == snapshotid1); 4902 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 4903 4904 count = SPDK_COUNTOF(ids); 4905 rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); 4906 CU_ASSERT(rc == 0); 4907 CU_ASSERT(count == 1); 4908 CU_ASSERT(ids[0] == blobid); 4909 4910 /* 3. Create another snapshot */ 4911 4912 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4913 poll_threads(); 4914 CU_ASSERT(g_bserrno == 0); 4915 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4916 snapshotid2 = g_blobid; 4917 4918 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4919 poll_threads(); 4920 CU_ASSERT(g_bserrno == 0); 4921 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4922 snapshot2 = g_blob; 4923 4924 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 4925 CU_ASSERT(snapshot2->parent_id == snapshotid1); 4926 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); 4927 4928 /* Check if snapshot2 is the clone of snapshot1 and blob 4929 * is a child of snapshot2 */ 4930 CU_ASSERT(blob->parent_id == snapshotid2); 4931 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 4932 4933 count = SPDK_COUNTOF(ids); 4934 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4935 CU_ASSERT(rc == 0); 4936 CU_ASSERT(count == 1); 4937 CU_ASSERT(ids[0] == blobid); 4938 4939 /* 4. 
Create clone from snapshot */ 4940 4941 spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); 4942 poll_threads(); 4943 CU_ASSERT(g_bserrno == 0); 4944 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4945 cloneid = g_blobid; 4946 4947 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4948 poll_threads(); 4949 CU_ASSERT(g_bserrno == 0); 4950 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4951 clone = g_blob; 4952 4953 CU_ASSERT(clone->parent_id == snapshotid2); 4954 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4955 4956 /* Check if clone is on the snapshot's list */ 4957 count = SPDK_COUNTOF(ids); 4958 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4959 CU_ASSERT(rc == 0); 4960 CU_ASSERT(count == 2); 4961 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4962 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 4963 4964 /* 5. Create snapshot of the clone */ 4965 4966 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 4967 poll_threads(); 4968 CU_ASSERT(g_bserrno == 0); 4969 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4970 snapshotid3 = g_blobid; 4971 4972 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 4973 poll_threads(); 4974 CU_ASSERT(g_bserrno == 0); 4975 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4976 snapshot3 = g_blob; 4977 4978 CU_ASSERT(snapshot3->parent_id == snapshotid2); 4979 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 4980 4981 /* Check if clone is converted to the clone of snapshot3 and snapshot3 4982 * is a child of snapshot2 */ 4983 CU_ASSERT(clone->parent_id == snapshotid3); 4984 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 4985 4986 count = SPDK_COUNTOF(ids); 4987 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 4988 CU_ASSERT(rc == 0); 4989 CU_ASSERT(count == 1); 4990 CU_ASSERT(ids[0] == cloneid); 4991 4992 /* 6. Create another snapshot of the clone */ 4993 4994 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 4995 poll_threads(); 4996 CU_ASSERT(g_bserrno == 0); 4997 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4998 snapshotid4 = g_blobid; 4999 5000 spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); 5001 poll_threads(); 5002 CU_ASSERT(g_bserrno == 0); 5003 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5004 snapshot4 = g_blob; 5005 5006 CU_ASSERT(snapshot4->parent_id == snapshotid3); 5007 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); 5008 5009 /* Check if clone is converted to the clone of snapshot4 and snapshot4 5010 * is a child of snapshot3 */ 5011 CU_ASSERT(clone->parent_id == snapshotid4); 5012 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); 5013 5014 count = SPDK_COUNTOF(ids); 5015 rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); 5016 CU_ASSERT(rc == 0); 5017 CU_ASSERT(count == 1); 5018 CU_ASSERT(ids[0] == cloneid); 5019 5020 /* 7. Remove snapshot 4 */ 5021 5022 ut_blob_close_and_delete(bs, snapshot4); 5023 5024 /* Check if relations are back to state from before creating snapshot 4 */ 5025 CU_ASSERT(clone->parent_id == snapshotid3); 5026 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5027 5028 count = SPDK_COUNTOF(ids); 5029 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5030 CU_ASSERT(rc == 0); 5031 CU_ASSERT(count == 1); 5032 CU_ASSERT(ids[0] == cloneid); 5033 5034 /* 8. 
Create second clone of snapshot 3 and try to remove snapshot 3 */ 5035 5036 spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); 5037 poll_threads(); 5038 CU_ASSERT(g_bserrno == 0); 5039 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5040 cloneid3 = g_blobid; 5041 5042 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5043 poll_threads(); 5044 CU_ASSERT(g_bserrno != 0); 5045 5046 /* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */ 5047 5048 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5049 poll_threads(); 5050 CU_ASSERT(g_bserrno == 0); 5051 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5052 snapshot3 = g_blob; 5053 5054 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5055 poll_threads(); 5056 CU_ASSERT(g_bserrno != 0); 5057 5058 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5059 poll_threads(); 5060 CU_ASSERT(g_bserrno == 0); 5061 5062 spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); 5063 poll_threads(); 5064 CU_ASSERT(g_bserrno == 0); 5065 5066 /* 10. Remove snapshot 1 */ 5067 5068 ut_blob_close_and_delete(bs, snapshot1); 5069 5070 /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ 5071 CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); 5072 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5073 5074 count = SPDK_COUNTOF(ids); 5075 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5076 CU_ASSERT(rc == 0); 5077 CU_ASSERT(count == 2); 5078 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5079 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5080 5081 /* 11. Try to create clone from read only blob */ 5082 5083 /* Mark blob as read only */ 5084 spdk_blob_set_read_only(blob); 5085 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5086 poll_threads(); 5087 CU_ASSERT(g_bserrno == 0); 5088 5089 /* Create clone from read only blob */ 5090 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5091 poll_threads(); 5092 CU_ASSERT(g_bserrno == 0); 5093 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5094 cloneid2 = g_blobid; 5095 5096 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5097 poll_threads(); 5098 CU_ASSERT(g_bserrno == 0); 5099 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5100 clone2 = g_blob; 5101 5102 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5103 5104 count = SPDK_COUNTOF(ids); 5105 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5106 CU_ASSERT(rc == 0); 5107 CU_ASSERT(count == 1); 5108 CU_ASSERT(ids[0] == cloneid2); 5109 5110 /* Close blobs */ 5111 5112 spdk_blob_close(clone2, blob_op_complete, NULL); 5113 poll_threads(); 5114 CU_ASSERT(g_bserrno == 0); 5115 5116 spdk_blob_close(blob, blob_op_complete, NULL); 5117 poll_threads(); 5118 CU_ASSERT(g_bserrno == 0); 5119 5120 spdk_blob_close(clone, blob_op_complete, NULL); 5121 poll_threads(); 5122 CU_ASSERT(g_bserrno == 0); 5123 5124 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5125 poll_threads(); 5126 CU_ASSERT(g_bserrno == 0); 5127 5128 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5129 poll_threads(); 5130 CU_ASSERT(g_bserrno == 0); 5131 5132 ut_bs_reload(&bs, &bs_opts); 5133 5134 /* Verify structure of loaded blob store */ 5135 5136 /* snapshot2 */ 5137 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5138 5139 count = SPDK_COUNTOF(ids); 5140 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5141 CU_ASSERT(rc == 0); 5142 
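	/* With snapshot1 removed earlier, snapshot2 is the root of the tree and must
	 * still report both the original blob and snapshot3 as its clones. */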
CU_ASSERT(count == 2); 5143 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5144 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5145 5146 /* blob */ 5147 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5148 count = SPDK_COUNTOF(ids); 5149 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5150 CU_ASSERT(rc == 0); 5151 CU_ASSERT(count == 1); 5152 CU_ASSERT(ids[0] == cloneid2); 5153 5154 /* clone */ 5155 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5156 count = SPDK_COUNTOF(ids); 5157 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5158 CU_ASSERT(rc == 0); 5159 CU_ASSERT(count == 0); 5160 5161 /* snapshot3 */ 5162 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5163 count = SPDK_COUNTOF(ids); 5164 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5165 CU_ASSERT(rc == 0); 5166 CU_ASSERT(count == 1); 5167 CU_ASSERT(ids[0] == cloneid); 5168 5169 /* clone2 */ 5170 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5171 count = SPDK_COUNTOF(ids); 5172 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5173 CU_ASSERT(rc == 0); 5174 CU_ASSERT(count == 0); 5175 5176 /* Try to delete all blobs in the worse possible order */ 5177 5178 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5179 poll_threads(); 5180 CU_ASSERT(g_bserrno != 0); 5181 5182 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5183 poll_threads(); 5184 CU_ASSERT(g_bserrno == 0); 5185 5186 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5187 poll_threads(); 5188 CU_ASSERT(g_bserrno != 0); 5189 5190 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5191 poll_threads(); 5192 CU_ASSERT(g_bserrno == 0); 5193 5194 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5195 poll_threads(); 5196 CU_ASSERT(g_bserrno == 0); 5197 5198 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5199 poll_threads(); 5200 CU_ASSERT(g_bserrno == 0); 5201 5202 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5203 poll_threads(); 5204 CU_ASSERT(g_bserrno == 0); 5205 5206 spdk_bs_unload(bs, bs_op_complete, NULL); 5207 poll_threads(); 5208 CU_ASSERT(g_bserrno == 0); 5209 5210 g_bs = NULL; 5211 } 5212 5213 static void 5214 blobstore_clean_power_failure(void) 5215 { 5216 struct spdk_blob_store *bs; 5217 struct spdk_blob *blob; 5218 struct spdk_power_failure_thresholds thresholds = {}; 5219 bool clean = false; 5220 struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 5221 struct spdk_bs_super_block super_copy = {}; 5222 5223 thresholds.general_threshold = 1; 5224 while (!clean) { 5225 /* Create bs and blob */ 5226 suite_blob_setup(); 5227 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5228 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5229 bs = g_bs; 5230 blob = g_blob; 5231 5232 /* Super block should not change for rest of the UT, 5233 * save it and compare later. */ 5234 memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block)); 5235 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5236 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5237 5238 /* Force bs/super block in a clean state. 5239 * Along with marking blob dirty, to cause blob persist. 
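	 * A power failure is injected during the following spdk_blob_sync_md(); the
	 * surrounding loop raises the failure threshold by one on every iteration
	 * until the sync eventually succeeds with no failure injected.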
*/ 5240 blob->state = SPDK_BLOB_STATE_DIRTY; 5241 bs->clean = 1; 5242 super->clean = 1; 5243 super->crc = _spdk_blob_md_page_calc_crc(super); 5244 5245 g_bserrno = -1; 5246 dev_set_power_failure_thresholds(thresholds); 5247 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5248 poll_threads(); 5249 dev_reset_power_failure_event(); 5250 5251 if (g_bserrno == 0) { 5252 /* After successful md sync, both bs and super block 5253 * should be marked as not clean. */ 5254 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5255 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5256 clean = true; 5257 } 5258 5259 /* Depending on the point of failure, super block was either updated or not. */ 5260 super_copy.clean = super->clean; 5261 super_copy.crc = _spdk_blob_md_page_calc_crc(&super_copy); 5262 /* Compare that the values in super block remained unchanged. */ 5263 SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block))); 5264 5265 /* Delete blob and unload bs */ 5266 suite_blob_cleanup(); 5267 5268 thresholds.general_threshold++; 5269 } 5270 } 5271 5272 static void 5273 blob_delete_snapshot_power_failure(void) 5274 { 5275 struct spdk_bs_dev *dev; 5276 struct spdk_blob_store *bs; 5277 struct spdk_blob_opts opts; 5278 struct spdk_blob *blob, *snapshot; 5279 struct spdk_power_failure_thresholds thresholds = {}; 5280 spdk_blob_id blobid, snapshotid; 5281 const void *value; 5282 size_t value_len; 5283 size_t count; 5284 spdk_blob_id ids[3] = {}; 5285 int rc; 5286 bool deleted = false; 5287 int delete_snapshot_bserrno = -1; 5288 5289 thresholds.general_threshold = 1; 5290 while (!deleted) { 5291 dev = init_dev(); 5292 5293 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5294 poll_threads(); 5295 CU_ASSERT(g_bserrno == 0); 5296 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5297 bs = g_bs; 5298 5299 /* Create blob */ 5300 ut_spdk_blob_opts_init(&opts); 5301 opts.num_clusters = 10; 5302 5303 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5304 poll_threads(); 5305 CU_ASSERT(g_bserrno == 0); 5306 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5307 blobid = g_blobid; 5308 5309 /* Create snapshot */ 5310 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5311 poll_threads(); 5312 CU_ASSERT(g_bserrno == 0); 5313 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5314 snapshotid = g_blobid; 5315 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5316 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5317 5318 dev_set_power_failure_thresholds(thresholds); 5319 5320 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5321 poll_threads(); 5322 delete_snapshot_bserrno = g_bserrno; 5323 5324 /* Do not shut down cleanly. Assumption is that after snapshot deletion 5325 * reports success, changes to both blobs should already persisted. 
*/ 5326 dev_reset_power_failure_event(); 5327 ut_bs_dirty_load(&bs, NULL); 5328 5329 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5330 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5331 5332 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5333 poll_threads(); 5334 CU_ASSERT(g_bserrno == 0); 5335 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5336 blob = g_blob; 5337 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5338 5339 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5340 poll_threads(); 5341 5342 if (g_bserrno == 0) { 5343 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5344 snapshot = g_blob; 5345 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5346 count = SPDK_COUNTOF(ids); 5347 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5348 CU_ASSERT(rc == 0); 5349 CU_ASSERT(count == 1); 5350 CU_ASSERT(ids[0] == blobid); 5351 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 5352 CU_ASSERT(rc != 0); 5353 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5354 5355 spdk_blob_close(snapshot, blob_op_complete, NULL); 5356 poll_threads(); 5357 CU_ASSERT(g_bserrno == 0); 5358 } else { 5359 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5360 /* Snapshot might have been left in unrecoverable state, so it does not open. 5361 * Yet delete might perform further changes to the clone after that. 5362 * This UT should test until snapshot is deleted and delete call succeeds. */ 5363 if (delete_snapshot_bserrno == 0) { 5364 deleted = true; 5365 } 5366 } 5367 5368 spdk_blob_close(blob, blob_op_complete, NULL); 5369 poll_threads(); 5370 CU_ASSERT(g_bserrno == 0); 5371 5372 spdk_bs_unload(bs, bs_op_complete, NULL); 5373 poll_threads(); 5374 CU_ASSERT(g_bserrno == 0); 5375 5376 thresholds.general_threshold++; 5377 } 5378 } 5379 5380 static void 5381 blob_create_snapshot_power_failure(void) 5382 { 5383 struct spdk_blob_store *bs = g_bs; 5384 struct spdk_bs_dev *dev; 5385 struct spdk_blob_opts opts; 5386 struct spdk_blob *blob, *snapshot; 5387 struct spdk_power_failure_thresholds thresholds = {}; 5388 spdk_blob_id blobid, snapshotid; 5389 const void *value; 5390 size_t value_len; 5391 size_t count; 5392 spdk_blob_id ids[3] = {}; 5393 int rc; 5394 bool created = false; 5395 int create_snapshot_bserrno = -1; 5396 5397 thresholds.general_threshold = 1; 5398 while (!created) { 5399 dev = init_dev(); 5400 5401 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5402 poll_threads(); 5403 CU_ASSERT(g_bserrno == 0); 5404 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5405 bs = g_bs; 5406 5407 /* Create blob */ 5408 ut_spdk_blob_opts_init(&opts); 5409 opts.num_clusters = 10; 5410 5411 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5412 poll_threads(); 5413 CU_ASSERT(g_bserrno == 0); 5414 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5415 blobid = g_blobid; 5416 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5417 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5418 5419 dev_set_power_failure_thresholds(thresholds); 5420 5421 /* Create snapshot */ 5422 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5423 poll_threads(); 5424 create_snapshot_bserrno = g_bserrno; 5425 snapshotid = g_blobid; 5426 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5427 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5428 5429 /* Do not shut 
down cleanly. Assumption is that after create snapshot 5430 * reports success, both blobs should be power-fail safe. */ 5431 dev_reset_power_failure_event(); 5432 ut_bs_dirty_load(&bs, NULL); 5433 5434 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5435 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5436 5437 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5438 poll_threads(); 5439 CU_ASSERT(g_bserrno == 0); 5440 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5441 blob = g_blob; 5442 5443 if (snapshotid != SPDK_BLOBID_INVALID) { 5444 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5445 poll_threads(); 5446 } 5447 5448 if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { 5449 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5450 snapshot = g_blob; 5451 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5452 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5453 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5454 count = SPDK_COUNTOF(ids); 5455 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5456 CU_ASSERT(rc == 0); 5457 CU_ASSERT(count == 1); 5458 CU_ASSERT(ids[0] == blobid); 5459 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); 5460 CU_ASSERT(rc != 0); 5461 5462 spdk_blob_close(snapshot, blob_op_complete, NULL); 5463 poll_threads(); 5464 CU_ASSERT(g_bserrno == 0); 5465 if (create_snapshot_bserrno == 0) { 5466 created = true; 5467 } 5468 } else { 5469 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5470 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); 5471 } 5472 5473 spdk_blob_close(blob, blob_op_complete, NULL); 5474 poll_threads(); 5475 CU_ASSERT(g_bserrno == 0); 5476 5477 spdk_bs_unload(bs, bs_op_complete, NULL); 5478 poll_threads(); 5479 CU_ASSERT(g_bserrno == 0); 5480 5481 thresholds.general_threshold++; 5482 } 5483 } 5484 5485 static void 5486 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5487 { 5488 uint8_t payload_ff[64 * 512]; 5489 uint8_t payload_aa[64 * 512]; 5490 uint8_t payload_00[64 * 512]; 5491 uint8_t *cluster0, *cluster1; 5492 5493 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5494 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5495 memset(payload_00, 0x00, sizeof(payload_00)); 5496 5497 /* Try to perform I/O with io unit = 512 */ 5498 spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); 5499 poll_threads(); 5500 CU_ASSERT(g_bserrno == 0); 5501 5502 /* If thin provisioned is set cluster should be allocated now */ 5503 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5504 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5505 5506 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5507 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). 
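	 * Here the io_unit is 512 bytes and a page is SPDK_BS_PAGE_SIZE (4096 bytes in this
	 * setup), so one page holds 8 io_units and one cluster spans 4 * 8 = 32 io_units
	 * (16384 bytes), which is why each cluster map below lists 32 characters.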
*/ 5508 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5509 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5510 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5511 5512 /* Verify write with offset on first page */ 5513 spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); 5514 poll_threads(); 5515 CU_ASSERT(g_bserrno == 0); 5516 5517 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5518 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5519 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5520 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5521 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5522 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5523 5524 /* Verify write with offset on first page */ 5525 spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); 5526 poll_threads(); 5527 5528 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5529 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5530 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5531 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5532 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5533 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5534 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5535 5536 /* Verify write with offset on second page */ 5537 spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); 5538 poll_threads(); 5539 5540 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5541 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5542 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5543 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5544 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5545 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5546 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5547 5548 /* Verify write across multiple pages */ 5549 spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); 5550 poll_threads(); 5551 5552 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ 5553 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5554 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5555 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5556 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5557 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5558 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5559 5560 /* Verify write across multiple clusters */ 5561 spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); 5562 poll_threads(); 5563 5564 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5565 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5566 5567 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5568 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5569 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5570 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5571 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5572 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5573 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5574 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 
4 * 512) == 0); 5575 5576 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5577 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5578 5579 /* Verify write to second cluster */ 5580 spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); 5581 poll_threads(); 5582 5583 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5584 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5585 5586 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5587 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5588 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5589 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5590 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5591 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5592 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5593 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5594 5595 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5596 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5597 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5598 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5599 } 5600 5601 static void 5602 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5603 { 5604 uint8_t payload_read[64 * 512]; 5605 uint8_t payload_ff[64 * 512]; 5606 uint8_t payload_aa[64 * 512]; 5607 uint8_t payload_00[64 * 512]; 5608 5609 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5610 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5611 memset(payload_00, 0x00, sizeof(payload_00)); 5612 5613 /* Read only first io unit */ 5614 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5615 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5616 * payload_read: F000 0000 | 0000 0000 ... */ 5617 memset(payload_read, 0x00, sizeof(payload_read)); 5618 spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); 5619 poll_threads(); 5620 CU_ASSERT(g_bserrno == 0); 5621 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5622 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5623 5624 /* Read four io_units starting from offset = 2 5625 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5626 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5627 * payload_read: F0AA 0000 | 0000 0000 ... */ 5628 5629 memset(payload_read, 0x00, sizeof(payload_read)); 5630 spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); 5631 poll_threads(); 5632 CU_ASSERT(g_bserrno == 0); 5633 5634 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5635 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5636 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5637 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5638 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5639 5640 /* Read eight io_units across multiple pages 5641 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5642 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5643 * payload_read: AAAA AAAA | 0000 0000 ... 
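	 * (payload_read is zeroed before every read in this test, so the region beyond the
	 * io_units actually read is expected to still match payload_00.)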
*/ 5644 memset(payload_read, 0x00, sizeof(payload_read)); 5645 spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); 5646 poll_threads(); 5647 CU_ASSERT(g_bserrno == 0); 5648 5649 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5650 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5651 5652 /* Read eight io_units across multiple clusters 5653 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5654 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5655 * payload_read: FFFF FFFF | 0000 0000 ... */ 5656 memset(payload_read, 0x00, sizeof(payload_read)); 5657 spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); 5658 poll_threads(); 5659 CU_ASSERT(g_bserrno == 0); 5660 5661 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 5662 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5663 5664 /* Read four io_units from second cluster 5665 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5666 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 5667 * payload_read: 00FF 0000 | 0000 0000 ... */ 5668 memset(payload_read, 0x00, sizeof(payload_read)); 5669 spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); 5670 poll_threads(); 5671 CU_ASSERT(g_bserrno == 0); 5672 5673 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 5674 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 5675 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5676 5677 /* Read second cluster 5678 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5679 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 5680 * payload_read: FFFF 0000 | 0000 FF00 ... 
*/ 5681 memset(payload_read, 0x00, sizeof(payload_read)); 5682 spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); 5683 poll_threads(); 5684 CU_ASSERT(g_bserrno == 0); 5685 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 5686 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 5687 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 5688 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 5689 5690 /* Read whole two clusters 5691 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5692 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 5693 memset(payload_read, 0x00, sizeof(payload_read)); 5694 spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); 5695 poll_threads(); 5696 CU_ASSERT(g_bserrno == 0); 5697 5698 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5699 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5700 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 5701 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 5702 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 5703 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 5704 5705 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 5706 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 5707 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 5708 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 5709 } 5710 5711 5712 static void 5713 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5714 { 5715 uint8_t payload_ff[64 * 512]; 5716 uint8_t payload_aa[64 * 512]; 5717 uint8_t payload_00[64 * 512]; 5718 uint8_t *cluster0, *cluster1; 5719 5720 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5721 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5722 memset(payload_00, 0x00, sizeof(payload_00)); 5723 5724 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5725 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5726 5727 /* Unmap */ 5728 spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); 5729 poll_threads(); 5730 5731 CU_ASSERT(g_bserrno == 0); 5732 5733 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5734 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5735 } 5736 5737 static void 5738 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5739 { 5740 uint8_t payload_ff[64 * 512]; 5741 uint8_t payload_aa[64 * 512]; 5742 uint8_t payload_00[64 * 512]; 5743 uint8_t *cluster0, *cluster1; 5744 5745 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5746 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5747 memset(payload_00, 0x00, sizeof(payload_00)); 5748 5749 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5750 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5751 5752 /* Write zeroes */ 5753 spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); 5754 poll_threads(); 5755 5756 CU_ASSERT(g_bserrno == 0); 5757 5758 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5759 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5760 } 5761 5762 5763 static void 5764 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 
5765 { 5766 uint8_t payload_ff[64 * 512]; 5767 uint8_t payload_aa[64 * 512]; 5768 uint8_t payload_00[64 * 512]; 5769 uint8_t *cluster0, *cluster1; 5770 struct iovec iov[4]; 5771 5772 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5773 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5774 memset(payload_00, 0x00, sizeof(payload_00)); 5775 5776 /* Try to perform I/O with io unit = 512 */ 5777 iov[0].iov_base = payload_ff; 5778 iov[0].iov_len = 1 * 512; 5779 spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5780 poll_threads(); 5781 CU_ASSERT(g_bserrno == 0); 5782 5783 /* If thin provisioned is set cluster should be allocated now */ 5784 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5785 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5786 5787 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5788 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ 5789 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5790 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5791 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5792 5793 /* Verify write with offset on first page */ 5794 iov[0].iov_base = payload_ff; 5795 iov[0].iov_len = 1 * 512; 5796 spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); 5797 poll_threads(); 5798 CU_ASSERT(g_bserrno == 0); 5799 5800 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5801 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5802 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5803 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5804 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5805 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5806 5807 /* Verify write with offset on first page */ 5808 iov[0].iov_base = payload_ff; 5809 iov[0].iov_len = 4 * 512; 5810 spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); 5811 poll_threads(); 5812 5813 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5814 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5815 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5816 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5817 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5818 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5819 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5820 5821 /* Verify write with offset on second page */ 5822 iov[0].iov_base = payload_ff; 5823 iov[0].iov_len = 4 * 512; 5824 spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); 5825 poll_threads(); 5826 5827 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5828 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5829 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5830 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5831 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5832 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5833 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5834 5835 /* Verify write across multiple pages */ 5836 iov[0].iov_base = payload_aa; 5837 iov[0].iov_len = 8 * 512; 5838 spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); 5839 poll_threads(); 5840 5841 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 
0000 | 0000 0000 ] */ 5842 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5843 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5844 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5845 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5846 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5847 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5848 5849 /* Verify write across multiple clusters */ 5850 5851 iov[0].iov_base = payload_ff; 5852 iov[0].iov_len = 8 * 512; 5853 spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); 5854 poll_threads(); 5855 5856 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5857 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5858 5859 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5860 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5861 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5862 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5863 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5864 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5865 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5866 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); 5867 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5868 5869 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5870 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5871 5872 /* Verify write to second cluster */ 5873 5874 iov[0].iov_base = payload_ff; 5875 iov[0].iov_len = 2 * 512; 5876 spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); 5877 poll_threads(); 5878 5879 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5880 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5881 5882 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5883 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5884 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5885 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5886 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5887 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5888 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5889 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5890 5891 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5892 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5893 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5894 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5895 } 5896 5897 static void 5898 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5899 { 5900 uint8_t payload_read[64 * 512]; 5901 uint8_t payload_ff[64 * 512]; 5902 uint8_t payload_aa[64 * 512]; 5903 uint8_t payload_00[64 * 512]; 5904 struct iovec iov[4]; 5905 5906 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5907 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5908 memset(payload_00, 0x00, sizeof(payload_00)); 5909 5910 /* Read only first io unit */ 5911 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5912 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5913 * payload_read: F000 0000 | 0000 0000 ... 
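	 * (Expected contents match test_io_read(); the difference is that the destination
	 * buffer is described by iovec entries to exercise the readv path.)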
*/ 5914 memset(payload_read, 0x00, sizeof(payload_read)); 5915 iov[0].iov_base = payload_read; 5916 iov[0].iov_len = 1 * 512; 5917 spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5918 poll_threads(); 5919 5920 CU_ASSERT(g_bserrno == 0); 5921 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5922 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5923 5924 /* Read four io_units starting from offset = 2 5925 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5926 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5927 * payload_read: F0AA 0000 | 0000 0000 ... */ 5928 5929 memset(payload_read, 0x00, sizeof(payload_read)); 5930 iov[0].iov_base = payload_read; 5931 iov[0].iov_len = 4 * 512; 5932 spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); 5933 poll_threads(); 5934 CU_ASSERT(g_bserrno == 0); 5935 5936 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5937 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5938 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5939 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5940 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5941 5942 /* Read eight io_units across multiple pages 5943 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5944 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5945 * payload_read: AAAA AAAA | 0000 0000 ... */ 5946 memset(payload_read, 0x00, sizeof(payload_read)); 5947 iov[0].iov_base = payload_read; 5948 iov[0].iov_len = 4 * 512; 5949 iov[1].iov_base = payload_read + 4 * 512; 5950 iov[1].iov_len = 4 * 512; 5951 spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); 5952 poll_threads(); 5953 CU_ASSERT(g_bserrno == 0); 5954 5955 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5956 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5957 5958 /* Read eight io_units across multiple clusters 5959 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5960 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5961 * payload_read: FFFF FFFF | 0000 0000 ... */ 5962 memset(payload_read, 0x00, sizeof(payload_read)); 5963 iov[0].iov_base = payload_read; 5964 iov[0].iov_len = 2 * 512; 5965 iov[1].iov_base = payload_read + 2 * 512; 5966 iov[1].iov_len = 2 * 512; 5967 iov[2].iov_base = payload_read + 4 * 512; 5968 iov[2].iov_len = 2 * 512; 5969 iov[3].iov_base = payload_read + 6 * 512; 5970 iov[3].iov_len = 2 * 512; 5971 spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); 5972 poll_threads(); 5973 CU_ASSERT(g_bserrno == 0); 5974 5975 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 5976 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5977 5978 /* Read four io_units from second cluster 5979 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5980 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 5981 * payload_read: 00FF 0000 | 0000 0000 ... 
*/ 5982 memset(payload_read, 0x00, sizeof(payload_read)); 5983 iov[0].iov_base = payload_read; 5984 iov[0].iov_len = 1 * 512; 5985 iov[1].iov_base = payload_read + 1 * 512; 5986 iov[1].iov_len = 3 * 512; 5987 spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); 5988 poll_threads(); 5989 CU_ASSERT(g_bserrno == 0); 5990 5991 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 5992 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 5993 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5994 5995 /* Read second cluster 5996 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5997 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 5998 * payload_read: FFFF 0000 | 0000 FF00 ... */ 5999 memset(payload_read, 0x00, sizeof(payload_read)); 6000 iov[0].iov_base = payload_read; 6001 iov[0].iov_len = 1 * 512; 6002 iov[1].iov_base = payload_read + 1 * 512; 6003 iov[1].iov_len = 2 * 512; 6004 iov[2].iov_base = payload_read + 3 * 512; 6005 iov[2].iov_len = 4 * 512; 6006 iov[3].iov_base = payload_read + 7 * 512; 6007 iov[3].iov_len = 25 * 512; 6008 spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); 6009 poll_threads(); 6010 CU_ASSERT(g_bserrno == 0); 6011 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6012 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6013 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6014 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6015 6016 /* Read whole two clusters 6017 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6018 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6019 memset(payload_read, 0x00, sizeof(payload_read)); 6020 iov[0].iov_base = payload_read; 6021 iov[0].iov_len = 1 * 512; 6022 iov[1].iov_base = payload_read + 1 * 512; 6023 iov[1].iov_len = 8 * 512; 6024 iov[2].iov_base = payload_read + 9 * 512; 6025 iov[2].iov_len = 16 * 512; 6026 iov[3].iov_base = payload_read + 25 * 512; 6027 iov[3].iov_len = 39 * 512; 6028 spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); 6029 poll_threads(); 6030 CU_ASSERT(g_bserrno == 0); 6031 6032 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6033 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6034 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6035 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6036 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6037 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6038 6039 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6040 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6041 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6042 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6043 } 6044 6045 static void 6046 blob_io_unit(void) 6047 { 6048 struct spdk_bs_opts bsopts; 6049 struct spdk_blob_opts opts; 6050 struct spdk_blob_store *bs; 6051 struct spdk_bs_dev *dev; 6052 struct spdk_blob *blob, *snapshot, *clone; 6053 spdk_blob_id blobid; 6054 struct spdk_io_channel *channel; 6055 6056 /* Create dev with 512 bytes io unit size */ 6057 6058 spdk_bs_opts_init(&bsopts); 6059 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6060 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), 
"TESTTYPE"); 6061 6062 /* Try to initialize a new blob store with unsupported io_unit */ 6063 dev = init_dev(); 6064 dev->blocklen = 512; 6065 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6066 6067 /* Initialize a new blob store */ 6068 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6069 poll_threads(); 6070 CU_ASSERT(g_bserrno == 0); 6071 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6072 bs = g_bs; 6073 6074 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6075 channel = spdk_bs_alloc_io_channel(bs); 6076 6077 /* Create thick provisioned blob */ 6078 ut_spdk_blob_opts_init(&opts); 6079 opts.thin_provision = false; 6080 opts.num_clusters = 32; 6081 6082 blob = ut_blob_create_and_open(bs, &opts); 6083 blobid = spdk_blob_get_id(blob); 6084 6085 test_io_write(dev, blob, channel); 6086 test_io_read(dev, blob, channel); 6087 test_io_zeroes(dev, blob, channel); 6088 6089 test_iov_write(dev, blob, channel); 6090 test_iov_read(dev, blob, channel); 6091 6092 test_io_unmap(dev, blob, channel); 6093 6094 spdk_blob_close(blob, blob_op_complete, NULL); 6095 poll_threads(); 6096 CU_ASSERT(g_bserrno == 0); 6097 blob = NULL; 6098 g_blob = NULL; 6099 6100 /* Create thin provisioned blob */ 6101 6102 ut_spdk_blob_opts_init(&opts); 6103 opts.thin_provision = true; 6104 opts.num_clusters = 32; 6105 6106 blob = ut_blob_create_and_open(bs, &opts); 6107 blobid = spdk_blob_get_id(blob); 6108 6109 test_io_write(dev, blob, channel); 6110 test_io_read(dev, blob, channel); 6111 6112 test_io_zeroes(dev, blob, channel); 6113 6114 test_iov_write(dev, blob, channel); 6115 test_iov_read(dev, blob, channel); 6116 6117 /* Create snapshot */ 6118 6119 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6120 poll_threads(); 6121 CU_ASSERT(g_bserrno == 0); 6122 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6123 blobid = g_blobid; 6124 6125 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6126 poll_threads(); 6127 CU_ASSERT(g_bserrno == 0); 6128 CU_ASSERT(g_blob != NULL); 6129 snapshot = g_blob; 6130 6131 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6132 poll_threads(); 6133 CU_ASSERT(g_bserrno == 0); 6134 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6135 blobid = g_blobid; 6136 6137 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6138 poll_threads(); 6139 CU_ASSERT(g_bserrno == 0); 6140 CU_ASSERT(g_blob != NULL); 6141 clone = g_blob; 6142 6143 test_io_read(dev, blob, channel); 6144 test_io_read(dev, snapshot, channel); 6145 test_io_read(dev, clone, channel); 6146 6147 test_iov_read(dev, blob, channel); 6148 test_iov_read(dev, snapshot, channel); 6149 test_iov_read(dev, clone, channel); 6150 6151 /* Inflate clone */ 6152 6153 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6154 poll_threads(); 6155 6156 CU_ASSERT(g_bserrno == 0); 6157 6158 test_io_read(dev, clone, channel); 6159 6160 test_io_unmap(dev, clone, channel); 6161 6162 test_iov_write(dev, clone, channel); 6163 test_iov_read(dev, clone, channel); 6164 6165 spdk_blob_close(blob, blob_op_complete, NULL); 6166 spdk_blob_close(snapshot, blob_op_complete, NULL); 6167 spdk_blob_close(clone, blob_op_complete, NULL); 6168 poll_threads(); 6169 CU_ASSERT(g_bserrno == 0); 6170 blob = NULL; 6171 g_blob = NULL; 6172 6173 spdk_bs_free_io_channel(channel); 6174 poll_threads(); 6175 6176 /* Unload the blob store */ 6177 spdk_bs_unload(bs, bs_op_complete, NULL); 6178 poll_threads(); 6179 CU_ASSERT(g_bserrno == 0); 6180 g_bs = NULL; 6181 g_blob = NULL; 6182 g_blobid = 0; 6183 
}

static void
blob_io_unit_compatiblity(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts);
	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Try to initialize a new blob store with unsupported io_unit */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Modify the super block to behave like an older version.
	 * Check that the loaded io unit size equals SPDK_BS_PAGE_SIZE. */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	super->io_unit_size = 0;
	super->crc = _spdk_blob_md_page_calc_crc(super);

	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}

static void
blob_simultaneous_operations(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create a snapshot and try to remove the blob at the same time:
	 * - snapshot should be created successfully
	 * - delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Snapshot creation success */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Inflate the blob and try to remove it at the same time:
	 * - blob should be inflated successfully
	 * - delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Inflation success */
	CU_ASSERT(g_bserrno == 0);

	/* Clone the snapshot and try to remove the snapshot at the same time:
	 * - snapshot should be cloned successfully
	 * - delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Clone created */
	CU_ASSERT(g_bserrno == 0);

	/* Resize the blob and try to remove it at the same time:
	 * - blob should be resized successfully
	 * - delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Blob resized successfully */
	CU_ASSERT(g_bserrno == 0);

	/* Issue two consecutive blob syncs; neither should fail.
	 * Force each sync to actually occur by marking the blob dirty every time.
	 * Merely starting the sync is not enough to complete the operation,
	 * since disk I/O is required to complete it. */
	g_bserrno = -1;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	SPDK_CU_ASSERT_FATAL(g_bserrno == -1);

	blob->state = SPDK_BLOB_STATE_DIRTY;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	SPDK_CU_ASSERT_FATAL(g_bserrno == -1);

	uint32_t completions = 0;
	while (completions < 2) {
		SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1));
		if (g_bserrno == 0) {
			g_bserrno = -1;
			completions++;
		}
		/* g_bserrno should never be anything other than -1 here;
		 * any other value would mean that one of the syncs failed.
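		 * (poll_thread_times(0, 1) above polls thread 0 a single time per call, so the
		 * two outstanding syncs make progress one small step at a time rather than
		 * completing in a single poll_threads() sweep.)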
*/ 6354 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6355 } 6356 6357 spdk_bs_free_io_channel(channel); 6358 poll_threads(); 6359 6360 ut_blob_close_and_delete(bs, snapshot); 6361 ut_blob_close_and_delete(bs, blob); 6362 } 6363 6364 static void 6365 blob_persist(void) 6366 { 6367 struct spdk_blob_store *bs = g_bs; 6368 struct spdk_blob_opts opts; 6369 struct spdk_blob *blob; 6370 spdk_blob_id blobid; 6371 struct spdk_io_channel *channel; 6372 char *xattr; 6373 size_t xattr_length; 6374 int rc; 6375 uint32_t page_count_clear, page_count_xattr; 6376 uint64_t poller_iterations; 6377 bool run_poller; 6378 6379 channel = spdk_bs_alloc_io_channel(bs); 6380 SPDK_CU_ASSERT_FATAL(channel != NULL); 6381 6382 ut_spdk_blob_opts_init(&opts); 6383 opts.num_clusters = 10; 6384 6385 blob = ut_blob_create_and_open(bs, &opts); 6386 blobid = spdk_blob_get_id(blob); 6387 6388 /* Save the amount of md pages used after creation of a blob. 6389 * This should be consistent after removing xattr. */ 6390 page_count_clear = spdk_bit_array_count_set(bs->used_md_pages); 6391 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6392 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6393 6394 /* Add xattr with maximum length of descriptor to exceed single metadata page. */ 6395 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 6396 strlen("large_xattr"); 6397 xattr = calloc(xattr_length, sizeof(char)); 6398 SPDK_CU_ASSERT_FATAL(xattr != NULL); 6399 6400 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6401 SPDK_CU_ASSERT_FATAL(rc == 0); 6402 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6403 poll_threads(); 6404 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6405 6406 /* Save the amount of md pages used after adding the large xattr */ 6407 page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages); 6408 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6409 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6410 6411 /* Add xattr to a blob and sync it. While sync is occuring, remove the xattr and sync again. 6412 * Interrupt the first sync after increasing number of poller iterations, until it succeeds. 6413 * Expectation is that after second sync completes no xattr is saved in metadata. */ 6414 poller_iterations = 1; 6415 run_poller = true; 6416 while (run_poller) { 6417 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6418 SPDK_CU_ASSERT_FATAL(rc == 0); 6419 g_bserrno = -1; 6420 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6421 poll_thread_times(0, poller_iterations); 6422 if (g_bserrno == 0) { 6423 /* Poller iteration count was high enough for first sync to complete. 6424 * Verify that blob takes up enough of md_pages to store the xattr. 
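		 * (The xattr value was sized near SPDK_BS_MAX_DESC_SIZE above, so storing it
		 * requires additional metadata pages compared to the clean blob, i.e.
		 * page_count_xattr rather than page_count_clear.)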
*/ 6425 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6426 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6427 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr); 6428 run_poller = false; 6429 } 6430 rc = spdk_blob_remove_xattr(blob, "large_xattr"); 6431 SPDK_CU_ASSERT_FATAL(rc == 0); 6432 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6433 poll_threads(); 6434 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6435 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6436 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6437 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear); 6438 6439 /* Reload bs and re-open blob to verify that xattr was not persisted. */ 6440 spdk_blob_close(blob, blob_op_complete, NULL); 6441 poll_threads(); 6442 CU_ASSERT(g_bserrno == 0); 6443 6444 ut_bs_reload(&bs, NULL); 6445 6446 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6447 poll_threads(); 6448 CU_ASSERT(g_bserrno == 0); 6449 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6450 blob = g_blob; 6451 6452 rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length); 6453 SPDK_CU_ASSERT_FATAL(rc == -ENOENT); 6454 6455 poller_iterations++; 6456 /* Stop at high iteration count to prevent infinite loop. 6457 * This value should be enough for first md sync to complete in any case. */ 6458 SPDK_CU_ASSERT_FATAL(poller_iterations < 50); 6459 } 6460 6461 free(xattr); 6462 6463 ut_blob_close_and_delete(bs, blob); 6464 6465 spdk_bs_free_io_channel(channel); 6466 poll_threads(); 6467 } 6468 6469 static void 6470 suite_bs_setup(void) 6471 { 6472 struct spdk_bs_dev *dev; 6473 6474 dev = init_dev(); 6475 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6476 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 6477 poll_threads(); 6478 CU_ASSERT(g_bserrno == 0); 6479 CU_ASSERT(g_bs != NULL); 6480 } 6481 6482 static void 6483 suite_bs_cleanup(void) 6484 { 6485 spdk_bs_unload(g_bs, bs_op_complete, NULL); 6486 poll_threads(); 6487 CU_ASSERT(g_bserrno == 0); 6488 g_bs = NULL; 6489 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6490 } 6491 6492 static struct spdk_blob * 6493 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts) 6494 { 6495 struct spdk_blob *blob; 6496 struct spdk_blob_opts create_blob_opts; 6497 spdk_blob_id blobid; 6498 6499 if (blob_opts == NULL) { 6500 ut_spdk_blob_opts_init(&create_blob_opts); 6501 blob_opts = &create_blob_opts; 6502 } 6503 6504 spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL); 6505 poll_threads(); 6506 CU_ASSERT(g_bserrno == 0); 6507 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6508 blobid = g_blobid; 6509 g_blobid = -1; 6510 6511 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6512 poll_threads(); 6513 CU_ASSERT(g_bserrno == 0); 6514 CU_ASSERT(g_blob != NULL); 6515 blob = g_blob; 6516 6517 g_blob = NULL; 6518 g_bserrno = -1; 6519 6520 return blob; 6521 } 6522 6523 static void 6524 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob) 6525 { 6526 spdk_blob_id blobid = spdk_blob_get_id(blob); 6527 6528 spdk_blob_close(blob, blob_op_complete, NULL); 6529 poll_threads(); 6530 CU_ASSERT(g_bserrno == 0); 6531 g_blob = NULL; 6532 6533 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6534 poll_threads(); 6535 
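	/* Deletion is asynchronous; poll_threads() above drives it to completion before the status check. */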
CU_ASSERT(g_bserrno == 0); 6536 g_bserrno = -1; 6537 } 6538 6539 static void 6540 suite_blob_setup(void) 6541 { 6542 suite_bs_setup(); 6543 CU_ASSERT(g_bs != NULL); 6544 6545 g_blob = ut_blob_create_and_open(g_bs, NULL); 6546 CU_ASSERT(g_blob != NULL); 6547 } 6548 6549 static void 6550 suite_blob_cleanup(void) 6551 { 6552 ut_blob_close_and_delete(g_bs, g_blob); 6553 CU_ASSERT(g_blob == NULL); 6554 6555 suite_bs_cleanup(); 6556 CU_ASSERT(g_bs == NULL); 6557 } 6558 6559 int main(int argc, char **argv) 6560 { 6561 CU_pSuite suite, suite_bs, suite_blob; 6562 unsigned int num_failures; 6563 6564 CU_set_error_action(CUEA_ABORT); 6565 CU_initialize_registry(); 6566 6567 suite = CU_add_suite("blob", NULL, NULL); 6568 suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL, 6569 suite_bs_setup, suite_bs_cleanup); 6570 suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL, 6571 suite_blob_setup, suite_blob_cleanup); 6572 6573 CU_ADD_TEST(suite, blob_init); 6574 CU_ADD_TEST(suite_bs, blob_open); 6575 CU_ADD_TEST(suite_bs, blob_create); 6576 CU_ADD_TEST(suite_bs, blob_create_internal); 6577 CU_ADD_TEST(suite, blob_thin_provision); 6578 CU_ADD_TEST(suite_bs, blob_snapshot); 6579 CU_ADD_TEST(suite_bs, blob_clone); 6580 CU_ADD_TEST(suite_bs, blob_inflate); 6581 CU_ADD_TEST(suite_bs, blob_delete); 6582 CU_ADD_TEST(suite_bs, blob_resize); 6583 CU_ADD_TEST(suite, blob_read_only); 6584 CU_ADD_TEST(suite_bs, channel_ops); 6585 CU_ADD_TEST(suite_bs, blob_super); 6586 CU_ADD_TEST(suite_blob, blob_write); 6587 CU_ADD_TEST(suite_blob, blob_read); 6588 CU_ADD_TEST(suite_blob, blob_rw_verify); 6589 CU_ADD_TEST(suite_bs, blob_rw_verify_iov); 6590 CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem); 6591 CU_ADD_TEST(suite_blob, blob_rw_iov_read_only); 6592 CU_ADD_TEST(suite_bs, blob_unmap); 6593 CU_ADD_TEST(suite_bs, blob_iter); 6594 CU_ADD_TEST(suite_blob, blob_xattr); 6595 CU_ADD_TEST(suite, bs_load); 6596 CU_ADD_TEST(suite_bs, bs_load_pending_removal); 6597 CU_ADD_TEST(suite, bs_load_custom_cluster_size); 6598 CU_ADD_TEST(suite_bs, bs_unload); 6599 CU_ADD_TEST(suite, bs_cluster_sz); 6600 CU_ADD_TEST(suite_bs, bs_usable_clusters); 6601 CU_ADD_TEST(suite, bs_resize_md); 6602 CU_ADD_TEST(suite, bs_destroy); 6603 CU_ADD_TEST(suite, bs_type); 6604 CU_ADD_TEST(suite, bs_super_block); 6605 CU_ADD_TEST(suite, blob_serialize); 6606 CU_ADD_TEST(suite_bs, blob_crc); 6607 CU_ADD_TEST(suite, super_block_crc); 6608 CU_ADD_TEST(suite_blob, blob_dirty_shutdown); 6609 CU_ADD_TEST(suite_bs, blob_flags); 6610 CU_ADD_TEST(suite_bs, bs_version); 6611 CU_ADD_TEST(suite_bs, blob_set_xattrs); 6612 CU_ADD_TEST(suite_bs, blob_thin_prov_alloc); 6613 CU_ADD_TEST(suite_bs, blob_insert_cluster_msg); 6614 CU_ADD_TEST(suite_bs, blob_thin_prov_rw); 6615 CU_ADD_TEST(suite_bs, blob_thin_prov_rle); 6616 CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov); 6617 CU_ADD_TEST(suite, bs_load_iter); 6618 CU_ADD_TEST(suite_bs, blob_snapshot_rw); 6619 CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov); 6620 CU_ADD_TEST(suite, blob_relations); 6621 CU_ADD_TEST(suite, blob_relations2); 6622 CU_ADD_TEST(suite, blobstore_clean_power_failure); 6623 CU_ADD_TEST(suite, blob_delete_snapshot_power_failure); 6624 CU_ADD_TEST(suite, blob_create_snapshot_power_failure); 6625 CU_ADD_TEST(suite_bs, blob_inflate_rw); 6626 CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io); 6627 CU_ADD_TEST(suite_bs, blob_operation_split_rw); 6628 CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov); 6629 CU_ADD_TEST(suite, blob_io_unit); 6630 CU_ADD_TEST(suite, 
blob_io_unit_compatiblity); 6631 CU_ADD_TEST(suite_bs, blob_simultaneous_operations); 6632 CU_ADD_TEST(suite_bs, blob_persist); 6633 6634 allocate_threads(2); 6635 set_thread(0); 6636 6637 g_dev_buffer = calloc(1, DEV_BUFFER_SIZE); 6638 6639 CU_basic_set_mode(CU_BRM_VERBOSE); 6640 g_use_extent_table = false; 6641 CU_basic_run_tests(); 6642 num_failures = CU_get_number_of_failures(); 6643 g_use_extent_table = true; 6644 CU_basic_run_tests(); 6645 num_failures += CU_get_number_of_failures(); 6646 CU_cleanup_registry(); 6647 6648 free(g_dev_buffer); 6649 6650 free_threads(); 6651 6652 return num_failures; 6653 } 6654