/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk/blob.h"
#include "spdk/string.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "../bs_dev_common.c"
#include "blob/blobstore.c"
#include "blob/request.c"
#include "blob/zeroes.c"
#include "blob/blob_bs_dev.c"

struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob;
int g_bserrno;
struct spdk_xattr_names *g_names;
int g_done;
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;
bool g_use_extent_table = false;

struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t	version;
	uint32_t	length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	uint8_t		reserved[4036];
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");

static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
		struct spdk_blob_opts *blob_opts);
static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);

static void
_get_xattr_value(void *arg, const char *name,
		 const void **value, size_t *value_len)
{
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == &g_ctx);

	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
		if (!strcmp(name, g_xattr_names[i])) {
			*value_len = strlen(g_xattr_values[i]);
			*value = g_xattr_values[i];
			break;
		}
	}
}

static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value_len = 0;
	*value = NULL;
}

static int
_get_snapshots_count(struct spdk_blob_store *bs)
{
	struct spdk_blob_list *snapshot = NULL;
	int count = 0;

	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
		count += 1;
	}

	return count;
}

static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts);
	opts->use_extent_table = g_use_extent_table;
}

static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}

static void
blob_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}

static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}
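
/*
 * Unlike ut_bs_reload(), this helper skips the clean unload path entirely and
 * frees the blobstore in place, so the superblock on the simulated device is
 * never marked clean and the following spdk_bs_load() has to run its recovery
 * path.
 */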
static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	_spdk_bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}

static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again.  It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference.  This
	 *  should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again.  It should succeed.  This tests the case
	 *  where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}
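
/*
 * Exercise the internal creation path directly: xattrs passed through
 * _spdk_bs_create_blob()'s internal-xattr argument must be readable only when
 * the internal flag is set and must stay invisible to the public
 * spdk_blob_get_xattr_value() API.
 */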
static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	_spdk_blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	_spdk_bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options */

	_spdk_bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts);
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly.  This makes sure that when we load again
	 *  and try to recover a valid used_cluster map, that blobstore will
	 *  ignore clusters with index 0 since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
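
/*
 * Build a two-level snapshot chain (blob -> snapshot2 -> snapshot), verify the
 * read-only flags, xattrs and clone lists at each level, and check that taking
 * a snapshot of a snapshot is rejected with -EINVAL.
 */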
533 */ 534 ut_bs_dirty_load(&bs, &bs_opts); 535 536 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 537 poll_threads(); 538 CU_ASSERT(g_bserrno == 0); 539 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 540 blob = g_blob; 541 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 542 543 spdk_blob_close(blob, blob_op_complete, NULL); 544 poll_threads(); 545 CU_ASSERT(g_bserrno == 0); 546 547 spdk_bs_unload(bs, bs_op_complete, NULL); 548 poll_threads(); 549 CU_ASSERT(g_bserrno == 0); 550 g_bs = NULL; 551 } 552 553 static void 554 blob_snapshot(void) 555 { 556 struct spdk_blob_store *bs = g_bs; 557 struct spdk_blob *blob; 558 struct spdk_blob *snapshot, *snapshot2; 559 struct spdk_blob_bs_dev *blob_bs_dev; 560 struct spdk_blob_opts opts; 561 struct spdk_blob_xattr_opts xattrs; 562 spdk_blob_id blobid; 563 spdk_blob_id snapshotid; 564 spdk_blob_id snapshotid2; 565 const void *value; 566 size_t value_len; 567 int rc; 568 spdk_blob_id ids[2]; 569 size_t count; 570 571 /* Create blob with 10 clusters */ 572 ut_spdk_blob_opts_init(&opts); 573 opts.num_clusters = 10; 574 575 blob = ut_blob_create_and_open(bs, &opts); 576 blobid = spdk_blob_get_id(blob); 577 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 578 579 /* Create snapshot from blob */ 580 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0); 581 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 582 poll_threads(); 583 CU_ASSERT(g_bserrno == 0); 584 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 585 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1); 586 snapshotid = g_blobid; 587 588 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 589 poll_threads(); 590 CU_ASSERT(g_bserrno == 0); 591 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 592 snapshot = g_blob; 593 CU_ASSERT(snapshot->data_ro == true); 594 CU_ASSERT(snapshot->md_ro == true); 595 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); 596 597 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 598 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 599 CU_ASSERT(spdk_mem_all_zero(blob->active.clusters, 600 blob->active.num_clusters * sizeof(blob->active.clusters[0]))); 601 602 /* Try to create snapshot from clone with xattrs */ 603 xattrs.names = g_xattr_names; 604 xattrs.get_value = _get_xattr_value; 605 xattrs.count = 3; 606 xattrs.ctx = &g_ctx; 607 spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL); 608 poll_threads(); 609 CU_ASSERT(g_bserrno == 0); 610 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 611 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2); 612 snapshotid2 = g_blobid; 613 614 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 615 CU_ASSERT(g_bserrno == 0); 616 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 617 snapshot2 = g_blob; 618 CU_ASSERT(snapshot2->data_ro == true); 619 CU_ASSERT(snapshot2->md_ro == true); 620 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10); 621 622 /* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */ 623 CU_ASSERT(snapshot->back_bs_dev == NULL); 624 SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL); 625 SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL); 626 627 blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 628 CU_ASSERT(blob_bs_dev->blob == snapshot2); 629 630 blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev; 631 CU_ASSERT(blob_bs_dev->blob == snapshot); 632 633 rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len); 634 CU_ASSERT(rc == 0); 635 SPDK_CU_ASSERT_FATAL(value != NULL); 636 
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshotid2);

	/* Try to create snapshot from snapshot */
	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);

	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
	ut_blob_close_and_delete(bs, blob);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
	ut_blob_close_and_delete(bs, snapshot2);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
}

static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that.
	 */
	poll_thread_times(0, 3);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk */
	CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0] * SPDK_BS_PAGE_SIZE],
			 SPDK_BS_PAGE_SIZE) == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);


	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from a blob that is not read-only */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflating a thin blob with no parent should make it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
	}

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_inflate(void)
{
	_blob_inflate(false);
	_blob_inflate(true);
}

static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_resize(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	dev = init_dev();
	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
channel_ops(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
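
/*
 * blob_write and blob_read below exercise the error paths of the page-based
 * I/O API: I/O to a zero-sized blob and I/O that starts or runs past the last
 * allocated cluster must fail with -EINVAL, and writes to a blob with data_ro
 * set must fail with -EPERM.
 */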
static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	blob = ut_blob_create_and_open(bs, NULL);

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster.  This allows
	 *  us to make sure that the readv/writev code correctly accounts for I/O
	 *  that crosses cluster boundaries.  Start by asserting that the allocated
	 *  clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	/*
	 * Choose a page offset just before the cluster boundary.
	 *  The first 6 pages of payload will get written to the first cluster,
	 *  the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	buf = calloc(1, 256 * 4096);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static uint32_t
bs_channel_get_req_count(struct spdk_io_channel *_channel)
{
	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
	struct spdk_bs_request_set *set;
	uint32_t count = 0;

	TAILQ_FOREACH(set, &channel->reqs, link) {
		count++;
	}

	return count;
}

static void
blob_rw_verify_iov_nomem(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_write[10 * 4096];
	struct iovec iov_write[3];
	uint32_t req_count;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
	 *  will get written to the first cluster, the last 4 to the second cluster.
	 */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	MOCK_SET(calloc, NULL);
	req_count = bs_channel_get_req_count(channel);
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
	MOCK_CLEAR(calloc);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[4096];
	uint8_t payload_write[4096];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev fails if the read_only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if the data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint8_t *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, read one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint8_t *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, write one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}
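
/*
 * The two tests below write with one path and read back with the other: a
 * large, contiguous spdk_blob_io_read()/spdk_blob_io_write() (which the
 * blobstore splits internally at cluster boundaries) is checked against the
 * page-at-a-time no-split helpers above, so any error in how split operations
 * are stitched back together shows up as a payload mismatch.
 */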
static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);

	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);
}

static void
blob_operation_split_rw_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	struct iovec iov_read[2];
	struct iovec iov_write[2];

	uint64_t i, j;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	for (i = 0; i < pages_per_payload; i++) {
		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
			uint64_t *tmp;

			tmp = (uint64_t *)payload_pattern;
			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
			*tmp = i + 1;
		}
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 3;
	iov_read[1].iov_base = payload_read + cluster_size * 3;
	iov_read[1].iov_len = cluster_size * 2;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* First of iovs fills whole blob except last page and second of iovs writes last page
	 *  with a pattern. */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = payload_size - page_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = page_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 2;
	iov_read[1].iov_base = payload_read + cluster_size * 2;
	iov_read[1].iov_len = cluster_size * 3;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* First of iovs fills only first page and second of iovs writes whole blob except
	 *  first page with a pattern. */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = page_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = payload_size - page_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 4;
	iov_read[1].iov_base = payload_read + cluster_size * 4;
	iov_read[1].iov_len = cluster_size;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size;
	iov_read[1].iov_base = payload_read + cluster_size;
	iov_read[1].iov_len = cluster_size * 4;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
*/ 1804 iov_write[0].iov_base = payload_read; 1805 iov_write[0].iov_len = cluster_size * 2; 1806 iov_write[1].iov_base = payload_read + cluster_size * 2; 1807 iov_write[1].iov_len = cluster_size * 3; 1808 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1809 poll_threads(); 1810 CU_ASSERT(g_bserrno == 0); 1811 1812 memset(payload_read, 0xFF, payload_size); 1813 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1814 poll_threads(); 1815 CU_ASSERT(g_bserrno == 0); 1816 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1817 1818 spdk_blob_close(blob, blob_op_complete, NULL); 1819 CU_ASSERT(g_bserrno == 0); 1820 1821 spdk_bs_free_io_channel(channel); 1822 poll_threads(); 1823 1824 g_blob = NULL; 1825 g_blobid = 0; 1826 1827 free(payload_read); 1828 free(payload_write); 1829 free(payload_pattern); 1830 } 1831 1832 static void 1833 blob_unmap(void) 1834 { 1835 struct spdk_blob_store *bs = g_bs; 1836 struct spdk_blob *blob; 1837 struct spdk_io_channel *channel; 1838 struct spdk_blob_opts opts; 1839 uint8_t payload[4096]; 1840 int i; 1841 1842 channel = spdk_bs_alloc_io_channel(bs); 1843 CU_ASSERT(channel != NULL); 1844 1845 ut_spdk_blob_opts_init(&opts); 1846 opts.num_clusters = 10; 1847 1848 blob = ut_blob_create_and_open(bs, &opts); 1849 1850 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1851 poll_threads(); 1852 CU_ASSERT(g_bserrno == 0); 1853 1854 memset(payload, 0, sizeof(payload)); 1855 payload[0] = 0xFF; 1856 1857 /* 1858 * Set first byte of every cluster to 0xFF. 1859 * First cluster on device is reserved so let's start from cluster number 1 1860 */ 1861 for (i = 1; i < 11; i++) { 1862 g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF; 1863 } 1864 1865 /* Confirm writes */ 1866 for (i = 0; i < 10; i++) { 1867 payload[0] = 0; 1868 spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1, 1869 blob_op_complete, NULL); 1870 poll_threads(); 1871 CU_ASSERT(g_bserrno == 0); 1872 CU_ASSERT(payload[0] == 0xFF); 1873 } 1874 1875 /* Mark some clusters as unallocated */ 1876 blob->active.clusters[1] = 0; 1877 blob->active.clusters[2] = 0; 1878 blob->active.clusters[3] = 0; 1879 blob->active.clusters[6] = 0; 1880 blob->active.clusters[8] = 0; 1881 1882 /* Unmap clusters by resizing to 0 */ 1883 spdk_blob_resize(blob, 0, blob_op_complete, NULL); 1884 poll_threads(); 1885 CU_ASSERT(g_bserrno == 0); 1886 1887 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1888 poll_threads(); 1889 CU_ASSERT(g_bserrno == 0); 1890 1891 /* Confirm that only 'allocated' clusters were unmapped */ 1892 for (i = 1; i < 11; i++) { 1893 switch (i) { 1894 case 2: 1895 case 3: 1896 case 4: 1897 case 7: 1898 case 9: 1899 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF); 1900 break; 1901 default: 1902 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0); 1903 break; 1904 } 1905 } 1906 1907 spdk_blob_close(blob, blob_op_complete, NULL); 1908 poll_threads(); 1909 CU_ASSERT(g_bserrno == 0); 1910 1911 spdk_bs_free_io_channel(channel); 1912 poll_threads(); 1913 } 1914 1915 1916 static void 1917 blob_iter(void) 1918 { 1919 struct spdk_blob_store *bs = g_bs; 1920 struct spdk_blob *blob; 1921 spdk_blob_id blobid; 1922 struct spdk_blob_opts blob_opts; 1923 1924 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1925 poll_threads(); 1926 CU_ASSERT(g_blob == NULL); 1927 CU_ASSERT(g_bserrno == -ENOENT); 1928 1929 ut_spdk_blob_opts_init(&blob_opts); 1930 
spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 1931 poll_threads(); 1932 CU_ASSERT(g_bserrno == 0); 1933 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1934 blobid = g_blobid; 1935 1936 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1937 poll_threads(); 1938 CU_ASSERT(g_blob != NULL); 1939 CU_ASSERT(g_bserrno == 0); 1940 blob = g_blob; 1941 CU_ASSERT(spdk_blob_get_id(blob) == blobid); 1942 1943 spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL); 1944 poll_threads(); 1945 CU_ASSERT(g_blob == NULL); 1946 CU_ASSERT(g_bserrno == -ENOENT); 1947 } 1948 1949 static void 1950 blob_xattr(void) 1951 { 1952 struct spdk_blob_store *bs = g_bs; 1953 struct spdk_blob *blob; 1954 spdk_blob_id blobid; 1955 uint64_t length; 1956 int rc; 1957 const char *name1, *name2; 1958 const void *value; 1959 size_t value_len; 1960 struct spdk_xattr_names *names; 1961 1962 blob = ut_blob_create_and_open(bs, NULL); 1963 blobid = spdk_blob_get_id(blob); 1964 1965 /* Test that set_xattr fails if md_ro flag is set. */ 1966 blob->md_ro = true; 1967 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 1968 CU_ASSERT(rc == -EPERM); 1969 1970 blob->md_ro = false; 1971 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 1972 CU_ASSERT(rc == 0); 1973 1974 length = 2345; 1975 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 1976 CU_ASSERT(rc == 0); 1977 1978 /* Overwrite "length" xattr. */ 1979 length = 3456; 1980 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 1981 CU_ASSERT(rc == 0); 1982 1983 /* get_xattr should still work even if md_ro flag is set. */ 1984 value = NULL; 1985 blob->md_ro = true; 1986 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 1987 CU_ASSERT(rc == 0); 1988 SPDK_CU_ASSERT_FATAL(value != NULL); 1989 CU_ASSERT(*(uint64_t *)value == length); 1990 CU_ASSERT(value_len == 8); 1991 blob->md_ro = false; 1992 1993 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 1994 CU_ASSERT(rc == -ENOENT); 1995 1996 names = NULL; 1997 rc = spdk_blob_get_xattr_names(blob, &names); 1998 CU_ASSERT(rc == 0); 1999 SPDK_CU_ASSERT_FATAL(names != NULL); 2000 CU_ASSERT(spdk_xattr_names_get_count(names) == 2); 2001 name1 = spdk_xattr_names_get_name(names, 0); 2002 SPDK_CU_ASSERT_FATAL(name1 != NULL); 2003 CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length")); 2004 name2 = spdk_xattr_names_get_name(names, 1); 2005 SPDK_CU_ASSERT_FATAL(name2 != NULL); 2006 CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length")); 2007 CU_ASSERT(strcmp(name1, name2)); 2008 spdk_xattr_names_free(names); 2009 2010 /* Confirm that remove_xattr fails if md_ro is set to true. 
*/ 2011 blob->md_ro = true; 2012 rc = spdk_blob_remove_xattr(blob, "name"); 2013 CU_ASSERT(rc == -EPERM); 2014 2015 blob->md_ro = false; 2016 rc = spdk_blob_remove_xattr(blob, "name"); 2017 CU_ASSERT(rc == 0); 2018 2019 rc = spdk_blob_remove_xattr(blob, "foobar"); 2020 CU_ASSERT(rc == -ENOENT); 2021 2022 /* Set internal xattr */ 2023 length = 7898; 2024 rc = _spdk_blob_set_xattr(blob, "internal", &length, sizeof(length), true); 2025 CU_ASSERT(rc == 0); 2026 rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2027 CU_ASSERT(rc == 0); 2028 CU_ASSERT(*(uint64_t *)value == length); 2029 /* try to get public xattr with same name */ 2030 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2031 CU_ASSERT(rc != 0); 2032 rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, false); 2033 CU_ASSERT(rc != 0); 2034 /* Check if SPDK_BLOB_INTERNAL_XATTR is set */ 2035 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 2036 SPDK_BLOB_INTERNAL_XATTR); 2037 2038 spdk_blob_close(blob, blob_op_complete, NULL); 2039 poll_threads(); 2040 2041 /* Check if xattrs are persisted */ 2042 ut_bs_reload(&bs, NULL); 2043 2044 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2045 poll_threads(); 2046 CU_ASSERT(g_bserrno == 0); 2047 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2048 blob = g_blob; 2049 2050 rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2051 CU_ASSERT(rc == 0); 2052 CU_ASSERT(*(uint64_t *)value == length); 2053 2054 /* try to get internal xattr trough public call */ 2055 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2056 CU_ASSERT(rc != 0); 2057 2058 rc = _spdk_blob_remove_xattr(blob, "internal", true); 2059 CU_ASSERT(rc == 0); 2060 2061 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0); 2062 2063 spdk_blob_close(blob, blob_op_complete, NULL); 2064 poll_threads(); 2065 CU_ASSERT(g_bserrno == 0); 2066 } 2067 2068 static void 2069 bs_load(void) 2070 { 2071 struct spdk_blob_store *bs; 2072 struct spdk_bs_dev *dev; 2073 spdk_blob_id blobid; 2074 struct spdk_blob *blob; 2075 struct spdk_bs_super_block *super_block; 2076 uint64_t length; 2077 int rc; 2078 const void *value; 2079 size_t value_len; 2080 struct spdk_bs_opts opts; 2081 struct spdk_blob_opts blob_opts; 2082 2083 dev = init_dev(); 2084 spdk_bs_opts_init(&opts); 2085 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2086 2087 /* Initialize a new blob store */ 2088 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2089 poll_threads(); 2090 CU_ASSERT(g_bserrno == 0); 2091 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2092 bs = g_bs; 2093 2094 /* Try to open a blobid that does not exist */ 2095 spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL); 2096 poll_threads(); 2097 CU_ASSERT(g_bserrno == -ENOENT); 2098 CU_ASSERT(g_blob == NULL); 2099 2100 /* Create a blob */ 2101 blob = ut_blob_create_and_open(bs, NULL); 2102 blobid = spdk_blob_get_id(blob); 2103 2104 /* Try again to open valid blob but without the upper bit set */ 2105 spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL); 2106 poll_threads(); 2107 CU_ASSERT(g_bserrno == -ENOENT); 2108 CU_ASSERT(g_blob == NULL); 2109 2110 /* Set some xattrs */ 2111 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2112 CU_ASSERT(rc == 0); 2113 2114 length = 2345; 2115 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2116 CU_ASSERT(rc == 0); 2117 2118 /* Resize the blob */ 
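/* Grow the blob to 10 clusters here so that, after the blobstore is reloaded below, both the xattrs and the cluster count (spdk_blob_get_num_clusters() == 10) can be verified. */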
2119 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2120 poll_threads(); 2121 CU_ASSERT(g_bserrno == 0); 2122 2123 spdk_blob_close(blob, blob_op_complete, NULL); 2124 poll_threads(); 2125 CU_ASSERT(g_bserrno == 0); 2126 blob = NULL; 2127 g_blob = NULL; 2128 g_blobid = SPDK_BLOBID_INVALID; 2129 2130 /* Unload the blob store */ 2131 spdk_bs_unload(bs, bs_op_complete, NULL); 2132 poll_threads(); 2133 CU_ASSERT(g_bserrno == 0); 2134 g_bs = NULL; 2135 g_blob = NULL; 2136 g_blobid = 0; 2137 2138 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2139 CU_ASSERT(super_block->clean == 1); 2140 2141 /* Load should fail for device with an unsupported blocklen */ 2142 dev = init_dev(); 2143 dev->blocklen = SPDK_BS_PAGE_SIZE * 2; 2144 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2145 poll_threads(); 2146 CU_ASSERT(g_bserrno == -EINVAL); 2147 2148 /* Load should fail when max_md_ops is set to zero */ 2149 dev = init_dev(); 2150 spdk_bs_opts_init(&opts); 2151 opts.max_md_ops = 0; 2152 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2153 poll_threads(); 2154 CU_ASSERT(g_bserrno == -EINVAL); 2155 2156 /* Load should fail when max_channel_ops is set to zero */ 2157 dev = init_dev(); 2158 spdk_bs_opts_init(&opts); 2159 opts.max_channel_ops = 0; 2160 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2161 poll_threads(); 2162 CU_ASSERT(g_bserrno == -EINVAL); 2163 2164 /* Load an existing blob store */ 2165 dev = init_dev(); 2166 spdk_bs_opts_init(&opts); 2167 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2168 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2169 poll_threads(); 2170 CU_ASSERT(g_bserrno == 0); 2171 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2172 bs = g_bs; 2173 2174 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2175 CU_ASSERT(super_block->clean == 1); 2176 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2177 2178 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2179 poll_threads(); 2180 CU_ASSERT(g_bserrno == 0); 2181 CU_ASSERT(g_blob != NULL); 2182 blob = g_blob; 2183 2184 /* Verify that blobstore is marked dirty after first metadata sync */ 2185 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2186 CU_ASSERT(super_block->clean == 1); 2187 2188 /* Get the xattrs */ 2189 value = NULL; 2190 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2191 CU_ASSERT(rc == 0); 2192 SPDK_CU_ASSERT_FATAL(value != NULL); 2193 CU_ASSERT(*(uint64_t *)value == length); 2194 CU_ASSERT(value_len == 8); 2195 2196 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2197 CU_ASSERT(rc == -ENOENT); 2198 2199 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 2200 2201 spdk_blob_close(blob, blob_op_complete, NULL); 2202 poll_threads(); 2203 CU_ASSERT(g_bserrno == 0); 2204 blob = NULL; 2205 g_blob = NULL; 2206 2207 spdk_bs_unload(bs, bs_op_complete, NULL); 2208 poll_threads(); 2209 CU_ASSERT(g_bserrno == 0); 2210 g_bs = NULL; 2211 2212 /* Load should fail: bdev size < saved size */ 2213 dev = init_dev(); 2214 dev->blockcnt /= 2; 2215 2216 spdk_bs_opts_init(&opts); 2217 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2218 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2219 poll_threads(); 2220 2221 CU_ASSERT(g_bserrno == -EILSEQ); 2222 2223 /* Load should succeed: bdev size > saved size */ 2224 dev = init_dev(); 2225 dev->blockcnt *= 4; 2226 2227 spdk_bs_opts_init(&opts); 2228 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
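/* With the device grown to 4x its block count above, this load is expected to succeed even though the device is now larger than the size recorded in the super block. */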
2229 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2230 poll_threads(); 2231 CU_ASSERT(g_bserrno == 0); 2232 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2233 bs = g_bs; 2234 2235 CU_ASSERT(g_bserrno == 0); 2236 spdk_bs_unload(bs, bs_op_complete, NULL); 2237 poll_threads(); 2238 2239 2240 /* Test compatibility mode */ 2241 2242 dev = init_dev(); 2243 super_block->size = 0; 2244 super_block->crc = _spdk_blob_md_page_calc_crc(super_block); 2245 2246 spdk_bs_opts_init(&opts); 2247 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2248 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2249 poll_threads(); 2250 CU_ASSERT(g_bserrno == 0); 2251 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2252 bs = g_bs; 2253 2254 /* Create a blob */ 2255 ut_spdk_blob_opts_init(&blob_opts); 2256 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2257 poll_threads(); 2258 CU_ASSERT(g_bserrno == 0); 2259 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2260 2261 /* Blobstore should update number of blocks in super_block */ 2262 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2263 CU_ASSERT(super_block->clean == 0); 2264 2265 spdk_bs_unload(bs, bs_op_complete, NULL); 2266 poll_threads(); 2267 CU_ASSERT(g_bserrno == 0); 2268 CU_ASSERT(super_block->clean == 1); 2269 g_bs = NULL; 2270 2271 } 2272 2273 static void 2274 bs_load_pending_removal(void) 2275 { 2276 struct spdk_blob_store *bs = g_bs; 2277 struct spdk_blob_opts opts; 2278 struct spdk_blob *blob, *snapshot; 2279 spdk_blob_id blobid, snapshotid; 2280 const void *value; 2281 size_t value_len; 2282 int rc; 2283 2284 /* Create blob */ 2285 ut_spdk_blob_opts_init(&opts); 2286 opts.num_clusters = 10; 2287 2288 blob = ut_blob_create_and_open(bs, &opts); 2289 blobid = spdk_blob_get_id(blob); 2290 2291 /* Create snapshot */ 2292 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 2293 poll_threads(); 2294 CU_ASSERT(g_bserrno == 0); 2295 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2296 snapshotid = g_blobid; 2297 2298 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2299 poll_threads(); 2300 CU_ASSERT(g_bserrno == 0); 2301 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2302 snapshot = g_blob; 2303 2304 /* Set SNAPSHOT_PENDING_REMOVAL xattr */ 2305 snapshot->md_ro = false; 2306 rc = _spdk_blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2307 CU_ASSERT(rc == 0); 2308 snapshot->md_ro = true; 2309 2310 spdk_blob_close(snapshot, blob_op_complete, NULL); 2311 poll_threads(); 2312 CU_ASSERT(g_bserrno == 0); 2313 2314 spdk_blob_close(blob, blob_op_complete, NULL); 2315 poll_threads(); 2316 CU_ASSERT(g_bserrno == 0); 2317 2318 /* Reload blobstore */ 2319 ut_bs_reload(&bs, NULL); 2320 2321 /* Snapshot should not be removed as blob is still pointing to it */ 2322 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2323 poll_threads(); 2324 CU_ASSERT(g_bserrno == 0); 2325 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2326 snapshot = g_blob; 2327 2328 /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ 2329 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 2330 CU_ASSERT(rc != 0); 2331 2332 /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ 2333 snapshot->md_ro = false; 2334 rc = _spdk_blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2335 CU_ASSERT(rc == 0); 2336 snapshot->md_ro = true; 2337 2338 spdk_bs_open_blob(bs, blobid, 
blob_op_with_handle_complete, NULL); 2339 poll_threads(); 2340 CU_ASSERT(g_bserrno == 0); 2341 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2342 blob = g_blob; 2343 2344 /* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ 2345 _spdk_blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 2346 2347 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2348 poll_threads(); 2349 CU_ASSERT(g_bserrno == 0); 2350 2351 spdk_blob_close(snapshot, blob_op_complete, NULL); 2352 poll_threads(); 2353 CU_ASSERT(g_bserrno == 0); 2354 2355 spdk_blob_close(blob, blob_op_complete, NULL); 2356 poll_threads(); 2357 CU_ASSERT(g_bserrno == 0); 2358 2359 /* Reload blobstore */ 2360 ut_bs_reload(&bs, NULL); 2361 2362 /* Snapshot should be removed as blob is not pointing to it anymore */ 2363 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2364 poll_threads(); 2365 CU_ASSERT(g_bserrno != 0); 2366 } 2367 2368 static void 2369 bs_load_custom_cluster_size(void) 2370 { 2371 struct spdk_blob_store *bs; 2372 struct spdk_bs_dev *dev; 2373 struct spdk_bs_super_block *super_block; 2374 struct spdk_bs_opts opts; 2375 uint32_t custom_cluster_size = 4194304; /* 4MiB */ 2376 uint32_t cluster_sz; 2377 uint64_t total_clusters; 2378 2379 dev = init_dev(); 2380 spdk_bs_opts_init(&opts); 2381 opts.cluster_sz = custom_cluster_size; 2382 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2383 2384 /* Initialize a new blob store */ 2385 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2386 poll_threads(); 2387 CU_ASSERT(g_bserrno == 0); 2388 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2389 bs = g_bs; 2390 cluster_sz = bs->cluster_sz; 2391 total_clusters = bs->total_clusters; 2392 2393 /* Unload the blob store */ 2394 spdk_bs_unload(bs, bs_op_complete, NULL); 2395 poll_threads(); 2396 CU_ASSERT(g_bserrno == 0); 2397 g_bs = NULL; 2398 g_blob = NULL; 2399 g_blobid = 0; 2400 2401 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2402 CU_ASSERT(super_block->clean == 1); 2403 2404 /* Load an existing blob store */ 2405 dev = init_dev(); 2406 spdk_bs_opts_init(&opts); 2407 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2408 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2409 poll_threads(); 2410 CU_ASSERT(g_bserrno == 0); 2411 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2412 bs = g_bs; 2413 /* Compare cluster size and number to one after initialization */ 2414 CU_ASSERT(cluster_sz == bs->cluster_sz); 2415 CU_ASSERT(total_clusters == bs->total_clusters); 2416 2417 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2418 CU_ASSERT(super_block->clean == 1); 2419 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2420 2421 spdk_bs_unload(bs, bs_op_complete, NULL); 2422 poll_threads(); 2423 CU_ASSERT(g_bserrno == 0); 2424 CU_ASSERT(super_block->clean == 1); 2425 g_bs = NULL; 2426 } 2427 2428 static void 2429 bs_type(void) 2430 { 2431 struct spdk_blob_store *bs; 2432 struct spdk_bs_dev *dev; 2433 struct spdk_bs_opts opts; 2434 2435 dev = init_dev(); 2436 spdk_bs_opts_init(&opts); 2437 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2438 2439 /* Initialize a new blob store */ 2440 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2441 poll_threads(); 2442 CU_ASSERT(g_bserrno == 0); 2443 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2444 bs = g_bs; 2445 2446 /* Unload the blob store */ 2447 spdk_bs_unload(bs, bs_op_complete, NULL); 2448 poll_threads(); 2449 CU_ASSERT(g_bserrno == 0); 2450 g_bs = NULL; 2451 g_blob = NULL; 2452 g_blobid = 0; 
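/* The store above was initialized with bstype "TESTTYPE". The loads below check that a mismatched bstype ("NONEXISTING") is rejected, while a load with an empty bstype is accepted. */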
2453 2454 /* Load non existing blobstore type */ 2455 dev = init_dev(); 2456 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2457 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2458 poll_threads(); 2459 CU_ASSERT(g_bserrno != 0); 2460 2461 /* Load with empty blobstore type */ 2462 dev = init_dev(); 2463 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2464 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2465 poll_threads(); 2466 CU_ASSERT(g_bserrno == 0); 2467 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2468 bs = g_bs; 2469 2470 spdk_bs_unload(bs, bs_op_complete, NULL); 2471 poll_threads(); 2472 CU_ASSERT(g_bserrno == 0); 2473 g_bs = NULL; 2474 2475 /* Initialize a new blob store with empty bstype */ 2476 dev = init_dev(); 2477 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2478 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2479 poll_threads(); 2480 CU_ASSERT(g_bserrno == 0); 2481 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2482 bs = g_bs; 2483 2484 spdk_bs_unload(bs, bs_op_complete, NULL); 2485 poll_threads(); 2486 CU_ASSERT(g_bserrno == 0); 2487 g_bs = NULL; 2488 2489 /* Load non existing blobstore type */ 2490 dev = init_dev(); 2491 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2492 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2493 poll_threads(); 2494 CU_ASSERT(g_bserrno != 0); 2495 2496 /* Load with empty blobstore type */ 2497 dev = init_dev(); 2498 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2499 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2500 poll_threads(); 2501 CU_ASSERT(g_bserrno == 0); 2502 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2503 bs = g_bs; 2504 2505 spdk_bs_unload(bs, bs_op_complete, NULL); 2506 poll_threads(); 2507 CU_ASSERT(g_bserrno == 0); 2508 g_bs = NULL; 2509 } 2510 2511 static void 2512 bs_super_block(void) 2513 { 2514 struct spdk_blob_store *bs; 2515 struct spdk_bs_dev *dev; 2516 struct spdk_bs_super_block *super_block; 2517 struct spdk_bs_opts opts; 2518 struct spdk_bs_super_block_ver1 super_block_v1; 2519 2520 dev = init_dev(); 2521 spdk_bs_opts_init(&opts); 2522 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2523 2524 /* Initialize a new blob store */ 2525 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2526 poll_threads(); 2527 CU_ASSERT(g_bserrno == 0); 2528 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2529 bs = g_bs; 2530 2531 /* Unload the blob store */ 2532 spdk_bs_unload(bs, bs_op_complete, NULL); 2533 poll_threads(); 2534 CU_ASSERT(g_bserrno == 0); 2535 g_bs = NULL; 2536 g_blob = NULL; 2537 g_blobid = 0; 2538 2539 /* Load an existing blob store with version newer than supported */ 2540 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2541 super_block->version++; 2542 2543 dev = init_dev(); 2544 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2545 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2546 poll_threads(); 2547 CU_ASSERT(g_bserrno != 0); 2548 2549 /* Create a new blob store with super block version 1 */ 2550 dev = init_dev(); 2551 super_block_v1.version = 1; 2552 memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); 2553 super_block_v1.length = 0x1000; 2554 super_block_v1.clean = 1; 2555 super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; 2556 super_block_v1.cluster_size = 0x100000; 2557 super_block_v1.used_page_mask_start = 0x01; 2558 super_block_v1.used_page_mask_len = 0x01; 2559 super_block_v1.used_cluster_mask_start = 0x02; 
2560 super_block_v1.used_cluster_mask_len = 0x01; 2561 super_block_v1.md_start = 0x03; 2562 super_block_v1.md_len = 0x40; 2563 memset(super_block_v1.reserved, 0, 4036); 2564 super_block_v1.crc = _spdk_blob_md_page_calc_crc(&super_block_v1); 2565 memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); 2566 2567 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2568 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2569 poll_threads(); 2570 CU_ASSERT(g_bserrno == 0); 2571 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2572 bs = g_bs; 2573 2574 spdk_bs_unload(bs, bs_op_complete, NULL); 2575 poll_threads(); 2576 CU_ASSERT(g_bserrno == 0); 2577 g_bs = NULL; 2578 } 2579 2580 /* 2581 * Create a blobstore and then unload it. 2582 */ 2583 static void 2584 bs_unload(void) 2585 { 2586 struct spdk_blob_store *bs = g_bs; 2587 struct spdk_blob *blob; 2588 2589 /* Create a blob and open it. */ 2590 blob = ut_blob_create_and_open(bs, NULL); 2591 2592 /* Try to unload blobstore, should fail with open blob */ 2593 g_bserrno = -1; 2594 spdk_bs_unload(bs, bs_op_complete, NULL); 2595 poll_threads(); 2596 CU_ASSERT(g_bserrno == -EBUSY); 2597 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2598 2599 /* Close the blob, then successfully unload blobstore */ 2600 g_bserrno = -1; 2601 spdk_blob_close(blob, blob_op_complete, NULL); 2602 poll_threads(); 2603 CU_ASSERT(g_bserrno == 0); 2604 } 2605 2606 /* 2607 * Create a blobstore with a cluster size different than the default, and ensure it is 2608 * persisted. 2609 */ 2610 static void 2611 bs_cluster_sz(void) 2612 { 2613 struct spdk_blob_store *bs; 2614 struct spdk_bs_dev *dev; 2615 struct spdk_bs_opts opts; 2616 uint32_t cluster_sz; 2617 2618 /* Set cluster size to zero */ 2619 dev = init_dev(); 2620 spdk_bs_opts_init(&opts); 2621 opts.cluster_sz = 0; 2622 2623 /* Initialize a new blob store */ 2624 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2625 poll_threads(); 2626 CU_ASSERT(g_bserrno == -EINVAL); 2627 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2628 2629 /* 2630 * Set cluster size to blobstore page size, 2631 * to work it is required to be at least twice the blobstore page size. 2632 */ 2633 dev = init_dev(); 2634 spdk_bs_opts_init(&opts); 2635 opts.cluster_sz = SPDK_BS_PAGE_SIZE; 2636 2637 /* Initialize a new blob store */ 2638 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2639 poll_threads(); 2640 CU_ASSERT(g_bserrno == -ENOMEM); 2641 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2642 2643 /* 2644 * Set cluster size to lower than page size, 2645 * to work it is required to be at least twice the blobstore page size. 
2646 */ 2647 dev = init_dev(); 2648 spdk_bs_opts_init(&opts); 2649 opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; 2650 2651 /* Initialize a new blob store */ 2652 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2653 poll_threads(); 2654 CU_ASSERT(g_bserrno == -EINVAL); 2655 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2656 2657 /* Set cluster size to twice the default */ 2658 dev = init_dev(); 2659 spdk_bs_opts_init(&opts); 2660 opts.cluster_sz *= 2; 2661 cluster_sz = opts.cluster_sz; 2662 2663 /* Initialize a new blob store */ 2664 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2665 poll_threads(); 2666 CU_ASSERT(g_bserrno == 0); 2667 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2668 bs = g_bs; 2669 2670 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2671 2672 ut_bs_reload(&bs, &opts); 2673 2674 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2675 2676 spdk_bs_unload(bs, bs_op_complete, NULL); 2677 poll_threads(); 2678 CU_ASSERT(g_bserrno == 0); 2679 g_bs = NULL; 2680 } 2681 2682 /* 2683 * Create a blobstore, reload it and ensure total usable cluster count 2684 * stays the same. 2685 */ 2686 static void 2687 bs_usable_clusters(void) 2688 { 2689 struct spdk_blob_store *bs = g_bs; 2690 struct spdk_blob *blob; 2691 uint32_t clusters; 2692 int i; 2693 2694 2695 clusters = spdk_bs_total_data_cluster_count(bs); 2696 2697 ut_bs_reload(&bs, NULL); 2698 2699 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2700 2701 /* Create and resize blobs to make sure that useable cluster count won't change */ 2702 for (i = 0; i < 4; i++) { 2703 g_bserrno = -1; 2704 g_blobid = SPDK_BLOBID_INVALID; 2705 blob = ut_blob_create_and_open(bs, NULL); 2706 2707 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2708 poll_threads(); 2709 CU_ASSERT(g_bserrno == 0); 2710 2711 g_bserrno = -1; 2712 spdk_blob_close(blob, blob_op_complete, NULL); 2713 poll_threads(); 2714 CU_ASSERT(g_bserrno == 0); 2715 2716 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2717 } 2718 2719 /* Reload the blob store to make sure that nothing changed */ 2720 ut_bs_reload(&bs, NULL); 2721 2722 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2723 } 2724 2725 /* 2726 * Test resizing of the metadata blob. This requires creating enough blobs 2727 * so that one cluster is not enough to fit the metadata for those blobs. 2728 * To induce this condition to happen more quickly, we reduce the cluster 2729 * size to 16KB, which means only 4 4KB blob metadata pages can fit. 
2730 */ 2731 static void 2732 bs_resize_md(void) 2733 { 2734 struct spdk_blob_store *bs; 2735 const int CLUSTER_PAGE_COUNT = 4; 2736 const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; 2737 struct spdk_bs_dev *dev; 2738 struct spdk_bs_opts opts; 2739 struct spdk_blob *blob; 2740 struct spdk_blob_opts blob_opts; 2741 uint32_t cluster_sz; 2742 spdk_blob_id blobids[NUM_BLOBS]; 2743 int i; 2744 2745 2746 dev = init_dev(); 2747 spdk_bs_opts_init(&opts); 2748 opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; 2749 cluster_sz = opts.cluster_sz; 2750 2751 /* Initialize a new blob store */ 2752 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2753 poll_threads(); 2754 CU_ASSERT(g_bserrno == 0); 2755 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2756 bs = g_bs; 2757 2758 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2759 2760 ut_spdk_blob_opts_init(&blob_opts); 2761 2762 for (i = 0; i < NUM_BLOBS; i++) { 2763 g_bserrno = -1; 2764 g_blobid = SPDK_BLOBID_INVALID; 2765 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2766 poll_threads(); 2767 CU_ASSERT(g_bserrno == 0); 2768 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2769 blobids[i] = g_blobid; 2770 } 2771 2772 ut_bs_reload(&bs, &opts); 2773 2774 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2775 2776 for (i = 0; i < NUM_BLOBS; i++) { 2777 g_bserrno = -1; 2778 g_blob = NULL; 2779 spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); 2780 poll_threads(); 2781 CU_ASSERT(g_bserrno == 0); 2782 CU_ASSERT(g_blob != NULL); 2783 blob = g_blob; 2784 g_bserrno = -1; 2785 spdk_blob_close(blob, blob_op_complete, NULL); 2786 poll_threads(); 2787 CU_ASSERT(g_bserrno == 0); 2788 } 2789 2790 spdk_bs_unload(bs, bs_op_complete, NULL); 2791 poll_threads(); 2792 CU_ASSERT(g_bserrno == 0); 2793 g_bs = NULL; 2794 } 2795 2796 static void 2797 bs_destroy(void) 2798 { 2799 struct spdk_blob_store *bs; 2800 struct spdk_bs_dev *dev; 2801 2802 /* Initialize a new blob store */ 2803 dev = init_dev(); 2804 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2805 poll_threads(); 2806 CU_ASSERT(g_bserrno == 0); 2807 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2808 bs = g_bs; 2809 2810 /* Destroy the blob store */ 2811 g_bserrno = -1; 2812 spdk_bs_destroy(bs, bs_op_complete, NULL); 2813 poll_threads(); 2814 CU_ASSERT(g_bserrno == 0); 2815 2816 /* Loading an non-existent blob store should fail. 
*/ 2817 g_bs = NULL; 2818 dev = init_dev(); 2819 2820 g_bserrno = 0; 2821 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2822 poll_threads(); 2823 CU_ASSERT(g_bserrno != 0); 2824 } 2825 2826 /* Try to hit all of the corner cases associated with serializing 2827 * a blob to disk 2828 */ 2829 static void 2830 blob_serialize(void) 2831 { 2832 struct spdk_bs_dev *dev; 2833 struct spdk_bs_opts opts; 2834 struct spdk_blob_store *bs; 2835 spdk_blob_id blobid[2]; 2836 struct spdk_blob *blob[2]; 2837 uint64_t i; 2838 char *value; 2839 int rc; 2840 2841 dev = init_dev(); 2842 2843 /* Initialize a new blobstore with very small clusters */ 2844 spdk_bs_opts_init(&opts); 2845 opts.cluster_sz = dev->blocklen * 8; 2846 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2847 poll_threads(); 2848 CU_ASSERT(g_bserrno == 0); 2849 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2850 bs = g_bs; 2851 2852 /* Create and open two blobs */ 2853 for (i = 0; i < 2; i++) { 2854 blob[i] = ut_blob_create_and_open(bs, NULL); 2855 blobid[i] = spdk_blob_get_id(blob[i]); 2856 2857 /* Set a fairly large xattr on both blobs to eat up 2858 * metadata space 2859 */ 2860 value = calloc(dev->blocklen - 64, sizeof(char)); 2861 SPDK_CU_ASSERT_FATAL(value != NULL); 2862 memset(value, i, dev->blocklen / 2); 2863 rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); 2864 CU_ASSERT(rc == 0); 2865 free(value); 2866 } 2867 2868 /* Resize the blobs, alternating 1 cluster at a time. 2869 * This thwarts run length encoding and will cause spill 2870 * over of the extents. 2871 */ 2872 for (i = 0; i < 6; i++) { 2873 spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); 2874 poll_threads(); 2875 CU_ASSERT(g_bserrno == 0); 2876 } 2877 2878 for (i = 0; i < 2; i++) { 2879 spdk_blob_sync_md(blob[i], blob_op_complete, NULL); 2880 poll_threads(); 2881 CU_ASSERT(g_bserrno == 0); 2882 } 2883 2884 /* Close the blobs */ 2885 for (i = 0; i < 2; i++) { 2886 spdk_blob_close(blob[i], blob_op_complete, NULL); 2887 poll_threads(); 2888 CU_ASSERT(g_bserrno == 0); 2889 } 2890 2891 ut_bs_reload(&bs, &opts); 2892 2893 for (i = 0; i < 2; i++) { 2894 blob[i] = NULL; 2895 2896 spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); 2897 poll_threads(); 2898 CU_ASSERT(g_bserrno == 0); 2899 CU_ASSERT(g_blob != NULL); 2900 blob[i] = g_blob; 2901 2902 CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); 2903 2904 spdk_blob_close(blob[i], blob_op_complete, NULL); 2905 poll_threads(); 2906 CU_ASSERT(g_bserrno == 0); 2907 } 2908 2909 spdk_bs_unload(bs, bs_op_complete, NULL); 2910 poll_threads(); 2911 CU_ASSERT(g_bserrno == 0); 2912 g_bs = NULL; 2913 } 2914 2915 static void 2916 blob_crc(void) 2917 { 2918 struct spdk_blob_store *bs = g_bs; 2919 struct spdk_blob *blob; 2920 spdk_blob_id blobid; 2921 uint32_t page_num; 2922 int index; 2923 struct spdk_blob_md_page *page; 2924 2925 blob = ut_blob_create_and_open(bs, NULL); 2926 blobid = spdk_blob_get_id(blob); 2927 2928 spdk_blob_close(blob, blob_op_complete, NULL); 2929 poll_threads(); 2930 CU_ASSERT(g_bserrno == 0); 2931 2932 page_num = _spdk_bs_blobid_to_page(blobid); 2933 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 2934 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 2935 page->crc = 0; 2936 2937 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2938 poll_threads(); 2939 CU_ASSERT(g_bserrno == -EINVAL); 2940 CU_ASSERT(g_blob == NULL); 2941 g_bserrno = 0; 2942 2943 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 2944 
poll_threads(); 2945 CU_ASSERT(g_bserrno == -EINVAL); 2946 } 2947 2948 static void 2949 super_block_crc(void) 2950 { 2951 struct spdk_blob_store *bs; 2952 struct spdk_bs_dev *dev; 2953 struct spdk_bs_super_block *super_block; 2954 2955 dev = init_dev(); 2956 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2957 poll_threads(); 2958 CU_ASSERT(g_bserrno == 0); 2959 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2960 bs = g_bs; 2961 2962 spdk_bs_unload(bs, bs_op_complete, NULL); 2963 poll_threads(); 2964 CU_ASSERT(g_bserrno == 0); 2965 g_bs = NULL; 2966 2967 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2968 super_block->crc = 0; 2969 dev = init_dev(); 2970 2971 /* Load an existing blob store */ 2972 g_bserrno = 0; 2973 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2974 poll_threads(); 2975 CU_ASSERT(g_bserrno == -EILSEQ); 2976 } 2977 2978 /* For the blob dirty shutdown test case we run the following sub-tests: 2979 * 1 Initialize new blob store and create 1 super blob with some xattrs, then we 2980 * dirty shutdown and reload the blob store and verify the xattrs. 2981 * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown, 2982 * reload the blob store and verify the cluster count. 2983 * 3 Create the second blob and then dirty shutdown, reload the blob store 2984 * and verify the second blob. 2985 * 4 Delete the second blob and then dirty shutdown, reload the blob store 2986 * and verify the second blob is invalid. 2987 * 5 Create the second blob again and also create the third blob, modify the 2988 * md of the second blob which makes the md invalid, and then dirty shutdown, 2989 * reload the blob store and verify the second blob, which should be invalid, and also 2990 * verify the third blob, which should be correct. 2991 */ 2992 static void 2993 blob_dirty_shutdown(void) 2994 { 2995 int rc; 2996 int index; 2997 struct spdk_blob_store *bs = g_bs; 2998 spdk_blob_id blobid1, blobid2, blobid3; 2999 struct spdk_blob *blob; 3000 uint64_t length; 3001 uint64_t free_clusters; 3002 const void *value; 3003 size_t value_len; 3004 uint32_t page_num; 3005 struct spdk_blob_md_page *page; 3006 struct spdk_blob_opts blob_opts; 3007 3008 /* Create first blob */ 3009 blob = ut_blob_create_and_open(bs, NULL); 3010 blobid1 = spdk_blob_get_id(blob); 3011 3012 /* Set some xattrs */ 3013 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 3014 CU_ASSERT(rc == 0); 3015 3016 length = 2345; 3017 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3018 CU_ASSERT(rc == 0); 3019 3020 /* Put an xattr that fits exactly in a single page. 3021 * This results in adding additional pages to the MD. 3022 * The first holds the flags and smaller xattrs, the second the large xattr, 3023 * and the third just the extents.
3024 */ 3025 size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - 3026 strlen("large_xattr"); 3027 char *xattr = calloc(xattr_length, sizeof(char)); 3028 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3029 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3030 free(xattr); 3031 SPDK_CU_ASSERT_FATAL(rc == 0); 3032 3033 /* Resize the blob */ 3034 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3035 poll_threads(); 3036 CU_ASSERT(g_bserrno == 0); 3037 3038 /* Set the blob as the super blob */ 3039 spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); 3040 poll_threads(); 3041 CU_ASSERT(g_bserrno == 0); 3042 3043 free_clusters = spdk_bs_free_cluster_count(bs); 3044 3045 spdk_blob_close(blob, blob_op_complete, NULL); 3046 poll_threads(); 3047 CU_ASSERT(g_bserrno == 0); 3048 blob = NULL; 3049 g_blob = NULL; 3050 g_blobid = SPDK_BLOBID_INVALID; 3051 3052 ut_bs_dirty_load(&bs, NULL); 3053 3054 /* Get the super blob */ 3055 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 3056 poll_threads(); 3057 CU_ASSERT(g_bserrno == 0); 3058 CU_ASSERT(blobid1 == g_blobid); 3059 3060 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3061 poll_threads(); 3062 CU_ASSERT(g_bserrno == 0); 3063 CU_ASSERT(g_blob != NULL); 3064 blob = g_blob; 3065 3066 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3067 3068 /* Get the xattrs */ 3069 value = NULL; 3070 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3071 CU_ASSERT(rc == 0); 3072 SPDK_CU_ASSERT_FATAL(value != NULL); 3073 CU_ASSERT(*(uint64_t *)value == length); 3074 CU_ASSERT(value_len == 8); 3075 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3076 3077 /* Resize the blob */ 3078 spdk_blob_resize(blob, 20, blob_op_complete, NULL); 3079 poll_threads(); 3080 CU_ASSERT(g_bserrno == 0); 3081 3082 free_clusters = spdk_bs_free_cluster_count(bs); 3083 3084 spdk_blob_close(blob, blob_op_complete, NULL); 3085 poll_threads(); 3086 CU_ASSERT(g_bserrno == 0); 3087 blob = NULL; 3088 g_blob = NULL; 3089 g_blobid = SPDK_BLOBID_INVALID; 3090 3091 ut_bs_dirty_load(&bs, NULL); 3092 3093 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3094 poll_threads(); 3095 CU_ASSERT(g_bserrno == 0); 3096 CU_ASSERT(g_blob != NULL); 3097 blob = g_blob; 3098 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); 3099 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3100 3101 spdk_blob_close(blob, blob_op_complete, NULL); 3102 poll_threads(); 3103 CU_ASSERT(g_bserrno == 0); 3104 blob = NULL; 3105 g_blob = NULL; 3106 g_blobid = SPDK_BLOBID_INVALID; 3107 3108 /* Create second blob */ 3109 blob = ut_blob_create_and_open(bs, NULL); 3110 blobid2 = spdk_blob_get_id(blob); 3111 3112 /* Set some xattrs */ 3113 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3114 CU_ASSERT(rc == 0); 3115 3116 length = 5432; 3117 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3118 CU_ASSERT(rc == 0); 3119 3120 /* Resize the blob */ 3121 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3122 poll_threads(); 3123 CU_ASSERT(g_bserrno == 0); 3124 3125 free_clusters = spdk_bs_free_cluster_count(bs); 3126 3127 spdk_blob_close(blob, blob_op_complete, NULL); 3128 poll_threads(); 3129 CU_ASSERT(g_bserrno == 0); 3130 blob = NULL; 3131 g_blob = NULL; 3132 g_blobid = SPDK_BLOBID_INVALID; 3133 3134 ut_bs_dirty_load(&bs, NULL); 3135 3136 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3137 poll_threads(); 3138 CU_ASSERT(g_bserrno == 0); 3139 
CU_ASSERT(g_blob != NULL); 3140 blob = g_blob; 3141 3142 /* Get the xattrs */ 3143 value = NULL; 3144 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3145 CU_ASSERT(rc == 0); 3146 SPDK_CU_ASSERT_FATAL(value != NULL); 3147 CU_ASSERT(*(uint64_t *)value == length); 3148 CU_ASSERT(value_len == 8); 3149 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3150 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3151 3152 ut_blob_close_and_delete(bs, blob); 3153 3154 free_clusters = spdk_bs_free_cluster_count(bs); 3155 3156 ut_bs_dirty_load(&bs, NULL); 3157 3158 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3159 poll_threads(); 3160 CU_ASSERT(g_bserrno != 0); 3161 CU_ASSERT(g_blob == NULL); 3162 3163 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3164 poll_threads(); 3165 CU_ASSERT(g_bserrno == 0); 3166 CU_ASSERT(g_blob != NULL); 3167 blob = g_blob; 3168 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3169 spdk_blob_close(blob, blob_op_complete, NULL); 3170 poll_threads(); 3171 CU_ASSERT(g_bserrno == 0); 3172 3173 ut_bs_reload(&bs, NULL); 3174 3175 /* Create second blob */ 3176 ut_spdk_blob_opts_init(&blob_opts); 3177 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3178 poll_threads(); 3179 CU_ASSERT(g_bserrno == 0); 3180 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3181 blobid2 = g_blobid; 3182 3183 /* Create third blob */ 3184 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3185 poll_threads(); 3186 CU_ASSERT(g_bserrno == 0); 3187 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3188 blobid3 = g_blobid; 3189 3190 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3191 poll_threads(); 3192 CU_ASSERT(g_bserrno == 0); 3193 CU_ASSERT(g_blob != NULL); 3194 blob = g_blob; 3195 3196 /* Set some xattrs for second blob */ 3197 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3198 CU_ASSERT(rc == 0); 3199 3200 length = 5432; 3201 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3202 CU_ASSERT(rc == 0); 3203 3204 spdk_blob_close(blob, blob_op_complete, NULL); 3205 poll_threads(); 3206 CU_ASSERT(g_bserrno == 0); 3207 blob = NULL; 3208 g_blob = NULL; 3209 g_blobid = SPDK_BLOBID_INVALID; 3210 3211 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3212 poll_threads(); 3213 CU_ASSERT(g_bserrno == 0); 3214 CU_ASSERT(g_blob != NULL); 3215 blob = g_blob; 3216 3217 /* Set some xattrs for third blob */ 3218 rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); 3219 CU_ASSERT(rc == 0); 3220 3221 length = 5432; 3222 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3223 CU_ASSERT(rc == 0); 3224 3225 spdk_blob_close(blob, blob_op_complete, NULL); 3226 poll_threads(); 3227 CU_ASSERT(g_bserrno == 0); 3228 blob = NULL; 3229 g_blob = NULL; 3230 g_blobid = SPDK_BLOBID_INVALID; 3231 3232 /* Mark second blob as invalid */ 3233 page_num = _spdk_bs_blobid_to_page(blobid2); 3234 3235 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3236 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3237 page->sequence_num = 1; 3238 page->crc = _spdk_blob_md_page_calc_crc(page); 3239 3240 free_clusters = spdk_bs_free_cluster_count(bs); 3241 3242 ut_bs_dirty_load(&bs, NULL); 3243 3244 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3245 poll_threads(); 3246 CU_ASSERT(g_bserrno != 0); 3247 CU_ASSERT(g_blob == NULL); 3248 3249 spdk_bs_open_blob(bs, blobid3, 
blob_op_with_handle_complete, NULL); 3250 poll_threads(); 3251 CU_ASSERT(g_bserrno == 0); 3252 CU_ASSERT(g_blob != NULL); 3253 blob = g_blob; 3254 3255 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3256 3257 spdk_blob_close(blob, blob_op_complete, NULL); 3258 poll_threads(); 3259 CU_ASSERT(g_bserrno == 0); 3260 blob = NULL; 3261 g_blob = NULL; 3262 g_blobid = SPDK_BLOBID_INVALID; 3263 } 3264 3265 static void 3266 blob_flags(void) 3267 { 3268 struct spdk_blob_store *bs = g_bs; 3269 spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; 3270 struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; 3271 struct spdk_blob_opts blob_opts; 3272 int rc; 3273 3274 /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */ 3275 blob_invalid = ut_blob_create_and_open(bs, NULL); 3276 blobid_invalid = spdk_blob_get_id(blob_invalid); 3277 3278 blob_data_ro = ut_blob_create_and_open(bs, NULL); 3279 blobid_data_ro = spdk_blob_get_id(blob_data_ro); 3280 3281 ut_spdk_blob_opts_init(&blob_opts); 3282 blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; 3283 blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); 3284 blobid_md_ro = spdk_blob_get_id(blob_md_ro); 3285 CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); 3286 3287 /* Change the size of blob_data_ro to check if flags are serialized 3288 * when blob has non zero number of extents */ 3289 spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); 3290 poll_threads(); 3291 CU_ASSERT(g_bserrno == 0); 3292 3293 /* Set the xattr to check if flags are serialized 3294 * when blob has non zero number of xattrs */ 3295 rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); 3296 CU_ASSERT(rc == 0); 3297 3298 blob_invalid->invalid_flags = (1ULL << 63); 3299 blob_invalid->state = SPDK_BLOB_STATE_DIRTY; 3300 blob_data_ro->data_ro_flags = (1ULL << 62); 3301 blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; 3302 blob_md_ro->md_ro_flags = (1ULL << 61); 3303 blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; 3304 3305 g_bserrno = -1; 3306 spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); 3307 poll_threads(); 3308 CU_ASSERT(g_bserrno == 0); 3309 g_bserrno = -1; 3310 spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); 3311 poll_threads(); 3312 CU_ASSERT(g_bserrno == 0); 3313 g_bserrno = -1; 3314 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3315 poll_threads(); 3316 CU_ASSERT(g_bserrno == 0); 3317 3318 g_bserrno = -1; 3319 spdk_blob_close(blob_invalid, blob_op_complete, NULL); 3320 poll_threads(); 3321 CU_ASSERT(g_bserrno == 0); 3322 blob_invalid = NULL; 3323 g_bserrno = -1; 3324 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3325 poll_threads(); 3326 CU_ASSERT(g_bserrno == 0); 3327 blob_data_ro = NULL; 3328 g_bserrno = -1; 3329 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3330 poll_threads(); 3331 CU_ASSERT(g_bserrno == 0); 3332 blob_md_ro = NULL; 3333 3334 g_blob = NULL; 3335 g_blobid = SPDK_BLOBID_INVALID; 3336 3337 ut_bs_reload(&bs, NULL); 3338 3339 g_blob = NULL; 3340 g_bserrno = 0; 3341 spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); 3342 poll_threads(); 3343 CU_ASSERT(g_bserrno != 0); 3344 CU_ASSERT(g_blob == NULL); 3345 3346 g_blob = NULL; 3347 g_bserrno = -1; 3348 spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL); 3349 poll_threads(); 3350 CU_ASSERT(g_bserrno == 0); 3351 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3352 blob_data_ro = g_blob; 3353 /* If an unknown data_ro 
flag was found, the blob should be marked both data and md read-only. */ 3354 CU_ASSERT(blob_data_ro->data_ro == true); 3355 CU_ASSERT(blob_data_ro->md_ro == true); 3356 CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); 3357 3358 g_blob = NULL; 3359 g_bserrno = -1; 3360 spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); 3361 poll_threads(); 3362 CU_ASSERT(g_bserrno == 0); 3363 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3364 blob_md_ro = g_blob; 3365 CU_ASSERT(blob_md_ro->data_ro == false); 3366 CU_ASSERT(blob_md_ro->md_ro == true); 3367 3368 g_bserrno = -1; 3369 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3370 poll_threads(); 3371 CU_ASSERT(g_bserrno == 0); 3372 3373 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3374 poll_threads(); 3375 CU_ASSERT(g_bserrno == 0); 3376 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3377 poll_threads(); 3378 CU_ASSERT(g_bserrno == 0); 3379 } 3380 3381 static void 3382 bs_version(void) 3383 { 3384 struct spdk_bs_super_block *super; 3385 struct spdk_blob_store *bs = g_bs; 3386 struct spdk_bs_dev *dev; 3387 struct spdk_blob *blob; 3388 struct spdk_blob_opts blob_opts; 3389 spdk_blob_id blobid; 3390 3391 /* Unload the blob store */ 3392 spdk_bs_unload(bs, bs_op_complete, NULL); 3393 poll_threads(); 3394 CU_ASSERT(g_bserrno == 0); 3395 g_bs = NULL; 3396 3397 /* 3398 * Change the bs version on disk. This will allow us to 3399 * test that the version does not get modified automatically 3400 * when loading and unloading the blobstore. 3401 */ 3402 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 3403 CU_ASSERT(super->version == SPDK_BS_VERSION); 3404 CU_ASSERT(super->clean == 1); 3405 super->version = 2; 3406 /* 3407 * Version 2 metadata does not have a used blobid mask, so clear 3408 * those fields in the super block and zero the corresponding 3409 * region on "disk". We will use this to ensure blob IDs are 3410 * correctly reconstructed. 3411 */ 3412 memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, 3413 super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); 3414 super->used_blobid_mask_start = 0; 3415 super->used_blobid_mask_len = 0; 3416 super->crc = _spdk_blob_md_page_calc_crc(super); 3417 3418 /* Load an existing blob store */ 3419 dev = init_dev(); 3420 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3421 poll_threads(); 3422 CU_ASSERT(g_bserrno == 0); 3423 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3424 CU_ASSERT(super->clean == 1); 3425 bs = g_bs; 3426 3427 /* 3428 * Create a blob - just to make sure that when we unload it 3429 * results in writing the super block (since metadata pages 3430 * were allocated. 
3431 */ 3432 ut_spdk_blob_opts_init(&blob_opts); 3433 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3434 poll_threads(); 3435 CU_ASSERT(g_bserrno == 0); 3436 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3437 blobid = g_blobid; 3438 3439 /* Unload the blob store */ 3440 spdk_bs_unload(bs, bs_op_complete, NULL); 3441 poll_threads(); 3442 CU_ASSERT(g_bserrno == 0); 3443 g_bs = NULL; 3444 CU_ASSERT(super->version == 2); 3445 CU_ASSERT(super->used_blobid_mask_start == 0); 3446 CU_ASSERT(super->used_blobid_mask_len == 0); 3447 3448 dev = init_dev(); 3449 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3450 poll_threads(); 3451 CU_ASSERT(g_bserrno == 0); 3452 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3453 bs = g_bs; 3454 3455 g_blob = NULL; 3456 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3457 poll_threads(); 3458 CU_ASSERT(g_bserrno == 0); 3459 CU_ASSERT(g_blob != NULL); 3460 blob = g_blob; 3461 3462 spdk_blob_close(blob, blob_op_complete, NULL); 3463 poll_threads(); 3464 CU_ASSERT(g_bserrno == 0); 3465 3466 CU_ASSERT(super->version == 2); 3467 CU_ASSERT(super->used_blobid_mask_start == 0); 3468 CU_ASSERT(super->used_blobid_mask_len == 0); 3469 } 3470 3471 static void 3472 blob_set_xattrs(void) 3473 { 3474 struct spdk_blob_store *bs = g_bs; 3475 struct spdk_blob *blob; 3476 struct spdk_blob_opts opts; 3477 const void *value; 3478 size_t value_len; 3479 char *xattr; 3480 size_t xattr_length; 3481 int rc; 3482 3483 /* Create blob with extra attributes */ 3484 ut_spdk_blob_opts_init(&opts); 3485 3486 opts.xattrs.names = g_xattr_names; 3487 opts.xattrs.get_value = _get_xattr_value; 3488 opts.xattrs.count = 3; 3489 opts.xattrs.ctx = &g_ctx; 3490 3491 blob = ut_blob_create_and_open(bs, &opts); 3492 3493 /* Get the xattrs */ 3494 value = NULL; 3495 3496 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 3497 CU_ASSERT(rc == 0); 3498 SPDK_CU_ASSERT_FATAL(value != NULL); 3499 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 3500 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 3501 3502 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 3503 CU_ASSERT(rc == 0); 3504 SPDK_CU_ASSERT_FATAL(value != NULL); 3505 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 3506 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 3507 3508 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 3509 CU_ASSERT(rc == 0); 3510 SPDK_CU_ASSERT_FATAL(value != NULL); 3511 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 3512 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 3513 3514 /* Try to get non existing attribute */ 3515 3516 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 3517 CU_ASSERT(rc == -ENOENT); 3518 3519 /* Try xattr exceeding maximum length of descriptor in single page */ 3520 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 3521 strlen("large_xattr") + 1; 3522 xattr = calloc(xattr_length, sizeof(char)); 3523 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3524 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3525 free(xattr); 3526 SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); 3527 3528 spdk_blob_close(blob, blob_op_complete, NULL); 3529 poll_threads(); 3530 CU_ASSERT(g_bserrno == 0); 3531 blob = NULL; 3532 g_blob = NULL; 3533 g_blobid = SPDK_BLOBID_INVALID; 3534 3535 /* NULL callback */ 3536 ut_spdk_blob_opts_init(&opts); 3537 opts.xattrs.names = g_xattr_names; 3538 
opts.xattrs.get_value = NULL; 3539 opts.xattrs.count = 1; 3540 opts.xattrs.ctx = &g_ctx; 3541 3542 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3543 poll_threads(); 3544 CU_ASSERT(g_bserrno == -EINVAL); 3545 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3546 3547 /* NULL values */ 3548 ut_spdk_blob_opts_init(&opts); 3549 opts.xattrs.names = g_xattr_names; 3550 opts.xattrs.get_value = _get_xattr_value_null; 3551 opts.xattrs.count = 1; 3552 opts.xattrs.ctx = NULL; 3553 3554 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3555 poll_threads(); 3556 CU_ASSERT(g_bserrno == -EINVAL); 3557 } 3558 3559 static void 3560 blob_thin_prov_alloc(void) 3561 { 3562 struct spdk_blob_store *bs = g_bs; 3563 struct spdk_blob *blob; 3564 struct spdk_blob_opts opts; 3565 spdk_blob_id blobid; 3566 uint64_t free_clusters; 3567 3568 free_clusters = spdk_bs_free_cluster_count(bs); 3569 3570 /* Set blob as thin provisioned */ 3571 ut_spdk_blob_opts_init(&opts); 3572 opts.thin_provision = true; 3573 3574 blob = ut_blob_create_and_open(bs, &opts); 3575 blobid = spdk_blob_get_id(blob); 3576 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3577 3578 CU_ASSERT(blob->active.num_clusters == 0); 3579 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 3580 3581 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3582 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3583 poll_threads(); 3584 CU_ASSERT(g_bserrno == 0); 3585 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3586 CU_ASSERT(blob->active.num_clusters == 5); 3587 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 3588 3589 /* Grow it to 1TB - still unallocated */ 3590 spdk_blob_resize(blob, 262144, blob_op_complete, NULL); 3591 poll_threads(); 3592 CU_ASSERT(g_bserrno == 0); 3593 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3594 CU_ASSERT(blob->active.num_clusters == 262144); 3595 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3596 3597 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3598 poll_threads(); 3599 CU_ASSERT(g_bserrno == 0); 3600 /* Sync must not change anything */ 3601 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3602 CU_ASSERT(blob->active.num_clusters == 262144); 3603 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3604 /* Since clusters are not allocated, 3605 * number of metadata pages is expected to be minimal. 
3606 */ 3607 CU_ASSERT(blob->active.num_pages == 1); 3608 3609 /* Shrink the blob to 3 clusters - still unallocated */ 3610 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 3611 poll_threads(); 3612 CU_ASSERT(g_bserrno == 0); 3613 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3614 CU_ASSERT(blob->active.num_clusters == 3); 3615 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3616 3617 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3618 poll_threads(); 3619 CU_ASSERT(g_bserrno == 0); 3620 /* Sync must not change anything */ 3621 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3622 CU_ASSERT(blob->active.num_clusters == 3); 3623 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3624 3625 spdk_blob_close(blob, blob_op_complete, NULL); 3626 poll_threads(); 3627 CU_ASSERT(g_bserrno == 0); 3628 3629 ut_bs_reload(&bs, NULL); 3630 3631 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3632 poll_threads(); 3633 CU_ASSERT(g_bserrno == 0); 3634 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3635 blob = g_blob; 3636 3637 /* Check that clusters allocation and size is still the same */ 3638 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3639 CU_ASSERT(blob->active.num_clusters == 3); 3640 3641 ut_blob_close_and_delete(bs, blob); 3642 } 3643 3644 static void 3645 blob_insert_cluster_msg(void) 3646 { 3647 struct spdk_blob_store *bs = g_bs; 3648 struct spdk_blob *blob; 3649 struct spdk_blob_opts opts; 3650 spdk_blob_id blobid; 3651 uint64_t free_clusters; 3652 uint64_t new_cluster = 0; 3653 uint32_t cluster_num = 3; 3654 uint32_t extent_page = 0; 3655 3656 free_clusters = spdk_bs_free_cluster_count(bs); 3657 3658 /* Set blob as thin provisioned */ 3659 ut_spdk_blob_opts_init(&opts); 3660 opts.thin_provision = true; 3661 opts.num_clusters = 4; 3662 3663 blob = ut_blob_create_and_open(bs, &opts); 3664 blobid = spdk_blob_get_id(blob); 3665 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3666 3667 CU_ASSERT(blob->active.num_clusters == 4); 3668 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); 3669 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3670 3671 /* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread. 3672 * This is to simulate behaviour when cluster is allocated after blob creation. 3673 * Such as _spdk_bs_allocate_and_copy_cluster(). 
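 * The allocation is therefore split in two steps: a cluster is first claimed
 * from the free pool, and only a later message to the metadata thread inserts
 * it into the blob's cluster map. Until that message is processed,
 * active.clusters[cluster_num] is expected to stay 0, which the asserts
 * around the two calls below verify.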
*/ 3674 _spdk_bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); 3675 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3676 3677 _spdk_blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, 3678 blob_op_complete, NULL); 3679 poll_threads(); 3680 3681 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3682 3683 spdk_blob_close(blob, blob_op_complete, NULL); 3684 poll_threads(); 3685 CU_ASSERT(g_bserrno == 0); 3686 3687 ut_bs_reload(&bs, NULL); 3688 3689 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3690 poll_threads(); 3691 CU_ASSERT(g_bserrno == 0); 3692 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3693 blob = g_blob; 3694 3695 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3696 3697 ut_blob_close_and_delete(bs, blob); 3698 } 3699 3700 static void 3701 blob_thin_prov_rw(void) 3702 { 3703 static const uint8_t zero[10 * 4096] = { 0 }; 3704 struct spdk_blob_store *bs = g_bs; 3705 struct spdk_blob *blob; 3706 struct spdk_io_channel *channel, *channel_thread1; 3707 struct spdk_blob_opts opts; 3708 uint64_t free_clusters; 3709 uint64_t page_size; 3710 uint8_t payload_read[10 * 4096]; 3711 uint8_t payload_write[10 * 4096]; 3712 uint64_t write_bytes; 3713 uint64_t read_bytes; 3714 3715 free_clusters = spdk_bs_free_cluster_count(bs); 3716 page_size = spdk_bs_get_page_size(bs); 3717 3718 channel = spdk_bs_alloc_io_channel(bs); 3719 CU_ASSERT(channel != NULL); 3720 3721 ut_spdk_blob_opts_init(&opts); 3722 opts.thin_provision = true; 3723 3724 blob = ut_blob_create_and_open(bs, &opts); 3725 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3726 3727 CU_ASSERT(blob->active.num_clusters == 0); 3728 3729 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3730 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3731 poll_threads(); 3732 CU_ASSERT(g_bserrno == 0); 3733 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3734 CU_ASSERT(blob->active.num_clusters == 5); 3735 3736 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3737 poll_threads(); 3738 CU_ASSERT(g_bserrno == 0); 3739 /* Sync must not change anything */ 3740 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3741 CU_ASSERT(blob->active.num_clusters == 5); 3742 3743 /* Payload should be all zeros from unallocated clusters */ 3744 memset(payload_read, 0xFF, sizeof(payload_read)); 3745 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3746 poll_threads(); 3747 CU_ASSERT(g_bserrno == 0); 3748 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3749 3750 write_bytes = g_dev_write_bytes; 3751 read_bytes = g_dev_read_bytes; 3752 3753 /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ 3754 set_thread(1); 3755 channel_thread1 = spdk_bs_alloc_io_channel(bs); 3756 CU_ASSERT(channel_thread1 != NULL); 3757 memset(payload_write, 0xE5, sizeof(payload_write)); 3758 spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); 3759 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3760 /* Perform write on thread 0. That will try to allocate cluster, 3761 * but fail due to another thread issuing the cluster allocation first. 
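 * When that happens, the losing thread is expected to release its
 * speculatively claimed cluster back to the pool and re-issue the write
 * against the now-mapped cluster, which is why the free count dips to
 * free_clusters - 2 below and settles back at free_clusters - 1 once both
 * threads have been polled.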
*/ 3762 set_thread(0); 3763 memset(payload_write, 0xE5, sizeof(payload_write)); 3764 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 3765 CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); 3766 poll_threads(); 3767 CU_ASSERT(g_bserrno == 0); 3768 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3769 /* For thin-provisioned blob we need to write 20 pages plus one page metadata and 3770 * read 0 bytes */ 3771 if (g_use_extent_table) { 3772 /* Add one more page for EXTENT_PAGE write */ 3773 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); 3774 } else { 3775 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); 3776 } 3777 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3778 3779 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3780 poll_threads(); 3781 CU_ASSERT(g_bserrno == 0); 3782 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3783 3784 ut_blob_close_and_delete(bs, blob); 3785 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3786 3787 set_thread(1); 3788 spdk_bs_free_io_channel(channel_thread1); 3789 set_thread(0); 3790 spdk_bs_free_io_channel(channel); 3791 poll_threads(); 3792 g_blob = NULL; 3793 g_blobid = 0; 3794 } 3795 3796 static void 3797 blob_thin_prov_rle(void) 3798 { 3799 static const uint8_t zero[10 * 4096] = { 0 }; 3800 struct spdk_blob_store *bs = g_bs; 3801 struct spdk_blob *blob; 3802 struct spdk_io_channel *channel; 3803 struct spdk_blob_opts opts; 3804 spdk_blob_id blobid; 3805 uint64_t free_clusters; 3806 uint64_t page_size; 3807 uint8_t payload_read[10 * 4096]; 3808 uint8_t payload_write[10 * 4096]; 3809 uint64_t write_bytes; 3810 uint64_t read_bytes; 3811 uint64_t io_unit; 3812 3813 free_clusters = spdk_bs_free_cluster_count(bs); 3814 page_size = spdk_bs_get_page_size(bs); 3815 3816 ut_spdk_blob_opts_init(&opts); 3817 opts.thin_provision = true; 3818 opts.num_clusters = 5; 3819 3820 blob = ut_blob_create_and_open(bs, &opts); 3821 blobid = spdk_blob_get_id(blob); 3822 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3823 3824 channel = spdk_bs_alloc_io_channel(bs); 3825 CU_ASSERT(channel != NULL); 3826 3827 /* Target specifically second cluster in a blob as first allocation */ 3828 io_unit = _spdk_bs_cluster_to_page(bs, 1) * _spdk_bs_io_unit_per_page(bs); 3829 3830 /* Payload should be all zeros from unallocated clusters */ 3831 memset(payload_read, 0xFF, sizeof(payload_read)); 3832 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3833 poll_threads(); 3834 CU_ASSERT(g_bserrno == 0); 3835 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3836 3837 write_bytes = g_dev_write_bytes; 3838 read_bytes = g_dev_read_bytes; 3839 3840 /* Issue write to second cluster in a blob */ 3841 memset(payload_write, 0xE5, sizeof(payload_write)); 3842 spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); 3843 poll_threads(); 3844 CU_ASSERT(g_bserrno == 0); 3845 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3846 /* For thin-provisioned blob we need to write 10 pages plus one page metadata and 3847 * read 0 bytes */ 3848 if (g_use_extent_table) { 3849 /* Add one more page for EXTENT_PAGE write */ 3850 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); 3851 } else { 3852 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); 3853 } 3854 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3855 3856 spdk_blob_io_read(blob, channel, payload_read, 
io_unit, 10, blob_op_complete, NULL); 3857 poll_threads(); 3858 CU_ASSERT(g_bserrno == 0); 3859 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3860 3861 spdk_bs_free_io_channel(channel); 3862 poll_threads(); 3863 3864 spdk_blob_close(blob, blob_op_complete, NULL); 3865 poll_threads(); 3866 CU_ASSERT(g_bserrno == 0); 3867 3868 ut_bs_reload(&bs, NULL); 3869 3870 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3871 poll_threads(); 3872 CU_ASSERT(g_bserrno == 0); 3873 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3874 blob = g_blob; 3875 3876 channel = spdk_bs_alloc_io_channel(bs); 3877 CU_ASSERT(channel != NULL); 3878 3879 /* Read second cluster after blob reload to confirm data written */ 3880 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3881 poll_threads(); 3882 CU_ASSERT(g_bserrno == 0); 3883 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3884 3885 spdk_bs_free_io_channel(channel); 3886 poll_threads(); 3887 3888 ut_blob_close_and_delete(bs, blob); 3889 } 3890 3891 static void 3892 blob_thin_prov_rw_iov(void) 3893 { 3894 static const uint8_t zero[10 * 4096] = { 0 }; 3895 struct spdk_blob_store *bs = g_bs; 3896 struct spdk_blob *blob; 3897 struct spdk_io_channel *channel; 3898 struct spdk_blob_opts opts; 3899 uint64_t free_clusters; 3900 uint8_t payload_read[10 * 4096]; 3901 uint8_t payload_write[10 * 4096]; 3902 struct iovec iov_read[3]; 3903 struct iovec iov_write[3]; 3904 3905 free_clusters = spdk_bs_free_cluster_count(bs); 3906 3907 channel = spdk_bs_alloc_io_channel(bs); 3908 CU_ASSERT(channel != NULL); 3909 3910 ut_spdk_blob_opts_init(&opts); 3911 opts.thin_provision = true; 3912 3913 blob = ut_blob_create_and_open(bs, &opts); 3914 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3915 3916 CU_ASSERT(blob->active.num_clusters == 0); 3917 3918 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. 
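 * Resizing a thin-provisioned blob only updates the logical cluster count in
 * its metadata; no clusters are taken from the pool until the first write,
 * so the free cluster count checks below remain equal to the initial value.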
*/ 3919 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3920 poll_threads(); 3921 CU_ASSERT(g_bserrno == 0); 3922 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3923 CU_ASSERT(blob->active.num_clusters == 5); 3924 3925 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3926 poll_threads(); 3927 CU_ASSERT(g_bserrno == 0); 3928 /* Sync must not change anything */ 3929 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3930 CU_ASSERT(blob->active.num_clusters == 5); 3931 3932 /* Payload should be all zeros from unallocated clusters */ 3933 memset(payload_read, 0xAA, sizeof(payload_read)); 3934 iov_read[0].iov_base = payload_read; 3935 iov_read[0].iov_len = 3 * 4096; 3936 iov_read[1].iov_base = payload_read + 3 * 4096; 3937 iov_read[1].iov_len = 4 * 4096; 3938 iov_read[2].iov_base = payload_read + 7 * 4096; 3939 iov_read[2].iov_len = 3 * 4096; 3940 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 3941 poll_threads(); 3942 CU_ASSERT(g_bserrno == 0); 3943 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3944 3945 memset(payload_write, 0xE5, sizeof(payload_write)); 3946 iov_write[0].iov_base = payload_write; 3947 iov_write[0].iov_len = 1 * 4096; 3948 iov_write[1].iov_base = payload_write + 1 * 4096; 3949 iov_write[1].iov_len = 5 * 4096; 3950 iov_write[2].iov_base = payload_write + 6 * 4096; 3951 iov_write[2].iov_len = 4 * 4096; 3952 3953 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 3954 poll_threads(); 3955 CU_ASSERT(g_bserrno == 0); 3956 3957 memset(payload_read, 0xAA, sizeof(payload_read)); 3958 iov_read[0].iov_base = payload_read; 3959 iov_read[0].iov_len = 3 * 4096; 3960 iov_read[1].iov_base = payload_read + 3 * 4096; 3961 iov_read[1].iov_len = 4 * 4096; 3962 iov_read[2].iov_base = payload_read + 7 * 4096; 3963 iov_read[2].iov_len = 3 * 4096; 3964 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 3965 poll_threads(); 3966 CU_ASSERT(g_bserrno == 0); 3967 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3968 3969 spdk_blob_close(blob, blob_op_complete, NULL); 3970 poll_threads(); 3971 CU_ASSERT(g_bserrno == 0); 3972 3973 spdk_bs_free_io_channel(channel); 3974 poll_threads(); 3975 3976 g_blob = NULL; 3977 g_blobid = 0; 3978 } 3979 3980 struct iter_ctx { 3981 int current_iter; 3982 spdk_blob_id blobid[4]; 3983 }; 3984 3985 static void 3986 test_iter(void *arg, struct spdk_blob *blob, int bserrno) 3987 { 3988 struct iter_ctx *iter_ctx = arg; 3989 spdk_blob_id blobid; 3990 3991 CU_ASSERT(bserrno == 0); 3992 blobid = spdk_blob_get_id(blob); 3993 CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); 3994 } 3995 3996 static void 3997 bs_load_iter(void) 3998 { 3999 struct spdk_blob_store *bs; 4000 struct spdk_bs_dev *dev; 4001 struct iter_ctx iter_ctx = { 0 }; 4002 struct spdk_blob *blob; 4003 int i, rc; 4004 struct spdk_bs_opts opts; 4005 4006 dev = init_dev(); 4007 spdk_bs_opts_init(&opts); 4008 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4009 4010 /* Initialize a new blob store */ 4011 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 4012 poll_threads(); 4013 CU_ASSERT(g_bserrno == 0); 4014 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4015 bs = g_bs; 4016 4017 for (i = 0; i < 4; i++) { 4018 blob = ut_blob_create_and_open(bs, NULL); 4019 iter_ctx.blobid[i] = spdk_blob_get_id(blob); 4020 4021 /* Just save the blobid as an xattr for testing purposes. 
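 * The xattr is never read back by this test; it simply gives each blob some
 * metadata to persist. The iteration itself is verified by test_iter(), which
 * compares every visited blob id against iter_ctx.blobid[] in creation order.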
*/ 4022 rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); 4023 CU_ASSERT(rc == 0); 4024 4025 /* Resize the blob */ 4026 spdk_blob_resize(blob, i, blob_op_complete, NULL); 4027 poll_threads(); 4028 CU_ASSERT(g_bserrno == 0); 4029 4030 spdk_blob_close(blob, blob_op_complete, NULL); 4031 poll_threads(); 4032 CU_ASSERT(g_bserrno == 0); 4033 } 4034 4035 g_bserrno = -1; 4036 spdk_bs_unload(bs, bs_op_complete, NULL); 4037 poll_threads(); 4038 CU_ASSERT(g_bserrno == 0); 4039 4040 dev = init_dev(); 4041 spdk_bs_opts_init(&opts); 4042 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4043 opts.iter_cb_fn = test_iter; 4044 opts.iter_cb_arg = &iter_ctx; 4045 4046 /* Test blob iteration during load after a clean shutdown. */ 4047 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4048 poll_threads(); 4049 CU_ASSERT(g_bserrno == 0); 4050 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4051 bs = g_bs; 4052 4053 /* Dirty shutdown */ 4054 _spdk_bs_free(bs); 4055 4056 dev = init_dev(); 4057 spdk_bs_opts_init(&opts); 4058 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4059 opts.iter_cb_fn = test_iter; 4060 iter_ctx.current_iter = 0; 4061 opts.iter_cb_arg = &iter_ctx; 4062 4063 /* Test blob iteration during load after a dirty shutdown. */ 4064 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4065 poll_threads(); 4066 CU_ASSERT(g_bserrno == 0); 4067 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4068 bs = g_bs; 4069 4070 spdk_bs_unload(bs, bs_op_complete, NULL); 4071 poll_threads(); 4072 CU_ASSERT(g_bserrno == 0); 4073 g_bs = NULL; 4074 } 4075 4076 static void 4077 blob_snapshot_rw(void) 4078 { 4079 static const uint8_t zero[10 * 4096] = { 0 }; 4080 struct spdk_blob_store *bs = g_bs; 4081 struct spdk_blob *blob, *snapshot; 4082 struct spdk_io_channel *channel; 4083 struct spdk_blob_opts opts; 4084 spdk_blob_id blobid, snapshotid; 4085 uint64_t free_clusters; 4086 uint64_t cluster_size; 4087 uint64_t page_size; 4088 uint8_t payload_read[10 * 4096]; 4089 uint8_t payload_write[10 * 4096]; 4090 uint64_t write_bytes; 4091 uint64_t read_bytes; 4092 4093 free_clusters = spdk_bs_free_cluster_count(bs); 4094 cluster_size = spdk_bs_get_cluster_size(bs); 4095 page_size = spdk_bs_get_page_size(bs); 4096 4097 channel = spdk_bs_alloc_io_channel(bs); 4098 CU_ASSERT(channel != NULL); 4099 4100 ut_spdk_blob_opts_init(&opts); 4101 opts.thin_provision = true; 4102 opts.num_clusters = 5; 4103 4104 blob = ut_blob_create_and_open(bs, &opts); 4105 blobid = spdk_blob_get_id(blob); 4106 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4107 4108 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4109 4110 memset(payload_read, 0xFF, sizeof(payload_read)); 4111 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4112 poll_threads(); 4113 CU_ASSERT(g_bserrno == 0); 4114 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4115 4116 memset(payload_write, 0xE5, sizeof(payload_write)); 4117 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4118 poll_threads(); 4119 CU_ASSERT(g_bserrno == 0); 4120 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4121 4122 /* Create snapshot from blob */ 4123 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4124 poll_threads(); 4125 CU_ASSERT(g_bserrno == 0); 4126 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4127 snapshotid = g_blobid; 4128 4129 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4130 
poll_threads(); 4131 CU_ASSERT(g_bserrno == 0); 4132 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4133 snapshot = g_blob; 4134 CU_ASSERT(snapshot->data_ro == true); 4135 CU_ASSERT(snapshot->md_ro == true); 4136 4137 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4138 4139 write_bytes = g_dev_write_bytes; 4140 read_bytes = g_dev_read_bytes; 4141 4142 memset(payload_write, 0xAA, sizeof(payload_write)); 4143 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4144 poll_threads(); 4145 CU_ASSERT(g_bserrno == 0); 4146 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4147 4148 /* For a clone we need to allocate and copy one cluster, update one page of metadata 4149 * and then write 10 pages of payload. 4150 */ 4151 if (g_use_extent_table) { 4152 /* Add one more page for EXTENT_PAGE write */ 4153 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); 4154 } else { 4155 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); 4156 } 4157 CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); 4158 4159 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4160 poll_threads(); 4161 CU_ASSERT(g_bserrno == 0); 4162 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4163 4164 /* Data on snapshot should not change after write to clone */ 4165 memset(payload_write, 0xE5, sizeof(payload_write)); 4166 spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); 4167 poll_threads(); 4168 CU_ASSERT(g_bserrno == 0); 4169 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4170 4171 ut_blob_close_and_delete(bs, blob); 4172 ut_blob_close_and_delete(bs, snapshot); 4173 4174 spdk_bs_free_io_channel(channel); 4175 poll_threads(); 4176 g_blob = NULL; 4177 g_blobid = 0; 4178 } 4179 4180 static void 4181 blob_snapshot_rw_iov(void) 4182 { 4183 static const uint8_t zero[10 * 4096] = { 0 }; 4184 struct spdk_blob_store *bs = g_bs; 4185 struct spdk_blob *blob, *snapshot; 4186 struct spdk_io_channel *channel; 4187 struct spdk_blob_opts opts; 4188 spdk_blob_id blobid, snapshotid; 4189 uint64_t free_clusters; 4190 uint8_t payload_read[10 * 4096]; 4191 uint8_t payload_write[10 * 4096]; 4192 struct iovec iov_read[3]; 4193 struct iovec iov_write[3]; 4194 4195 free_clusters = spdk_bs_free_cluster_count(bs); 4196 4197 channel = spdk_bs_alloc_io_channel(bs); 4198 CU_ASSERT(channel != NULL); 4199 4200 ut_spdk_blob_opts_init(&opts); 4201 opts.thin_provision = true; 4202 opts.num_clusters = 5; 4203 4204 blob = ut_blob_create_and_open(bs, &opts); 4205 blobid = spdk_blob_get_id(blob); 4206 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4207 4208 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4209 4210 /* Create snapshot from blob */ 4211 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4212 poll_threads(); 4213 CU_ASSERT(g_bserrno == 0); 4214 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4215 snapshotid = g_blobid; 4216 4217 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4218 poll_threads(); 4219 CU_ASSERT(g_bserrno == 0); 4220 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4221 snapshot = g_blob; 4222 CU_ASSERT(snapshot->data_ro == true); 4223 CU_ASSERT(snapshot->md_ro == true); 4224 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4225 4226 /* Payload should be all zeros from unallocated clusters */ 4227 memset(payload_read, 0xAA, sizeof(payload_read)); 4228 iov_read[0].iov_base = payload_read; 4229 iov_read[0].iov_len = 
3 * 4096; 4230 iov_read[1].iov_base = payload_read + 3 * 4096; 4231 iov_read[1].iov_len = 4 * 4096; 4232 iov_read[2].iov_base = payload_read + 7 * 4096; 4233 iov_read[2].iov_len = 3 * 4096; 4234 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4235 poll_threads(); 4236 CU_ASSERT(g_bserrno == 0); 4237 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4238 4239 memset(payload_write, 0xE5, sizeof(payload_write)); 4240 iov_write[0].iov_base = payload_write; 4241 iov_write[0].iov_len = 1 * 4096; 4242 iov_write[1].iov_base = payload_write + 1 * 4096; 4243 iov_write[1].iov_len = 5 * 4096; 4244 iov_write[2].iov_base = payload_write + 6 * 4096; 4245 iov_write[2].iov_len = 4 * 4096; 4246 4247 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4248 poll_threads(); 4249 CU_ASSERT(g_bserrno == 0); 4250 4251 memset(payload_read, 0xAA, sizeof(payload_read)); 4252 iov_read[0].iov_base = payload_read; 4253 iov_read[0].iov_len = 3 * 4096; 4254 iov_read[1].iov_base = payload_read + 3 * 4096; 4255 iov_read[1].iov_len = 4 * 4096; 4256 iov_read[2].iov_base = payload_read + 7 * 4096; 4257 iov_read[2].iov_len = 3 * 4096; 4258 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4259 poll_threads(); 4260 CU_ASSERT(g_bserrno == 0); 4261 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4262 4263 spdk_blob_close(blob, blob_op_complete, NULL); 4264 poll_threads(); 4265 CU_ASSERT(g_bserrno == 0); 4266 4267 spdk_blob_close(snapshot, blob_op_complete, NULL); 4268 poll_threads(); 4269 CU_ASSERT(g_bserrno == 0); 4270 4271 spdk_bs_free_io_channel(channel); 4272 poll_threads(); 4273 4274 g_blob = NULL; 4275 g_blobid = 0; 4276 } 4277 4278 /** 4279 * Inflate / decouple parent rw unit tests. 4280 * 4281 * -------------- 4282 * original blob: 0 1 2 3 4 4283 * ,---------+---------+---------+---------+---------. 4284 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4285 * +---------+---------+---------+---------+---------+ 4286 * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | 4287 * +---------+---------+---------+---------+---------+ 4288 * blob | - |zzzzzzzzz| - | - | - | 4289 * '---------+---------+---------+---------+---------' 4290 * . . . . . . 4291 * -------- . . . . . . 4292 * inflate: . . . . . . 4293 * ,---------+---------+---------+---------+---------. 4294 * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| 4295 * '---------+---------+---------+---------+---------' 4296 * 4297 * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency 4298 * on snapshot2 and snapshot removed . . . 4299 * . . . . . . 4300 * ---------------- . . . . . . 4301 * decouple parent: . . . . . . 4302 * ,---------+---------+---------+---------+---------. 4303 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4304 * +---------+---------+---------+---------+---------+ 4305 * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | 4306 * '---------+---------+---------+---------+---------' 4307 * 4308 * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency 4309 * on snapshot2 removed and on snapshot still exists. Snapshot2 4310 * should remain a clone of snapshot. 
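 *
 * Both variants are exercised by _blob_inflate_rw(decouple_parent) below; the
 * resulting relation trees are checked with spdk_blob_get_clones() and
 * spdk_blob_get_parent_snapshot() after each operation.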
4311 */ 4312 static void 4313 _blob_inflate_rw(bool decouple_parent) 4314 { 4315 struct spdk_blob_store *bs = g_bs; 4316 struct spdk_blob *blob, *snapshot, *snapshot2; 4317 struct spdk_io_channel *channel; 4318 struct spdk_blob_opts opts; 4319 spdk_blob_id blobid, snapshotid, snapshot2id; 4320 uint64_t free_clusters; 4321 uint64_t cluster_size; 4322 4323 uint64_t payload_size; 4324 uint8_t *payload_read; 4325 uint8_t *payload_write; 4326 uint8_t *payload_clone; 4327 4328 uint64_t pages_per_cluster; 4329 uint64_t pages_per_payload; 4330 4331 int i; 4332 spdk_blob_id ids[2]; 4333 size_t count; 4334 4335 free_clusters = spdk_bs_free_cluster_count(bs); 4336 cluster_size = spdk_bs_get_cluster_size(bs); 4337 pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); 4338 pages_per_payload = pages_per_cluster * 5; 4339 4340 payload_size = cluster_size * 5; 4341 4342 payload_read = malloc(payload_size); 4343 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 4344 4345 payload_write = malloc(payload_size); 4346 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 4347 4348 payload_clone = malloc(payload_size); 4349 SPDK_CU_ASSERT_FATAL(payload_clone != NULL); 4350 4351 channel = spdk_bs_alloc_io_channel(bs); 4352 SPDK_CU_ASSERT_FATAL(channel != NULL); 4353 4354 /* Create blob */ 4355 ut_spdk_blob_opts_init(&opts); 4356 opts.thin_provision = true; 4357 opts.num_clusters = 5; 4358 4359 blob = ut_blob_create_and_open(bs, &opts); 4360 blobid = spdk_blob_get_id(blob); 4361 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4362 4363 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4364 4365 /* 1) Initial read should return zeroed payload */ 4366 memset(payload_read, 0xFF, payload_size); 4367 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4368 blob_op_complete, NULL); 4369 poll_threads(); 4370 CU_ASSERT(g_bserrno == 0); 4371 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 4372 4373 /* Fill whole blob with a pattern, except last cluster (to be sure it 4374 * isn't allocated) */ 4375 memset(payload_write, 0xE5, payload_size - cluster_size); 4376 spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - 4377 pages_per_cluster, blob_op_complete, NULL); 4378 poll_threads(); 4379 CU_ASSERT(g_bserrno == 0); 4380 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4381 4382 /* 2) Create snapshot from blob (first level) */ 4383 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4384 poll_threads(); 4385 CU_ASSERT(g_bserrno == 0); 4386 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4387 snapshotid = g_blobid; 4388 4389 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4390 poll_threads(); 4391 CU_ASSERT(g_bserrno == 0); 4392 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4393 snapshot = g_blob; 4394 CU_ASSERT(snapshot->data_ro == true); 4395 CU_ASSERT(snapshot->md_ro == true); 4396 4397 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4398 4399 /* Write every second cluster with a pattern. 4400 * 4401 * Last cluster shouldn't be written, to be sure that snapshot nor clone 4402 * doesn't allocate it. 4403 * 4404 * payload_clone stores expected result on "blob" read at the time and 4405 * is used only to check data consistency on clone before and after 4406 * inflation. Initially we fill it with a backing snapshots pattern 4407 * used before. 
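 * After the loop below the expected clone content is therefore: clusters 0
 * and 2 keep the 0xE5 snapshot pattern, clusters 1 and 3 hold the new 0xAA
 * pattern, and cluster 4 stays all zeroes because it is never written.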
4408 */ 4409 memset(payload_clone, 0xE5, payload_size - cluster_size); 4410 memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); 4411 memset(payload_write, 0xAA, payload_size); 4412 for (i = 1; i < 5; i += 2) { 4413 spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, 4414 pages_per_cluster, blob_op_complete, NULL); 4415 poll_threads(); 4416 CU_ASSERT(g_bserrno == 0); 4417 4418 /* Update expected result */ 4419 memcpy(payload_clone + (cluster_size * i), payload_write, 4420 cluster_size); 4421 } 4422 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4423 4424 /* Check data consistency on clone */ 4425 memset(payload_read, 0xFF, payload_size); 4426 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4427 blob_op_complete, NULL); 4428 poll_threads(); 4429 CU_ASSERT(g_bserrno == 0); 4430 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4431 4432 /* 3) Create second levels snapshot from blob */ 4433 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4434 poll_threads(); 4435 CU_ASSERT(g_bserrno == 0); 4436 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4437 snapshot2id = g_blobid; 4438 4439 spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); 4440 poll_threads(); 4441 CU_ASSERT(g_bserrno == 0); 4442 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4443 snapshot2 = g_blob; 4444 CU_ASSERT(snapshot2->data_ro == true); 4445 CU_ASSERT(snapshot2->md_ro == true); 4446 4447 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); 4448 4449 CU_ASSERT(snapshot2->parent_id == snapshotid); 4450 4451 /* Write one cluster on the top level blob. This cluster (1) covers 4452 * already allocated cluster in the snapshot2, so shouldn't be inflated 4453 * at all */ 4454 spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, 4455 pages_per_cluster, blob_op_complete, NULL); 4456 poll_threads(); 4457 CU_ASSERT(g_bserrno == 0); 4458 4459 /* Update expected result */ 4460 memcpy(payload_clone + cluster_size, payload_write, cluster_size); 4461 4462 /* Check data consistency on clone */ 4463 memset(payload_read, 0xFF, payload_size); 4464 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4465 blob_op_complete, NULL); 4466 poll_threads(); 4467 CU_ASSERT(g_bserrno == 0); 4468 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4469 4470 4471 /* Close all blobs */ 4472 spdk_blob_close(blob, blob_op_complete, NULL); 4473 poll_threads(); 4474 CU_ASSERT(g_bserrno == 0); 4475 4476 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4477 poll_threads(); 4478 CU_ASSERT(g_bserrno == 0); 4479 4480 spdk_blob_close(snapshot, blob_op_complete, NULL); 4481 poll_threads(); 4482 CU_ASSERT(g_bserrno == 0); 4483 4484 /* Check snapshot-clone relations */ 4485 count = 2; 4486 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4487 CU_ASSERT(count == 1); 4488 CU_ASSERT(ids[0] == snapshot2id); 4489 4490 count = 2; 4491 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4492 CU_ASSERT(count == 1); 4493 CU_ASSERT(ids[0] == blobid); 4494 4495 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); 4496 4497 free_clusters = spdk_bs_free_cluster_count(bs); 4498 if (!decouple_parent) { 4499 /* Do full blob inflation */ 4500 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 4501 poll_threads(); 4502 CU_ASSERT(g_bserrno == 0); 4503 4504 /* All clusters should be inflated (except one already allocated 4505 * in a top level blob) */ 4506 
CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4); 4507 4508 /* Check if relation tree updated correctly */ 4509 count = 2; 4510 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4511 4512 /* snapshotid have one clone */ 4513 CU_ASSERT(count == 1); 4514 CU_ASSERT(ids[0] == snapshot2id); 4515 4516 /* snapshot2id have no clones */ 4517 count = 2; 4518 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4519 CU_ASSERT(count == 0); 4520 4521 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4522 } else { 4523 /* Decouple parent of blob */ 4524 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 4525 poll_threads(); 4526 CU_ASSERT(g_bserrno == 0); 4527 4528 /* Only one cluster from a parent should be inflated (second one 4529 * is covered by a cluster written on a top level blob, and 4530 * already allocated) */ 4531 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1); 4532 4533 /* Check if relation tree updated correctly */ 4534 count = 2; 4535 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4536 4537 /* snapshotid have two clones now */ 4538 CU_ASSERT(count == 2); 4539 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4540 CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id); 4541 4542 /* snapshot2id have no clones */ 4543 count = 2; 4544 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4545 CU_ASSERT(count == 0); 4546 4547 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4548 } 4549 4550 /* Try to delete snapshot2 (should pass) */ 4551 spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL); 4552 poll_threads(); 4553 CU_ASSERT(g_bserrno == 0); 4554 4555 /* Try to delete base snapshot */ 4556 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4557 poll_threads(); 4558 CU_ASSERT(g_bserrno == 0); 4559 4560 /* Reopen blob after snapshot deletion */ 4561 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4562 poll_threads(); 4563 CU_ASSERT(g_bserrno == 0); 4564 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4565 blob = g_blob; 4566 4567 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4568 4569 /* Check data consistency on inflated blob */ 4570 memset(payload_read, 0xFF, payload_size); 4571 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4572 blob_op_complete, NULL); 4573 poll_threads(); 4574 CU_ASSERT(g_bserrno == 0); 4575 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4576 4577 spdk_blob_close(blob, blob_op_complete, NULL); 4578 poll_threads(); 4579 CU_ASSERT(g_bserrno == 0); 4580 4581 spdk_bs_free_io_channel(channel); 4582 poll_threads(); 4583 4584 g_blob = NULL; 4585 g_blobid = 0; 4586 4587 free(payload_read); 4588 free(payload_write); 4589 free(payload_clone); 4590 } 4591 4592 static void 4593 blob_inflate_rw(void) 4594 { 4595 _blob_inflate_rw(false); 4596 _blob_inflate_rw(true); 4597 } 4598 4599 /** 4600 * Snapshot-clones relation test 4601 * 4602 * snapshot 4603 * | 4604 * +-----+-----+ 4605 * | | 4606 * blob(ro) snapshot2 4607 * | | 4608 * clone2 clone 4609 */ 4610 static void 4611 blob_relations(void) 4612 { 4613 struct spdk_blob_store *bs; 4614 struct spdk_bs_dev *dev; 4615 struct spdk_bs_opts bs_opts; 4616 struct spdk_blob_opts opts; 4617 struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2; 4618 spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2; 4619 int rc; 4620 size_t count; 4621 spdk_blob_id ids[10] = {}; 4622 4623 dev = 
init_dev(); 4624 spdk_bs_opts_init(&bs_opts); 4625 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 4626 4627 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4628 poll_threads(); 4629 CU_ASSERT(g_bserrno == 0); 4630 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4631 bs = g_bs; 4632 4633 /* 1. Create blob with 10 clusters */ 4634 4635 ut_spdk_blob_opts_init(&opts); 4636 opts.num_clusters = 10; 4637 4638 blob = ut_blob_create_and_open(bs, &opts); 4639 blobid = spdk_blob_get_id(blob); 4640 4641 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4642 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4643 CU_ASSERT(!spdk_blob_is_clone(blob)); 4644 CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); 4645 4646 /* blob should not have underlying snapshot nor clones */ 4647 CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); 4648 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4649 count = SPDK_COUNTOF(ids); 4650 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4651 CU_ASSERT(rc == 0); 4652 CU_ASSERT(count == 0); 4653 4654 4655 /* 2. Create snapshot */ 4656 4657 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4658 poll_threads(); 4659 CU_ASSERT(g_bserrno == 0); 4660 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4661 snapshotid = g_blobid; 4662 4663 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4664 poll_threads(); 4665 CU_ASSERT(g_bserrno == 0); 4666 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4667 snapshot = g_blob; 4668 4669 CU_ASSERT(spdk_blob_is_read_only(snapshot)); 4670 CU_ASSERT(spdk_blob_is_snapshot(snapshot)); 4671 CU_ASSERT(!spdk_blob_is_clone(snapshot)); 4672 CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); 4673 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4674 4675 /* Check if original blob is converted to the clone of snapshot */ 4676 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4677 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4678 CU_ASSERT(spdk_blob_is_clone(blob)); 4679 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4680 CU_ASSERT(blob->parent_id == snapshotid); 4681 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4682 4683 count = SPDK_COUNTOF(ids); 4684 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4685 CU_ASSERT(rc == 0); 4686 CU_ASSERT(count == 1); 4687 CU_ASSERT(ids[0] == blobid); 4688 4689 4690 /* 3. 
Create clone from snapshot */ 4691 4692 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 4693 poll_threads(); 4694 CU_ASSERT(g_bserrno == 0); 4695 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4696 cloneid = g_blobid; 4697 4698 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4699 poll_threads(); 4700 CU_ASSERT(g_bserrno == 0); 4701 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4702 clone = g_blob; 4703 4704 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4705 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4706 CU_ASSERT(spdk_blob_is_clone(clone)); 4707 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4708 CU_ASSERT(clone->parent_id == snapshotid); 4709 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); 4710 4711 count = SPDK_COUNTOF(ids); 4712 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4713 CU_ASSERT(rc == 0); 4714 CU_ASSERT(count == 0); 4715 4716 /* Check if clone is on the snapshot's list */ 4717 count = SPDK_COUNTOF(ids); 4718 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4719 CU_ASSERT(rc == 0); 4720 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4721 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 4722 4723 4724 /* 4. Create snapshot of the clone */ 4725 4726 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 4727 poll_threads(); 4728 CU_ASSERT(g_bserrno == 0); 4729 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4730 snapshotid2 = g_blobid; 4731 4732 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4733 poll_threads(); 4734 CU_ASSERT(g_bserrno == 0); 4735 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4736 snapshot2 = g_blob; 4737 4738 CU_ASSERT(spdk_blob_is_read_only(snapshot2)); 4739 CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); 4740 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 4741 CU_ASSERT(snapshot2->parent_id == snapshotid); 4742 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4743 4744 /* Check if clone is converted to the clone of snapshot2 and snapshot2 4745 * is a child of snapshot */ 4746 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4747 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4748 CU_ASSERT(spdk_blob_is_clone(clone)); 4749 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4750 CU_ASSERT(clone->parent_id == snapshotid2); 4751 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4752 4753 count = SPDK_COUNTOF(ids); 4754 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4755 CU_ASSERT(rc == 0); 4756 CU_ASSERT(count == 1); 4757 CU_ASSERT(ids[0] == cloneid); 4758 4759 4760 /* 5. 
Try to create clone from read only blob */ 4761 4762 /* Mark blob as read only */ 4763 spdk_blob_set_read_only(blob); 4764 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4765 poll_threads(); 4766 CU_ASSERT(g_bserrno == 0); 4767 4768 /* Check if previously created blob is read only clone */ 4769 CU_ASSERT(spdk_blob_is_read_only(blob)); 4770 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4771 CU_ASSERT(spdk_blob_is_clone(blob)); 4772 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4773 4774 /* Create clone from read only blob */ 4775 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4776 poll_threads(); 4777 CU_ASSERT(g_bserrno == 0); 4778 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4779 cloneid2 = g_blobid; 4780 4781 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 4782 poll_threads(); 4783 CU_ASSERT(g_bserrno == 0); 4784 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4785 clone2 = g_blob; 4786 4787 CU_ASSERT(!spdk_blob_is_read_only(clone2)); 4788 CU_ASSERT(!spdk_blob_is_snapshot(clone2)); 4789 CU_ASSERT(spdk_blob_is_clone(clone2)); 4790 CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); 4791 4792 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4793 4794 count = SPDK_COUNTOF(ids); 4795 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4796 CU_ASSERT(rc == 0); 4797 4798 CU_ASSERT(count == 1); 4799 CU_ASSERT(ids[0] == cloneid2); 4800 4801 /* Close blobs */ 4802 4803 spdk_blob_close(clone2, blob_op_complete, NULL); 4804 poll_threads(); 4805 CU_ASSERT(g_bserrno == 0); 4806 4807 spdk_blob_close(blob, blob_op_complete, NULL); 4808 poll_threads(); 4809 CU_ASSERT(g_bserrno == 0); 4810 4811 spdk_blob_close(clone, blob_op_complete, NULL); 4812 poll_threads(); 4813 CU_ASSERT(g_bserrno == 0); 4814 4815 spdk_blob_close(snapshot, blob_op_complete, NULL); 4816 poll_threads(); 4817 CU_ASSERT(g_bserrno == 0); 4818 4819 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4820 poll_threads(); 4821 CU_ASSERT(g_bserrno == 0); 4822 4823 /* Try to delete snapshot with more than 1 clone */ 4824 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4825 poll_threads(); 4826 CU_ASSERT(g_bserrno != 0); 4827 4828 ut_bs_reload(&bs, &bs_opts); 4829 4830 /* NULL ids array should return number of clones in count */ 4831 count = SPDK_COUNTOF(ids); 4832 rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); 4833 CU_ASSERT(rc == -ENOMEM); 4834 CU_ASSERT(count == 2); 4835 4836 /* incorrect array size */ 4837 count = 1; 4838 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4839 CU_ASSERT(rc == -ENOMEM); 4840 CU_ASSERT(count == 2); 4841 4842 4843 /* Verify structure of loaded blob store */ 4844 4845 /* snapshot */ 4846 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4847 4848 count = SPDK_COUNTOF(ids); 4849 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4850 CU_ASSERT(rc == 0); 4851 CU_ASSERT(count == 2); 4852 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4853 CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); 4854 4855 /* blob */ 4856 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4857 count = SPDK_COUNTOF(ids); 4858 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4859 CU_ASSERT(rc == 0); 4860 CU_ASSERT(count == 1); 4861 CU_ASSERT(ids[0] == cloneid2); 4862 4863 /* clone */ 4864 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4865 count = SPDK_COUNTOF(ids); 4866 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4867 CU_ASSERT(rc == 0); 4868 CU_ASSERT(count == 0); 
4869 4870 /* snapshot2 */ 4871 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4872 count = SPDK_COUNTOF(ids); 4873 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4874 CU_ASSERT(rc == 0); 4875 CU_ASSERT(count == 1); 4876 CU_ASSERT(ids[0] == cloneid); 4877 4878 /* clone2 */ 4879 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4880 count = SPDK_COUNTOF(ids); 4881 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 4882 CU_ASSERT(rc == 0); 4883 CU_ASSERT(count == 0); 4884 4885 /* Try to delete blob that user should not be able to remove */ 4886 4887 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4888 poll_threads(); 4889 CU_ASSERT(g_bserrno != 0); 4890 4891 /* Remove all blobs */ 4892 4893 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 4894 poll_threads(); 4895 CU_ASSERT(g_bserrno == 0); 4896 4897 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 4898 poll_threads(); 4899 CU_ASSERT(g_bserrno == 0); 4900 4901 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 4902 poll_threads(); 4903 CU_ASSERT(g_bserrno == 0); 4904 4905 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 4906 poll_threads(); 4907 CU_ASSERT(g_bserrno == 0); 4908 4909 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4910 poll_threads(); 4911 CU_ASSERT(g_bserrno == 0); 4912 4913 spdk_bs_unload(bs, bs_op_complete, NULL); 4914 poll_threads(); 4915 CU_ASSERT(g_bserrno == 0); 4916 4917 g_bs = NULL; 4918 } 4919 4920 /** 4921 * Snapshot-clones relation test 2 4922 * 4923 * snapshot1 4924 * | 4925 * snapshot2 4926 * | 4927 * +-----+-----+ 4928 * | | 4929 * blob(ro) snapshot3 4930 * | | 4931 * | snapshot4 4932 * | | | 4933 * clone2 clone clone3 4934 */ 4935 static void 4936 blob_relations2(void) 4937 { 4938 struct spdk_blob_store *bs; 4939 struct spdk_bs_dev *dev; 4940 struct spdk_bs_opts bs_opts; 4941 struct spdk_blob_opts opts; 4942 struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; 4943 spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, 4944 cloneid3; 4945 int rc; 4946 size_t count; 4947 spdk_blob_id ids[10] = {}; 4948 4949 dev = init_dev(); 4950 spdk_bs_opts_init(&bs_opts); 4951 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 4952 4953 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4954 poll_threads(); 4955 CU_ASSERT(g_bserrno == 0); 4956 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4957 bs = g_bs; 4958 4959 /* 1. Create blob with 10 clusters */ 4960 4961 ut_spdk_blob_opts_init(&opts); 4962 opts.num_clusters = 10; 4963 4964 blob = ut_blob_create_and_open(bs, &opts); 4965 blobid = spdk_blob_get_id(blob); 4966 4967 /* 2. 
Create snapshot1 */ 4968 4969 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4970 poll_threads(); 4971 CU_ASSERT(g_bserrno == 0); 4972 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4973 snapshotid1 = g_blobid; 4974 4975 spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); 4976 poll_threads(); 4977 CU_ASSERT(g_bserrno == 0); 4978 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4979 snapshot1 = g_blob; 4980 4981 CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); 4982 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); 4983 4984 CU_ASSERT(blob->parent_id == snapshotid1); 4985 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 4986 4987 /* Check if blob is the clone of snapshot1 */ 4988 CU_ASSERT(blob->parent_id == snapshotid1); 4989 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 4990 4991 count = SPDK_COUNTOF(ids); 4992 rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); 4993 CU_ASSERT(rc == 0); 4994 CU_ASSERT(count == 1); 4995 CU_ASSERT(ids[0] == blobid); 4996 4997 /* 3. Create another snapshot */ 4998 4999 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5000 poll_threads(); 5001 CU_ASSERT(g_bserrno == 0); 5002 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5003 snapshotid2 = g_blobid; 5004 5005 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 5006 poll_threads(); 5007 CU_ASSERT(g_bserrno == 0); 5008 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5009 snapshot2 = g_blob; 5010 5011 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 5012 CU_ASSERT(snapshot2->parent_id == snapshotid1); 5013 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); 5014 5015 /* Check if snapshot2 is the clone of snapshot1 and blob 5016 * is a child of snapshot2 */ 5017 CU_ASSERT(blob->parent_id == snapshotid2); 5018 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5019 5020 count = SPDK_COUNTOF(ids); 5021 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5022 CU_ASSERT(rc == 0); 5023 CU_ASSERT(count == 1); 5024 CU_ASSERT(ids[0] == blobid); 5025 5026 /* 4. Create clone from snapshot */ 5027 5028 spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); 5029 poll_threads(); 5030 CU_ASSERT(g_bserrno == 0); 5031 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5032 cloneid = g_blobid; 5033 5034 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 5035 poll_threads(); 5036 CU_ASSERT(g_bserrno == 0); 5037 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5038 clone = g_blob; 5039 5040 CU_ASSERT(clone->parent_id == snapshotid2); 5041 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5042 5043 /* Check if clone is on the snapshot's list */ 5044 count = SPDK_COUNTOF(ids); 5045 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5046 CU_ASSERT(rc == 0); 5047 CU_ASSERT(count == 2); 5048 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5049 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5050 5051 /* 5. 
Create snapshot of the clone */ 5052 5053 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5054 poll_threads(); 5055 CU_ASSERT(g_bserrno == 0); 5056 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5057 snapshotid3 = g_blobid; 5058 5059 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5060 poll_threads(); 5061 CU_ASSERT(g_bserrno == 0); 5062 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5063 snapshot3 = g_blob; 5064 5065 CU_ASSERT(snapshot3->parent_id == snapshotid2); 5066 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5067 5068 /* Check if clone is converted to the clone of snapshot3 and snapshot3 5069 * is a child of snapshot2 */ 5070 CU_ASSERT(clone->parent_id == snapshotid3); 5071 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5072 5073 count = SPDK_COUNTOF(ids); 5074 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5075 CU_ASSERT(rc == 0); 5076 CU_ASSERT(count == 1); 5077 CU_ASSERT(ids[0] == cloneid); 5078 5079 /* 6. Create another snapshot of the clone */ 5080 5081 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5082 poll_threads(); 5083 CU_ASSERT(g_bserrno == 0); 5084 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5085 snapshotid4 = g_blobid; 5086 5087 spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); 5088 poll_threads(); 5089 CU_ASSERT(g_bserrno == 0); 5090 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5091 snapshot4 = g_blob; 5092 5093 CU_ASSERT(snapshot4->parent_id == snapshotid3); 5094 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); 5095 5096 /* Check if clone is converted to the clone of snapshot4 and snapshot4 5097 * is a child of snapshot3 */ 5098 CU_ASSERT(clone->parent_id == snapshotid4); 5099 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); 5100 5101 count = SPDK_COUNTOF(ids); 5102 rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); 5103 CU_ASSERT(rc == 0); 5104 CU_ASSERT(count == 1); 5105 CU_ASSERT(ids[0] == cloneid); 5106 5107 /* 7. Remove snapshot 4 */ 5108 5109 ut_blob_close_and_delete(bs, snapshot4); 5110 5111 /* Check if relations are back to state from before creating snapshot 4 */ 5112 CU_ASSERT(clone->parent_id == snapshotid3); 5113 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5114 5115 count = SPDK_COUNTOF(ids); 5116 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5117 CU_ASSERT(rc == 0); 5118 CU_ASSERT(count == 1); 5119 CU_ASSERT(ids[0] == cloneid); 5120 5121 /* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */ 5122 5123 spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); 5124 poll_threads(); 5125 CU_ASSERT(g_bserrno == 0); 5126 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5127 cloneid3 = g_blobid; 5128 5129 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5130 poll_threads(); 5131 CU_ASSERT(g_bserrno != 0); 5132 5133 /* 9. 
Open snapshot 3 again and try to remove it while clone 3 is closed */ 5134 5135 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5136 poll_threads(); 5137 CU_ASSERT(g_bserrno == 0); 5138 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5139 snapshot3 = g_blob; 5140 5141 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5142 poll_threads(); 5143 CU_ASSERT(g_bserrno != 0); 5144 5145 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5146 poll_threads(); 5147 CU_ASSERT(g_bserrno == 0); 5148 5149 spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); 5150 poll_threads(); 5151 CU_ASSERT(g_bserrno == 0); 5152 5153 /* 10. Remove snapshot 1 */ 5154 5155 ut_blob_close_and_delete(bs, snapshot1); 5156 5157 /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ 5158 CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); 5159 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5160 5161 count = SPDK_COUNTOF(ids); 5162 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5163 CU_ASSERT(rc == 0); 5164 CU_ASSERT(count == 2); 5165 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5166 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5167 5168 /* 11. Try to create clone from read only blob */ 5169 5170 /* Mark blob as read only */ 5171 spdk_blob_set_read_only(blob); 5172 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5173 poll_threads(); 5174 CU_ASSERT(g_bserrno == 0); 5175 5176 /* Create clone from read only blob */ 5177 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5178 poll_threads(); 5179 CU_ASSERT(g_bserrno == 0); 5180 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5181 cloneid2 = g_blobid; 5182 5183 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5184 poll_threads(); 5185 CU_ASSERT(g_bserrno == 0); 5186 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5187 clone2 = g_blob; 5188 5189 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5190 5191 count = SPDK_COUNTOF(ids); 5192 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5193 CU_ASSERT(rc == 0); 5194 CU_ASSERT(count == 1); 5195 CU_ASSERT(ids[0] == cloneid2); 5196 5197 /* Close blobs */ 5198 5199 spdk_blob_close(clone2, blob_op_complete, NULL); 5200 poll_threads(); 5201 CU_ASSERT(g_bserrno == 0); 5202 5203 spdk_blob_close(blob, blob_op_complete, NULL); 5204 poll_threads(); 5205 CU_ASSERT(g_bserrno == 0); 5206 5207 spdk_blob_close(clone, blob_op_complete, NULL); 5208 poll_threads(); 5209 CU_ASSERT(g_bserrno == 0); 5210 5211 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5212 poll_threads(); 5213 CU_ASSERT(g_bserrno == 0); 5214 5215 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5216 poll_threads(); 5217 CU_ASSERT(g_bserrno == 0); 5218 5219 ut_bs_reload(&bs, &bs_opts); 5220 5221 /* Verify structure of loaded blob store */ 5222 5223 /* snapshot2 */ 5224 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5225 5226 count = SPDK_COUNTOF(ids); 5227 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5228 CU_ASSERT(rc == 0); 5229 CU_ASSERT(count == 2); 5230 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5231 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5232 5233 /* blob */ 5234 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5235 count = SPDK_COUNTOF(ids); 5236 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5237 CU_ASSERT(rc == 0); 5238 CU_ASSERT(count == 1); 5239 CU_ASSERT(ids[0] == cloneid2); 5240 5241 /* clone 
*/ 5242 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5243 count = SPDK_COUNTOF(ids); 5244 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5245 CU_ASSERT(rc == 0); 5246 CU_ASSERT(count == 0); 5247 5248 /* snapshot3 */ 5249 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5250 count = SPDK_COUNTOF(ids); 5251 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5252 CU_ASSERT(rc == 0); 5253 CU_ASSERT(count == 1); 5254 CU_ASSERT(ids[0] == cloneid); 5255 5256 /* clone2 */ 5257 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5258 count = SPDK_COUNTOF(ids); 5259 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5260 CU_ASSERT(rc == 0); 5261 CU_ASSERT(count == 0); 5262 5263 /* Try to delete all blobs in the worse possible order */ 5264 5265 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5266 poll_threads(); 5267 CU_ASSERT(g_bserrno != 0); 5268 5269 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5270 poll_threads(); 5271 CU_ASSERT(g_bserrno == 0); 5272 5273 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5274 poll_threads(); 5275 CU_ASSERT(g_bserrno != 0); 5276 5277 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5278 poll_threads(); 5279 CU_ASSERT(g_bserrno == 0); 5280 5281 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5282 poll_threads(); 5283 CU_ASSERT(g_bserrno == 0); 5284 5285 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5286 poll_threads(); 5287 CU_ASSERT(g_bserrno == 0); 5288 5289 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5290 poll_threads(); 5291 CU_ASSERT(g_bserrno == 0); 5292 5293 spdk_bs_unload(bs, bs_op_complete, NULL); 5294 poll_threads(); 5295 CU_ASSERT(g_bserrno == 0); 5296 5297 g_bs = NULL; 5298 } 5299 5300 static void 5301 blob_delete_snapshot_power_failure(void) 5302 { 5303 struct spdk_blob_store *bs = g_bs; 5304 struct spdk_blob_opts opts; 5305 struct spdk_blob *blob, *snapshot; 5306 struct spdk_power_failure_thresholds thresholds = {}; 5307 spdk_blob_id blobid, snapshotid; 5308 const void *value; 5309 size_t value_len; 5310 size_t count; 5311 spdk_blob_id ids[3] = {}; 5312 int rc; 5313 bool deleted = false; 5314 5315 /* Create blob */ 5316 ut_spdk_blob_opts_init(&opts); 5317 opts.num_clusters = 10; 5318 5319 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5320 poll_threads(); 5321 CU_ASSERT(g_bserrno == 0); 5322 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5323 blobid = g_blobid; 5324 5325 /* Create snapshot */ 5326 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5327 poll_threads(); 5328 CU_ASSERT(g_bserrno == 0); 5329 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5330 snapshotid = g_blobid; 5331 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5332 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5333 5334 thresholds.general_threshold = 1; 5335 while (!deleted) { 5336 dev_set_power_failure_thresholds(thresholds); 5337 5338 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5339 poll_threads(); 5340 5341 /* Do not shut down cleanly. Assumption is that after snapshot deletion 5342 * reports success, changes to both blobs should already persisted. 
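 * In other words the deletion must look atomic on disk: after the dirty load
 * either the snapshot is still fully present with the clone relation intact,
 * or it is completely gone. The if/else below accepts exactly those two
 * outcomes and nothing in between.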
*/ 5343 dev_reset_power_failure_event(); 5344 ut_bs_dirty_load(&bs, NULL); 5345 5346 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5347 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5348 5349 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5350 poll_threads(); 5351 CU_ASSERT(g_bserrno == 0); 5352 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5353 blob = g_blob; 5354 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5355 5356 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5357 poll_threads(); 5358 5359 if (g_bserrno == 0) { 5360 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5361 snapshot = g_blob; 5362 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5363 count = SPDK_COUNTOF(ids); 5364 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5365 CU_ASSERT(rc == 0); 5366 CU_ASSERT(count == 1); 5367 CU_ASSERT(ids[0] == blobid); 5368 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 5369 CU_ASSERT(rc != 0); 5370 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5371 5372 spdk_blob_close(snapshot, blob_op_complete, NULL); 5373 poll_threads(); 5374 CU_ASSERT(g_bserrno == 0); 5375 } else { 5376 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5377 deleted = true; 5378 } 5379 5380 spdk_blob_close(blob, blob_op_complete, NULL); 5381 poll_threads(); 5382 CU_ASSERT(g_bserrno == 0); 5383 5384 /* Reload blobstore to have the same starting conditions (as the previous blobstore load 5385 * may trigger cleanup after power failure or may not) */ 5386 ut_bs_reload(&bs, NULL); 5387 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5388 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5389 5390 thresholds.general_threshold++; 5391 } 5392 } 5393 5394 static void 5395 blob_create_snapshot_power_failure(void) 5396 { 5397 struct spdk_blob_store *bs = g_bs; 5398 struct spdk_blob_opts opts; 5399 struct spdk_blob *blob, *snapshot; 5400 struct spdk_power_failure_thresholds thresholds = {}; 5401 spdk_blob_id blobid, snapshotid; 5402 const void *value; 5403 size_t value_len; 5404 size_t count; 5405 spdk_blob_id ids[3] = {}; 5406 int rc; 5407 bool created = false; 5408 5409 /* Create blob */ 5410 ut_spdk_blob_opts_init(&opts); 5411 opts.num_clusters = 10; 5412 5413 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5414 poll_threads(); 5415 CU_ASSERT(g_bserrno == 0); 5416 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5417 blobid = g_blobid; 5418 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5419 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5420 5421 thresholds.general_threshold = 1; 5422 while (!created) { 5423 dev_set_power_failure_thresholds(thresholds); 5424 5425 /* Create snapshot */ 5426 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5427 poll_threads(); 5428 snapshotid = g_blobid; 5429 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5430 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5431 5432 /* Do not shut down cleanly. Assumption is that after create snapshot 5433 * reports success, both blobs should be power-fail safe. 
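 * If the power failure interrupted snapshot creation, the original blob must remain
 * standalone and thick provisioned, which the else branch below verifies.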
*/ 5434 dev_reset_power_failure_event(); 5435 ut_bs_dirty_load(&bs, NULL); 5436 5437 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5438 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5439 5440 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5441 poll_threads(); 5442 CU_ASSERT(g_bserrno == 0); 5443 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5444 blob = g_blob; 5445 5446 if (snapshotid != SPDK_BLOBID_INVALID) { 5447 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5448 poll_threads(); 5449 } 5450 5451 if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { 5452 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5453 snapshot = g_blob; 5454 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5455 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5456 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5457 count = SPDK_COUNTOF(ids); 5458 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5459 CU_ASSERT(rc == 0); 5460 CU_ASSERT(count == 1); 5461 CU_ASSERT(ids[0] == blobid); 5462 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); 5463 CU_ASSERT(rc != 0); 5464 5465 spdk_blob_close(snapshot, blob_op_complete, NULL); 5466 poll_threads(); 5467 CU_ASSERT(g_bserrno == 0); 5468 created = true; 5469 } else { 5470 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5471 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); 5472 } 5473 5474 spdk_blob_close(blob, blob_op_complete, NULL); 5475 poll_threads(); 5476 CU_ASSERT(g_bserrno == 0); 5477 5478 /* Reload blobstore to have the same starting conditions (as the previous blobstore load 5479 * may trigger cleanup after power failure or may not) */ 5480 ut_bs_reload(&bs, NULL); 5481 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5482 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5483 5484 thresholds.general_threshold++; 5485 } 5486 } 5487 5488 static void 5489 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5490 { 5491 uint8_t payload_ff[64 * 512]; 5492 uint8_t payload_aa[64 * 512]; 5493 uint8_t payload_00[64 * 512]; 5494 uint8_t *cluster0, *cluster1; 5495 5496 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5497 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5498 memset(payload_00, 0x00, sizeof(payload_00)); 5499 5500 /* Try to perform I/O with io unit = 512 */ 5501 spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); 5502 poll_threads(); 5503 CU_ASSERT(g_bserrno == 0); 5504 5505 /* If thin provisioned is set cluster should be allocated now */ 5506 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5507 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5508 5509 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5510 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). 
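 * With a 512-byte io_unit, one 4096-byte page holds 8 io_units, so each cluster holds 32 io_units.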
*/ 5511 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5512 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5513 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5514 5515 /* Verify write with offset on first page */ 5516 spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); 5517 poll_threads(); 5518 CU_ASSERT(g_bserrno == 0); 5519 5520 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5521 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5522 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5523 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5524 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5525 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5526 5527 /* Verify write with offset on first page */ 5528 spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); 5529 poll_threads(); 5530 5531 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5532 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5533 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5534 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5535 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5536 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5537 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5538 5539 /* Verify write with offset on second page */ 5540 spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); 5541 poll_threads(); 5542 5543 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5544 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5545 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5546 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5547 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5548 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5549 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5550 5551 /* Verify write across multiple pages */ 5552 spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); 5553 poll_threads(); 5554 5555 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ 5556 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5557 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5558 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5559 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5560 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5561 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5562 5563 /* Verify write across multiple clusters */ 5564 spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); 5565 poll_threads(); 5566 5567 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5568 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5569 5570 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5571 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5572 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5573 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5574 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5575 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5576 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5577 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 
4 * 512) == 0); 5578 5579 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5580 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5581 5582 /* Verify write to second cluster */ 5583 spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); 5584 poll_threads(); 5585 5586 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5587 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5588 5589 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5590 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5591 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5592 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5593 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5594 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5595 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5596 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5597 5598 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5599 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5600 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5601 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5602 } 5603 5604 static void 5605 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5606 { 5607 uint8_t payload_read[64 * 512]; 5608 uint8_t payload_ff[64 * 512]; 5609 uint8_t payload_aa[64 * 512]; 5610 uint8_t payload_00[64 * 512]; 5611 5612 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5613 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5614 memset(payload_00, 0x00, sizeof(payload_00)); 5615 5616 /* Read only first io unit */ 5617 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5618 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5619 * payload_read: F000 0000 | 0000 0000 ... */ 5620 memset(payload_read, 0x00, sizeof(payload_read)); 5621 spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); 5622 poll_threads(); 5623 CU_ASSERT(g_bserrno == 0); 5624 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5625 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5626 5627 /* Read four io_units starting from offset = 2 5628 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5629 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5630 * payload_read: F0AA 0000 | 0000 0000 ... */ 5631 5632 memset(payload_read, 0x00, sizeof(payload_read)); 5633 spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); 5634 poll_threads(); 5635 CU_ASSERT(g_bserrno == 0); 5636 5637 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5638 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5639 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5640 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5641 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5642 5643 /* Read eight io_units across multiple pages 5644 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5645 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5646 * payload_read: AAAA AAAA | 0000 0000 ... 
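 * (io_units 4-11 were filled with 0xAA by the earlier cross-page write, so the whole read must match payload_aa)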
*/ 5647 memset(payload_read, 0x00, sizeof(payload_read)); 5648 spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); 5649 poll_threads(); 5650 CU_ASSERT(g_bserrno == 0); 5651 5652 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5653 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5654 5655 /* Read eight io_units across multiple clusters 5656 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5657 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5658 * payload_read: FFFF FFFF | 0000 0000 ... */ 5659 memset(payload_read, 0x00, sizeof(payload_read)); 5660 spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); 5661 poll_threads(); 5662 CU_ASSERT(g_bserrno == 0); 5663 5664 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 5665 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5666 5667 /* Read four io_units from second cluster 5668 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5669 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 5670 * payload_read: 00FF 0000 | 0000 0000 ... */ 5671 memset(payload_read, 0x00, sizeof(payload_read)); 5672 spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); 5673 poll_threads(); 5674 CU_ASSERT(g_bserrno == 0); 5675 5676 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 5677 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 5678 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5679 5680 /* Read second cluster 5681 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5682 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 5683 * payload_read: FFFF 0000 | 0000 FF00 ... 
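 * (a read of all 32 io_units of the second cluster, starting at offset 32)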
*/ 5684 memset(payload_read, 0x00, sizeof(payload_read)); 5685 spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); 5686 poll_threads(); 5687 CU_ASSERT(g_bserrno == 0); 5688 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 5689 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 5690 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 5691 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 5692 5693 /* Read whole two clusters 5694 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5695 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 5696 memset(payload_read, 0x00, sizeof(payload_read)); 5697 spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); 5698 poll_threads(); 5699 CU_ASSERT(g_bserrno == 0); 5700 5701 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5702 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5703 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 5704 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 5705 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 5706 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 5707 5708 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 5709 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 5710 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 5711 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 5712 } 5713 5714 5715 static void 5716 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5717 { 5718 uint8_t payload_ff[64 * 512]; 5719 uint8_t payload_aa[64 * 512]; 5720 uint8_t payload_00[64 * 512]; 5721 uint8_t *cluster0, *cluster1; 5722 5723 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5724 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5725 memset(payload_00, 0x00, sizeof(payload_00)); 5726 5727 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5728 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5729 5730 /* Unmap */ 5731 spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); 5732 poll_threads(); 5733 5734 CU_ASSERT(g_bserrno == 0); 5735 5736 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5737 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5738 } 5739 5740 static void 5741 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5742 { 5743 uint8_t payload_ff[64 * 512]; 5744 uint8_t payload_aa[64 * 512]; 5745 uint8_t payload_00[64 * 512]; 5746 uint8_t *cluster0, *cluster1; 5747 5748 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5749 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5750 memset(payload_00, 0x00, sizeof(payload_00)); 5751 5752 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5753 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5754 5755 /* Write zeroes */ 5756 spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); 5757 poll_threads(); 5758 5759 CU_ASSERT(g_bserrno == 0); 5760 5761 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5762 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5763 } 5764 5765 5766 static void 5767 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 
5768 { 5769 uint8_t payload_ff[64 * 512]; 5770 uint8_t payload_aa[64 * 512]; 5771 uint8_t payload_00[64 * 512]; 5772 uint8_t *cluster0, *cluster1; 5773 struct iovec iov[4]; 5774 5775 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5776 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5777 memset(payload_00, 0x00, sizeof(payload_00)); 5778 5779 /* Try to perform I/O with io unit = 512 */ 5780 iov[0].iov_base = payload_ff; 5781 iov[0].iov_len = 1 * 512; 5782 spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5783 poll_threads(); 5784 CU_ASSERT(g_bserrno == 0); 5785 5786 /* If thin provisioned is set cluster should be allocated now */ 5787 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5788 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5789 5790 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5791 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ 5792 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5793 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5794 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5795 5796 /* Verify write with offset on first page */ 5797 iov[0].iov_base = payload_ff; 5798 iov[0].iov_len = 1 * 512; 5799 spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); 5800 poll_threads(); 5801 CU_ASSERT(g_bserrno == 0); 5802 5803 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5804 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5805 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5806 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5807 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5808 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5809 5810 /* Verify write with offset on first page */ 5811 iov[0].iov_base = payload_ff; 5812 iov[0].iov_len = 4 * 512; 5813 spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); 5814 poll_threads(); 5815 5816 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5817 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5818 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5819 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5820 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5821 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5822 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5823 5824 /* Verify write with offset on second page */ 5825 iov[0].iov_base = payload_ff; 5826 iov[0].iov_len = 4 * 512; 5827 spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); 5828 poll_threads(); 5829 5830 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5831 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5832 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5833 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5834 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5835 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5836 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5837 5838 /* Verify write across multiple pages */ 5839 iov[0].iov_base = payload_aa; 5840 iov[0].iov_len = 8 * 512; 5841 spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); 5842 poll_threads(); 5843 5844 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 
0000 | 0000 0000 ] */ 5845 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5846 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5847 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5848 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5849 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5850 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5851 5852 /* Verify write across multiple clusters */ 5853 5854 iov[0].iov_base = payload_ff; 5855 iov[0].iov_len = 8 * 512; 5856 spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); 5857 poll_threads(); 5858 5859 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5860 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5861 5862 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5863 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5864 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5865 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5866 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5867 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5868 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5869 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); 5870 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5871 5872 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5873 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5874 5875 /* Verify write to second cluster */ 5876 5877 iov[0].iov_base = payload_ff; 5878 iov[0].iov_len = 2 * 512; 5879 spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); 5880 poll_threads(); 5881 5882 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5883 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5884 5885 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5886 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5887 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5888 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5889 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5890 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5891 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5892 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5893 5894 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5895 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5896 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5897 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5898 } 5899 5900 static void 5901 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5902 { 5903 uint8_t payload_read[64 * 512]; 5904 uint8_t payload_ff[64 * 512]; 5905 uint8_t payload_aa[64 * 512]; 5906 uint8_t payload_00[64 * 512]; 5907 struct iovec iov[4]; 5908 5909 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5910 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5911 memset(payload_00, 0x00, sizeof(payload_00)); 5912 5913 /* Read only first io unit */ 5914 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5915 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5916 * payload_read: F000 0000 | 0000 0000 ... 
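 * (readv with a single iovec covering one 512-byte io_unit)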
*/ 5917 memset(payload_read, 0x00, sizeof(payload_read)); 5918 iov[0].iov_base = payload_read; 5919 iov[0].iov_len = 1 * 512; 5920 spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5921 poll_threads(); 5922 5923 CU_ASSERT(g_bserrno == 0); 5924 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5925 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5926 5927 /* Read four io_units starting from offset = 2 5928 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5929 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5930 * payload_read: F0AA 0000 | 0000 0000 ... */ 5931 5932 memset(payload_read, 0x00, sizeof(payload_read)); 5933 iov[0].iov_base = payload_read; 5934 iov[0].iov_len = 4 * 512; 5935 spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); 5936 poll_threads(); 5937 CU_ASSERT(g_bserrno == 0); 5938 5939 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5940 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5941 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5942 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5943 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5944 5945 /* Read eight io_units across multiple pages 5946 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5947 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5948 * payload_read: AAAA AAAA | 0000 0000 ... */ 5949 memset(payload_read, 0x00, sizeof(payload_read)); 5950 iov[0].iov_base = payload_read; 5951 iov[0].iov_len = 4 * 512; 5952 iov[1].iov_base = payload_read + 4 * 512; 5953 iov[1].iov_len = 4 * 512; 5954 spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); 5955 poll_threads(); 5956 CU_ASSERT(g_bserrno == 0); 5957 5958 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5959 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5960 5961 /* Read eight io_units across multiple clusters 5962 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5963 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5964 * payload_read: FFFF FFFF | 0000 0000 ... */ 5965 memset(payload_read, 0x00, sizeof(payload_read)); 5966 iov[0].iov_base = payload_read; 5967 iov[0].iov_len = 2 * 512; 5968 iov[1].iov_base = payload_read + 2 * 512; 5969 iov[1].iov_len = 2 * 512; 5970 iov[2].iov_base = payload_read + 4 * 512; 5971 iov[2].iov_len = 2 * 512; 5972 iov[3].iov_base = payload_read + 6 * 512; 5973 iov[3].iov_len = 2 * 512; 5974 spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); 5975 poll_threads(); 5976 CU_ASSERT(g_bserrno == 0); 5977 5978 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 5979 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5980 5981 /* Read four io_units from second cluster 5982 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5983 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 5984 * payload_read: 00FF 0000 | 0000 0000 ... 
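 * (the four io_units are split across two iovecs of 1 and 3 blocks)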
*/ 5985 memset(payload_read, 0x00, sizeof(payload_read)); 5986 iov[0].iov_base = payload_read; 5987 iov[0].iov_len = 1 * 512; 5988 iov[1].iov_base = payload_read + 1 * 512; 5989 iov[1].iov_len = 3 * 512; 5990 spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); 5991 poll_threads(); 5992 CU_ASSERT(g_bserrno == 0); 5993 5994 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 5995 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 5996 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5997 5998 /* Read second cluster 5999 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6000 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6001 * payload_read: FFFF 0000 | 0000 FF00 ... */ 6002 memset(payload_read, 0x00, sizeof(payload_read)); 6003 iov[0].iov_base = payload_read; 6004 iov[0].iov_len = 1 * 512; 6005 iov[1].iov_base = payload_read + 1 * 512; 6006 iov[1].iov_len = 2 * 512; 6007 iov[2].iov_base = payload_read + 3 * 512; 6008 iov[2].iov_len = 4 * 512; 6009 iov[3].iov_base = payload_read + 7 * 512; 6010 iov[3].iov_len = 25 * 512; 6011 spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); 6012 poll_threads(); 6013 CU_ASSERT(g_bserrno == 0); 6014 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6015 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6016 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6017 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6018 6019 /* Read whole two clusters 6020 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6021 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6022 memset(payload_read, 0x00, sizeof(payload_read)); 6023 iov[0].iov_base = payload_read; 6024 iov[0].iov_len = 1 * 512; 6025 iov[1].iov_base = payload_read + 1 * 512; 6026 iov[1].iov_len = 8 * 512; 6027 iov[2].iov_base = payload_read + 9 * 512; 6028 iov[2].iov_len = 16 * 512; 6029 iov[3].iov_base = payload_read + 25 * 512; 6030 iov[3].iov_len = 39 * 512; 6031 spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); 6032 poll_threads(); 6033 CU_ASSERT(g_bserrno == 0); 6034 6035 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6036 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6037 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6038 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6039 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6040 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6041 6042 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6043 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6044 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6045 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6046 } 6047 6048 static void 6049 blob_io_unit(void) 6050 { 6051 struct spdk_bs_opts bsopts; 6052 struct spdk_blob_opts opts; 6053 struct spdk_blob_store *bs; 6054 struct spdk_bs_dev *dev; 6055 struct spdk_blob *blob, *snapshot, *clone; 6056 spdk_blob_id blobid; 6057 struct spdk_io_channel *channel; 6058 6059 /* Create dev with 512 bytes io unit size */ 6060 6061 spdk_bs_opts_init(&bsopts); 6062 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6063 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), 
"TESTTYPE"); 6064 6065 /* Try to initialize a new blob store with unsupported io_unit */ 6066 dev = init_dev(); 6067 dev->blocklen = 512; 6068 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6069 6070 /* Initialize a new blob store */ 6071 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6072 poll_threads(); 6073 CU_ASSERT(g_bserrno == 0); 6074 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6075 bs = g_bs; 6076 6077 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6078 channel = spdk_bs_alloc_io_channel(bs); 6079 6080 /* Create thick provisioned blob */ 6081 ut_spdk_blob_opts_init(&opts); 6082 opts.thin_provision = false; 6083 opts.num_clusters = 32; 6084 6085 blob = ut_blob_create_and_open(bs, &opts); 6086 blobid = spdk_blob_get_id(blob); 6087 6088 test_io_write(dev, blob, channel); 6089 test_io_read(dev, blob, channel); 6090 test_io_zeroes(dev, blob, channel); 6091 6092 test_iov_write(dev, blob, channel); 6093 test_iov_read(dev, blob, channel); 6094 6095 test_io_unmap(dev, blob, channel); 6096 6097 spdk_blob_close(blob, blob_op_complete, NULL); 6098 poll_threads(); 6099 CU_ASSERT(g_bserrno == 0); 6100 blob = NULL; 6101 g_blob = NULL; 6102 6103 /* Create thin provisioned blob */ 6104 6105 ut_spdk_blob_opts_init(&opts); 6106 opts.thin_provision = true; 6107 opts.num_clusters = 32; 6108 6109 blob = ut_blob_create_and_open(bs, &opts); 6110 blobid = spdk_blob_get_id(blob); 6111 6112 test_io_write(dev, blob, channel); 6113 test_io_read(dev, blob, channel); 6114 6115 test_io_zeroes(dev, blob, channel); 6116 6117 test_iov_write(dev, blob, channel); 6118 test_iov_read(dev, blob, channel); 6119 6120 /* Create snapshot */ 6121 6122 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6123 poll_threads(); 6124 CU_ASSERT(g_bserrno == 0); 6125 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6126 blobid = g_blobid; 6127 6128 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6129 poll_threads(); 6130 CU_ASSERT(g_bserrno == 0); 6131 CU_ASSERT(g_blob != NULL); 6132 snapshot = g_blob; 6133 6134 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6135 poll_threads(); 6136 CU_ASSERT(g_bserrno == 0); 6137 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6138 blobid = g_blobid; 6139 6140 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6141 poll_threads(); 6142 CU_ASSERT(g_bserrno == 0); 6143 CU_ASSERT(g_blob != NULL); 6144 clone = g_blob; 6145 6146 test_io_read(dev, blob, channel); 6147 test_io_read(dev, snapshot, channel); 6148 test_io_read(dev, clone, channel); 6149 6150 test_iov_read(dev, blob, channel); 6151 test_iov_read(dev, snapshot, channel); 6152 test_iov_read(dev, clone, channel); 6153 6154 /* Inflate clone */ 6155 6156 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6157 poll_threads(); 6158 6159 CU_ASSERT(g_bserrno == 0); 6160 6161 test_io_read(dev, clone, channel); 6162 6163 test_io_unmap(dev, clone, channel); 6164 6165 test_iov_write(dev, clone, channel); 6166 test_iov_read(dev, clone, channel); 6167 6168 spdk_blob_close(blob, blob_op_complete, NULL); 6169 spdk_blob_close(snapshot, blob_op_complete, NULL); 6170 spdk_blob_close(clone, blob_op_complete, NULL); 6171 poll_threads(); 6172 CU_ASSERT(g_bserrno == 0); 6173 blob = NULL; 6174 g_blob = NULL; 6175 6176 spdk_bs_free_io_channel(channel); 6177 poll_threads(); 6178 6179 /* Unload the blob store */ 6180 spdk_bs_unload(bs, bs_op_complete, NULL); 6181 poll_threads(); 6182 CU_ASSERT(g_bserrno == 0); 6183 g_bs = NULL; 6184 g_blob = NULL; 6185 g_blobid = 0; 6186 
} 6187 6188 static void 6189 blob_io_unit_compatiblity(void) 6190 { 6191 struct spdk_bs_opts bsopts; 6192 struct spdk_blob_store *bs; 6193 struct spdk_bs_dev *dev; 6194 struct spdk_bs_super_block *super; 6195 6196 /* Create dev with 512 bytes io unit size */ 6197 6198 spdk_bs_opts_init(&bsopts); 6199 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6200 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE"); 6201 6202 /* Try to initialize a new blob store with unsupported io_unit */ 6203 dev = init_dev(); 6204 dev->blocklen = 512; 6205 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6206 6207 /* Initialize a new blob store */ 6208 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6209 poll_threads(); 6210 CU_ASSERT(g_bserrno == 0); 6211 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6212 bs = g_bs; 6213 6214 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6215 6216 /* Unload the blob store */ 6217 spdk_bs_unload(bs, bs_op_complete, NULL); 6218 poll_threads(); 6219 CU_ASSERT(g_bserrno == 0); 6220 6221 /* Modify super block to behave like older version. 6222 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */ 6223 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 6224 super->io_unit_size = 0; 6225 super->crc = _spdk_blob_md_page_calc_crc(super); 6226 6227 dev = init_dev(); 6228 dev->blocklen = 512; 6229 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6230 6231 spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL); 6232 poll_threads(); 6233 CU_ASSERT(g_bserrno == 0); 6234 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6235 bs = g_bs; 6236 6237 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE); 6238 6239 /* Unload the blob store */ 6240 spdk_bs_unload(bs, bs_op_complete, NULL); 6241 poll_threads(); 6242 CU_ASSERT(g_bserrno == 0); 6243 6244 g_bs = NULL; 6245 g_blob = NULL; 6246 g_blobid = 0; 6247 } 6248 6249 static void 6250 blob_simultaneous_operations(void) 6251 { 6252 struct spdk_blob_store *bs = g_bs; 6253 struct spdk_blob_opts opts; 6254 struct spdk_blob *blob, *snapshot; 6255 spdk_blob_id blobid, snapshotid; 6256 struct spdk_io_channel *channel; 6257 6258 channel = spdk_bs_alloc_io_channel(bs); 6259 SPDK_CU_ASSERT_FATAL(channel != NULL); 6260 6261 ut_spdk_blob_opts_init(&opts); 6262 opts.num_clusters = 10; 6263 6264 blob = ut_blob_create_and_open(bs, &opts); 6265 blobid = spdk_blob_get_id(blob); 6266 6267 /* Create snapshot and try to remove blob in the same time: 6268 * - snapshot should be created successfully 6269 * - delete operation should fail w -EBUSY */ 6270 CU_ASSERT(blob->locked_operation_in_progress == false); 6271 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6272 CU_ASSERT(blob->locked_operation_in_progress == true); 6273 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6274 CU_ASSERT(blob->locked_operation_in_progress == true); 6275 /* Deletion failure */ 6276 CU_ASSERT(g_bserrno == -EBUSY); 6277 poll_threads(); 6278 CU_ASSERT(blob->locked_operation_in_progress == false); 6279 /* Snapshot creation success */ 6280 CU_ASSERT(g_bserrno == 0); 6281 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6282 6283 snapshotid = g_blobid; 6284 6285 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 6286 poll_threads(); 6287 CU_ASSERT(g_bserrno == 0); 6288 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6289 snapshot = g_blob; 6290 6291 /* Inflate blob and try to remove blob in the same time: 6292 * - blob should be inflated successfully 6293 * - delete operation should fail w -EBUSY */ 
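/* The delete below is issued while the inflate still holds blob->locked_operation_in_progress,
 * so its completion callback reports -EBUSY even before poll_threads() runs. */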
6294 CU_ASSERT(blob->locked_operation_in_progress == false); 6295 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6296 CU_ASSERT(blob->locked_operation_in_progress == true); 6297 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6298 CU_ASSERT(blob->locked_operation_in_progress == true); 6299 /* Deletion failure */ 6300 CU_ASSERT(g_bserrno == -EBUSY); 6301 poll_threads(); 6302 CU_ASSERT(blob->locked_operation_in_progress == false); 6303 /* Inflation success */ 6304 CU_ASSERT(g_bserrno == 0); 6305 6306 /* Clone snapshot and try to remove snapshot in the same time: 6307 * - snapshot should be cloned successfully 6308 * - delete operation should fail w -EBUSY */ 6309 CU_ASSERT(blob->locked_operation_in_progress == false); 6310 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 6311 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 6312 /* Deletion failure */ 6313 CU_ASSERT(g_bserrno == -EBUSY); 6314 poll_threads(); 6315 CU_ASSERT(blob->locked_operation_in_progress == false); 6316 /* Clone created */ 6317 CU_ASSERT(g_bserrno == 0); 6318 6319 /* Resize blob and try to remove blob in the same time: 6320 * - blob should be resized successfully 6321 * - delete operation should fail w -EBUSY */ 6322 CU_ASSERT(blob->locked_operation_in_progress == false); 6323 spdk_blob_resize(blob, 50, blob_op_complete, NULL); 6324 CU_ASSERT(blob->locked_operation_in_progress == true); 6325 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6326 CU_ASSERT(blob->locked_operation_in_progress == true); 6327 /* Deletion failure */ 6328 CU_ASSERT(g_bserrno == -EBUSY); 6329 poll_threads(); 6330 CU_ASSERT(blob->locked_operation_in_progress == false); 6331 /* Blob resized successfully */ 6332 CU_ASSERT(g_bserrno == 0); 6333 6334 /* Issue two consecutive blob syncs, neither should fail. 6335 * Force sync to actually occur by marking blob dirty each time. 6336 * Execution of sync should not be enough to complete the operation, 6337 * since disk I/O is required to complete it. */ 6338 g_bserrno = -1; 6339 6340 blob->state = SPDK_BLOB_STATE_DIRTY; 6341 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6342 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6343 6344 blob->state = SPDK_BLOB_STATE_DIRTY; 6345 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6346 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6347 6348 uint32_t completions = 0; 6349 while (completions < 2) { 6350 SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1)); 6351 if (g_bserrno == 0) { 6352 g_bserrno = -1; 6353 completions++; 6354 } 6355 /* Never should the g_bserrno be other than -1. 6356 * It would mean that either of syncs failed. 
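 * (blob_op_complete() sets g_bserrno to 0 on each successful sync; the loop counts the
 * completion and resets g_bserrno to -1 before polling again)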
*/ 6357 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6358 } 6359 6360 spdk_blob_close(snapshot, blob_op_complete, NULL); 6361 poll_threads(); 6362 CU_ASSERT(g_bserrno == 0); 6363 6364 ut_blob_close_and_delete(bs, blob); 6365 6366 spdk_bs_free_io_channel(channel); 6367 poll_threads(); 6368 } 6369 6370 static void 6371 blob_persist(void) 6372 { 6373 struct spdk_blob_store *bs = g_bs; 6374 struct spdk_blob_opts opts; 6375 struct spdk_blob *blob; 6376 spdk_blob_id blobid; 6377 struct spdk_io_channel *channel; 6378 char *xattr; 6379 size_t xattr_length; 6380 int rc; 6381 uint32_t page_count_clear, page_count_xattr; 6382 uint64_t poller_iterations; 6383 bool run_poller; 6384 6385 channel = spdk_bs_alloc_io_channel(bs); 6386 SPDK_CU_ASSERT_FATAL(channel != NULL); 6387 6388 ut_spdk_blob_opts_init(&opts); 6389 opts.num_clusters = 10; 6390 6391 blob = ut_blob_create_and_open(bs, &opts); 6392 blobid = spdk_blob_get_id(blob); 6393 6394 /* Save the amount of md pages used after creation of a blob. 6395 * This should be consistent after removing xattr. */ 6396 page_count_clear = spdk_bit_array_count_set(bs->used_md_pages); 6397 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6398 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6399 6400 /* Add xattr with maximum length of descriptor to exceed single metadata page. */ 6401 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 6402 strlen("large_xattr"); 6403 xattr = calloc(xattr_length, sizeof(char)); 6404 SPDK_CU_ASSERT_FATAL(xattr != NULL); 6405 6406 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6407 SPDK_CU_ASSERT_FATAL(rc == 0); 6408 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6409 poll_threads(); 6410 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6411 6412 /* Save the amount of md pages used after adding the large xattr */ 6413 page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages); 6414 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6415 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6416 6417 /* Add xattr to a blob and sync it. While sync is occuring, remove the xattr and sync again. 6418 * Interrupt the first sync after increasing number of poller iterations, until it succeeds. 6419 * Expectation is that after second sync completes no xattr is saved in metadata. */ 6420 poller_iterations = 1; 6421 run_poller = true; 6422 while (run_poller) { 6423 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6424 SPDK_CU_ASSERT_FATAL(rc == 0); 6425 g_bserrno = -1; 6426 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6427 poll_thread_times(0, poller_iterations); 6428 if (g_bserrno == 0) { 6429 /* Poller iteration count was high enough for first sync to complete. 6430 * Verify that blob takes up enough of md_pages to store the xattr. 
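 * (page_count_xattr was recorded earlier, right after the large xattr was first synced to metadata)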
*/ 6431 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6432 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6433 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr); 6434 run_poller = false; 6435 } 6436 rc = spdk_blob_remove_xattr(blob, "large_xattr"); 6437 SPDK_CU_ASSERT_FATAL(rc == 0); 6438 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6439 poll_threads(); 6440 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6441 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6442 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6443 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear); 6444 6445 /* Reload bs and re-open blob to verify that xattr was not persisted. */ 6446 spdk_blob_close(blob, blob_op_complete, NULL); 6447 poll_threads(); 6448 CU_ASSERT(g_bserrno == 0); 6449 6450 ut_bs_reload(&bs, NULL); 6451 6452 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6453 poll_threads(); 6454 CU_ASSERT(g_bserrno == 0); 6455 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6456 blob = g_blob; 6457 6458 rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length); 6459 SPDK_CU_ASSERT_FATAL(rc == -ENOENT); 6460 6461 poller_iterations++; 6462 /* Stop at high iteration count to prevent infinite loop. 6463 * This value should be enough for first md sync to complete in any case. */ 6464 SPDK_CU_ASSERT_FATAL(poller_iterations < 50); 6465 } 6466 6467 free(xattr); 6468 6469 ut_blob_close_and_delete(bs, blob); 6470 6471 spdk_bs_free_io_channel(channel); 6472 poll_threads(); 6473 } 6474 6475 static void 6476 suite_bs_setup(void) 6477 { 6478 struct spdk_bs_dev *dev; 6479 6480 dev = init_dev(); 6481 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6482 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 6483 poll_threads(); 6484 CU_ASSERT(g_bserrno == 0); 6485 CU_ASSERT(g_bs != NULL); 6486 } 6487 6488 static void 6489 suite_bs_cleanup(void) 6490 { 6491 spdk_bs_unload(g_bs, bs_op_complete, NULL); 6492 poll_threads(); 6493 CU_ASSERT(g_bserrno == 0); 6494 g_bs = NULL; 6495 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6496 } 6497 6498 static struct spdk_blob * 6499 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts) 6500 { 6501 struct spdk_blob *blob; 6502 struct spdk_blob_opts create_blob_opts; 6503 spdk_blob_id blobid; 6504 6505 if (blob_opts == NULL) { 6506 ut_spdk_blob_opts_init(&create_blob_opts); 6507 blob_opts = &create_blob_opts; 6508 } 6509 6510 spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL); 6511 poll_threads(); 6512 CU_ASSERT(g_bserrno == 0); 6513 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6514 blobid = g_blobid; 6515 g_blobid = -1; 6516 6517 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6518 poll_threads(); 6519 CU_ASSERT(g_bserrno == 0); 6520 CU_ASSERT(g_blob != NULL); 6521 blob = g_blob; 6522 6523 g_blob = NULL; 6524 g_bserrno = -1; 6525 6526 return blob; 6527 } 6528 6529 static void 6530 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob) 6531 { 6532 spdk_blob_id blobid = spdk_blob_get_id(blob); 6533 6534 spdk_blob_close(blob, blob_op_complete, NULL); 6535 poll_threads(); 6536 CU_ASSERT(g_bserrno == 0); 6537 g_blob = NULL; 6538 6539 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6540 poll_threads(); 6541 
CU_ASSERT(g_bserrno == 0); 6542 g_bserrno = -1; 6543 } 6544 6545 int main(int argc, char **argv) 6546 { 6547 CU_pSuite suite, suite_bs; 6548 unsigned int num_failures; 6549 6550 CU_set_error_action(CUEA_ABORT); 6551 CU_initialize_registry(); 6552 6553 suite = CU_add_suite("blob", NULL, NULL); 6554 suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL, 6555 suite_bs_setup, suite_bs_cleanup); 6556 6557 CU_ADD_TEST(suite, blob_init); 6558 CU_ADD_TEST(suite_bs, blob_open); 6559 CU_ADD_TEST(suite_bs, blob_create); 6560 CU_ADD_TEST(suite_bs, blob_create_internal); 6561 CU_ADD_TEST(suite, blob_thin_provision); 6562 CU_ADD_TEST(suite_bs, blob_snapshot); 6563 CU_ADD_TEST(suite_bs, blob_clone); 6564 CU_ADD_TEST(suite_bs, blob_inflate); 6565 CU_ADD_TEST(suite_bs, blob_delete); 6566 CU_ADD_TEST(suite_bs, blob_resize); 6567 CU_ADD_TEST(suite, blob_read_only); 6568 CU_ADD_TEST(suite_bs, channel_ops); 6569 CU_ADD_TEST(suite_bs, blob_super); 6570 CU_ADD_TEST(suite_bs, blob_write); 6571 CU_ADD_TEST(suite_bs, blob_read); 6572 CU_ADD_TEST(suite_bs, blob_rw_verify); 6573 CU_ADD_TEST(suite_bs, blob_rw_verify_iov); 6574 CU_ADD_TEST(suite_bs, blob_rw_verify_iov_nomem); 6575 CU_ADD_TEST(suite_bs, blob_rw_iov_read_only); 6576 CU_ADD_TEST(suite_bs, blob_unmap); 6577 CU_ADD_TEST(suite_bs, blob_iter); 6578 CU_ADD_TEST(suite_bs, blob_xattr); 6579 CU_ADD_TEST(suite, bs_load); 6580 CU_ADD_TEST(suite_bs, bs_load_pending_removal); 6581 CU_ADD_TEST(suite, bs_load_custom_cluster_size); 6582 CU_ADD_TEST(suite_bs, bs_unload); 6583 CU_ADD_TEST(suite, bs_cluster_sz); 6584 CU_ADD_TEST(suite_bs, bs_usable_clusters); 6585 CU_ADD_TEST(suite, bs_resize_md); 6586 CU_ADD_TEST(suite, bs_destroy); 6587 CU_ADD_TEST(suite, bs_type); 6588 CU_ADD_TEST(suite, bs_super_block); 6589 CU_ADD_TEST(suite, blob_serialize); 6590 CU_ADD_TEST(suite_bs, blob_crc); 6591 CU_ADD_TEST(suite, super_block_crc); 6592 CU_ADD_TEST(suite_bs, blob_dirty_shutdown); 6593 CU_ADD_TEST(suite_bs, blob_flags); 6594 CU_ADD_TEST(suite_bs, bs_version); 6595 CU_ADD_TEST(suite_bs, blob_set_xattrs); 6596 CU_ADD_TEST(suite_bs, blob_thin_prov_alloc); 6597 CU_ADD_TEST(suite_bs, blob_insert_cluster_msg); 6598 CU_ADD_TEST(suite_bs, blob_thin_prov_rw); 6599 CU_ADD_TEST(suite_bs, blob_thin_prov_rle); 6600 CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov); 6601 CU_ADD_TEST(suite, bs_load_iter); 6602 CU_ADD_TEST(suite_bs, blob_snapshot_rw); 6603 CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov); 6604 CU_ADD_TEST(suite, blob_relations); 6605 CU_ADD_TEST(suite, blob_relations2); 6606 CU_ADD_TEST(suite_bs, blob_delete_snapshot_power_failure); 6607 CU_ADD_TEST(suite_bs, blob_create_snapshot_power_failure); 6608 CU_ADD_TEST(suite_bs, blob_inflate_rw); 6609 CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io); 6610 CU_ADD_TEST(suite_bs, blob_operation_split_rw); 6611 CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov); 6612 CU_ADD_TEST(suite, blob_io_unit); 6613 CU_ADD_TEST(suite, blob_io_unit_compatiblity); 6614 CU_ADD_TEST(suite_bs, blob_simultaneous_operations); 6615 CU_ADD_TEST(suite_bs, blob_persist); 6616 6617 allocate_threads(2); 6618 set_thread(0); 6619 6620 g_dev_buffer = calloc(1, DEV_BUFFER_SIZE); 6621 6622 CU_basic_set_mode(CU_BRM_VERBOSE); 6623 g_use_extent_table = false; 6624 CU_basic_run_tests(); 6625 num_failures = CU_get_number_of_failures(); 6626 g_use_extent_table = true; 6627 CU_basic_run_tests(); 6628 num_failures += CU_get_number_of_failures(); 6629 CU_cleanup_registry(); 6630 6631 free(g_dev_buffer); 6632 6633 free_threads(); 6634 6635 return num_failures; 6636 } 6637