/*-
 * BSD LICENSE
 *
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk/blob.h"
#include "spdk/string.h"
#include "spdk_internal/thread.h"

#include "common/lib/ut_multithread.c"
#include "../bs_dev_common.c"
#include "blob/blobstore.c"
#include "blob/request.c"
#include "blob/zeroes.c"
#include "blob/blob_bs_dev.c"

struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob;
int g_bserrno;
struct spdk_xattr_names *g_names;
int g_done;
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;
bool g_use_extent_table = false;

struct spdk_bs_super_block_ver1 {
	uint8_t  signature[8];
	uint32_t version;
	uint32_t length;
	uint32_t clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id super_blob;

	uint32_t cluster_size; /* In bytes */

	uint32_t used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t used_page_mask_len; /* Count, in pages */

	uint32_t used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t used_cluster_mask_len; /* Count, in pages */

	uint32_t md_start; /* Offset from beginning of disk, in pages */
	uint32_t md_len; /* Count, in pages */

	uint8_t reserved[4036];
	uint32_t crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");

static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
		struct spdk_blob_opts *blob_opts);

static void
_get_xattr_value(void *arg, const char *name,
		 const void **value, size_t *value_len)
{
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == &g_ctx);

	/* Bound by element count, not sizeof() of the pointer array */
	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
		if (!strcmp(name, g_xattr_names[i])) {
			*value_len = strlen(g_xattr_values[i]);
			*value = g_xattr_values[i];
			break;
		}
	}
}

static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value_len = 0;
	*value = NULL;
}

static int
_get_snapshots_count(struct spdk_blob_store *bs)
{
	struct spdk_blob_list *snapshot = NULL;
	int count = 0;

	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
		count += 1;
	}

	return count;
}

static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts);
	opts->use_extent_table = g_use_extent_table;
}

static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}

static void
blob_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}

static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	_spdk_bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

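	/* Reset the global bserrno so the caller's next completion callback
	 * must set it explicitly rather than inheriting this load's result. */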
	g_bserrno = -1;
}

static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}

static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again. It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference. This
	 * should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again. It should succeed. This tests the case
	 * where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}

static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	_spdk_blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	_spdk_bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options */

	_spdk_bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts);
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly. This makes sure that when we load again
	 * and try to recover a valid used_cluster map, that blobstore will
	 * ignore clusters with index 0 since these are unallocated clusters.
	 */
532 */ 533 ut_bs_dirty_load(&bs, &bs_opts); 534 535 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 536 poll_threads(); 537 CU_ASSERT(g_bserrno == 0); 538 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 539 blob = g_blob; 540 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 541 542 spdk_blob_close(blob, blob_op_complete, NULL); 543 poll_threads(); 544 CU_ASSERT(g_bserrno == 0); 545 546 spdk_bs_unload(bs, bs_op_complete, NULL); 547 poll_threads(); 548 CU_ASSERT(g_bserrno == 0); 549 g_bs = NULL; 550 } 551 552 static void 553 blob_snapshot(void) 554 { 555 struct spdk_blob_store *bs = g_bs; 556 struct spdk_blob *blob; 557 struct spdk_blob *snapshot, *snapshot2; 558 struct spdk_blob_bs_dev *blob_bs_dev; 559 struct spdk_blob_opts opts; 560 struct spdk_blob_xattr_opts xattrs; 561 spdk_blob_id blobid; 562 spdk_blob_id snapshotid; 563 spdk_blob_id snapshotid2; 564 const void *value; 565 size_t value_len; 566 int rc; 567 spdk_blob_id ids[2]; 568 size_t count; 569 570 /* Create blob with 10 clusters */ 571 ut_spdk_blob_opts_init(&opts); 572 opts.num_clusters = 10; 573 574 blob = ut_blob_create_and_open(bs, &opts); 575 blobid = spdk_blob_get_id(blob); 576 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 577 578 /* Create snapshot from blob */ 579 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0); 580 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 581 poll_threads(); 582 CU_ASSERT(g_bserrno == 0); 583 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 584 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1); 585 snapshotid = g_blobid; 586 587 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 588 poll_threads(); 589 CU_ASSERT(g_bserrno == 0); 590 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 591 snapshot = g_blob; 592 CU_ASSERT(snapshot->data_ro == true); 593 CU_ASSERT(snapshot->md_ro == true); 594 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); 595 596 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 597 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 598 CU_ASSERT(spdk_mem_all_zero(blob->active.clusters, 599 blob->active.num_clusters * sizeof(blob->active.clusters[0]))); 600 601 /* Try to create snapshot from clone with xattrs */ 602 xattrs.names = g_xattr_names; 603 xattrs.get_value = _get_xattr_value; 604 xattrs.count = 3; 605 xattrs.ctx = &g_ctx; 606 spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL); 607 poll_threads(); 608 CU_ASSERT(g_bserrno == 0); 609 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 610 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2); 611 snapshotid2 = g_blobid; 612 613 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 614 CU_ASSERT(g_bserrno == 0); 615 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 616 snapshot2 = g_blob; 617 CU_ASSERT(snapshot2->data_ro == true); 618 CU_ASSERT(snapshot2->md_ro == true); 619 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10); 620 621 /* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */ 622 CU_ASSERT(snapshot->back_bs_dev == NULL); 623 SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL); 624 SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL); 625 626 blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 627 CU_ASSERT(blob_bs_dev->blob == snapshot2); 628 629 blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev; 630 CU_ASSERT(blob_bs_dev->blob == snapshot); 631 632 rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len); 633 CU_ASSERT(rc == 0); 634 SPDK_CU_ASSERT_FATAL(value != NULL); 635 
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshotid2);

	/* Try to create snapshot from snapshot */
	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);

	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
	spdk_blob_close(snapshot2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
}

static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that. */
	poll_thread_times(0, 3);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk */
	CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0] * SPDK_BS_PAGE_SIZE],
			 SPDK_BS_PAGE_SIZE) == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from not read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflating a thin blob with no parent should make it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
	}

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_inflate(void)
{
	_blob_inflate(false);
	_blob_inflate(true);
}

static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_resize(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	dev = init_dev();
	spdk_bs_opts_init(&opts);
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

}

static void
channel_ops(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	blob = ut_blob_create_and_open(bs, NULL);

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster. This allows
	 * us to make sure that the readv/writev code correctly accounts for I/O
	 * that crosses cluster boundaries. Start by asserting that the allocated
	 * clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	buf = calloc(1, 256 * 4096);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static uint32_t
bs_channel_get_req_count(struct spdk_io_channel *_channel)
{
	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
	struct spdk_bs_request_set *set;
	uint32_t count = 0;

	TAILQ_FOREACH(set, &channel->reqs, link) {
		count++;
	}

	return count;
}

static void
blob_rw_verify_iov_nomem(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_write[10 * 4096];
	struct iovec iov_write[3];
	uint32_t req_count;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Choose a page offset just before the cluster boundary. The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	MOCK_SET(calloc, NULL);
	req_count = bs_channel_get_req_count(channel);
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
	MOCK_CLEAR(calloc);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[4096];
	uint8_t payload_write[4096];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev fails if read_only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       uint8_t *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, read one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint8_t *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, write one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}
static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);

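	/* The remaining phases cross-check the split and non-split I/O paths:
	 * a page-by-page (never split) write is verified with one large read,
	 * then one large write is verified with a page-by-page read. */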
	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);
}

static void
blob_operation_split_rw_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	struct iovec iov_read[2];
	struct iovec iov_write[2];

	uint64_t i, j;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	for (i = 0; i < pages_per_payload; i++) {
		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
			uint64_t *tmp;

			tmp = (uint64_t *)payload_pattern;
			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
			*tmp = i + 1;
		}
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 3;
	iov_read[1].iov_base = payload_read + cluster_size * 3;
	iov_read[1].iov_len = cluster_size * 2;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* First of iovs fills whole blob except last page and second of iovs writes last page
	 * with a pattern. */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = payload_size - page_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = page_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 2;
	iov_read[1].iov_base = payload_read + cluster_size * 2;
	iov_read[1].iov_len = cluster_size * 3;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* First of iovs fills only first page and second of iovs writes whole blob except
	 * first page with a pattern. */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = page_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = payload_size - page_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 4;
	iov_read[1].iov_base = payload_read + cluster_size * 4;
	iov_read[1].iov_len = cluster_size;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size;
	iov_read[1].iov_base = payload_read + cluster_size;
	iov_read[1].iov_len = cluster_size * 4;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
*/ 1831 iov_write[0].iov_base = payload_read; 1832 iov_write[0].iov_len = cluster_size * 2; 1833 iov_write[1].iov_base = payload_read + cluster_size * 2; 1834 iov_write[1].iov_len = cluster_size * 3; 1835 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1836 poll_threads(); 1837 CU_ASSERT(g_bserrno == 0); 1838 1839 memset(payload_read, 0xFF, payload_size); 1840 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1841 poll_threads(); 1842 CU_ASSERT(g_bserrno == 0); 1843 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1844 1845 spdk_blob_close(blob, blob_op_complete, NULL); 1846 CU_ASSERT(g_bserrno == 0); 1847 1848 spdk_bs_free_io_channel(channel); 1849 poll_threads(); 1850 1851 g_blob = NULL; 1852 g_blobid = 0; 1853 1854 free(payload_read); 1855 free(payload_write); 1856 free(payload_pattern); 1857 } 1858 1859 static void 1860 blob_unmap(void) 1861 { 1862 struct spdk_blob_store *bs = g_bs; 1863 struct spdk_blob *blob; 1864 struct spdk_io_channel *channel; 1865 struct spdk_blob_opts opts; 1866 uint8_t payload[4096]; 1867 int i; 1868 1869 channel = spdk_bs_alloc_io_channel(bs); 1870 CU_ASSERT(channel != NULL); 1871 1872 ut_spdk_blob_opts_init(&opts); 1873 opts.num_clusters = 10; 1874 1875 blob = ut_blob_create_and_open(bs, &opts); 1876 1877 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1878 poll_threads(); 1879 CU_ASSERT(g_bserrno == 0); 1880 1881 memset(payload, 0, sizeof(payload)); 1882 payload[0] = 0xFF; 1883 1884 /* 1885 * Set first byte of every cluster to 0xFF. 1886 * First cluster on device is reserved so let's start from cluster number 1 1887 */ 1888 for (i = 1; i < 11; i++) { 1889 g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF; 1890 } 1891 1892 /* Confirm writes */ 1893 for (i = 0; i < 10; i++) { 1894 payload[0] = 0; 1895 spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1, 1896 blob_op_complete, NULL); 1897 poll_threads(); 1898 CU_ASSERT(g_bserrno == 0); 1899 CU_ASSERT(payload[0] == 0xFF); 1900 } 1901 1902 /* Mark some clusters as unallocated */ 1903 blob->active.clusters[1] = 0; 1904 blob->active.clusters[2] = 0; 1905 blob->active.clusters[3] = 0; 1906 blob->active.clusters[6] = 0; 1907 blob->active.clusters[8] = 0; 1908 1909 /* Unmap clusters by resizing to 0 */ 1910 spdk_blob_resize(blob, 0, blob_op_complete, NULL); 1911 poll_threads(); 1912 CU_ASSERT(g_bserrno == 0); 1913 1914 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1915 poll_threads(); 1916 CU_ASSERT(g_bserrno == 0); 1917 1918 /* Confirm that only 'allocated' clusters were unmapped */ 1919 for (i = 1; i < 11; i++) { 1920 switch (i) { 1921 case 2: 1922 case 3: 1923 case 4: 1924 case 7: 1925 case 9: 1926 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF); 1927 break; 1928 default: 1929 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0); 1930 break; 1931 } 1932 } 1933 1934 spdk_blob_close(blob, blob_op_complete, NULL); 1935 poll_threads(); 1936 CU_ASSERT(g_bserrno == 0); 1937 1938 spdk_bs_free_io_channel(channel); 1939 poll_threads(); 1940 } 1941 1942 1943 static void 1944 blob_iter(void) 1945 { 1946 struct spdk_blob_store *bs = g_bs; 1947 struct spdk_blob *blob; 1948 spdk_blob_id blobid; 1949 struct spdk_blob_opts blob_opts; 1950 1951 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1952 poll_threads(); 1953 CU_ASSERT(g_blob == NULL); 1954 CU_ASSERT(g_bserrno == -ENOENT); 1955 1956 ut_spdk_blob_opts_init(&blob_opts); 1957 
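/* Create exactly one blob so the iterator below has a single entry to walk:
 * spdk_bs_iter_first() should return that blob and the following
 * spdk_bs_iter_next() should complete with -ENOENT. The iteration pattern
 * exercised here is, as a sketch (iter_cb/ctx are placeholder names,
 * error handling elided):
 *   spdk_bs_iter_first(bs, iter_cb, ctx);
 *   ... in iter_cb: use the blob, then call spdk_bs_iter_next(bs, blob, iter_cb, ctx);
 *   ... iteration ends once iter_cb is invoked with -ENOENT.
 */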
spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 1958 poll_threads(); 1959 CU_ASSERT(g_bserrno == 0); 1960 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1961 blobid = g_blobid; 1962 1963 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 1964 poll_threads(); 1965 CU_ASSERT(g_blob != NULL); 1966 CU_ASSERT(g_bserrno == 0); 1967 blob = g_blob; 1968 CU_ASSERT(spdk_blob_get_id(blob) == blobid); 1969 1970 spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL); 1971 poll_threads(); 1972 CU_ASSERT(g_blob == NULL); 1973 CU_ASSERT(g_bserrno == -ENOENT); 1974 } 1975 1976 static void 1977 blob_xattr(void) 1978 { 1979 struct spdk_blob_store *bs = g_bs; 1980 struct spdk_blob *blob; 1981 spdk_blob_id blobid; 1982 uint64_t length; 1983 int rc; 1984 const char *name1, *name2; 1985 const void *value; 1986 size_t value_len; 1987 struct spdk_xattr_names *names; 1988 1989 blob = ut_blob_create_and_open(bs, NULL); 1990 blobid = spdk_blob_get_id(blob); 1991 1992 /* Test that set_xattr fails if md_ro flag is set. */ 1993 blob->md_ro = true; 1994 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 1995 CU_ASSERT(rc == -EPERM); 1996 1997 blob->md_ro = false; 1998 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 1999 CU_ASSERT(rc == 0); 2000 2001 length = 2345; 2002 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2003 CU_ASSERT(rc == 0); 2004 2005 /* Overwrite "length" xattr. */ 2006 length = 3456; 2007 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2008 CU_ASSERT(rc == 0); 2009 2010 /* get_xattr should still work even if md_ro flag is set. */ 2011 value = NULL; 2012 blob->md_ro = true; 2013 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2014 CU_ASSERT(rc == 0); 2015 SPDK_CU_ASSERT_FATAL(value != NULL); 2016 CU_ASSERT(*(uint64_t *)value == length); 2017 CU_ASSERT(value_len == 8); 2018 blob->md_ro = false; 2019 2020 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2021 CU_ASSERT(rc == -ENOENT); 2022 2023 names = NULL; 2024 rc = spdk_blob_get_xattr_names(blob, &names); 2025 CU_ASSERT(rc == 0); 2026 SPDK_CU_ASSERT_FATAL(names != NULL); 2027 CU_ASSERT(spdk_xattr_names_get_count(names) == 2); 2028 name1 = spdk_xattr_names_get_name(names, 0); 2029 SPDK_CU_ASSERT_FATAL(name1 != NULL); 2030 CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length")); 2031 name2 = spdk_xattr_names_get_name(names, 1); 2032 SPDK_CU_ASSERT_FATAL(name2 != NULL); 2033 CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length")); 2034 CU_ASSERT(strcmp(name1, name2)); 2035 spdk_xattr_names_free(names); 2036 2037 /* Confirm that remove_xattr fails if md_ro is set to true. 
*/ 2038 blob->md_ro = true; 2039 rc = spdk_blob_remove_xattr(blob, "name"); 2040 CU_ASSERT(rc == -EPERM); 2041 2042 blob->md_ro = false; 2043 rc = spdk_blob_remove_xattr(blob, "name"); 2044 CU_ASSERT(rc == 0); 2045 2046 rc = spdk_blob_remove_xattr(blob, "foobar"); 2047 CU_ASSERT(rc == -ENOENT); 2048 2049 /* Set internal xattr */ 2050 length = 7898; 2051 rc = _spdk_blob_set_xattr(blob, "internal", &length, sizeof(length), true); 2052 CU_ASSERT(rc == 0); 2053 rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2054 CU_ASSERT(rc == 0); 2055 CU_ASSERT(*(uint64_t *)value == length); 2056 /* try to get public xattr with same name */ 2057 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2058 CU_ASSERT(rc != 0); 2059 rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, false); 2060 CU_ASSERT(rc != 0); 2061 /* Check if SPDK_BLOB_INTERNAL_XATTR is set */ 2062 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 2063 SPDK_BLOB_INTERNAL_XATTR); 2064 2065 spdk_blob_close(blob, blob_op_complete, NULL); 2066 poll_threads(); 2067 2068 /* Check if xattrs are persisted */ 2069 ut_bs_reload(&bs, NULL); 2070 2071 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2072 poll_threads(); 2073 CU_ASSERT(g_bserrno == 0); 2074 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2075 blob = g_blob; 2076 2077 rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2078 CU_ASSERT(rc == 0); 2079 CU_ASSERT(*(uint64_t *)value == length); 2080 2081 /* try to get internal xattr trough public call */ 2082 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2083 CU_ASSERT(rc != 0); 2084 2085 rc = _spdk_blob_remove_xattr(blob, "internal", true); 2086 CU_ASSERT(rc == 0); 2087 2088 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0); 2089 2090 spdk_blob_close(blob, blob_op_complete, NULL); 2091 poll_threads(); 2092 CU_ASSERT(g_bserrno == 0); 2093 } 2094 2095 static void 2096 bs_load(void) 2097 { 2098 struct spdk_blob_store *bs; 2099 struct spdk_bs_dev *dev; 2100 spdk_blob_id blobid; 2101 struct spdk_blob *blob; 2102 struct spdk_bs_super_block *super_block; 2103 uint64_t length; 2104 int rc; 2105 const void *value; 2106 size_t value_len; 2107 struct spdk_bs_opts opts; 2108 struct spdk_blob_opts blob_opts; 2109 2110 dev = init_dev(); 2111 spdk_bs_opts_init(&opts); 2112 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2113 2114 /* Initialize a new blob store */ 2115 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2116 poll_threads(); 2117 CU_ASSERT(g_bserrno == 0); 2118 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2119 bs = g_bs; 2120 2121 /* Try to open a blobid that does not exist */ 2122 spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL); 2123 poll_threads(); 2124 CU_ASSERT(g_bserrno == -ENOENT); 2125 CU_ASSERT(g_blob == NULL); 2126 2127 /* Create a blob */ 2128 blob = ut_blob_create_and_open(bs, NULL); 2129 blobid = spdk_blob_get_id(blob); 2130 2131 /* Try again to open valid blob but without the upper bit set */ 2132 spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL); 2133 poll_threads(); 2134 CU_ASSERT(g_bserrno == -ENOENT); 2135 CU_ASSERT(g_blob == NULL); 2136 2137 /* Set some xattrs */ 2138 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2139 CU_ASSERT(rc == 0); 2140 2141 length = 2345; 2142 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2143 CU_ASSERT(rc == 0); 2144 2145 /* Resize the blob */ 
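/* Growing the blob to 10 clusters before unloading gives the reload path
 * below something concrete to check: after the clean shutdown and
 * spdk_bs_load(), spdk_blob_get_num_clusters() is expected to still
 * report 10. */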
2146 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2147 poll_threads(); 2148 CU_ASSERT(g_bserrno == 0); 2149 2150 spdk_blob_close(blob, blob_op_complete, NULL); 2151 poll_threads(); 2152 CU_ASSERT(g_bserrno == 0); 2153 blob = NULL; 2154 g_blob = NULL; 2155 g_blobid = SPDK_BLOBID_INVALID; 2156 2157 /* Unload the blob store */ 2158 spdk_bs_unload(bs, bs_op_complete, NULL); 2159 poll_threads(); 2160 CU_ASSERT(g_bserrno == 0); 2161 g_bs = NULL; 2162 g_blob = NULL; 2163 g_blobid = 0; 2164 2165 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2166 CU_ASSERT(super_block->clean == 1); 2167 2168 /* Load should fail for device with an unsupported blocklen */ 2169 dev = init_dev(); 2170 dev->blocklen = SPDK_BS_PAGE_SIZE * 2; 2171 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2172 poll_threads(); 2173 CU_ASSERT(g_bserrno == -EINVAL); 2174 2175 /* Load should fail when max_md_ops is set to zero */ 2176 dev = init_dev(); 2177 spdk_bs_opts_init(&opts); 2178 opts.max_md_ops = 0; 2179 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2180 poll_threads(); 2181 CU_ASSERT(g_bserrno == -EINVAL); 2182 2183 /* Load should fail when max_channel_ops is set to zero */ 2184 dev = init_dev(); 2185 spdk_bs_opts_init(&opts); 2186 opts.max_channel_ops = 0; 2187 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2188 poll_threads(); 2189 CU_ASSERT(g_bserrno == -EINVAL); 2190 2191 /* Load an existing blob store */ 2192 dev = init_dev(); 2193 spdk_bs_opts_init(&opts); 2194 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2195 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2196 poll_threads(); 2197 CU_ASSERT(g_bserrno == 0); 2198 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2199 bs = g_bs; 2200 2201 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2202 CU_ASSERT(super_block->clean == 1); 2203 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2204 2205 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2206 poll_threads(); 2207 CU_ASSERT(g_bserrno == 0); 2208 CU_ASSERT(g_blob != NULL); 2209 blob = g_blob; 2210 2211 /* Verify that blobstore is marked dirty after first metadata sync */ 2212 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2213 CU_ASSERT(super_block->clean == 1); 2214 2215 /* Get the xattrs */ 2216 value = NULL; 2217 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2218 CU_ASSERT(rc == 0); 2219 SPDK_CU_ASSERT_FATAL(value != NULL); 2220 CU_ASSERT(*(uint64_t *)value == length); 2221 CU_ASSERT(value_len == 8); 2222 2223 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2224 CU_ASSERT(rc == -ENOENT); 2225 2226 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 2227 2228 spdk_blob_close(blob, blob_op_complete, NULL); 2229 poll_threads(); 2230 CU_ASSERT(g_bserrno == 0); 2231 blob = NULL; 2232 g_blob = NULL; 2233 2234 spdk_bs_unload(bs, bs_op_complete, NULL); 2235 poll_threads(); 2236 CU_ASSERT(g_bserrno == 0); 2237 g_bs = NULL; 2238 2239 /* Load should fail: bdev size < saved size */ 2240 dev = init_dev(); 2241 dev->blockcnt /= 2; 2242 2243 spdk_bs_opts_init(&opts); 2244 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2245 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2246 poll_threads(); 2247 2248 CU_ASSERT(g_bserrno == -EILSEQ); 2249 2250 /* Load should succeed: bdev size > saved size */ 2251 dev = init_dev(); 2252 dev->blockcnt *= 4; 2253 2254 spdk_bs_opts_init(&opts); 2255 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
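/* As with the earlier loads, the bstype passed in opts must match the
 * "TESTTYPE" recorded in the super block by the initial spdk_bs_init();
 * a different non-empty bstype would cause the load to be rejected
 * (the bs_type() test below covers that case). */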
2256 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2257 poll_threads(); 2258 CU_ASSERT(g_bserrno == 0); 2259 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2260 bs = g_bs; 2261 2262 CU_ASSERT(g_bserrno == 0); 2263 spdk_bs_unload(bs, bs_op_complete, NULL); 2264 poll_threads(); 2265 2266 2267 /* Test compatibility mode */ 2268 2269 dev = init_dev(); 2270 super_block->size = 0; 2271 super_block->crc = _spdk_blob_md_page_calc_crc(super_block); 2272 2273 spdk_bs_opts_init(&opts); 2274 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2275 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2276 poll_threads(); 2277 CU_ASSERT(g_bserrno == 0); 2278 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2279 bs = g_bs; 2280 2281 /* Create a blob */ 2282 ut_spdk_blob_opts_init(&blob_opts); 2283 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2284 poll_threads(); 2285 CU_ASSERT(g_bserrno == 0); 2286 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2287 2288 /* Blobstore should update number of blocks in super_block */ 2289 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2290 CU_ASSERT(super_block->clean == 0); 2291 2292 spdk_bs_unload(bs, bs_op_complete, NULL); 2293 poll_threads(); 2294 CU_ASSERT(g_bserrno == 0); 2295 CU_ASSERT(super_block->clean == 1); 2296 g_bs = NULL; 2297 2298 } 2299 2300 static void 2301 bs_load_pending_removal(void) 2302 { 2303 struct spdk_blob_store *bs = g_bs; 2304 struct spdk_blob_opts opts; 2305 struct spdk_blob *blob, *snapshot; 2306 spdk_blob_id blobid, snapshotid; 2307 const void *value; 2308 size_t value_len; 2309 int rc; 2310 2311 /* Create blob */ 2312 ut_spdk_blob_opts_init(&opts); 2313 opts.num_clusters = 10; 2314 2315 blob = ut_blob_create_and_open(bs, &opts); 2316 blobid = spdk_blob_get_id(blob); 2317 2318 /* Create snapshot */ 2319 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 2320 poll_threads(); 2321 CU_ASSERT(g_bserrno == 0); 2322 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2323 snapshotid = g_blobid; 2324 2325 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2326 poll_threads(); 2327 CU_ASSERT(g_bserrno == 0); 2328 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2329 snapshot = g_blob; 2330 2331 /* Set SNAPSHOT_PENDING_REMOVAL xattr */ 2332 snapshot->md_ro = false; 2333 rc = _spdk_blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2334 CU_ASSERT(rc == 0); 2335 snapshot->md_ro = true; 2336 2337 spdk_blob_close(snapshot, blob_op_complete, NULL); 2338 poll_threads(); 2339 CU_ASSERT(g_bserrno == 0); 2340 2341 spdk_blob_close(blob, blob_op_complete, NULL); 2342 poll_threads(); 2343 CU_ASSERT(g_bserrno == 0); 2344 2345 /* Reload blobstore */ 2346 ut_bs_reload(&bs, NULL); 2347 2348 /* Snapshot should not be removed as blob is still pointing to it */ 2349 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2350 poll_threads(); 2351 CU_ASSERT(g_bserrno == 0); 2352 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2353 snapshot = g_blob; 2354 2355 /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ 2356 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 2357 CU_ASSERT(rc != 0); 2358 2359 /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ 2360 snapshot->md_ro = false; 2361 rc = _spdk_blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2362 CU_ASSERT(rc == 0); 2363 snapshot->md_ro = true; 2364 2365 spdk_bs_open_blob(bs, blobid, 
blob_op_with_handle_complete, NULL); 2366 poll_threads(); 2367 CU_ASSERT(g_bserrno == 0); 2368 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2369 blob = g_blob; 2370 2371 /* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ 2372 _spdk_blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 2373 2374 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2375 poll_threads(); 2376 CU_ASSERT(g_bserrno == 0); 2377 2378 spdk_blob_close(snapshot, blob_op_complete, NULL); 2379 poll_threads(); 2380 CU_ASSERT(g_bserrno == 0); 2381 2382 spdk_blob_close(blob, blob_op_complete, NULL); 2383 poll_threads(); 2384 CU_ASSERT(g_bserrno == 0); 2385 2386 /* Reload blobstore */ 2387 ut_bs_reload(&bs, NULL); 2388 2389 /* Snapshot should be removed as blob is not pointing to it anymore */ 2390 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2391 poll_threads(); 2392 CU_ASSERT(g_bserrno != 0); 2393 } 2394 2395 static void 2396 bs_load_custom_cluster_size(void) 2397 { 2398 struct spdk_blob_store *bs; 2399 struct spdk_bs_dev *dev; 2400 struct spdk_bs_super_block *super_block; 2401 struct spdk_bs_opts opts; 2402 uint32_t custom_cluster_size = 4194304; /* 4MiB */ 2403 uint32_t cluster_sz; 2404 uint64_t total_clusters; 2405 2406 dev = init_dev(); 2407 spdk_bs_opts_init(&opts); 2408 opts.cluster_sz = custom_cluster_size; 2409 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2410 2411 /* Initialize a new blob store */ 2412 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2413 poll_threads(); 2414 CU_ASSERT(g_bserrno == 0); 2415 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2416 bs = g_bs; 2417 cluster_sz = bs->cluster_sz; 2418 total_clusters = bs->total_clusters; 2419 2420 /* Unload the blob store */ 2421 spdk_bs_unload(bs, bs_op_complete, NULL); 2422 poll_threads(); 2423 CU_ASSERT(g_bserrno == 0); 2424 g_bs = NULL; 2425 g_blob = NULL; 2426 g_blobid = 0; 2427 2428 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2429 CU_ASSERT(super_block->clean == 1); 2430 2431 /* Load an existing blob store */ 2432 dev = init_dev(); 2433 spdk_bs_opts_init(&opts); 2434 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2435 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2436 poll_threads(); 2437 CU_ASSERT(g_bserrno == 0); 2438 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2439 bs = g_bs; 2440 /* Compare cluster size and number to one after initialization */ 2441 CU_ASSERT(cluster_sz == bs->cluster_sz); 2442 CU_ASSERT(total_clusters == bs->total_clusters); 2443 2444 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2445 CU_ASSERT(super_block->clean == 1); 2446 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2447 2448 spdk_bs_unload(bs, bs_op_complete, NULL); 2449 poll_threads(); 2450 CU_ASSERT(g_bserrno == 0); 2451 CU_ASSERT(super_block->clean == 1); 2452 g_bs = NULL; 2453 } 2454 2455 static void 2456 bs_type(void) 2457 { 2458 struct spdk_blob_store *bs; 2459 struct spdk_bs_dev *dev; 2460 struct spdk_bs_opts opts; 2461 2462 dev = init_dev(); 2463 spdk_bs_opts_init(&opts); 2464 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2465 2466 /* Initialize a new blob store */ 2467 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2468 poll_threads(); 2469 CU_ASSERT(g_bserrno == 0); 2470 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2471 bs = g_bs; 2472 2473 /* Unload the blob store */ 2474 spdk_bs_unload(bs, bs_op_complete, NULL); 2475 poll_threads(); 2476 CU_ASSERT(g_bserrno == 0); 2477 g_bs = NULL; 2478 g_blob = NULL; 2479 g_blobid = 0; 
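/* The loads below exercise the bstype matching rules: a non-empty bstype
 * that does not match the one stored on disk ("NONEXISTING" vs "TESTTYPE")
 * must fail, while an all-zero bstype acts as a wildcard and loads the
 * blobstore successfully. */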
2480 2481 /* Load non existing blobstore type */ 2482 dev = init_dev(); 2483 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2484 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2485 poll_threads(); 2486 CU_ASSERT(g_bserrno != 0); 2487 2488 /* Load with empty blobstore type */ 2489 dev = init_dev(); 2490 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2491 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2492 poll_threads(); 2493 CU_ASSERT(g_bserrno == 0); 2494 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2495 bs = g_bs; 2496 2497 spdk_bs_unload(bs, bs_op_complete, NULL); 2498 poll_threads(); 2499 CU_ASSERT(g_bserrno == 0); 2500 g_bs = NULL; 2501 2502 /* Initialize a new blob store with empty bstype */ 2503 dev = init_dev(); 2504 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2505 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2506 poll_threads(); 2507 CU_ASSERT(g_bserrno == 0); 2508 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2509 bs = g_bs; 2510 2511 spdk_bs_unload(bs, bs_op_complete, NULL); 2512 poll_threads(); 2513 CU_ASSERT(g_bserrno == 0); 2514 g_bs = NULL; 2515 2516 /* Load non existing blobstore type */ 2517 dev = init_dev(); 2518 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2519 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2520 poll_threads(); 2521 CU_ASSERT(g_bserrno != 0); 2522 2523 /* Load with empty blobstore type */ 2524 dev = init_dev(); 2525 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2526 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2527 poll_threads(); 2528 CU_ASSERT(g_bserrno == 0); 2529 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2530 bs = g_bs; 2531 2532 spdk_bs_unload(bs, bs_op_complete, NULL); 2533 poll_threads(); 2534 CU_ASSERT(g_bserrno == 0); 2535 g_bs = NULL; 2536 } 2537 2538 static void 2539 bs_super_block(void) 2540 { 2541 struct spdk_blob_store *bs; 2542 struct spdk_bs_dev *dev; 2543 struct spdk_bs_super_block *super_block; 2544 struct spdk_bs_opts opts; 2545 struct spdk_bs_super_block_ver1 super_block_v1; 2546 2547 dev = init_dev(); 2548 spdk_bs_opts_init(&opts); 2549 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2550 2551 /* Initialize a new blob store */ 2552 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2553 poll_threads(); 2554 CU_ASSERT(g_bserrno == 0); 2555 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2556 bs = g_bs; 2557 2558 /* Unload the blob store */ 2559 spdk_bs_unload(bs, bs_op_complete, NULL); 2560 poll_threads(); 2561 CU_ASSERT(g_bserrno == 0); 2562 g_bs = NULL; 2563 g_blob = NULL; 2564 g_blobid = 0; 2565 2566 /* Load an existing blob store with version newer than supported */ 2567 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2568 super_block->version++; 2569 2570 dev = init_dev(); 2571 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2572 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2573 poll_threads(); 2574 CU_ASSERT(g_bserrno != 0); 2575 2576 /* Create a new blob store with super block version 1 */ 2577 dev = init_dev(); 2578 super_block_v1.version = 1; 2579 memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); 2580 super_block_v1.length = 0x1000; 2581 super_block_v1.clean = 1; 2582 super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; 2583 super_block_v1.cluster_size = 0x100000; 2584 super_block_v1.used_page_mask_start = 0x01; 2585 super_block_v1.used_page_mask_len = 0x01; 2586 super_block_v1.used_cluster_mask_start = 0x02; 
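/* In this hand-built version 1 layout the used-cluster mask occupies the
 * single page right after the used-page mask, and the metadata region
 * starts at page 3 with a length of 0x40 pages. */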
2587 super_block_v1.used_cluster_mask_len = 0x01; 2588 super_block_v1.md_start = 0x03; 2589 super_block_v1.md_len = 0x40; 2590 memset(super_block_v1.reserved, 0, 4036); 2591 super_block_v1.crc = _spdk_blob_md_page_calc_crc(&super_block_v1); 2592 memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); 2593 2594 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2595 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2596 poll_threads(); 2597 CU_ASSERT(g_bserrno == 0); 2598 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2599 bs = g_bs; 2600 2601 spdk_bs_unload(bs, bs_op_complete, NULL); 2602 poll_threads(); 2603 CU_ASSERT(g_bserrno == 0); 2604 g_bs = NULL; 2605 } 2606 2607 /* 2608 * Create a blobstore and then unload it. 2609 */ 2610 static void 2611 bs_unload(void) 2612 { 2613 struct spdk_blob_store *bs = g_bs; 2614 struct spdk_blob *blob; 2615 2616 /* Create a blob and open it. */ 2617 blob = ut_blob_create_and_open(bs, NULL); 2618 2619 /* Try to unload blobstore, should fail with open blob */ 2620 g_bserrno = -1; 2621 spdk_bs_unload(bs, bs_op_complete, NULL); 2622 poll_threads(); 2623 CU_ASSERT(g_bserrno == -EBUSY); 2624 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2625 2626 /* Close the blob, then successfully unload blobstore */ 2627 g_bserrno = -1; 2628 spdk_blob_close(blob, blob_op_complete, NULL); 2629 poll_threads(); 2630 CU_ASSERT(g_bserrno == 0); 2631 } 2632 2633 /* 2634 * Create a blobstore with a cluster size different than the default, and ensure it is 2635 * persisted. 2636 */ 2637 static void 2638 bs_cluster_sz(void) 2639 { 2640 struct spdk_blob_store *bs; 2641 struct spdk_bs_dev *dev; 2642 struct spdk_bs_opts opts; 2643 uint32_t cluster_sz; 2644 2645 /* Set cluster size to zero */ 2646 dev = init_dev(); 2647 spdk_bs_opts_init(&opts); 2648 opts.cluster_sz = 0; 2649 2650 /* Initialize a new blob store */ 2651 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2652 poll_threads(); 2653 CU_ASSERT(g_bserrno == -EINVAL); 2654 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2655 2656 /* 2657 * Set cluster size to blobstore page size, 2658 * to work it is required to be at least twice the blobstore page size. 2659 */ 2660 dev = init_dev(); 2661 spdk_bs_opts_init(&opts); 2662 opts.cluster_sz = SPDK_BS_PAGE_SIZE; 2663 2664 /* Initialize a new blob store */ 2665 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2666 poll_threads(); 2667 CU_ASSERT(g_bserrno == -ENOMEM); 2668 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2669 2670 /* 2671 * Set cluster size to lower than page size, 2672 * to work it is required to be at least twice the blobstore page size. 
2673 */ 2674 dev = init_dev(); 2675 spdk_bs_opts_init(&opts); 2676 opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; 2677 2678 /* Initialize a new blob store */ 2679 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2680 poll_threads(); 2681 CU_ASSERT(g_bserrno == -EINVAL); 2682 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2683 2684 /* Set cluster size to twice the default */ 2685 dev = init_dev(); 2686 spdk_bs_opts_init(&opts); 2687 opts.cluster_sz *= 2; 2688 cluster_sz = opts.cluster_sz; 2689 2690 /* Initialize a new blob store */ 2691 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2692 poll_threads(); 2693 CU_ASSERT(g_bserrno == 0); 2694 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2695 bs = g_bs; 2696 2697 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2698 2699 ut_bs_reload(&bs, &opts); 2700 2701 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2702 2703 spdk_bs_unload(bs, bs_op_complete, NULL); 2704 poll_threads(); 2705 CU_ASSERT(g_bserrno == 0); 2706 g_bs = NULL; 2707 } 2708 2709 /* 2710 * Create a blobstore, reload it and ensure total usable cluster count 2711 * stays the same. 2712 */ 2713 static void 2714 bs_usable_clusters(void) 2715 { 2716 struct spdk_blob_store *bs = g_bs; 2717 struct spdk_blob *blob; 2718 uint32_t clusters; 2719 int i; 2720 2721 2722 clusters = spdk_bs_total_data_cluster_count(bs); 2723 2724 ut_bs_reload(&bs, NULL); 2725 2726 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2727 2728 /* Create and resize blobs to make sure that useable cluster count won't change */ 2729 for (i = 0; i < 4; i++) { 2730 g_bserrno = -1; 2731 g_blobid = SPDK_BLOBID_INVALID; 2732 blob = ut_blob_create_and_open(bs, NULL); 2733 2734 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2735 poll_threads(); 2736 CU_ASSERT(g_bserrno == 0); 2737 2738 g_bserrno = -1; 2739 spdk_blob_close(blob, blob_op_complete, NULL); 2740 poll_threads(); 2741 CU_ASSERT(g_bserrno == 0); 2742 2743 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2744 } 2745 2746 /* Reload the blob store to make sure that nothing changed */ 2747 ut_bs_reload(&bs, NULL); 2748 2749 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2750 } 2751 2752 /* 2753 * Test resizing of the metadata blob. This requires creating enough blobs 2754 * so that one cluster is not enough to fit the metadata for those blobs. 2755 * To induce this condition to happen more quickly, we reduce the cluster 2756 * size to 16KB, which means only 4 4KB blob metadata pages can fit. 
2757 */ 2758 static void 2759 bs_resize_md(void) 2760 { 2761 struct spdk_blob_store *bs; 2762 const int CLUSTER_PAGE_COUNT = 4; 2763 const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; 2764 struct spdk_bs_dev *dev; 2765 struct spdk_bs_opts opts; 2766 struct spdk_blob *blob; 2767 struct spdk_blob_opts blob_opts; 2768 uint32_t cluster_sz; 2769 spdk_blob_id blobids[NUM_BLOBS]; 2770 int i; 2771 2772 2773 dev = init_dev(); 2774 spdk_bs_opts_init(&opts); 2775 opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; 2776 cluster_sz = opts.cluster_sz; 2777 2778 /* Initialize a new blob store */ 2779 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2780 poll_threads(); 2781 CU_ASSERT(g_bserrno == 0); 2782 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2783 bs = g_bs; 2784 2785 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2786 2787 ut_spdk_blob_opts_init(&blob_opts); 2788 2789 for (i = 0; i < NUM_BLOBS; i++) { 2790 g_bserrno = -1; 2791 g_blobid = SPDK_BLOBID_INVALID; 2792 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2793 poll_threads(); 2794 CU_ASSERT(g_bserrno == 0); 2795 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2796 blobids[i] = g_blobid; 2797 } 2798 2799 ut_bs_reload(&bs, &opts); 2800 2801 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2802 2803 for (i = 0; i < NUM_BLOBS; i++) { 2804 g_bserrno = -1; 2805 g_blob = NULL; 2806 spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); 2807 poll_threads(); 2808 CU_ASSERT(g_bserrno == 0); 2809 CU_ASSERT(g_blob != NULL); 2810 blob = g_blob; 2811 g_bserrno = -1; 2812 spdk_blob_close(blob, blob_op_complete, NULL); 2813 poll_threads(); 2814 CU_ASSERT(g_bserrno == 0); 2815 } 2816 2817 spdk_bs_unload(bs, bs_op_complete, NULL); 2818 poll_threads(); 2819 CU_ASSERT(g_bserrno == 0); 2820 g_bs = NULL; 2821 } 2822 2823 static void 2824 bs_destroy(void) 2825 { 2826 struct spdk_blob_store *bs; 2827 struct spdk_bs_dev *dev; 2828 2829 /* Initialize a new blob store */ 2830 dev = init_dev(); 2831 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2832 poll_threads(); 2833 CU_ASSERT(g_bserrno == 0); 2834 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2835 bs = g_bs; 2836 2837 /* Destroy the blob store */ 2838 g_bserrno = -1; 2839 spdk_bs_destroy(bs, bs_op_complete, NULL); 2840 poll_threads(); 2841 CU_ASSERT(g_bserrno == 0); 2842 2843 /* Loading an non-existent blob store should fail. 
*/ 2844 g_bs = NULL; 2845 dev = init_dev(); 2846 2847 g_bserrno = 0; 2848 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2849 poll_threads(); 2850 CU_ASSERT(g_bserrno != 0); 2851 } 2852 2853 /* Try to hit all of the corner cases associated with serializing 2854 * a blob to disk 2855 */ 2856 static void 2857 blob_serialize(void) 2858 { 2859 struct spdk_bs_dev *dev; 2860 struct spdk_bs_opts opts; 2861 struct spdk_blob_store *bs; 2862 spdk_blob_id blobid[2]; 2863 struct spdk_blob *blob[2]; 2864 uint64_t i; 2865 char *value; 2866 int rc; 2867 2868 dev = init_dev(); 2869 2870 /* Initialize a new blobstore with very small clusters */ 2871 spdk_bs_opts_init(&opts); 2872 opts.cluster_sz = dev->blocklen * 8; 2873 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2874 poll_threads(); 2875 CU_ASSERT(g_bserrno == 0); 2876 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2877 bs = g_bs; 2878 2879 /* Create and open two blobs */ 2880 for (i = 0; i < 2; i++) { 2881 blob[i] = ut_blob_create_and_open(bs, NULL); 2882 blobid[i] = spdk_blob_get_id(blob[i]); 2883 2884 /* Set a fairly large xattr on both blobs to eat up 2885 * metadata space 2886 */ 2887 value = calloc(dev->blocklen - 64, sizeof(char)); 2888 SPDK_CU_ASSERT_FATAL(value != NULL); 2889 memset(value, i, dev->blocklen / 2); 2890 rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); 2891 CU_ASSERT(rc == 0); 2892 free(value); 2893 } 2894 2895 /* Resize the blobs, alternating 1 cluster at a time. 2896 * This thwarts run length encoding and will cause spill 2897 * over of the extents. 2898 */ 2899 for (i = 0; i < 6; i++) { 2900 spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); 2901 poll_threads(); 2902 CU_ASSERT(g_bserrno == 0); 2903 } 2904 2905 for (i = 0; i < 2; i++) { 2906 spdk_blob_sync_md(blob[i], blob_op_complete, NULL); 2907 poll_threads(); 2908 CU_ASSERT(g_bserrno == 0); 2909 } 2910 2911 /* Close the blobs */ 2912 for (i = 0; i < 2; i++) { 2913 spdk_blob_close(blob[i], blob_op_complete, NULL); 2914 poll_threads(); 2915 CU_ASSERT(g_bserrno == 0); 2916 } 2917 2918 ut_bs_reload(&bs, &opts); 2919 2920 for (i = 0; i < 2; i++) { 2921 blob[i] = NULL; 2922 2923 spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); 2924 poll_threads(); 2925 CU_ASSERT(g_bserrno == 0); 2926 CU_ASSERT(g_blob != NULL); 2927 blob[i] = g_blob; 2928 2929 CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); 2930 2931 spdk_blob_close(blob[i], blob_op_complete, NULL); 2932 poll_threads(); 2933 CU_ASSERT(g_bserrno == 0); 2934 } 2935 2936 spdk_bs_unload(bs, bs_op_complete, NULL); 2937 poll_threads(); 2938 CU_ASSERT(g_bserrno == 0); 2939 g_bs = NULL; 2940 } 2941 2942 static void 2943 blob_crc(void) 2944 { 2945 struct spdk_blob_store *bs = g_bs; 2946 struct spdk_blob *blob; 2947 spdk_blob_id blobid; 2948 uint32_t page_num; 2949 int index; 2950 struct spdk_blob_md_page *page; 2951 2952 blob = ut_blob_create_and_open(bs, NULL); 2953 blobid = spdk_blob_get_id(blob); 2954 2955 spdk_blob_close(blob, blob_op_complete, NULL); 2956 poll_threads(); 2957 CU_ASSERT(g_bserrno == 0); 2958 2959 page_num = _spdk_bs_blobid_to_page(blobid); 2960 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 2961 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 2962 page->crc = 0; 2963 2964 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2965 poll_threads(); 2966 CU_ASSERT(g_bserrno == -EINVAL); 2967 CU_ASSERT(g_blob == NULL); 2968 g_bserrno = 0; 2969 2970 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 2971 
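/* Deleting the blob also has to read its metadata page, so the zeroed CRC
 * above is expected to make the delete fail with -EINVAL as well. */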
poll_threads(); 2972 CU_ASSERT(g_bserrno == -EINVAL); 2973 } 2974 2975 static void 2976 super_block_crc(void) 2977 { 2978 struct spdk_blob_store *bs; 2979 struct spdk_bs_dev *dev; 2980 struct spdk_bs_super_block *super_block; 2981 2982 dev = init_dev(); 2983 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2984 poll_threads(); 2985 CU_ASSERT(g_bserrno == 0); 2986 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2987 bs = g_bs; 2988 2989 spdk_bs_unload(bs, bs_op_complete, NULL); 2990 poll_threads(); 2991 CU_ASSERT(g_bserrno == 0); 2992 g_bs = NULL; 2993 2994 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2995 super_block->crc = 0; 2996 dev = init_dev(); 2997 2998 /* Load an existing blob store */ 2999 g_bserrno = 0; 3000 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3001 poll_threads(); 3002 CU_ASSERT(g_bserrno == -EILSEQ); 3003 } 3004 3005 /* For the blob dirty shutdown test case we run the following sub-test cases: 3006 * 1 Initialize a new blob store and create one super blob with some xattrs, then 3007 * dirty shutdown and reload the blob store and verify the xattrs. 3008 * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown, 3009 * reload the blob store and verify the cluster count. 3010 * 3 Create the second blob and then dirty shutdown, reload the blob store 3011 * and verify the second blob. 3012 * 4 Delete the second blob and then dirty shutdown, reload the blob store 3013 * and verify the second blob is invalid. 3014 * 5 Create the second blob again and also create the third blob, modify the 3015 * md of the second blob, which makes the md invalid, and then dirty shutdown, 3016 * reload the blob store and verify the second blob, which should be invalid, and 3017 * also verify the third blob, which should be correct. 3018 */ 3019 static void 3020 blob_dirty_shutdown(void) 3021 { 3022 int rc; 3023 int index; 3024 struct spdk_blob_store *bs = g_bs; 3025 spdk_blob_id blobid1, blobid2, blobid3; 3026 struct spdk_blob *blob; 3027 uint64_t length; 3028 uint64_t free_clusters; 3029 const void *value; 3030 size_t value_len; 3031 uint32_t page_num; 3032 struct spdk_blob_md_page *page; 3033 struct spdk_blob_opts blob_opts; 3034 3035 /* Create first blob */ 3036 blob = ut_blob_create_and_open(bs, NULL); 3037 blobid1 = spdk_blob_get_id(blob); 3038 3039 /* Set some xattrs */ 3040 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 3041 CU_ASSERT(rc == 0); 3042 3043 length = 2345; 3044 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3045 CU_ASSERT(rc == 0); 3046 3047 /* Put an xattr that fills exactly a single page. 3048 * This results in adding additional pages to MD. 3049 * First is flags and smaller xattr, second the large xattr, 3050 * third are just the extents.
3051 */ 3052 size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - 3053 strlen("large_xattr"); 3054 char *xattr = calloc(xattr_length, sizeof(char)); 3055 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3056 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3057 free(xattr); 3058 SPDK_CU_ASSERT_FATAL(rc == 0); 3059 3060 /* Resize the blob */ 3061 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3062 poll_threads(); 3063 CU_ASSERT(g_bserrno == 0); 3064 3065 /* Set the blob as the super blob */ 3066 spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); 3067 poll_threads(); 3068 CU_ASSERT(g_bserrno == 0); 3069 3070 free_clusters = spdk_bs_free_cluster_count(bs); 3071 3072 spdk_blob_close(blob, blob_op_complete, NULL); 3073 poll_threads(); 3074 CU_ASSERT(g_bserrno == 0); 3075 blob = NULL; 3076 g_blob = NULL; 3077 g_blobid = SPDK_BLOBID_INVALID; 3078 3079 ut_bs_dirty_load(&bs, NULL); 3080 3081 /* Get the super blob */ 3082 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 3083 poll_threads(); 3084 CU_ASSERT(g_bserrno == 0); 3085 CU_ASSERT(blobid1 == g_blobid); 3086 3087 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3088 poll_threads(); 3089 CU_ASSERT(g_bserrno == 0); 3090 CU_ASSERT(g_blob != NULL); 3091 blob = g_blob; 3092 3093 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3094 3095 /* Get the xattrs */ 3096 value = NULL; 3097 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3098 CU_ASSERT(rc == 0); 3099 SPDK_CU_ASSERT_FATAL(value != NULL); 3100 CU_ASSERT(*(uint64_t *)value == length); 3101 CU_ASSERT(value_len == 8); 3102 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3103 3104 /* Resize the blob */ 3105 spdk_blob_resize(blob, 20, blob_op_complete, NULL); 3106 poll_threads(); 3107 CU_ASSERT(g_bserrno == 0); 3108 3109 free_clusters = spdk_bs_free_cluster_count(bs); 3110 3111 spdk_blob_close(blob, blob_op_complete, NULL); 3112 poll_threads(); 3113 CU_ASSERT(g_bserrno == 0); 3114 blob = NULL; 3115 g_blob = NULL; 3116 g_blobid = SPDK_BLOBID_INVALID; 3117 3118 ut_bs_dirty_load(&bs, NULL); 3119 3120 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3121 poll_threads(); 3122 CU_ASSERT(g_bserrno == 0); 3123 CU_ASSERT(g_blob != NULL); 3124 blob = g_blob; 3125 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); 3126 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3127 3128 spdk_blob_close(blob, blob_op_complete, NULL); 3129 poll_threads(); 3130 CU_ASSERT(g_bserrno == 0); 3131 blob = NULL; 3132 g_blob = NULL; 3133 g_blobid = SPDK_BLOBID_INVALID; 3134 3135 /* Create second blob */ 3136 blob = ut_blob_create_and_open(bs, NULL); 3137 blobid2 = spdk_blob_get_id(blob); 3138 3139 /* Set some xattrs */ 3140 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3141 CU_ASSERT(rc == 0); 3142 3143 length = 5432; 3144 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3145 CU_ASSERT(rc == 0); 3146 3147 /* Resize the blob */ 3148 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3149 poll_threads(); 3150 CU_ASSERT(g_bserrno == 0); 3151 3152 free_clusters = spdk_bs_free_cluster_count(bs); 3153 3154 spdk_blob_close(blob, blob_op_complete, NULL); 3155 poll_threads(); 3156 CU_ASSERT(g_bserrno == 0); 3157 blob = NULL; 3158 g_blob = NULL; 3159 g_blobid = SPDK_BLOBID_INVALID; 3160 3161 ut_bs_dirty_load(&bs, NULL); 3162 3163 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3164 poll_threads(); 3165 CU_ASSERT(g_bserrno == 0); 3166 
CU_ASSERT(g_blob != NULL); 3167 blob = g_blob; 3168 3169 /* Get the xattrs */ 3170 value = NULL; 3171 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3172 CU_ASSERT(rc == 0); 3173 SPDK_CU_ASSERT_FATAL(value != NULL); 3174 CU_ASSERT(*(uint64_t *)value == length); 3175 CU_ASSERT(value_len == 8); 3176 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3177 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3178 3179 spdk_blob_close(blob, blob_op_complete, NULL); 3180 poll_threads(); 3181 CU_ASSERT(g_bserrno == 0); 3182 spdk_bs_delete_blob(bs, blobid2, blob_op_complete, NULL); 3183 poll_threads(); 3184 CU_ASSERT(g_bserrno == 0); 3185 3186 free_clusters = spdk_bs_free_cluster_count(bs); 3187 3188 ut_bs_dirty_load(&bs, NULL); 3189 3190 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3191 poll_threads(); 3192 CU_ASSERT(g_bserrno != 0); 3193 CU_ASSERT(g_blob == NULL); 3194 3195 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3196 poll_threads(); 3197 CU_ASSERT(g_bserrno == 0); 3198 CU_ASSERT(g_blob != NULL); 3199 blob = g_blob; 3200 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3201 spdk_blob_close(blob, blob_op_complete, NULL); 3202 poll_threads(); 3203 CU_ASSERT(g_bserrno == 0); 3204 3205 ut_bs_reload(&bs, NULL); 3206 3207 /* Create second blob */ 3208 ut_spdk_blob_opts_init(&blob_opts); 3209 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3210 poll_threads(); 3211 CU_ASSERT(g_bserrno == 0); 3212 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3213 blobid2 = g_blobid; 3214 3215 /* Create third blob */ 3216 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3217 poll_threads(); 3218 CU_ASSERT(g_bserrno == 0); 3219 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3220 blobid3 = g_blobid; 3221 3222 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3223 poll_threads(); 3224 CU_ASSERT(g_bserrno == 0); 3225 CU_ASSERT(g_blob != NULL); 3226 blob = g_blob; 3227 3228 /* Set some xattrs for second blob */ 3229 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3230 CU_ASSERT(rc == 0); 3231 3232 length = 5432; 3233 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3234 CU_ASSERT(rc == 0); 3235 3236 spdk_blob_close(blob, blob_op_complete, NULL); 3237 poll_threads(); 3238 CU_ASSERT(g_bserrno == 0); 3239 blob = NULL; 3240 g_blob = NULL; 3241 g_blobid = SPDK_BLOBID_INVALID; 3242 3243 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3244 poll_threads(); 3245 CU_ASSERT(g_bserrno == 0); 3246 CU_ASSERT(g_blob != NULL); 3247 blob = g_blob; 3248 3249 /* Set some xattrs for third blob */ 3250 rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); 3251 CU_ASSERT(rc == 0); 3252 3253 length = 5432; 3254 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3255 CU_ASSERT(rc == 0); 3256 3257 spdk_blob_close(blob, blob_op_complete, NULL); 3258 poll_threads(); 3259 CU_ASSERT(g_bserrno == 0); 3260 blob = NULL; 3261 g_blob = NULL; 3262 g_blobid = SPDK_BLOBID_INVALID; 3263 3264 /* Mark second blob as invalid */ 3265 page_num = _spdk_bs_blobid_to_page(blobid2); 3266 3267 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3268 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3269 page->sequence_num = 1; 3270 page->crc = _spdk_blob_md_page_calc_crc(page); 3271 3272 free_clusters = spdk_bs_free_cluster_count(bs); 3273 3274 ut_bs_dirty_load(&bs, NULL); 3275 3276 spdk_bs_open_blob(bs, 
blobid2, blob_op_with_handle_complete, NULL); 3277 poll_threads(); 3278 CU_ASSERT(g_bserrno != 0); 3279 CU_ASSERT(g_blob == NULL); 3280 3281 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3282 poll_threads(); 3283 CU_ASSERT(g_bserrno == 0); 3284 CU_ASSERT(g_blob != NULL); 3285 blob = g_blob; 3286 3287 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3288 3289 spdk_blob_close(blob, blob_op_complete, NULL); 3290 poll_threads(); 3291 CU_ASSERT(g_bserrno == 0); 3292 blob = NULL; 3293 g_blob = NULL; 3294 g_blobid = SPDK_BLOBID_INVALID; 3295 } 3296 3297 static void 3298 blob_flags(void) 3299 { 3300 struct spdk_blob_store *bs = g_bs; 3301 spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; 3302 struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; 3303 struct spdk_blob_opts blob_opts; 3304 int rc; 3305 3306 /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */ 3307 blob_invalid = ut_blob_create_and_open(bs, NULL); 3308 blobid_invalid = spdk_blob_get_id(blob_invalid); 3309 3310 blob_data_ro = ut_blob_create_and_open(bs, NULL); 3311 blobid_data_ro = spdk_blob_get_id(blob_data_ro); 3312 3313 ut_spdk_blob_opts_init(&blob_opts); 3314 blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; 3315 blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); 3316 blobid_md_ro = spdk_blob_get_id(blob_md_ro); 3317 CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); 3318 3319 /* Change the size of blob_data_ro to check if flags are serialized 3320 * when blob has non zero number of extents */ 3321 spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); 3322 poll_threads(); 3323 CU_ASSERT(g_bserrno == 0); 3324 3325 /* Set the xattr to check if flags are serialized 3326 * when blob has non zero number of xattrs */ 3327 rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); 3328 CU_ASSERT(rc == 0); 3329 3330 blob_invalid->invalid_flags = (1ULL << 63); 3331 blob_invalid->state = SPDK_BLOB_STATE_DIRTY; 3332 blob_data_ro->data_ro_flags = (1ULL << 62); 3333 blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; 3334 blob_md_ro->md_ro_flags = (1ULL << 61); 3335 blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; 3336 3337 g_bserrno = -1; 3338 spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); 3339 poll_threads(); 3340 CU_ASSERT(g_bserrno == 0); 3341 g_bserrno = -1; 3342 spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); 3343 poll_threads(); 3344 CU_ASSERT(g_bserrno == 0); 3345 g_bserrno = -1; 3346 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3347 poll_threads(); 3348 CU_ASSERT(g_bserrno == 0); 3349 3350 g_bserrno = -1; 3351 spdk_blob_close(blob_invalid, blob_op_complete, NULL); 3352 poll_threads(); 3353 CU_ASSERT(g_bserrno == 0); 3354 blob_invalid = NULL; 3355 g_bserrno = -1; 3356 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3357 poll_threads(); 3358 CU_ASSERT(g_bserrno == 0); 3359 blob_data_ro = NULL; 3360 g_bserrno = -1; 3361 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3362 poll_threads(); 3363 CU_ASSERT(g_bserrno == 0); 3364 blob_md_ro = NULL; 3365 3366 g_blob = NULL; 3367 g_blobid = SPDK_BLOBID_INVALID; 3368 3369 ut_bs_reload(&bs, NULL); 3370 3371 g_blob = NULL; 3372 g_bserrno = 0; 3373 spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); 3374 poll_threads(); 3375 CU_ASSERT(g_bserrno != 0); 3376 CU_ASSERT(g_blob == NULL); 3377 3378 g_blob = NULL; 3379 g_bserrno = -1; 3380 spdk_bs_open_blob(bs, blobid_data_ro, 
blob_op_with_handle_complete, NULL); 3381 poll_threads(); 3382 CU_ASSERT(g_bserrno == 0); 3383 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3384 blob_data_ro = g_blob; 3385 /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */ 3386 CU_ASSERT(blob_data_ro->data_ro == true); 3387 CU_ASSERT(blob_data_ro->md_ro == true); 3388 CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); 3389 3390 g_blob = NULL; 3391 g_bserrno = -1; 3392 spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); 3393 poll_threads(); 3394 CU_ASSERT(g_bserrno == 0); 3395 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3396 blob_md_ro = g_blob; 3397 CU_ASSERT(blob_md_ro->data_ro == false); 3398 CU_ASSERT(blob_md_ro->md_ro == true); 3399 3400 g_bserrno = -1; 3401 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3402 poll_threads(); 3403 CU_ASSERT(g_bserrno == 0); 3404 3405 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3406 poll_threads(); 3407 CU_ASSERT(g_bserrno == 0); 3408 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3409 poll_threads(); 3410 CU_ASSERT(g_bserrno == 0); 3411 } 3412 3413 static void 3414 bs_version(void) 3415 { 3416 struct spdk_bs_super_block *super; 3417 struct spdk_blob_store *bs = g_bs; 3418 struct spdk_bs_dev *dev; 3419 struct spdk_blob *blob; 3420 struct spdk_blob_opts blob_opts; 3421 spdk_blob_id blobid; 3422 3423 /* Unload the blob store */ 3424 spdk_bs_unload(bs, bs_op_complete, NULL); 3425 poll_threads(); 3426 CU_ASSERT(g_bserrno == 0); 3427 g_bs = NULL; 3428 3429 /* 3430 * Change the bs version on disk. This will allow us to 3431 * test that the version does not get modified automatically 3432 * when loading and unloading the blobstore. 3433 */ 3434 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 3435 CU_ASSERT(super->version == SPDK_BS_VERSION); 3436 CU_ASSERT(super->clean == 1); 3437 super->version = 2; 3438 /* 3439 * Version 2 metadata does not have a used blobid mask, so clear 3440 * those fields in the super block and zero the corresponding 3441 * region on "disk". We will use this to ensure blob IDs are 3442 * correctly reconstructed. 3443 */ 3444 memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, 3445 super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); 3446 super->used_blobid_mask_start = 0; 3447 super->used_blobid_mask_len = 0; 3448 super->crc = _spdk_blob_md_page_calc_crc(super); 3449 3450 /* Load an existing blob store */ 3451 dev = init_dev(); 3452 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3453 poll_threads(); 3454 CU_ASSERT(g_bserrno == 0); 3455 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3456 CU_ASSERT(super->clean == 1); 3457 bs = g_bs; 3458 3459 /* 3460 * Create a blob - just to make sure that when we unload it 3461 * results in writing the super block (since metadata pages 3462 * were allocated. 
3463 */ 3464 ut_spdk_blob_opts_init(&blob_opts); 3465 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3466 poll_threads(); 3467 CU_ASSERT(g_bserrno == 0); 3468 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3469 blobid = g_blobid; 3470 3471 /* Unload the blob store */ 3472 spdk_bs_unload(bs, bs_op_complete, NULL); 3473 poll_threads(); 3474 CU_ASSERT(g_bserrno == 0); 3475 g_bs = NULL; 3476 CU_ASSERT(super->version == 2); 3477 CU_ASSERT(super->used_blobid_mask_start == 0); 3478 CU_ASSERT(super->used_blobid_mask_len == 0); 3479 3480 dev = init_dev(); 3481 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3482 poll_threads(); 3483 CU_ASSERT(g_bserrno == 0); 3484 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3485 bs = g_bs; 3486 3487 g_blob = NULL; 3488 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3489 poll_threads(); 3490 CU_ASSERT(g_bserrno == 0); 3491 CU_ASSERT(g_blob != NULL); 3492 blob = g_blob; 3493 3494 spdk_blob_close(blob, blob_op_complete, NULL); 3495 poll_threads(); 3496 CU_ASSERT(g_bserrno == 0); 3497 3498 CU_ASSERT(super->version == 2); 3499 CU_ASSERT(super->used_blobid_mask_start == 0); 3500 CU_ASSERT(super->used_blobid_mask_len == 0); 3501 } 3502 3503 static void 3504 blob_set_xattrs(void) 3505 { 3506 struct spdk_blob_store *bs = g_bs; 3507 struct spdk_blob *blob; 3508 struct spdk_blob_opts opts; 3509 const void *value; 3510 size_t value_len; 3511 char *xattr; 3512 size_t xattr_length; 3513 int rc; 3514 3515 /* Create blob with extra attributes */ 3516 ut_spdk_blob_opts_init(&opts); 3517 3518 opts.xattrs.names = g_xattr_names; 3519 opts.xattrs.get_value = _get_xattr_value; 3520 opts.xattrs.count = 3; 3521 opts.xattrs.ctx = &g_ctx; 3522 3523 blob = ut_blob_create_and_open(bs, &opts); 3524 3525 /* Get the xattrs */ 3526 value = NULL; 3527 3528 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 3529 CU_ASSERT(rc == 0); 3530 SPDK_CU_ASSERT_FATAL(value != NULL); 3531 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 3532 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 3533 3534 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 3535 CU_ASSERT(rc == 0); 3536 SPDK_CU_ASSERT_FATAL(value != NULL); 3537 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 3538 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 3539 3540 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 3541 CU_ASSERT(rc == 0); 3542 SPDK_CU_ASSERT_FATAL(value != NULL); 3543 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 3544 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 3545 3546 /* Try to get non existing attribute */ 3547 3548 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 3549 CU_ASSERT(rc == -ENOENT); 3550 3551 /* Try xattr exceeding maximum length of descriptor in single page */ 3552 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 3553 strlen("large_xattr") + 1; 3554 xattr = calloc(xattr_length, sizeof(char)); 3555 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3556 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3557 free(xattr); 3558 SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); 3559 3560 spdk_blob_close(blob, blob_op_complete, NULL); 3561 poll_threads(); 3562 CU_ASSERT(g_bserrno == 0); 3563 blob = NULL; 3564 g_blob = NULL; 3565 g_blobid = SPDK_BLOBID_INVALID; 3566 3567 /* NULL callback */ 3568 ut_spdk_blob_opts_init(&opts); 3569 opts.xattrs.names = g_xattr_names; 3570 
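/* get_value is deliberately left NULL while the xattr count is non-zero;
 * the blob creation below is expected to be rejected with -EINVAL. */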
opts.xattrs.get_value = NULL; 3571 opts.xattrs.count = 1; 3572 opts.xattrs.ctx = &g_ctx; 3573 3574 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3575 poll_threads(); 3576 CU_ASSERT(g_bserrno == -EINVAL); 3577 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3578 3579 /* NULL values */ 3580 ut_spdk_blob_opts_init(&opts); 3581 opts.xattrs.names = g_xattr_names; 3582 opts.xattrs.get_value = _get_xattr_value_null; 3583 opts.xattrs.count = 1; 3584 opts.xattrs.ctx = NULL; 3585 3586 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3587 poll_threads(); 3588 CU_ASSERT(g_bserrno == -EINVAL); 3589 } 3590 3591 static void 3592 blob_thin_prov_alloc(void) 3593 { 3594 struct spdk_blob_store *bs = g_bs; 3595 struct spdk_blob *blob; 3596 struct spdk_blob_opts opts; 3597 spdk_blob_id blobid; 3598 uint64_t free_clusters; 3599 3600 free_clusters = spdk_bs_free_cluster_count(bs); 3601 3602 /* Set blob as thin provisioned */ 3603 ut_spdk_blob_opts_init(&opts); 3604 opts.thin_provision = true; 3605 3606 blob = ut_blob_create_and_open(bs, &opts); 3607 blobid = spdk_blob_get_id(blob); 3608 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3609 3610 CU_ASSERT(blob->active.num_clusters == 0); 3611 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 3612 3613 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3614 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3615 poll_threads(); 3616 CU_ASSERT(g_bserrno == 0); 3617 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3618 CU_ASSERT(blob->active.num_clusters == 5); 3619 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 3620 3621 /* Grow it to 1TB - still unallocated */ 3622 spdk_blob_resize(blob, 262144, blob_op_complete, NULL); 3623 poll_threads(); 3624 CU_ASSERT(g_bserrno == 0); 3625 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3626 CU_ASSERT(blob->active.num_clusters == 262144); 3627 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3628 3629 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3630 poll_threads(); 3631 CU_ASSERT(g_bserrno == 0); 3632 /* Sync must not change anything */ 3633 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3634 CU_ASSERT(blob->active.num_clusters == 262144); 3635 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3636 /* Since clusters are not allocated, 3637 * number of metadata pages is expected to be minimal. 
3638 */ 3639 CU_ASSERT(blob->active.num_pages == 1); 3640 3641 /* Shrink the blob to 3 clusters - still unallocated */ 3642 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 3643 poll_threads(); 3644 CU_ASSERT(g_bserrno == 0); 3645 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3646 CU_ASSERT(blob->active.num_clusters == 3); 3647 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3648 3649 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3650 poll_threads(); 3651 CU_ASSERT(g_bserrno == 0); 3652 /* Sync must not change anything */ 3653 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3654 CU_ASSERT(blob->active.num_clusters == 3); 3655 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3656 3657 spdk_blob_close(blob, blob_op_complete, NULL); 3658 poll_threads(); 3659 CU_ASSERT(g_bserrno == 0); 3660 3661 ut_bs_reload(&bs, NULL); 3662 3663 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3664 poll_threads(); 3665 CU_ASSERT(g_bserrno == 0); 3666 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3667 blob = g_blob; 3668 3669 /* Check that clusters allocation and size is still the same */ 3670 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3671 CU_ASSERT(blob->active.num_clusters == 3); 3672 3673 spdk_blob_close(blob, blob_op_complete, NULL); 3674 poll_threads(); 3675 CU_ASSERT(g_bserrno == 0); 3676 3677 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 3678 poll_threads(); 3679 CU_ASSERT(g_bserrno == 0); 3680 } 3681 3682 static void 3683 blob_insert_cluster_msg(void) 3684 { 3685 struct spdk_blob_store *bs = g_bs; 3686 struct spdk_blob *blob; 3687 struct spdk_blob_opts opts; 3688 spdk_blob_id blobid; 3689 uint64_t free_clusters; 3690 uint64_t new_cluster = 0; 3691 uint32_t cluster_num = 3; 3692 uint32_t extent_page = 0; 3693 3694 free_clusters = spdk_bs_free_cluster_count(bs); 3695 3696 /* Set blob as thin provisioned */ 3697 ut_spdk_blob_opts_init(&opts); 3698 opts.thin_provision = true; 3699 opts.num_clusters = 4; 3700 3701 blob = ut_blob_create_and_open(bs, &opts); 3702 blobid = spdk_blob_get_id(blob); 3703 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3704 3705 CU_ASSERT(blob->active.num_clusters == 4); 3706 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); 3707 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3708 3709 /* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread. 3710 * This is to simulate behaviour when cluster is allocated after blob creation. 3711 * Such as _spdk_bs_allocate_and_copy_cluster(). 
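 * The cluster is only claimed here; blob->active.clusters is not updated until the insert message is processed on the metadata thread.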
*/ 3712 _spdk_bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); 3713 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3714 3715 _spdk_blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, 3716 blob_op_complete, NULL); 3717 poll_threads(); 3718 3719 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3720 3721 spdk_blob_close(blob, blob_op_complete, NULL); 3722 poll_threads(); 3723 CU_ASSERT(g_bserrno == 0); 3724 3725 ut_bs_reload(&bs, NULL); 3726 3727 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3728 poll_threads(); 3729 CU_ASSERT(g_bserrno == 0); 3730 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3731 blob = g_blob; 3732 3733 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3734 3735 spdk_blob_close(blob, blob_op_complete, NULL); 3736 poll_threads(); 3737 CU_ASSERT(g_bserrno == 0); 3738 3739 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 3740 poll_threads(); 3741 CU_ASSERT(g_bserrno == 0); 3742 } 3743 3744 static void 3745 blob_thin_prov_rw(void) 3746 { 3747 static const uint8_t zero[10 * 4096] = { 0 }; 3748 struct spdk_blob_store *bs = g_bs; 3749 struct spdk_blob *blob; 3750 struct spdk_io_channel *channel, *channel_thread1; 3751 struct spdk_blob_opts opts; 3752 spdk_blob_id blobid; 3753 uint64_t free_clusters; 3754 uint64_t page_size; 3755 uint8_t payload_read[10 * 4096]; 3756 uint8_t payload_write[10 * 4096]; 3757 uint64_t write_bytes; 3758 uint64_t read_bytes; 3759 3760 free_clusters = spdk_bs_free_cluster_count(bs); 3761 page_size = spdk_bs_get_page_size(bs); 3762 3763 channel = spdk_bs_alloc_io_channel(bs); 3764 CU_ASSERT(channel != NULL); 3765 3766 ut_spdk_blob_opts_init(&opts); 3767 opts.thin_provision = true; 3768 3769 blob = ut_blob_create_and_open(bs, &opts); 3770 blobid = spdk_blob_get_id(blob); 3771 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3772 3773 CU_ASSERT(blob->active.num_clusters == 0); 3774 3775 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3776 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3777 poll_threads(); 3778 CU_ASSERT(g_bserrno == 0); 3779 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3780 CU_ASSERT(blob->active.num_clusters == 5); 3781 3782 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3783 poll_threads(); 3784 CU_ASSERT(g_bserrno == 0); 3785 /* Sync must not change anything */ 3786 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3787 CU_ASSERT(blob->active.num_clusters == 5); 3788 3789 /* Payload should be all zeros from unallocated clusters */ 3790 memset(payload_read, 0xFF, sizeof(payload_read)); 3791 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3792 poll_threads(); 3793 CU_ASSERT(g_bserrno == 0); 3794 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3795 3796 write_bytes = g_dev_write_bytes; 3797 read_bytes = g_dev_read_bytes; 3798 3799 /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ 3800 set_thread(1); 3801 channel_thread1 = spdk_bs_alloc_io_channel(bs); 3802 CU_ASSERT(channel_thread1 != NULL); 3803 memset(payload_write, 0xE5, sizeof(payload_write)); 3804 spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); 3805 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3806 /* Perform write on thread 0. That will try to allocate cluster, 3807 * but fail due to another thread issuing the cluster allocation first. 
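 * Both writes claim a cluster up front (the free count drops by two), but once the messages are processed the losing claim is released and only one cluster stays allocated.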
*/ 3808 set_thread(0); 3809 memset(payload_write, 0xE5, sizeof(payload_write)); 3810 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 3811 CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); 3812 poll_threads(); 3813 CU_ASSERT(g_bserrno == 0); 3814 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3815 /* For thin-provisioned blob we need to write 20 pages plus one page metadata and 3816 * read 0 bytes */ 3817 if (g_use_extent_table) { 3818 /* Add one more page for EXTENT_PAGE write */ 3819 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); 3820 } else { 3821 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); 3822 } 3823 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3824 3825 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3826 poll_threads(); 3827 CU_ASSERT(g_bserrno == 0); 3828 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3829 3830 spdk_blob_close(blob, blob_op_complete, NULL); 3831 poll_threads(); 3832 CU_ASSERT(g_bserrno == 0); 3833 3834 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 3835 poll_threads(); 3836 CU_ASSERT(g_bserrno == 0); 3837 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3838 3839 set_thread(1); 3840 spdk_bs_free_io_channel(channel_thread1); 3841 set_thread(0); 3842 spdk_bs_free_io_channel(channel); 3843 poll_threads(); 3844 g_blob = NULL; 3845 g_blobid = 0; 3846 } 3847 3848 static void 3849 blob_thin_prov_rle(void) 3850 { 3851 static const uint8_t zero[10 * 4096] = { 0 }; 3852 struct spdk_blob_store *bs = g_bs; 3853 struct spdk_blob *blob; 3854 struct spdk_io_channel *channel; 3855 struct spdk_blob_opts opts; 3856 spdk_blob_id blobid; 3857 uint64_t free_clusters; 3858 uint64_t page_size; 3859 uint8_t payload_read[10 * 4096]; 3860 uint8_t payload_write[10 * 4096]; 3861 uint64_t write_bytes; 3862 uint64_t read_bytes; 3863 uint64_t io_unit; 3864 3865 free_clusters = spdk_bs_free_cluster_count(bs); 3866 page_size = spdk_bs_get_page_size(bs); 3867 3868 ut_spdk_blob_opts_init(&opts); 3869 opts.thin_provision = true; 3870 opts.num_clusters = 5; 3871 3872 blob = ut_blob_create_and_open(bs, &opts); 3873 blobid = spdk_blob_get_id(blob); 3874 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3875 3876 channel = spdk_bs_alloc_io_channel(bs); 3877 CU_ASSERT(channel != NULL); 3878 3879 /* Target specifically second cluster in a blob as first allocation */ 3880 io_unit = _spdk_bs_cluster_to_page(bs, 1) * _spdk_bs_io_unit_per_page(bs); 3881 3882 /* Payload should be all zeros from unallocated clusters */ 3883 memset(payload_read, 0xFF, sizeof(payload_read)); 3884 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3885 poll_threads(); 3886 CU_ASSERT(g_bserrno == 0); 3887 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3888 3889 write_bytes = g_dev_write_bytes; 3890 read_bytes = g_dev_read_bytes; 3891 3892 /* Issue write to second cluster in a blob */ 3893 memset(payload_write, 0xE5, sizeof(payload_write)); 3894 spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); 3895 poll_threads(); 3896 CU_ASSERT(g_bserrno == 0); 3897 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3898 /* For thin-provisioned blob we need to write 10 pages plus one page metadata and 3899 * read 0 bytes */ 3900 if (g_use_extent_table) { 3901 /* Add one more page for EXTENT_PAGE write */ 3902 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); 3903 } else { 
3904 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); 3905 } 3906 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3907 3908 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3909 poll_threads(); 3910 CU_ASSERT(g_bserrno == 0); 3911 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3912 3913 spdk_bs_free_io_channel(channel); 3914 poll_threads(); 3915 3916 spdk_blob_close(blob, blob_op_complete, NULL); 3917 poll_threads(); 3918 CU_ASSERT(g_bserrno == 0); 3919 3920 ut_bs_reload(&bs, NULL); 3921 3922 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3923 poll_threads(); 3924 CU_ASSERT(g_bserrno == 0); 3925 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3926 blob = g_blob; 3927 3928 channel = spdk_bs_alloc_io_channel(bs); 3929 CU_ASSERT(channel != NULL); 3930 3931 /* Read second cluster after blob reload to confirm data written */ 3932 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 3933 poll_threads(); 3934 CU_ASSERT(g_bserrno == 0); 3935 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3936 3937 spdk_bs_free_io_channel(channel); 3938 poll_threads(); 3939 3940 spdk_blob_close(blob, blob_op_complete, NULL); 3941 poll_threads(); 3942 CU_ASSERT(g_bserrno == 0); 3943 3944 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 3945 poll_threads(); 3946 CU_ASSERT(g_bserrno == 0); 3947 } 3948 3949 static void 3950 blob_thin_prov_rw_iov(void) 3951 { 3952 static const uint8_t zero[10 * 4096] = { 0 }; 3953 struct spdk_blob_store *bs = g_bs; 3954 struct spdk_blob *blob; 3955 struct spdk_io_channel *channel; 3956 struct spdk_blob_opts opts; 3957 uint64_t free_clusters; 3958 uint8_t payload_read[10 * 4096]; 3959 uint8_t payload_write[10 * 4096]; 3960 struct iovec iov_read[3]; 3961 struct iovec iov_write[3]; 3962 3963 free_clusters = spdk_bs_free_cluster_count(bs); 3964 3965 channel = spdk_bs_alloc_io_channel(bs); 3966 CU_ASSERT(channel != NULL); 3967 3968 ut_spdk_blob_opts_init(&opts); 3969 opts.thin_provision = true; 3970 3971 blob = ut_blob_create_and_open(bs, &opts); 3972 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3973 3974 CU_ASSERT(blob->active.num_clusters == 0); 3975 3976 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. 
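 * Resizing a thin-provisioned blob only extends its logical cluster map, so the free cluster count must stay unchanged.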
*/ 3977 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3978 poll_threads(); 3979 CU_ASSERT(g_bserrno == 0); 3980 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3981 CU_ASSERT(blob->active.num_clusters == 5); 3982 3983 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3984 poll_threads(); 3985 CU_ASSERT(g_bserrno == 0); 3986 /* Sync must not change anything */ 3987 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3988 CU_ASSERT(blob->active.num_clusters == 5); 3989 3990 /* Payload should be all zeros from unallocated clusters */ 3991 memset(payload_read, 0xAA, sizeof(payload_read)); 3992 iov_read[0].iov_base = payload_read; 3993 iov_read[0].iov_len = 3 * 4096; 3994 iov_read[1].iov_base = payload_read + 3 * 4096; 3995 iov_read[1].iov_len = 4 * 4096; 3996 iov_read[2].iov_base = payload_read + 7 * 4096; 3997 iov_read[2].iov_len = 3 * 4096; 3998 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 3999 poll_threads(); 4000 CU_ASSERT(g_bserrno == 0); 4001 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4002 4003 memset(payload_write, 0xE5, sizeof(payload_write)); 4004 iov_write[0].iov_base = payload_write; 4005 iov_write[0].iov_len = 1 * 4096; 4006 iov_write[1].iov_base = payload_write + 1 * 4096; 4007 iov_write[1].iov_len = 5 * 4096; 4008 iov_write[2].iov_base = payload_write + 6 * 4096; 4009 iov_write[2].iov_len = 4 * 4096; 4010 4011 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4012 poll_threads(); 4013 CU_ASSERT(g_bserrno == 0); 4014 4015 memset(payload_read, 0xAA, sizeof(payload_read)); 4016 iov_read[0].iov_base = payload_read; 4017 iov_read[0].iov_len = 3 * 4096; 4018 iov_read[1].iov_base = payload_read + 3 * 4096; 4019 iov_read[1].iov_len = 4 * 4096; 4020 iov_read[2].iov_base = payload_read + 7 * 4096; 4021 iov_read[2].iov_len = 3 * 4096; 4022 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4023 poll_threads(); 4024 CU_ASSERT(g_bserrno == 0); 4025 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4026 4027 spdk_blob_close(blob, blob_op_complete, NULL); 4028 poll_threads(); 4029 CU_ASSERT(g_bserrno == 0); 4030 4031 spdk_bs_free_io_channel(channel); 4032 poll_threads(); 4033 4034 g_blob = NULL; 4035 g_blobid = 0; 4036 } 4037 4038 struct iter_ctx { 4039 int current_iter; 4040 spdk_blob_id blobid[4]; 4041 }; 4042 4043 static void 4044 test_iter(void *arg, struct spdk_blob *blob, int bserrno) 4045 { 4046 struct iter_ctx *iter_ctx = arg; 4047 spdk_blob_id blobid; 4048 4049 CU_ASSERT(bserrno == 0); 4050 blobid = spdk_blob_get_id(blob); 4051 CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); 4052 } 4053 4054 static void 4055 bs_load_iter(void) 4056 { 4057 struct spdk_blob_store *bs; 4058 struct spdk_bs_dev *dev; 4059 struct iter_ctx iter_ctx = { 0 }; 4060 struct spdk_blob *blob; 4061 int i, rc; 4062 struct spdk_bs_opts opts; 4063 4064 dev = init_dev(); 4065 spdk_bs_opts_init(&opts); 4066 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4067 4068 /* Initialize a new blob store */ 4069 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 4070 poll_threads(); 4071 CU_ASSERT(g_bserrno == 0); 4072 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4073 bs = g_bs; 4074 4075 for (i = 0; i < 4; i++) { 4076 blob = ut_blob_create_and_open(bs, NULL); 4077 iter_ctx.blobid[i] = spdk_blob_get_id(blob); 4078 4079 /* Just save the blobid as an xattr for testing purposes. 
*/ 4080 rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); 4081 CU_ASSERT(rc == 0); 4082 4083 /* Resize the blob */ 4084 spdk_blob_resize(blob, i, blob_op_complete, NULL); 4085 poll_threads(); 4086 CU_ASSERT(g_bserrno == 0); 4087 4088 spdk_blob_close(blob, blob_op_complete, NULL); 4089 poll_threads(); 4090 CU_ASSERT(g_bserrno == 0); 4091 } 4092 4093 g_bserrno = -1; 4094 spdk_bs_unload(bs, bs_op_complete, NULL); 4095 poll_threads(); 4096 CU_ASSERT(g_bserrno == 0); 4097 4098 dev = init_dev(); 4099 spdk_bs_opts_init(&opts); 4100 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4101 opts.iter_cb_fn = test_iter; 4102 opts.iter_cb_arg = &iter_ctx; 4103 4104 /* Test blob iteration during load after a clean shutdown. */ 4105 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4106 poll_threads(); 4107 CU_ASSERT(g_bserrno == 0); 4108 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4109 bs = g_bs; 4110 4111 /* Dirty shutdown */ 4112 _spdk_bs_free(bs); 4113 4114 dev = init_dev(); 4115 spdk_bs_opts_init(&opts); 4116 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4117 opts.iter_cb_fn = test_iter; 4118 iter_ctx.current_iter = 0; 4119 opts.iter_cb_arg = &iter_ctx; 4120 4121 /* Test blob iteration during load after a dirty shutdown. */ 4122 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4123 poll_threads(); 4124 CU_ASSERT(g_bserrno == 0); 4125 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4126 bs = g_bs; 4127 4128 spdk_bs_unload(bs, bs_op_complete, NULL); 4129 poll_threads(); 4130 CU_ASSERT(g_bserrno == 0); 4131 g_bs = NULL; 4132 } 4133 4134 static void 4135 blob_snapshot_rw(void) 4136 { 4137 static const uint8_t zero[10 * 4096] = { 0 }; 4138 struct spdk_blob_store *bs = g_bs; 4139 struct spdk_blob *blob, *snapshot; 4140 struct spdk_io_channel *channel; 4141 struct spdk_blob_opts opts; 4142 spdk_blob_id blobid, snapshotid; 4143 uint64_t free_clusters; 4144 uint64_t cluster_size; 4145 uint64_t page_size; 4146 uint8_t payload_read[10 * 4096]; 4147 uint8_t payload_write[10 * 4096]; 4148 uint64_t write_bytes; 4149 uint64_t read_bytes; 4150 4151 free_clusters = spdk_bs_free_cluster_count(bs); 4152 cluster_size = spdk_bs_get_cluster_size(bs); 4153 page_size = spdk_bs_get_page_size(bs); 4154 4155 channel = spdk_bs_alloc_io_channel(bs); 4156 CU_ASSERT(channel != NULL); 4157 4158 ut_spdk_blob_opts_init(&opts); 4159 opts.thin_provision = true; 4160 opts.num_clusters = 5; 4161 4162 blob = ut_blob_create_and_open(bs, &opts); 4163 blobid = spdk_blob_get_id(blob); 4164 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4165 4166 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4167 4168 memset(payload_read, 0xFF, sizeof(payload_read)); 4169 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4170 poll_threads(); 4171 CU_ASSERT(g_bserrno == 0); 4172 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4173 4174 memset(payload_write, 0xE5, sizeof(payload_write)); 4175 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4176 poll_threads(); 4177 CU_ASSERT(g_bserrno == 0); 4178 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4179 4180 /* Create snapshot from blob */ 4181 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4182 poll_threads(); 4183 CU_ASSERT(g_bserrno == 0); 4184 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4185 snapshotid = g_blobid; 4186 4187 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4188 
poll_threads(); 4189 CU_ASSERT(g_bserrno == 0); 4190 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4191 snapshot = g_blob; 4192 CU_ASSERT(snapshot->data_ro == true); 4193 CU_ASSERT(snapshot->md_ro == true); 4194 4195 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4196 4197 write_bytes = g_dev_write_bytes; 4198 read_bytes = g_dev_read_bytes; 4199 4200 memset(payload_write, 0xAA, sizeof(payload_write)); 4201 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4202 poll_threads(); 4203 CU_ASSERT(g_bserrno == 0); 4204 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4205 4206 /* For a clone we need to allocate and copy one cluster, update one page of metadata 4207 * and then write 10 pages of payload. 4208 */ 4209 if (g_use_extent_table) { 4210 /* Add one more page for EXTENT_PAGE write */ 4211 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); 4212 } else { 4213 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); 4214 } 4215 CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); 4216 4217 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4218 poll_threads(); 4219 CU_ASSERT(g_bserrno == 0); 4220 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4221 4222 /* Data on snapshot should not change after write to clone */ 4223 memset(payload_write, 0xE5, sizeof(payload_write)); 4224 spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); 4225 poll_threads(); 4226 CU_ASSERT(g_bserrno == 0); 4227 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4228 4229 spdk_blob_close(blob, blob_op_complete, NULL); 4230 poll_threads(); 4231 CU_ASSERT(g_bserrno == 0); 4232 4233 spdk_blob_close(snapshot, blob_op_complete, NULL); 4234 poll_threads(); 4235 CU_ASSERT(g_bserrno == 0); 4236 4237 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 4238 poll_threads(); 4239 CU_ASSERT(g_bserrno == 0); 4240 4241 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4242 poll_threads(); 4243 CU_ASSERT(g_bserrno == 0); 4244 4245 spdk_bs_free_io_channel(channel); 4246 poll_threads(); 4247 g_blob = NULL; 4248 g_blobid = 0; 4249 } 4250 4251 static void 4252 blob_snapshot_rw_iov(void) 4253 { 4254 static const uint8_t zero[10 * 4096] = { 0 }; 4255 struct spdk_blob_store *bs = g_bs; 4256 struct spdk_blob *blob, *snapshot; 4257 struct spdk_io_channel *channel; 4258 struct spdk_blob_opts opts; 4259 spdk_blob_id blobid, snapshotid; 4260 uint64_t free_clusters; 4261 uint8_t payload_read[10 * 4096]; 4262 uint8_t payload_write[10 * 4096]; 4263 struct iovec iov_read[3]; 4264 struct iovec iov_write[3]; 4265 4266 free_clusters = spdk_bs_free_cluster_count(bs); 4267 4268 channel = spdk_bs_alloc_io_channel(bs); 4269 CU_ASSERT(channel != NULL); 4270 4271 ut_spdk_blob_opts_init(&opts); 4272 opts.thin_provision = true; 4273 opts.num_clusters = 5; 4274 4275 blob = ut_blob_create_and_open(bs, &opts); 4276 blobid = spdk_blob_get_id(blob); 4277 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4278 4279 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4280 4281 /* Create snapshot from blob */ 4282 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4283 poll_threads(); 4284 CU_ASSERT(g_bserrno == 0); 4285 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4286 snapshotid = g_blobid; 4287 4288 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4289 poll_threads(); 4290 CU_ASSERT(g_bserrno == 0); 4291 
SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4292 snapshot = g_blob; 4293 CU_ASSERT(snapshot->data_ro == true); 4294 CU_ASSERT(snapshot->md_ro == true); 4295 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4296 4297 /* Payload should be all zeros from unallocated clusters */ 4298 memset(payload_read, 0xAA, sizeof(payload_read)); 4299 iov_read[0].iov_base = payload_read; 4300 iov_read[0].iov_len = 3 * 4096; 4301 iov_read[1].iov_base = payload_read + 3 * 4096; 4302 iov_read[1].iov_len = 4 * 4096; 4303 iov_read[2].iov_base = payload_read + 7 * 4096; 4304 iov_read[2].iov_len = 3 * 4096; 4305 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4306 poll_threads(); 4307 CU_ASSERT(g_bserrno == 0); 4308 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4309 4310 memset(payload_write, 0xE5, sizeof(payload_write)); 4311 iov_write[0].iov_base = payload_write; 4312 iov_write[0].iov_len = 1 * 4096; 4313 iov_write[1].iov_base = payload_write + 1 * 4096; 4314 iov_write[1].iov_len = 5 * 4096; 4315 iov_write[2].iov_base = payload_write + 6 * 4096; 4316 iov_write[2].iov_len = 4 * 4096; 4317 4318 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4319 poll_threads(); 4320 CU_ASSERT(g_bserrno == 0); 4321 4322 memset(payload_read, 0xAA, sizeof(payload_read)); 4323 iov_read[0].iov_base = payload_read; 4324 iov_read[0].iov_len = 3 * 4096; 4325 iov_read[1].iov_base = payload_read + 3 * 4096; 4326 iov_read[1].iov_len = 4 * 4096; 4327 iov_read[2].iov_base = payload_read + 7 * 4096; 4328 iov_read[2].iov_len = 3 * 4096; 4329 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4330 poll_threads(); 4331 CU_ASSERT(g_bserrno == 0); 4332 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4333 4334 spdk_blob_close(blob, blob_op_complete, NULL); 4335 poll_threads(); 4336 CU_ASSERT(g_bserrno == 0); 4337 4338 spdk_blob_close(snapshot, blob_op_complete, NULL); 4339 poll_threads(); 4340 CU_ASSERT(g_bserrno == 0); 4341 4342 spdk_bs_free_io_channel(channel); 4343 poll_threads(); 4344 4345 g_blob = NULL; 4346 g_blobid = 0; 4347 } 4348 4349 /** 4350 * Inflate / decouple parent rw unit tests. 4351 * 4352 * -------------- 4353 * original blob: 0 1 2 3 4 4354 * ,---------+---------+---------+---------+---------. 4355 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4356 * +---------+---------+---------+---------+---------+ 4357 * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | 4358 * +---------+---------+---------+---------+---------+ 4359 * blob | - |zzzzzzzzz| - | - | - | 4360 * '---------+---------+---------+---------+---------' 4361 * . . . . . . 4362 * -------- . . . . . . 4363 * inflate: . . . . . . 4364 * ,---------+---------+---------+---------+---------. 4365 * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| 4366 * '---------+---------+---------+---------+---------' 4367 * 4368 * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency 4369 * on snapshot2 and snapshot removed . . . 4370 * . . . . . . 4371 * ---------------- . . . . . . 4372 * decouple parent: . . . . . . 4373 * ,---------+---------+---------+---------+---------. 4374 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4375 * +---------+---------+---------+---------+---------+ 4376 * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | 4377 * '---------+---------+---------+---------+---------' 4378 * 4379 * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency 4380 * on snapshot2 removed and on snapshot still exists. 
Snapshot2 4381 * should remain a clone of snapshot. 4382 */ 4383 static void 4384 _blob_inflate_rw(bool decouple_parent) 4385 { 4386 struct spdk_blob_store *bs = g_bs; 4387 struct spdk_blob *blob, *snapshot, *snapshot2; 4388 struct spdk_io_channel *channel; 4389 struct spdk_blob_opts opts; 4390 spdk_blob_id blobid, snapshotid, snapshot2id; 4391 uint64_t free_clusters; 4392 uint64_t cluster_size; 4393 4394 uint64_t payload_size; 4395 uint8_t *payload_read; 4396 uint8_t *payload_write; 4397 uint8_t *payload_clone; 4398 4399 uint64_t pages_per_cluster; 4400 uint64_t pages_per_payload; 4401 4402 int i; 4403 spdk_blob_id ids[2]; 4404 size_t count; 4405 4406 free_clusters = spdk_bs_free_cluster_count(bs); 4407 cluster_size = spdk_bs_get_cluster_size(bs); 4408 pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); 4409 pages_per_payload = pages_per_cluster * 5; 4410 4411 payload_size = cluster_size * 5; 4412 4413 payload_read = malloc(payload_size); 4414 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 4415 4416 payload_write = malloc(payload_size); 4417 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 4418 4419 payload_clone = malloc(payload_size); 4420 SPDK_CU_ASSERT_FATAL(payload_clone != NULL); 4421 4422 channel = spdk_bs_alloc_io_channel(bs); 4423 SPDK_CU_ASSERT_FATAL(channel != NULL); 4424 4425 /* Create blob */ 4426 ut_spdk_blob_opts_init(&opts); 4427 opts.thin_provision = true; 4428 opts.num_clusters = 5; 4429 4430 blob = ut_blob_create_and_open(bs, &opts); 4431 blobid = spdk_blob_get_id(blob); 4432 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4433 4434 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4435 4436 /* 1) Initial read should return zeroed payload */ 4437 memset(payload_read, 0xFF, payload_size); 4438 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4439 blob_op_complete, NULL); 4440 poll_threads(); 4441 CU_ASSERT(g_bserrno == 0); 4442 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 4443 4444 /* Fill the whole blob with a pattern, except the last cluster (to be sure it 4445 * isn't allocated) */ 4446 memset(payload_write, 0xE5, payload_size - cluster_size); 4447 spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - 4448 pages_per_cluster, blob_op_complete, NULL); 4449 poll_threads(); 4450 CU_ASSERT(g_bserrno == 0); 4451 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4452 4453 /* 2) Create snapshot from blob (first level) */ 4454 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4455 poll_threads(); 4456 CU_ASSERT(g_bserrno == 0); 4457 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4458 snapshotid = g_blobid; 4459 4460 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4461 poll_threads(); 4462 CU_ASSERT(g_bserrno == 0); 4463 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4464 snapshot = g_blob; 4465 CU_ASSERT(snapshot->data_ro == true); 4466 CU_ASSERT(snapshot->md_ro == true); 4467 4468 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4469 4470 /* Write every second cluster with a pattern. 4471 * 4472 * The last cluster shouldn't be written, to make sure that neither the snapshot 4473 * nor the clone allocates it. 4474 * 4475 * payload_clone stores the expected result of a "blob" read at the time and 4476 * is used only to check data consistency on the clone before and after 4477 * inflation. Initially we fill it with the backing snapshot's pattern 4478 * used before. 4479 */ 4480 memset(payload_clone, 0xE5, payload_size - cluster_size); 4481 memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); 4482 memset(payload_write, 0xAA, payload_size); 4483 for (i = 1; i < 5; i += 2) { 4484 spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, 4485 pages_per_cluster, blob_op_complete, NULL); 4486 poll_threads(); 4487 CU_ASSERT(g_bserrno == 0); 4488 4489 /* Update expected result */ 4490 memcpy(payload_clone + (cluster_size * i), payload_write, 4491 cluster_size); 4492 } 4493 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4494 4495 /* Check data consistency on clone */ 4496 memset(payload_read, 0xFF, payload_size); 4497 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4498 blob_op_complete, NULL); 4499 poll_threads(); 4500 CU_ASSERT(g_bserrno == 0); 4501 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4502 4503 /* 3) Create a second level snapshot from the blob */ 4504 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4505 poll_threads(); 4506 CU_ASSERT(g_bserrno == 0); 4507 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4508 snapshot2id = g_blobid; 4509 4510 spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); 4511 poll_threads(); 4512 CU_ASSERT(g_bserrno == 0); 4513 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4514 snapshot2 = g_blob; 4515 CU_ASSERT(snapshot2->data_ro == true); 4516 CU_ASSERT(snapshot2->md_ro == true); 4517 4518 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); 4519 4520 CU_ASSERT(snapshot2->parent_id == snapshotid); 4521 4522 /* Write one cluster on the top level blob. This cluster (1) covers 4523 * an already allocated cluster in snapshot2, so it shouldn't be inflated 4524 * at all */ 4525 spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, 4526 pages_per_cluster, blob_op_complete, NULL); 4527 poll_threads(); 4528 CU_ASSERT(g_bserrno == 0); 4529 4530 /* Update expected result */ 4531 memcpy(payload_clone + cluster_size, payload_write, cluster_size); 4532 4533 /* Check data consistency on clone */ 4534 memset(payload_read, 0xFF, payload_size); 4535 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4536 blob_op_complete, NULL); 4537 poll_threads(); 4538 CU_ASSERT(g_bserrno == 0); 4539 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4540 4541 4542 /* Close all blobs */ 4543 spdk_blob_close(blob, blob_op_complete, NULL); 4544 poll_threads(); 4545 CU_ASSERT(g_bserrno == 0); 4546 4547 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4548 poll_threads(); 4549 CU_ASSERT(g_bserrno == 0); 4550 4551 spdk_blob_close(snapshot, blob_op_complete, NULL); 4552 poll_threads(); 4553 CU_ASSERT(g_bserrno == 0); 4554 4555 /* Check snapshot-clone relations */ 4556 count = 2; 4557 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4558 CU_ASSERT(count == 1); 4559 CU_ASSERT(ids[0] == snapshot2id); 4560 4561 count = 2; 4562 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4563 CU_ASSERT(count == 1); 4564 CU_ASSERT(ids[0] == blobid); 4565 4566 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); 4567 4568 free_clusters = spdk_bs_free_cluster_count(bs); 4569 if (!decouple_parent) { 4570 /* Do full blob inflation */ 4571 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 4572 poll_threads(); 4573 CU_ASSERT(g_bserrno == 0); 4574 4575 /* All clusters should be inflated (except the one already allocated 4576 * in the top level blob) */ 4577
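/* The blob spans 5 clusters and already owns cluster 1, so full inflation allocates the remaining 4, including the never-written last cluster. */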
CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4); 4578 4579 /* Check if relation tree updated correctly */ 4580 count = 2; 4581 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4582 4583 /* snapshotid has one clone */ 4584 CU_ASSERT(count == 1); 4585 CU_ASSERT(ids[0] == snapshot2id); 4586 4587 /* snapshot2id has no clones */ 4588 count = 2; 4589 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4590 CU_ASSERT(count == 0); 4591 4592 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4593 } else { 4594 /* Decouple parent of blob */ 4595 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 4596 poll_threads(); 4597 CU_ASSERT(g_bserrno == 0); 4598 4599 /* Only one cluster from the parent should be inflated (the second one 4600 * is covered by a cluster written on the top level blob and is 4601 * already allocated) */ 4602 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1); 4603 4604 /* Check if relation tree updated correctly */ 4605 count = 2; 4606 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4607 4608 /* snapshotid has two clones now */ 4609 CU_ASSERT(count == 2); 4610 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4611 CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id); 4612 4613 /* snapshot2id has no clones */ 4614 count = 2; 4615 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4616 CU_ASSERT(count == 0); 4617 4618 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4619 } 4620 4621 /* Try to delete snapshot2 (should pass) */ 4622 spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL); 4623 poll_threads(); 4624 CU_ASSERT(g_bserrno == 0); 4625 4626 /* Try to delete base snapshot */ 4627 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4628 poll_threads(); 4629 CU_ASSERT(g_bserrno == 0); 4630 4631 /* Reopen blob after snapshot deletion */ 4632 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4633 poll_threads(); 4634 CU_ASSERT(g_bserrno == 0); 4635 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4636 blob = g_blob; 4637 4638 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4639 4640 /* Check data consistency on inflated blob */ 4641 memset(payload_read, 0xFF, payload_size); 4642 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4643 blob_op_complete, NULL); 4644 poll_threads(); 4645 CU_ASSERT(g_bserrno == 0); 4646 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4647 4648 spdk_blob_close(blob, blob_op_complete, NULL); 4649 poll_threads(); 4650 CU_ASSERT(g_bserrno == 0); 4651 4652 spdk_bs_free_io_channel(channel); 4653 poll_threads(); 4654 4655 g_blob = NULL; 4656 g_blobid = 0; 4657 4658 free(payload_read); 4659 free(payload_write); 4660 free(payload_clone); 4661 } 4662 4663 static void 4664 blob_inflate_rw(void) 4665 { 4666 _blob_inflate_rw(false); 4667 _blob_inflate_rw(true); 4668 } 4669 4670 /** 4671 * Snapshot-clones relation test 4672 * 4673 * snapshot 4674 * | 4675 * +-----+-----+ 4676 * | | 4677 * blob(ro) snapshot2 4678 * | | 4679 * clone2 clone 4680 */ 4681 static void 4682 blob_relations(void) 4683 { 4684 struct spdk_blob_store *bs; 4685 struct spdk_bs_dev *dev; 4686 struct spdk_bs_opts bs_opts; 4687 struct spdk_blob_opts opts; 4688 struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2; 4689 spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2; 4690 int rc; 4691 size_t count; 4692 spdk_blob_id ids[10] = {}; 4693 4694 dev =
init_dev(); 4695 spdk_bs_opts_init(&bs_opts); 4696 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 4697 4698 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4699 poll_threads(); 4700 CU_ASSERT(g_bserrno == 0); 4701 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4702 bs = g_bs; 4703 4704 /* 1. Create blob with 10 clusters */ 4705 4706 ut_spdk_blob_opts_init(&opts); 4707 opts.num_clusters = 10; 4708 4709 blob = ut_blob_create_and_open(bs, &opts); 4710 blobid = spdk_blob_get_id(blob); 4711 4712 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4713 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4714 CU_ASSERT(!spdk_blob_is_clone(blob)); 4715 CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); 4716 4717 /* blob should not have underlying snapshot nor clones */ 4718 CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); 4719 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4720 count = SPDK_COUNTOF(ids); 4721 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4722 CU_ASSERT(rc == 0); 4723 CU_ASSERT(count == 0); 4724 4725 4726 /* 2. Create snapshot */ 4727 4728 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4729 poll_threads(); 4730 CU_ASSERT(g_bserrno == 0); 4731 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4732 snapshotid = g_blobid; 4733 4734 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4735 poll_threads(); 4736 CU_ASSERT(g_bserrno == 0); 4737 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4738 snapshot = g_blob; 4739 4740 CU_ASSERT(spdk_blob_is_read_only(snapshot)); 4741 CU_ASSERT(spdk_blob_is_snapshot(snapshot)); 4742 CU_ASSERT(!spdk_blob_is_clone(snapshot)); 4743 CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); 4744 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4745 4746 /* Check if original blob is converted to the clone of snapshot */ 4747 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4748 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4749 CU_ASSERT(spdk_blob_is_clone(blob)); 4750 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4751 CU_ASSERT(blob->parent_id == snapshotid); 4752 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4753 4754 count = SPDK_COUNTOF(ids); 4755 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4756 CU_ASSERT(rc == 0); 4757 CU_ASSERT(count == 1); 4758 CU_ASSERT(ids[0] == blobid); 4759 4760 4761 /* 3. 
Create clone from snapshot */ 4762 4763 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 4764 poll_threads(); 4765 CU_ASSERT(g_bserrno == 0); 4766 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4767 cloneid = g_blobid; 4768 4769 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4770 poll_threads(); 4771 CU_ASSERT(g_bserrno == 0); 4772 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4773 clone = g_blob; 4774 4775 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4776 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4777 CU_ASSERT(spdk_blob_is_clone(clone)); 4778 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4779 CU_ASSERT(clone->parent_id == snapshotid); 4780 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); 4781 4782 count = SPDK_COUNTOF(ids); 4783 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4784 CU_ASSERT(rc == 0); 4785 CU_ASSERT(count == 0); 4786 4787 /* Check if clone is on the snapshot's list */ 4788 count = SPDK_COUNTOF(ids); 4789 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4790 CU_ASSERT(rc == 0); 4791 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4792 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 4793 4794 4795 /* 4. Create snapshot of the clone */ 4796 4797 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 4798 poll_threads(); 4799 CU_ASSERT(g_bserrno == 0); 4800 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4801 snapshotid2 = g_blobid; 4802 4803 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 4804 poll_threads(); 4805 CU_ASSERT(g_bserrno == 0); 4806 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4807 snapshot2 = g_blob; 4808 4809 CU_ASSERT(spdk_blob_is_read_only(snapshot2)); 4810 CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); 4811 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 4812 CU_ASSERT(snapshot2->parent_id == snapshotid); 4813 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4814 4815 /* Check if clone is converted to the clone of snapshot2 and snapshot2 4816 * is a child of snapshot */ 4817 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4818 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 4819 CU_ASSERT(spdk_blob_is_clone(clone)); 4820 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 4821 CU_ASSERT(clone->parent_id == snapshotid2); 4822 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4823 4824 count = SPDK_COUNTOF(ids); 4825 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4826 CU_ASSERT(rc == 0); 4827 CU_ASSERT(count == 1); 4828 CU_ASSERT(ids[0] == cloneid); 4829 4830 4831 /* 5. 
Try to create clone from read only blob */ 4832 4833 /* Mark blob as read only */ 4834 spdk_blob_set_read_only(blob); 4835 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4836 poll_threads(); 4837 CU_ASSERT(g_bserrno == 0); 4838 4839 /* Check if previously created blob is read only clone */ 4840 CU_ASSERT(spdk_blob_is_read_only(blob)); 4841 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4842 CU_ASSERT(spdk_blob_is_clone(blob)); 4843 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4844 4845 /* Create clone from read only blob */ 4846 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4847 poll_threads(); 4848 CU_ASSERT(g_bserrno == 0); 4849 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4850 cloneid2 = g_blobid; 4851 4852 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 4853 poll_threads(); 4854 CU_ASSERT(g_bserrno == 0); 4855 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4856 clone2 = g_blob; 4857 4858 CU_ASSERT(!spdk_blob_is_read_only(clone2)); 4859 CU_ASSERT(!spdk_blob_is_snapshot(clone2)); 4860 CU_ASSERT(spdk_blob_is_clone(clone2)); 4861 CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); 4862 4863 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4864 4865 count = SPDK_COUNTOF(ids); 4866 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4867 CU_ASSERT(rc == 0); 4868 4869 CU_ASSERT(count == 1); 4870 CU_ASSERT(ids[0] == cloneid2); 4871 4872 /* Close blobs */ 4873 4874 spdk_blob_close(clone2, blob_op_complete, NULL); 4875 poll_threads(); 4876 CU_ASSERT(g_bserrno == 0); 4877 4878 spdk_blob_close(blob, blob_op_complete, NULL); 4879 poll_threads(); 4880 CU_ASSERT(g_bserrno == 0); 4881 4882 spdk_blob_close(clone, blob_op_complete, NULL); 4883 poll_threads(); 4884 CU_ASSERT(g_bserrno == 0); 4885 4886 spdk_blob_close(snapshot, blob_op_complete, NULL); 4887 poll_threads(); 4888 CU_ASSERT(g_bserrno == 0); 4889 4890 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4891 poll_threads(); 4892 CU_ASSERT(g_bserrno == 0); 4893 4894 /* Try to delete snapshot with more than 1 clone */ 4895 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4896 poll_threads(); 4897 CU_ASSERT(g_bserrno != 0); 4898 4899 ut_bs_reload(&bs, &bs_opts); 4900 4901 /* NULL ids array should return number of clones in count */ 4902 count = SPDK_COUNTOF(ids); 4903 rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); 4904 CU_ASSERT(rc == -ENOMEM); 4905 CU_ASSERT(count == 2); 4906 4907 /* incorrect array size */ 4908 count = 1; 4909 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4910 CU_ASSERT(rc == -ENOMEM); 4911 CU_ASSERT(count == 2); 4912 4913 4914 /* Verify structure of loaded blob store */ 4915 4916 /* snapshot */ 4917 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4918 4919 count = SPDK_COUNTOF(ids); 4920 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4921 CU_ASSERT(rc == 0); 4922 CU_ASSERT(count == 2); 4923 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4924 CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); 4925 4926 /* blob */ 4927 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4928 count = SPDK_COUNTOF(ids); 4929 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4930 CU_ASSERT(rc == 0); 4931 CU_ASSERT(count == 1); 4932 CU_ASSERT(ids[0] == cloneid2); 4933 4934 /* clone */ 4935 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 4936 count = SPDK_COUNTOF(ids); 4937 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 4938 CU_ASSERT(rc == 0); 4939 CU_ASSERT(count == 0); 
4940 4941 /* snapshot2 */ 4942 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 4943 count = SPDK_COUNTOF(ids); 4944 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 4945 CU_ASSERT(rc == 0); 4946 CU_ASSERT(count == 1); 4947 CU_ASSERT(ids[0] == cloneid); 4948 4949 /* clone2 */ 4950 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 4951 count = SPDK_COUNTOF(ids); 4952 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 4953 CU_ASSERT(rc == 0); 4954 CU_ASSERT(count == 0); 4955 4956 /* Try to delete blob that user should not be able to remove */ 4957 4958 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4959 poll_threads(); 4960 CU_ASSERT(g_bserrno != 0); 4961 4962 /* Remove all blobs */ 4963 4964 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 4965 poll_threads(); 4966 CU_ASSERT(g_bserrno == 0); 4967 4968 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 4969 poll_threads(); 4970 CU_ASSERT(g_bserrno == 0); 4971 4972 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 4973 poll_threads(); 4974 CU_ASSERT(g_bserrno == 0); 4975 4976 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 4977 poll_threads(); 4978 CU_ASSERT(g_bserrno == 0); 4979 4980 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4981 poll_threads(); 4982 CU_ASSERT(g_bserrno == 0); 4983 4984 spdk_bs_unload(bs, bs_op_complete, NULL); 4985 poll_threads(); 4986 CU_ASSERT(g_bserrno == 0); 4987 4988 g_bs = NULL; 4989 } 4990 4991 /** 4992 * Snapshot-clones relation test 2 4993 * 4994 * snapshot1 4995 * | 4996 * snapshot2 4997 * | 4998 * +-----+-----+ 4999 * | | 5000 * blob(ro) snapshot3 5001 * | | 5002 * | snapshot4 5003 * | | | 5004 * clone2 clone clone3 5005 */ 5006 static void 5007 blob_relations2(void) 5008 { 5009 struct spdk_blob_store *bs; 5010 struct spdk_bs_dev *dev; 5011 struct spdk_bs_opts bs_opts; 5012 struct spdk_blob_opts opts; 5013 struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; 5014 spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, 5015 cloneid3; 5016 int rc; 5017 size_t count; 5018 spdk_blob_id ids[10] = {}; 5019 5020 dev = init_dev(); 5021 spdk_bs_opts_init(&bs_opts); 5022 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 5023 5024 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 5025 poll_threads(); 5026 CU_ASSERT(g_bserrno == 0); 5027 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5028 bs = g_bs; 5029 5030 /* 1. Create blob with 10 clusters */ 5031 5032 ut_spdk_blob_opts_init(&opts); 5033 opts.num_clusters = 10; 5034 5035 blob = ut_blob_create_and_open(bs, &opts); 5036 blobid = spdk_blob_get_id(blob); 5037 5038 /* 2. 
Create snapshot1 */ 5039 5040 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5041 poll_threads(); 5042 CU_ASSERT(g_bserrno == 0); 5043 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5044 snapshotid1 = g_blobid; 5045 5046 spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); 5047 poll_threads(); 5048 CU_ASSERT(g_bserrno == 0); 5049 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5050 snapshot1 = g_blob; 5051 5052 CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); 5053 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); 5054 5055 CU_ASSERT(blob->parent_id == snapshotid1); 5056 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5057 5058 /* Check if blob is the clone of snapshot1 */ 5059 CU_ASSERT(blob->parent_id == snapshotid1); 5060 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5061 5062 count = SPDK_COUNTOF(ids); 5063 rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); 5064 CU_ASSERT(rc == 0); 5065 CU_ASSERT(count == 1); 5066 CU_ASSERT(ids[0] == blobid); 5067 5068 /* 3. Create another snapshot */ 5069 5070 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5071 poll_threads(); 5072 CU_ASSERT(g_bserrno == 0); 5073 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5074 snapshotid2 = g_blobid; 5075 5076 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 5077 poll_threads(); 5078 CU_ASSERT(g_bserrno == 0); 5079 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5080 snapshot2 = g_blob; 5081 5082 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 5083 CU_ASSERT(snapshot2->parent_id == snapshotid1); 5084 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); 5085 5086 /* Check if snapshot2 is the clone of snapshot1 and blob 5087 * is a child of snapshot2 */ 5088 CU_ASSERT(blob->parent_id == snapshotid2); 5089 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5090 5091 count = SPDK_COUNTOF(ids); 5092 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5093 CU_ASSERT(rc == 0); 5094 CU_ASSERT(count == 1); 5095 CU_ASSERT(ids[0] == blobid); 5096 5097 /* 4. Create clone from snapshot */ 5098 5099 spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); 5100 poll_threads(); 5101 CU_ASSERT(g_bserrno == 0); 5102 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5103 cloneid = g_blobid; 5104 5105 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 5106 poll_threads(); 5107 CU_ASSERT(g_bserrno == 0); 5108 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5109 clone = g_blob; 5110 5111 CU_ASSERT(clone->parent_id == snapshotid2); 5112 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5113 5114 /* Check if clone is on the snapshot's list */ 5115 count = SPDK_COUNTOF(ids); 5116 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5117 CU_ASSERT(rc == 0); 5118 CU_ASSERT(count == 2); 5119 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5120 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5121 5122 /* 5. 
Create snapshot of the clone */ 5123 5124 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5125 poll_threads(); 5126 CU_ASSERT(g_bserrno == 0); 5127 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5128 snapshotid3 = g_blobid; 5129 5130 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5131 poll_threads(); 5132 CU_ASSERT(g_bserrno == 0); 5133 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5134 snapshot3 = g_blob; 5135 5136 CU_ASSERT(snapshot3->parent_id == snapshotid2); 5137 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5138 5139 /* Check if clone is converted to the clone of snapshot3 and snapshot3 5140 * is a child of snapshot2 */ 5141 CU_ASSERT(clone->parent_id == snapshotid3); 5142 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5143 5144 count = SPDK_COUNTOF(ids); 5145 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5146 CU_ASSERT(rc == 0); 5147 CU_ASSERT(count == 1); 5148 CU_ASSERT(ids[0] == cloneid); 5149 5150 /* 6. Create another snapshot of the clone */ 5151 5152 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5153 poll_threads(); 5154 CU_ASSERT(g_bserrno == 0); 5155 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5156 snapshotid4 = g_blobid; 5157 5158 spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); 5159 poll_threads(); 5160 CU_ASSERT(g_bserrno == 0); 5161 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5162 snapshot4 = g_blob; 5163 5164 CU_ASSERT(snapshot4->parent_id == snapshotid3); 5165 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); 5166 5167 /* Check if clone is converted to the clone of snapshot4 and snapshot4 5168 * is a child of snapshot3 */ 5169 CU_ASSERT(clone->parent_id == snapshotid4); 5170 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); 5171 5172 count = SPDK_COUNTOF(ids); 5173 rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); 5174 CU_ASSERT(rc == 0); 5175 CU_ASSERT(count == 1); 5176 CU_ASSERT(ids[0] == cloneid); 5177 5178 /* 7. Remove snapshot 4 */ 5179 5180 spdk_blob_close(snapshot4, blob_op_complete, NULL); 5181 poll_threads(); 5182 CU_ASSERT(g_bserrno == 0); 5183 5184 spdk_bs_delete_blob(bs, snapshotid4, blob_op_complete, NULL); 5185 poll_threads(); 5186 CU_ASSERT(g_bserrno == 0); 5187 5188 /* Check if relations are back to state from before creating snapshot 4 */ 5189 CU_ASSERT(clone->parent_id == snapshotid3); 5190 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5191 5192 count = SPDK_COUNTOF(ids); 5193 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5194 CU_ASSERT(rc == 0); 5195 CU_ASSERT(count == 1); 5196 CU_ASSERT(ids[0] == cloneid); 5197 5198 /* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */ 5199 5200 spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); 5201 poll_threads(); 5202 CU_ASSERT(g_bserrno == 0); 5203 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5204 cloneid3 = g_blobid; 5205 5206 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5207 poll_threads(); 5208 CU_ASSERT(g_bserrno != 0); 5209 5210 /* 9. 
Open snapshot 3 again and try to remove it while clone 3 is closed */ 5211 5212 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5213 poll_threads(); 5214 CU_ASSERT(g_bserrno == 0); 5215 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5216 snapshot3 = g_blob; 5217 5218 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5219 poll_threads(); 5220 CU_ASSERT(g_bserrno != 0); 5221 5222 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5223 poll_threads(); 5224 CU_ASSERT(g_bserrno == 0); 5225 5226 spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); 5227 poll_threads(); 5228 CU_ASSERT(g_bserrno == 0); 5229 5230 /* 10. Remove snapshot 1 */ 5231 5232 spdk_blob_close(snapshot1, blob_op_complete, NULL); 5233 poll_threads(); 5234 CU_ASSERT(g_bserrno == 0); 5235 5236 spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL); 5237 poll_threads(); 5238 CU_ASSERT(g_bserrno == 0); 5239 5240 /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ 5241 CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); 5242 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5243 5244 count = SPDK_COUNTOF(ids); 5245 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5246 CU_ASSERT(rc == 0); 5247 CU_ASSERT(count == 2); 5248 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5249 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5250 5251 /* 11. Try to create clone from read only blob */ 5252 5253 /* Mark blob as read only */ 5254 spdk_blob_set_read_only(blob); 5255 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5256 poll_threads(); 5257 CU_ASSERT(g_bserrno == 0); 5258 5259 /* Create clone from read only blob */ 5260 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5261 poll_threads(); 5262 CU_ASSERT(g_bserrno == 0); 5263 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5264 cloneid2 = g_blobid; 5265 5266 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5267 poll_threads(); 5268 CU_ASSERT(g_bserrno == 0); 5269 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5270 clone2 = g_blob; 5271 5272 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5273 5274 count = SPDK_COUNTOF(ids); 5275 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5276 CU_ASSERT(rc == 0); 5277 CU_ASSERT(count == 1); 5278 CU_ASSERT(ids[0] == cloneid2); 5279 5280 /* Close blobs */ 5281 5282 spdk_blob_close(clone2, blob_op_complete, NULL); 5283 poll_threads(); 5284 CU_ASSERT(g_bserrno == 0); 5285 5286 spdk_blob_close(blob, blob_op_complete, NULL); 5287 poll_threads(); 5288 CU_ASSERT(g_bserrno == 0); 5289 5290 spdk_blob_close(clone, blob_op_complete, NULL); 5291 poll_threads(); 5292 CU_ASSERT(g_bserrno == 0); 5293 5294 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5295 poll_threads(); 5296 CU_ASSERT(g_bserrno == 0); 5297 5298 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5299 poll_threads(); 5300 CU_ASSERT(g_bserrno == 0); 5301 5302 ut_bs_reload(&bs, &bs_opts); 5303 5304 /* Verify structure of loaded blob store */ 5305 5306 /* snapshot2 */ 5307 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5308 5309 count = SPDK_COUNTOF(ids); 5310 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5311 CU_ASSERT(rc == 0); 5312 CU_ASSERT(count == 2); 5313 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5314 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5315 5316 /* blob */ 5317 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5318 
count = SPDK_COUNTOF(ids); 5319 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5320 CU_ASSERT(rc == 0); 5321 CU_ASSERT(count == 1); 5322 CU_ASSERT(ids[0] == cloneid2); 5323 5324 /* clone */ 5325 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5326 count = SPDK_COUNTOF(ids); 5327 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5328 CU_ASSERT(rc == 0); 5329 CU_ASSERT(count == 0); 5330 5331 /* snapshot3 */ 5332 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5333 count = SPDK_COUNTOF(ids); 5334 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5335 CU_ASSERT(rc == 0); 5336 CU_ASSERT(count == 1); 5337 CU_ASSERT(ids[0] == cloneid); 5338 5339 /* clone2 */ 5340 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5341 count = SPDK_COUNTOF(ids); 5342 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5343 CU_ASSERT(rc == 0); 5344 CU_ASSERT(count == 0); 5345 5346 /* Try to delete all blobs in the worse possible order */ 5347 5348 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5349 poll_threads(); 5350 CU_ASSERT(g_bserrno != 0); 5351 5352 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5353 poll_threads(); 5354 CU_ASSERT(g_bserrno == 0); 5355 5356 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5357 poll_threads(); 5358 CU_ASSERT(g_bserrno != 0); 5359 5360 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5361 poll_threads(); 5362 CU_ASSERT(g_bserrno == 0); 5363 5364 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5365 poll_threads(); 5366 CU_ASSERT(g_bserrno == 0); 5367 5368 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5369 poll_threads(); 5370 CU_ASSERT(g_bserrno == 0); 5371 5372 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5373 poll_threads(); 5374 CU_ASSERT(g_bserrno == 0); 5375 5376 spdk_bs_unload(bs, bs_op_complete, NULL); 5377 poll_threads(); 5378 CU_ASSERT(g_bserrno == 0); 5379 5380 g_bs = NULL; 5381 } 5382 5383 static void 5384 blob_delete_snapshot_power_failure(void) 5385 { 5386 struct spdk_blob_store *bs = g_bs; 5387 struct spdk_blob_opts opts; 5388 struct spdk_blob *blob, *snapshot; 5389 struct spdk_power_failure_thresholds thresholds = {}; 5390 spdk_blob_id blobid, snapshotid; 5391 const void *value; 5392 size_t value_len; 5393 size_t count; 5394 spdk_blob_id ids[3] = {}; 5395 int rc; 5396 bool deleted = false; 5397 5398 /* Create blob */ 5399 ut_spdk_blob_opts_init(&opts); 5400 opts.num_clusters = 10; 5401 5402 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5403 poll_threads(); 5404 CU_ASSERT(g_bserrno == 0); 5405 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5406 blobid = g_blobid; 5407 5408 /* Create snapshot */ 5409 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5410 poll_threads(); 5411 CU_ASSERT(g_bserrno == 0); 5412 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5413 snapshotid = g_blobid; 5414 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5415 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5416 5417 thresholds.general_threshold = 1; 5418 while (!deleted) { 5419 dev_set_power_failure_thresholds(thresholds); 5420 5421 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5422 poll_threads(); 5423 5424 /* Do not shut down cleanly. Assumption is that after snapshot deletion 5425 * reports success, changes to both blobs should already persisted. 
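 * If the power failure interrupted the deletion instead, the snapshot may still be
 * present after the dirty load below; both outcomes are handled and the
 * snapshot/clone relations are checked accordingly.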
*/ 5426 dev_reset_power_failure_event(); 5427 ut_bs_dirty_load(&bs, NULL); 5428 5429 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5430 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5431 5432 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5433 poll_threads(); 5434 CU_ASSERT(g_bserrno == 0); 5435 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5436 blob = g_blob; 5437 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5438 5439 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5440 poll_threads(); 5441 5442 if (g_bserrno == 0) { 5443 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5444 snapshot = g_blob; 5445 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5446 count = SPDK_COUNTOF(ids); 5447 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5448 CU_ASSERT(rc == 0); 5449 CU_ASSERT(count == 1); 5450 CU_ASSERT(ids[0] == blobid); 5451 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 5452 CU_ASSERT(rc != 0); 5453 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5454 5455 spdk_blob_close(snapshot, blob_op_complete, NULL); 5456 poll_threads(); 5457 CU_ASSERT(g_bserrno == 0); 5458 } else { 5459 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5460 deleted = true; 5461 } 5462 5463 spdk_blob_close(blob, blob_op_complete, NULL); 5464 poll_threads(); 5465 CU_ASSERT(g_bserrno == 0); 5466 5467 /* Reload blobstore to have the same starting conditions (as the previous blobstore load 5468 * may trigger cleanup after power failure or may not) */ 5469 ut_bs_reload(&bs, NULL); 5470 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5471 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5472 5473 thresholds.general_threshold++; 5474 } 5475 } 5476 5477 static void 5478 blob_create_snapshot_power_failure(void) 5479 { 5480 struct spdk_blob_store *bs = g_bs; 5481 struct spdk_blob_opts opts; 5482 struct spdk_blob *blob, *snapshot; 5483 struct spdk_power_failure_thresholds thresholds = {}; 5484 spdk_blob_id blobid, snapshotid; 5485 const void *value; 5486 size_t value_len; 5487 size_t count; 5488 spdk_blob_id ids[3] = {}; 5489 int rc; 5490 bool created = false; 5491 5492 /* Create blob */ 5493 ut_spdk_blob_opts_init(&opts); 5494 opts.num_clusters = 10; 5495 5496 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5497 poll_threads(); 5498 CU_ASSERT(g_bserrno == 0); 5499 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5500 blobid = g_blobid; 5501 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5502 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5503 5504 thresholds.general_threshold = 1; 5505 while (!created) { 5506 dev_set_power_failure_thresholds(thresholds); 5507 5508 /* Create snapshot */ 5509 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5510 poll_threads(); 5511 snapshotid = g_blobid; 5512 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5513 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5514 5515 /* Do not shut down cleanly. Assumption is that after create snapshot 5516 * reports success, both blobs should be power-fail safe. 
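 * If the power failure hit before the creation completed, the snapshot may be absent
 * or unopenable after the dirty load below; in that case the original blob must still
 * be intact and must not have been converted to thin provisioned.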
*/ 5517 dev_reset_power_failure_event(); 5518 ut_bs_dirty_load(&bs, NULL); 5519 5520 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5521 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5522 5523 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5524 poll_threads(); 5525 CU_ASSERT(g_bserrno == 0); 5526 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5527 blob = g_blob; 5528 5529 if (snapshotid != SPDK_BLOBID_INVALID) { 5530 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5531 poll_threads(); 5532 } 5533 5534 if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { 5535 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5536 snapshot = g_blob; 5537 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5538 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5539 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5540 count = SPDK_COUNTOF(ids); 5541 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5542 CU_ASSERT(rc == 0); 5543 CU_ASSERT(count == 1); 5544 CU_ASSERT(ids[0] == blobid); 5545 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); 5546 CU_ASSERT(rc != 0); 5547 5548 spdk_blob_close(snapshot, blob_op_complete, NULL); 5549 poll_threads(); 5550 CU_ASSERT(g_bserrno == 0); 5551 created = true; 5552 } else { 5553 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5554 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); 5555 } 5556 5557 spdk_blob_close(blob, blob_op_complete, NULL); 5558 poll_threads(); 5559 CU_ASSERT(g_bserrno == 0); 5560 5561 /* Reload blobstore to have the same starting conditions (as the previous blobstore load 5562 * may trigger cleanup after power failure or may not) */ 5563 ut_bs_reload(&bs, NULL); 5564 SPDK_CU_ASSERT_FATAL(spdk_bit_array_get(bs->used_clusters, 1)); 5565 SPDK_CU_ASSERT_FATAL(!spdk_bit_array_get(bs->used_clusters, 11)); 5566 5567 thresholds.general_threshold++; 5568 } 5569 } 5570 5571 static void 5572 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5573 { 5574 uint8_t payload_ff[64 * 512]; 5575 uint8_t payload_aa[64 * 512]; 5576 uint8_t payload_00[64 * 512]; 5577 uint8_t *cluster0, *cluster1; 5578 5579 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5580 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5581 memset(payload_00, 0x00, sizeof(payload_00)); 5582 5583 /* Try to perform I/O with io unit = 512 */ 5584 spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); 5585 poll_threads(); 5586 CU_ASSERT(g_bserrno == 0); 5587 5588 /* If thin provisioned is set cluster should be allocated now */ 5589 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5590 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5591 5592 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5593 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). 
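 * For example, [ F000 0000 | ... ] below means io_unit 0 holds 0xFF bytes while the
 * remaining io_units of that cluster hold zeroes.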
*/ 5594 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5595 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5596 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5597 5598 /* Verify write with offset on first page */ 5599 spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); 5600 poll_threads(); 5601 CU_ASSERT(g_bserrno == 0); 5602 5603 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5604 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5605 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5606 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5607 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5608 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5609 5610 /* Verify write with offset on first page */ 5611 spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); 5612 poll_threads(); 5613 5614 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5615 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5616 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5617 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5618 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5619 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5620 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5621 5622 /* Verify write with offset on second page */ 5623 spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); 5624 poll_threads(); 5625 5626 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5627 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5628 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5629 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5630 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5631 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5632 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5633 5634 /* Verify write across multiple pages */ 5635 spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); 5636 poll_threads(); 5637 5638 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ 5639 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5640 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5641 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5642 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5643 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5644 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5645 5646 /* Verify write across multiple clusters */ 5647 spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); 5648 poll_threads(); 5649 5650 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5651 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5652 5653 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5654 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5655 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5656 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5657 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5658 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5659 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5660 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 
4 * 512) == 0); 5661 5662 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5663 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5664 5665 /* Verify write to second cluster */ 5666 spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); 5667 poll_threads(); 5668 5669 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5670 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5671 5672 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5673 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5674 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5675 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5676 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5677 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5678 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5679 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5680 5681 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5682 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5683 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5684 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5685 } 5686 5687 static void 5688 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5689 { 5690 uint8_t payload_read[64 * 512]; 5691 uint8_t payload_ff[64 * 512]; 5692 uint8_t payload_aa[64 * 512]; 5693 uint8_t payload_00[64 * 512]; 5694 5695 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5696 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5697 memset(payload_00, 0x00, sizeof(payload_00)); 5698 5699 /* Read only first io unit */ 5700 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5701 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5702 * payload_read: F000 0000 | 0000 0000 ... */ 5703 memset(payload_read, 0x00, sizeof(payload_read)); 5704 spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); 5705 poll_threads(); 5706 CU_ASSERT(g_bserrno == 0); 5707 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5708 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 5709 5710 /* Read four io_units starting from offset = 2 5711 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5712 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5713 * payload_read: F0AA 0000 | 0000 0000 ... */ 5714 5715 memset(payload_read, 0x00, sizeof(payload_read)); 5716 spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); 5717 poll_threads(); 5718 CU_ASSERT(g_bserrno == 0); 5719 5720 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5721 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5722 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 5723 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 5724 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5725 5726 /* Read eight io_units across multiple pages 5727 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5728 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5729 * payload_read: AAAA AAAA | 0000 0000 ... 
*/ 5730 memset(payload_read, 0x00, sizeof(payload_read)); 5731 spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); 5732 poll_threads(); 5733 CU_ASSERT(g_bserrno == 0); 5734 5735 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 5736 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5737 5738 /* Read eight io_units across multiple clusters 5739 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 5740 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5741 * payload_read: FFFF FFFF | 0000 0000 ... */ 5742 memset(payload_read, 0x00, sizeof(payload_read)); 5743 spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); 5744 poll_threads(); 5745 CU_ASSERT(g_bserrno == 0); 5746 5747 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 5748 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 5749 5750 /* Read four io_units from second cluster 5751 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5752 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 5753 * payload_read: 00FF 0000 | 0000 0000 ... */ 5754 memset(payload_read, 0x00, sizeof(payload_read)); 5755 spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); 5756 poll_threads(); 5757 CU_ASSERT(g_bserrno == 0); 5758 5759 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 5760 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 5761 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 5762 5763 /* Read second cluster 5764 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5765 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 5766 * payload_read: FFFF 0000 | 0000 FF00 ... 
*/ 5767 memset(payload_read, 0x00, sizeof(payload_read)); 5768 spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); 5769 poll_threads(); 5770 CU_ASSERT(g_bserrno == 0); 5771 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 5772 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 5773 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 5774 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 5775 5776 /* Read whole two clusters 5777 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 5778 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 5779 memset(payload_read, 0x00, sizeof(payload_read)); 5780 spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); 5781 poll_threads(); 5782 CU_ASSERT(g_bserrno == 0); 5783 5784 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 5785 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 5786 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 5787 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 5788 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 5789 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 5790 5791 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 5792 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 5793 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 5794 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 5795 } 5796 5797 5798 static void 5799 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5800 { 5801 uint8_t payload_ff[64 * 512]; 5802 uint8_t payload_aa[64 * 512]; 5803 uint8_t payload_00[64 * 512]; 5804 uint8_t *cluster0, *cluster1; 5805 5806 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5807 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5808 memset(payload_00, 0x00, sizeof(payload_00)); 5809 5810 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5811 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5812 5813 /* Unmap */ 5814 spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); 5815 poll_threads(); 5816 5817 CU_ASSERT(g_bserrno == 0); 5818 5819 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5820 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5821 } 5822 5823 static void 5824 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5825 { 5826 uint8_t payload_ff[64 * 512]; 5827 uint8_t payload_aa[64 * 512]; 5828 uint8_t payload_00[64 * 512]; 5829 uint8_t *cluster0, *cluster1; 5830 5831 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5832 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5833 memset(payload_00, 0x00, sizeof(payload_00)); 5834 5835 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5836 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5837 5838 /* Write zeroes */ 5839 spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); 5840 poll_threads(); 5841 5842 CU_ASSERT(g_bserrno == 0); 5843 5844 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 5845 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 5846 } 5847 5848 5849 static void 5850 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 
5851 { 5852 uint8_t payload_ff[64 * 512]; 5853 uint8_t payload_aa[64 * 512]; 5854 uint8_t payload_00[64 * 512]; 5855 uint8_t *cluster0, *cluster1; 5856 struct iovec iov[4]; 5857 5858 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5859 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5860 memset(payload_00, 0x00, sizeof(payload_00)); 5861 5862 /* Try to perform I/O with io unit = 512 */ 5863 iov[0].iov_base = payload_ff; 5864 iov[0].iov_len = 1 * 512; 5865 spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 5866 poll_threads(); 5867 CU_ASSERT(g_bserrno == 0); 5868 5869 /* If thin provisioned is set cluster should be allocated now */ 5870 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5871 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5872 5873 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5874 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ 5875 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5876 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5877 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5878 5879 /* Verify write with offset on first page */ 5880 iov[0].iov_base = payload_ff; 5881 iov[0].iov_len = 1 * 512; 5882 spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); 5883 poll_threads(); 5884 CU_ASSERT(g_bserrno == 0); 5885 5886 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5887 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5888 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5889 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5890 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5891 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 5892 5893 /* Verify write with offset on first page */ 5894 iov[0].iov_base = payload_ff; 5895 iov[0].iov_len = 4 * 512; 5896 spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); 5897 poll_threads(); 5898 5899 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5900 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5901 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5902 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5903 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5904 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 5905 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 5906 5907 /* Verify write with offset on second page */ 5908 iov[0].iov_base = payload_ff; 5909 iov[0].iov_len = 4 * 512; 5910 spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); 5911 poll_threads(); 5912 5913 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 5914 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5915 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5916 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5917 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5918 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 5919 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5920 5921 /* Verify write across multiple pages */ 5922 iov[0].iov_base = payload_aa; 5923 iov[0].iov_len = 8 * 512; 5924 spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); 5925 poll_threads(); 5926 5927 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 
0000 | 0000 0000 ] */ 5928 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5929 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5930 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5931 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5932 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5933 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 5934 5935 /* Verify write across multiple clusters */ 5936 5937 iov[0].iov_base = payload_ff; 5938 iov[0].iov_len = 8 * 512; 5939 spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); 5940 poll_threads(); 5941 5942 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5943 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5944 5945 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5946 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5947 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5948 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5949 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5950 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5951 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5952 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); 5953 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5954 5955 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5956 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 5957 5958 /* Verify write to second cluster */ 5959 5960 iov[0].iov_base = payload_ff; 5961 iov[0].iov_len = 2 * 512; 5962 spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); 5963 poll_threads(); 5964 5965 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 5966 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 5967 5968 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5969 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 5970 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5971 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 5972 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 5973 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 5974 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 5975 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 5976 5977 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 5978 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 5979 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 5980 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 5981 } 5982 5983 static void 5984 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5985 { 5986 uint8_t payload_read[64 * 512]; 5987 uint8_t payload_ff[64 * 512]; 5988 uint8_t payload_aa[64 * 512]; 5989 uint8_t payload_00[64 * 512]; 5990 struct iovec iov[4]; 5991 5992 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5993 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5994 memset(payload_00, 0x00, sizeof(payload_00)); 5995 5996 /* Read only first io unit */ 5997 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 5998 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 5999 * payload_read: F000 0000 | 0000 0000 ... 
*/ 6000 memset(payload_read, 0x00, sizeof(payload_read)); 6001 iov[0].iov_base = payload_read; 6002 iov[0].iov_len = 1 * 512; 6003 spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 6004 poll_threads(); 6005 6006 CU_ASSERT(g_bserrno == 0); 6007 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6008 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 6009 6010 /* Read four io_units starting from offset = 2 6011 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6012 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6013 * payload_read: F0AA 0000 | 0000 0000 ... */ 6014 6015 memset(payload_read, 0x00, sizeof(payload_read)); 6016 iov[0].iov_base = payload_read; 6017 iov[0].iov_len = 4 * 512; 6018 spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); 6019 poll_threads(); 6020 CU_ASSERT(g_bserrno == 0); 6021 6022 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6023 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6024 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 6025 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 6026 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6027 6028 /* Read eight io_units across multiple pages 6029 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6030 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6031 * payload_read: AAAA AAAA | 0000 0000 ... */ 6032 memset(payload_read, 0x00, sizeof(payload_read)); 6033 iov[0].iov_base = payload_read; 6034 iov[0].iov_len = 4 * 512; 6035 iov[1].iov_base = payload_read + 4 * 512; 6036 iov[1].iov_len = 4 * 512; 6037 spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); 6038 poll_threads(); 6039 CU_ASSERT(g_bserrno == 0); 6040 6041 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 6042 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6043 6044 /* Read eight io_units across multiple clusters 6045 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 6046 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6047 * payload_read: FFFF FFFF | 0000 0000 ... */ 6048 memset(payload_read, 0x00, sizeof(payload_read)); 6049 iov[0].iov_base = payload_read; 6050 iov[0].iov_len = 2 * 512; 6051 iov[1].iov_base = payload_read + 2 * 512; 6052 iov[1].iov_len = 2 * 512; 6053 iov[2].iov_base = payload_read + 4 * 512; 6054 iov[2].iov_len = 2 * 512; 6055 iov[3].iov_base = payload_read + 6 * 512; 6056 iov[3].iov_len = 2 * 512; 6057 spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); 6058 poll_threads(); 6059 CU_ASSERT(g_bserrno == 0); 6060 6061 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 6062 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6063 6064 /* Read four io_units from second cluster 6065 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6066 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 6067 * payload_read: 00FF 0000 | 0000 0000 ... 
*/ 6068 memset(payload_read, 0x00, sizeof(payload_read)); 6069 iov[0].iov_base = payload_read; 6070 iov[0].iov_len = 1 * 512; 6071 iov[1].iov_base = payload_read + 1 * 512; 6072 iov[1].iov_len = 3 * 512; 6073 spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); 6074 poll_threads(); 6075 CU_ASSERT(g_bserrno == 0); 6076 6077 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 6078 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 6079 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6080 6081 /* Read second cluster 6082 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6083 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6084 * payload_read: FFFF 0000 | 0000 FF00 ... */ 6085 memset(payload_read, 0x00, sizeof(payload_read)); 6086 iov[0].iov_base = payload_read; 6087 iov[0].iov_len = 1 * 512; 6088 iov[1].iov_base = payload_read + 1 * 512; 6089 iov[1].iov_len = 2 * 512; 6090 iov[2].iov_base = payload_read + 3 * 512; 6091 iov[2].iov_len = 4 * 512; 6092 iov[3].iov_base = payload_read + 7 * 512; 6093 iov[3].iov_len = 25 * 512; 6094 spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); 6095 poll_threads(); 6096 CU_ASSERT(g_bserrno == 0); 6097 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6098 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6099 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6100 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6101 6102 /* Read whole two clusters 6103 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6104 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6105 memset(payload_read, 0x00, sizeof(payload_read)); 6106 iov[0].iov_base = payload_read; 6107 iov[0].iov_len = 1 * 512; 6108 iov[1].iov_base = payload_read + 1 * 512; 6109 iov[1].iov_len = 8 * 512; 6110 iov[2].iov_base = payload_read + 9 * 512; 6111 iov[2].iov_len = 16 * 512; 6112 iov[3].iov_base = payload_read + 25 * 512; 6113 iov[3].iov_len = 39 * 512; 6114 spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); 6115 poll_threads(); 6116 CU_ASSERT(g_bserrno == 0); 6117 6118 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6119 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6120 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6121 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6122 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6123 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6124 6125 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6126 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6127 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6128 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6129 } 6130 6131 static void 6132 blob_io_unit(void) 6133 { 6134 struct spdk_bs_opts bsopts; 6135 struct spdk_blob_opts opts; 6136 struct spdk_blob_store *bs; 6137 struct spdk_bs_dev *dev; 6138 struct spdk_blob *blob, *snapshot, *clone; 6139 spdk_blob_id blobid; 6140 struct spdk_io_channel *channel; 6141 6142 /* Create dev with 512 bytes io unit size */ 6143 6144 spdk_bs_opts_init(&bsopts); 6145 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6146 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), 
"TESTTYPE"); 6147 6148 /* Try to initialize a new blob store with unsupported io_unit */ 6149 dev = init_dev(); 6150 dev->blocklen = 512; 6151 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6152 6153 /* Initialize a new blob store */ 6154 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6155 poll_threads(); 6156 CU_ASSERT(g_bserrno == 0); 6157 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6158 bs = g_bs; 6159 6160 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6161 channel = spdk_bs_alloc_io_channel(bs); 6162 6163 /* Create thick provisioned blob */ 6164 ut_spdk_blob_opts_init(&opts); 6165 opts.thin_provision = false; 6166 opts.num_clusters = 32; 6167 6168 blob = ut_blob_create_and_open(bs, &opts); 6169 blobid = spdk_blob_get_id(blob); 6170 6171 test_io_write(dev, blob, channel); 6172 test_io_read(dev, blob, channel); 6173 test_io_zeroes(dev, blob, channel); 6174 6175 test_iov_write(dev, blob, channel); 6176 test_iov_read(dev, blob, channel); 6177 6178 test_io_unmap(dev, blob, channel); 6179 6180 spdk_blob_close(blob, blob_op_complete, NULL); 6181 poll_threads(); 6182 CU_ASSERT(g_bserrno == 0); 6183 blob = NULL; 6184 g_blob = NULL; 6185 6186 /* Create thin provisioned blob */ 6187 6188 ut_spdk_blob_opts_init(&opts); 6189 opts.thin_provision = true; 6190 opts.num_clusters = 32; 6191 6192 blob = ut_blob_create_and_open(bs, &opts); 6193 blobid = spdk_blob_get_id(blob); 6194 6195 test_io_write(dev, blob, channel); 6196 test_io_read(dev, blob, channel); 6197 6198 test_io_zeroes(dev, blob, channel); 6199 6200 test_iov_write(dev, blob, channel); 6201 test_iov_read(dev, blob, channel); 6202 6203 /* Create snapshot */ 6204 6205 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6206 poll_threads(); 6207 CU_ASSERT(g_bserrno == 0); 6208 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6209 blobid = g_blobid; 6210 6211 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6212 poll_threads(); 6213 CU_ASSERT(g_bserrno == 0); 6214 CU_ASSERT(g_blob != NULL); 6215 snapshot = g_blob; 6216 6217 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6218 poll_threads(); 6219 CU_ASSERT(g_bserrno == 0); 6220 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6221 blobid = g_blobid; 6222 6223 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6224 poll_threads(); 6225 CU_ASSERT(g_bserrno == 0); 6226 CU_ASSERT(g_blob != NULL); 6227 clone = g_blob; 6228 6229 test_io_read(dev, blob, channel); 6230 test_io_read(dev, snapshot, channel); 6231 test_io_read(dev, clone, channel); 6232 6233 test_iov_read(dev, blob, channel); 6234 test_iov_read(dev, snapshot, channel); 6235 test_iov_read(dev, clone, channel); 6236 6237 /* Inflate clone */ 6238 6239 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6240 poll_threads(); 6241 6242 CU_ASSERT(g_bserrno == 0); 6243 6244 test_io_read(dev, clone, channel); 6245 6246 test_io_unmap(dev, clone, channel); 6247 6248 test_iov_write(dev, clone, channel); 6249 test_iov_read(dev, clone, channel); 6250 6251 spdk_blob_close(blob, blob_op_complete, NULL); 6252 spdk_blob_close(snapshot, blob_op_complete, NULL); 6253 spdk_blob_close(clone, blob_op_complete, NULL); 6254 poll_threads(); 6255 CU_ASSERT(g_bserrno == 0); 6256 blob = NULL; 6257 g_blob = NULL; 6258 6259 spdk_bs_free_io_channel(channel); 6260 poll_threads(); 6261 6262 /* Unload the blob store */ 6263 spdk_bs_unload(bs, bs_op_complete, NULL); 6264 poll_threads(); 6265 CU_ASSERT(g_bserrno == 0); 6266 g_bs = NULL; 6267 g_blob = NULL; 6268 g_blobid = 0; 6269 
} 6270 6271 static void 6272 blob_io_unit_compatiblity(void) 6273 { 6274 struct spdk_bs_opts bsopts; 6275 struct spdk_blob_store *bs; 6276 struct spdk_bs_dev *dev; 6277 struct spdk_bs_super_block *super; 6278 6279 /* Create dev with 512 bytes io unit size */ 6280 6281 spdk_bs_opts_init(&bsopts); 6282 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6283 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE"); 6284 6285 /* Try to initialize a new blob store with unsupported io_unit */ 6286 dev = init_dev(); 6287 dev->blocklen = 512; 6288 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6289 6290 /* Initialize a new blob store */ 6291 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6292 poll_threads(); 6293 CU_ASSERT(g_bserrno == 0); 6294 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6295 bs = g_bs; 6296 6297 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6298 6299 /* Unload the blob store */ 6300 spdk_bs_unload(bs, bs_op_complete, NULL); 6301 poll_threads(); 6302 CU_ASSERT(g_bserrno == 0); 6303 6304 /* Modify super block to behave like older version. 6305 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */ 6306 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 6307 super->io_unit_size = 0; 6308 super->crc = _spdk_blob_md_page_calc_crc(super); 6309 6310 dev = init_dev(); 6311 dev->blocklen = 512; 6312 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6313 6314 spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL); 6315 poll_threads(); 6316 CU_ASSERT(g_bserrno == 0); 6317 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6318 bs = g_bs; 6319 6320 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE); 6321 6322 /* Unload the blob store */ 6323 spdk_bs_unload(bs, bs_op_complete, NULL); 6324 poll_threads(); 6325 CU_ASSERT(g_bserrno == 0); 6326 6327 g_bs = NULL; 6328 g_blob = NULL; 6329 g_blobid = 0; 6330 } 6331 6332 static void 6333 blob_simultaneous_operations(void) 6334 { 6335 struct spdk_blob_store *bs = g_bs; 6336 struct spdk_blob_opts opts; 6337 struct spdk_blob *blob, *snapshot; 6338 spdk_blob_id blobid, snapshotid; 6339 struct spdk_io_channel *channel; 6340 6341 channel = spdk_bs_alloc_io_channel(bs); 6342 SPDK_CU_ASSERT_FATAL(channel != NULL); 6343 6344 ut_spdk_blob_opts_init(&opts); 6345 opts.num_clusters = 10; 6346 6347 blob = ut_blob_create_and_open(bs, &opts); 6348 blobid = spdk_blob_get_id(blob); 6349 6350 /* Create snapshot and try to remove blob in the same time: 6351 * - snapshot should be created successfully 6352 * - delete operation should fail w -EBUSY */ 6353 CU_ASSERT(blob->locked_operation_in_progress == false); 6354 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6355 CU_ASSERT(blob->locked_operation_in_progress == true); 6356 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6357 CU_ASSERT(blob->locked_operation_in_progress == true); 6358 /* Deletion failure */ 6359 CU_ASSERT(g_bserrno == -EBUSY); 6360 poll_threads(); 6361 CU_ASSERT(blob->locked_operation_in_progress == false); 6362 /* Snapshot creation success */ 6363 CU_ASSERT(g_bserrno == 0); 6364 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6365 6366 snapshotid = g_blobid; 6367 6368 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 6369 poll_threads(); 6370 CU_ASSERT(g_bserrno == 0); 6371 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6372 snapshot = g_blob; 6373 6374 /* Inflate blob and try to remove blob in the same time: 6375 * - blob should be inflated successfully 6376 * - delete operation should fail w -EBUSY */ 
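/* As in the snapshot case above, the delete completion is expected to fire with -EBUSY
 * before poll_threads() is called, i.e. while the in-flight inflate operation still
 * holds blob->locked_operation_in_progress. Only after polling does the inflate itself
 * report its status through g_bserrno. */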
6377 CU_ASSERT(blob->locked_operation_in_progress == false); 6378 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6379 CU_ASSERT(blob->locked_operation_in_progress == true); 6380 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6381 CU_ASSERT(blob->locked_operation_in_progress == true); 6382 /* Deletion failure */ 6383 CU_ASSERT(g_bserrno == -EBUSY); 6384 poll_threads(); 6385 CU_ASSERT(blob->locked_operation_in_progress == false); 6386 /* Inflation success */ 6387 CU_ASSERT(g_bserrno == 0); 6388 6389 /* Clone snapshot and try to remove snapshot in the same time: 6390 * - snapshot should be cloned successfully 6391 * - delete operation should fail w -EBUSY */ 6392 CU_ASSERT(blob->locked_operation_in_progress == false); 6393 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 6394 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 6395 /* Deletion failure */ 6396 CU_ASSERT(g_bserrno == -EBUSY); 6397 poll_threads(); 6398 CU_ASSERT(blob->locked_operation_in_progress == false); 6399 /* Clone created */ 6400 CU_ASSERT(g_bserrno == 0); 6401 6402 /* Resize blob and try to remove blob in the same time: 6403 * - blob should be resized successfully 6404 * - delete operation should fail w -EBUSY */ 6405 CU_ASSERT(blob->locked_operation_in_progress == false); 6406 spdk_blob_resize(blob, 50, blob_op_complete, NULL); 6407 CU_ASSERT(blob->locked_operation_in_progress == true); 6408 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6409 CU_ASSERT(blob->locked_operation_in_progress == true); 6410 /* Deletion failure */ 6411 CU_ASSERT(g_bserrno == -EBUSY); 6412 poll_threads(); 6413 CU_ASSERT(blob->locked_operation_in_progress == false); 6414 /* Blob resized successfully */ 6415 CU_ASSERT(g_bserrno == 0); 6416 6417 /* Issue two consecutive blob syncs, neither should fail. 6418 * Force sync to actually occur by marking blob dirty each time. 6419 * Execution of sync should not be enough to complete the operation, 6420 * since disk I/O is required to complete it. */ 6421 g_bserrno = -1; 6422 6423 blob->state = SPDK_BLOB_STATE_DIRTY; 6424 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6425 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6426 6427 blob->state = SPDK_BLOB_STATE_DIRTY; 6428 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6429 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6430 6431 uint32_t completions = 0; 6432 while (completions < 2) { 6433 SPDK_CU_ASSERT_FATAL(poll_thread_times(0, 1)); 6434 if (g_bserrno == 0) { 6435 g_bserrno = -1; 6436 completions++; 6437 } 6438 /* Never should the g_bserrno be other than -1. 6439 * It would mean that either of syncs failed. 
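 * (Either the poll did not complete a sync, leaving g_bserrno at -1, or a completed
 * sync was already counted and reset to -1 above.)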
*/ 6440 SPDK_CU_ASSERT_FATAL(g_bserrno == -1); 6441 } 6442 6443 spdk_blob_close(blob, blob_op_complete, NULL); 6444 poll_threads(); 6445 CU_ASSERT(g_bserrno == 0); 6446 6447 spdk_blob_close(snapshot, blob_op_complete, NULL); 6448 poll_threads(); 6449 CU_ASSERT(g_bserrno == 0); 6450 6451 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6452 poll_threads(); 6453 CU_ASSERT(g_bserrno == 0); 6454 6455 spdk_bs_free_io_channel(channel); 6456 poll_threads(); 6457 } 6458 6459 static void 6460 blob_persist(void) 6461 { 6462 struct spdk_blob_store *bs = g_bs; 6463 struct spdk_blob_opts opts; 6464 struct spdk_blob *blob; 6465 spdk_blob_id blobid; 6466 struct spdk_io_channel *channel; 6467 char *xattr; 6468 size_t xattr_length; 6469 int rc; 6470 uint32_t page_count_clear, page_count_xattr; 6471 uint64_t poller_iterations; 6472 bool run_poller; 6473 6474 channel = spdk_bs_alloc_io_channel(bs); 6475 SPDK_CU_ASSERT_FATAL(channel != NULL); 6476 6477 ut_spdk_blob_opts_init(&opts); 6478 opts.num_clusters = 10; 6479 6480 blob = ut_blob_create_and_open(bs, &opts); 6481 blobid = spdk_blob_get_id(blob); 6482 6483 /* Save the amount of md pages used after creation of a blob. 6484 * This should be consistent after removing xattr. */ 6485 page_count_clear = spdk_bit_array_count_set(bs->used_md_pages); 6486 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6487 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6488 6489 /* Add xattr with maximum length of descriptor to exceed single metadata page. */ 6490 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 6491 strlen("large_xattr"); 6492 xattr = calloc(xattr_length, sizeof(char)); 6493 SPDK_CU_ASSERT_FATAL(xattr != NULL); 6494 6495 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6496 SPDK_CU_ASSERT_FATAL(rc == 0); 6497 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6498 poll_threads(); 6499 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6500 6501 /* Save the amount of md pages used after adding the large xattr */ 6502 page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages); 6503 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6504 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6505 6506 /* Add xattr to a blob and sync it. While sync is occuring, remove the xattr and sync again. 6507 * Interrupt the first sync after increasing number of poller iterations, until it succeeds. 6508 * Expectation is that after second sync completes no xattr is saved in metadata. */ 6509 poller_iterations = 1; 6510 run_poller = true; 6511 while (run_poller) { 6512 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 6513 SPDK_CU_ASSERT_FATAL(rc == 0); 6514 g_bserrno = -1; 6515 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6516 poll_thread_times(0, poller_iterations); 6517 if (g_bserrno == 0) { 6518 /* Poller iteration count was high enough for first sync to complete. 6519 * Verify that blob takes up enough of md_pages to store the xattr. 
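 * (The active and clean md page counts, and the number of bits set in used_md_pages,
 * should all match page_count_xattr captured earlier.)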
*/ 6520 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr); 6521 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr); 6522 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr); 6523 run_poller = false; 6524 } 6525 rc = spdk_blob_remove_xattr(blob, "large_xattr"); 6526 SPDK_CU_ASSERT_FATAL(rc == 0); 6527 spdk_blob_sync_md(blob, blob_op_complete, NULL); 6528 poll_threads(); 6529 SPDK_CU_ASSERT_FATAL(g_bserrno == 0); 6530 SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear); 6531 SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear); 6532 SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear); 6533 6534 /* Reload bs and re-open blob to verify that xattr was not persisted. */ 6535 spdk_blob_close(blob, blob_op_complete, NULL); 6536 poll_threads(); 6537 CU_ASSERT(g_bserrno == 0); 6538 6539 ut_bs_reload(&bs, NULL); 6540 6541 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6542 poll_threads(); 6543 CU_ASSERT(g_bserrno == 0); 6544 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 6545 blob = g_blob; 6546 6547 rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length); 6548 SPDK_CU_ASSERT_FATAL(rc == -ENOENT); 6549 6550 poller_iterations++; 6551 /* Stop at high iteration count to prevent infinite loop. 6552 * This value should be enough for first md sync to complete in any case. */ 6553 SPDK_CU_ASSERT_FATAL(poller_iterations < 50); 6554 } 6555 6556 free(xattr); 6557 6558 spdk_blob_close(blob, blob_op_complete, NULL); 6559 poll_threads(); 6560 CU_ASSERT(g_bserrno == 0); 6561 6562 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 6563 poll_threads(); 6564 CU_ASSERT(g_bserrno == 0); 6565 6566 spdk_bs_free_io_channel(channel); 6567 poll_threads(); 6568 } 6569 6570 static void 6571 suite_bs_setup(void) 6572 { 6573 struct spdk_bs_dev *dev; 6574 6575 dev = init_dev(); 6576 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6577 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 6578 poll_threads(); 6579 CU_ASSERT(g_bserrno == 0); 6580 CU_ASSERT(g_bs != NULL); 6581 } 6582 6583 static void 6584 suite_bs_cleanup(void) 6585 { 6586 spdk_bs_unload(g_bs, bs_op_complete, NULL); 6587 poll_threads(); 6588 CU_ASSERT(g_bserrno == 0); 6589 g_bs = NULL; 6590 memset(g_dev_buffer, 0, DEV_BUFFER_SIZE); 6591 } 6592 6593 static struct spdk_blob * 6594 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts) 6595 { 6596 struct spdk_blob *blob; 6597 struct spdk_blob_opts create_blob_opts; 6598 spdk_blob_id blobid; 6599 6600 if (blob_opts == NULL) { 6601 ut_spdk_blob_opts_init(&create_blob_opts); 6602 blob_opts = &create_blob_opts; 6603 } 6604 6605 spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL); 6606 poll_threads(); 6607 CU_ASSERT(g_bserrno == 0); 6608 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6609 blobid = g_blobid; 6610 g_blobid = -1; 6611 6612 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6613 poll_threads(); 6614 CU_ASSERT(g_bserrno == 0); 6615 CU_ASSERT(g_blob != NULL); 6616 blob = g_blob; 6617 6618 g_blob = NULL; 6619 g_bserrno = -1; 6620 6621 return blob; 6622 } 6623 6624 int main(int argc, char **argv) 6625 { 6626 CU_pSuite suite, suite_bs; 6627 unsigned int num_failures; 6628 6629 CU_set_error_action(CUEA_ABORT); 6630 CU_initialize_registry(); 6631 6632 suite = 
CU_add_suite("blob", NULL, NULL); 6633 suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL, 6634 suite_bs_setup, suite_bs_cleanup); 6635 6636 CU_ADD_TEST(suite, blob_init); 6637 CU_ADD_TEST(suite_bs, blob_open); 6638 CU_ADD_TEST(suite_bs, blob_create); 6639 CU_ADD_TEST(suite_bs, blob_create_internal); 6640 CU_ADD_TEST(suite, blob_thin_provision); 6641 CU_ADD_TEST(suite_bs, blob_snapshot); 6642 CU_ADD_TEST(suite_bs, blob_clone); 6643 CU_ADD_TEST(suite_bs, blob_inflate); 6644 CU_ADD_TEST(suite_bs, blob_delete); 6645 CU_ADD_TEST(suite_bs, blob_resize); 6646 CU_ADD_TEST(suite, blob_read_only); 6647 CU_ADD_TEST(suite_bs, channel_ops); 6648 CU_ADD_TEST(suite_bs, blob_super); 6649 CU_ADD_TEST(suite_bs, blob_write); 6650 CU_ADD_TEST(suite_bs, blob_read); 6651 CU_ADD_TEST(suite_bs, blob_rw_verify); 6652 CU_ADD_TEST(suite_bs, blob_rw_verify_iov); 6653 CU_ADD_TEST(suite_bs, blob_rw_verify_iov_nomem); 6654 CU_ADD_TEST(suite_bs, blob_rw_iov_read_only); 6655 CU_ADD_TEST(suite_bs, blob_unmap); 6656 CU_ADD_TEST(suite_bs, blob_iter); 6657 CU_ADD_TEST(suite_bs, blob_xattr); 6658 CU_ADD_TEST(suite, bs_load); 6659 CU_ADD_TEST(suite_bs, bs_load_pending_removal); 6660 CU_ADD_TEST(suite, bs_load_custom_cluster_size); 6661 CU_ADD_TEST(suite_bs, bs_unload); 6662 CU_ADD_TEST(suite, bs_cluster_sz); 6663 CU_ADD_TEST(suite_bs, bs_usable_clusters); 6664 CU_ADD_TEST(suite, bs_resize_md); 6665 CU_ADD_TEST(suite, bs_destroy); 6666 CU_ADD_TEST(suite, bs_type); 6667 CU_ADD_TEST(suite, bs_super_block); 6668 CU_ADD_TEST(suite, blob_serialize); 6669 CU_ADD_TEST(suite_bs, blob_crc); 6670 CU_ADD_TEST(suite, super_block_crc); 6671 CU_ADD_TEST(suite_bs, blob_dirty_shutdown); 6672 CU_ADD_TEST(suite_bs, blob_flags); 6673 CU_ADD_TEST(suite_bs, bs_version); 6674 CU_ADD_TEST(suite_bs, blob_set_xattrs); 6675 CU_ADD_TEST(suite_bs, blob_thin_prov_alloc); 6676 CU_ADD_TEST(suite_bs, blob_insert_cluster_msg); 6677 CU_ADD_TEST(suite_bs, blob_thin_prov_rw); 6678 CU_ADD_TEST(suite_bs, blob_thin_prov_rle); 6679 CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov); 6680 CU_ADD_TEST(suite, bs_load_iter); 6681 CU_ADD_TEST(suite_bs, blob_snapshot_rw); 6682 CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov); 6683 CU_ADD_TEST(suite, blob_relations); 6684 CU_ADD_TEST(suite, blob_relations2); 6685 CU_ADD_TEST(suite_bs, blob_delete_snapshot_power_failure); 6686 CU_ADD_TEST(suite_bs, blob_create_snapshot_power_failure); 6687 CU_ADD_TEST(suite_bs, blob_inflate_rw); 6688 CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io); 6689 CU_ADD_TEST(suite_bs, blob_operation_split_rw); 6690 CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov); 6691 CU_ADD_TEST(suite, blob_io_unit); 6692 CU_ADD_TEST(suite, blob_io_unit_compatiblity); 6693 CU_ADD_TEST(suite_bs, blob_simultaneous_operations); 6694 CU_ADD_TEST(suite_bs, blob_persist); 6695 6696 allocate_threads(2); 6697 set_thread(0); 6698 6699 g_dev_buffer = calloc(1, DEV_BUFFER_SIZE); 6700 6701 CU_basic_set_mode(CU_BRM_VERBOSE); 6702 g_use_extent_table = false; 6703 CU_basic_run_tests(); 6704 num_failures = CU_get_number_of_failures(); 6705 g_use_extent_table = true; 6706 CU_basic_run_tests(); 6707 num_failures += CU_get_number_of_failures(); 6708 CU_cleanup_registry(); 6709 6710 free(g_dev_buffer); 6711 6712 free_threads(); 6713 6714 return num_failures; 6715 } 6716