/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk/blob.h"
#include "spdk/string.h"

#include "common/lib/ut_multithread.c"
#include "../bs_dev_common.c"
#include "blob/blobstore.c"
#include "blob/request.c"
#include "blob/zeroes.c"
#include "blob/blob_bs_dev.c"

struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob, *g_blob2;
int g_bserrno, g_bserrno2;
struct spdk_xattr_names *g_names;
int g_done;
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;
bool g_use_extent_table = false;

struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t	version;
	uint32_t	length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	uint8_t		reserved[4036];
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");

static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
		struct spdk_blob_opts *blob_opts);
static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
static void suite_blob_setup(void);
static void suite_blob_cleanup(void);

static void
_get_xattr_value(void *arg, const char *name,
		 const void **value, size_t *value_len)
{
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == &g_ctx);

	/* Iterate over the number of array elements, not the byte size of the array */
	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
		if (!strcmp(name, g_xattr_names[i])) {
			*value_len = strlen(g_xattr_values[i]);
			*value = g_xattr_values[i];
			break;
		}
	}
}

static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value_len = 0;
	*value = NULL;
}

static int
_get_snapshots_count(struct spdk_blob_store *bs)
{
	struct spdk_blob_list *snapshot = NULL;
	int count = 0;

	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
		count += 1;
	}

	return count;
}

static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts, sizeof(*opts));
	opts->use_extent_table = g_use_extent_table;
}

static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}

static void
blob_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}

static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}

static void
blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
	if (g_blob == NULL) {
		g_blob = blob;
		g_bserrno = bserrno;
	} else {
		g_blob2 = blob;
		g_bserrno2 = bserrno;
	}
}

static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}

static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}

static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again.  It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference.  This
	 * should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again.  It should succeed.  This tests the case
	 * where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open file twice in succession.  This should return the same
	 * blob object.
	 */
	g_blob = NULL;
	g_blob2 = NULL;
	g_bserrno = -1;
	g_bserrno2 = -1;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bserrno2 == 0);
	CU_ASSERT(g_blob != NULL);
	CU_ASSERT(g_blob2 != NULL);
	CU_ASSERT(g_blob == g_blob2);

	g_bserrno = -1;
	spdk_blob_close(g_blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, g_blob);
}

static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}

static void
blob_create_zero_extent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;

	/* Create blob with default options (opts == NULL) */
	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options */
	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

/*
 * Create and delete one blob in a loop over and over again.  This helps ensure
 * that the internal bit masks tracking used clusters and md_pages are being
 * tracked correctly.
 */
static void
blob_create_loop(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	uint32_t i, loop_count;

	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
				  spdk_bit_pool_capacity(bs->used_clusters));

	for (i = 0; i < loop_count; i++) {
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 1;
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
}

static void
blob_create_fail(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);

	/* NULL callback */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = NULL;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = &g_ctx;

	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	SPDK_CU_ASSERT_FATAL(g_blob == NULL);

	ut_bs_reload(&bs, NULL);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	/* With thin provisioning and num_clusters set, no clusters are allocated
	 * up front. If the extent table is not used, nothing is allocated; if it
	 * is used, the related extent-page bookkeeping is allocated. */
	if (blob->extent_table_found == true) {
		CU_ASSERT(blob->active.extent_pages_array_size > 0);
		CU_ASSERT(blob->active.extent_pages != NULL);
	} else {
		CU_ASSERT(blob->active.extent_pages_array_size == 0);
		CU_ASSERT(blob->active.extent_pages == NULL);
	}

	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly.  This makes sure that when we load again
	 * and try to recover a valid used_cluster map, the blobstore will
	 * ignore clusters with index 0, since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
blob_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob *snapshot, *snapshot2;
	struct spdk_blob_bs_dev *blob_bs_dev;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts xattrs;
	spdk_blob_id blobid;
	spdk_blob_id snapshotid;
	spdk_blob_id snapshotid2;
	const void *value;
	size_t value_len;
	int rc;
	spdk_blob_id ids[2];
	size_t count;

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot from blob */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));

	/* Try to create snapshot from clone with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;
	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
	snapshotid2 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);

	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
	CU_ASSERT(snapshot->back_bs_dev == NULL);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);

	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot2);

	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshotid2);

	/* Try to create snapshot from snapshot */
	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);

	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
	ut_blob_close_and_delete(bs, blob);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
	ut_blob_close_and_delete(bs, snapshot2);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
}

static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that. */
	poll_thread_times(0, 5);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk, at this point the blobs already switched */
	CU_ASSERT(blob->active.clusters[0] == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);


	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from not read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}

static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflating a thin blob with no parent should make it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
	}

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_inflate(void)
{
	_blob_inflate(false);
	_blob_inflate(true);
}

static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}

static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}

static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}

static void
channel_ops(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_io_channel *channel;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster.  This allows
	 * us to make sure that the readv/write code correctly accounts for I/O
	 * that cross cluster boundaries.  Start by asserting that the allocated
	 * clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	/*
	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	buf = calloc(1, 256 * 4096);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static uint32_t
bs_channel_get_req_count(struct spdk_io_channel *_channel)
{
	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
	struct spdk_bs_request_set *set;
	uint32_t count = 0;

	TAILQ_FOREACH(set, &channel->reqs, link) {
		count++;
	}

	return count;
}

static void
blob_rw_verify_iov_nomem(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_write[10 * 4096];
	struct iovec iov_write[3];
	uint32_t req_count;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
	 * will get written to the first cluster, the last 4 to the second cluster.
	 */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	MOCK_SET(calloc, NULL);
	req_count = bs_channel_get_req_count(channel);
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
	MOCK_CLEAR(calloc);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[4096];
	uint8_t payload_write[4096];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev fails if read_only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}

static void
_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       uint8_t *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, read one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint8_t *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	uint64_t i;
	uint8_t *buf;
	uint64_t page_size = spdk_bs_get_page_size(blob->bs);

	/* To be sure that the operation is NOT split, write one page at a time */
	buf = payload;
	for (i = 0; i < length; i++) {
		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
		poll_threads();
		if (g_bserrno != 0) {
			/* Pass the error code up */
			break;
		}
		buf += page_size;
	}

	cb_fn(cb_arg, g_bserrno);
}

static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
*/ 1754 _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload, 1755 blob_op_complete, NULL); 1756 poll_threads(); 1757 CU_ASSERT(g_bserrno == 0); 1758 1759 memset(payload_read, 0xFF, payload_size); 1760 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1761 poll_threads(); 1762 poll_threads(); 1763 CU_ASSERT(g_bserrno == 0); 1764 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1765 1766 /* 2. Write test. */ 1767 spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload, 1768 blob_op_complete, NULL); 1769 poll_threads(); 1770 CU_ASSERT(g_bserrno == 0); 1771 1772 memset(payload_read, 0xFF, payload_size); 1773 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1774 poll_threads(); 1775 CU_ASSERT(g_bserrno == 0); 1776 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1777 1778 spdk_bs_free_io_channel(channel); 1779 poll_threads(); 1780 1781 g_blob = NULL; 1782 g_blobid = 0; 1783 1784 free(payload_read); 1785 free(payload_write); 1786 free(payload_pattern); 1787 1788 ut_blob_close_and_delete(bs, blob); 1789 } 1790 1791 static void 1792 blob_operation_split_rw_iov(void) 1793 { 1794 struct spdk_blob_store *bs = g_bs; 1795 struct spdk_blob *blob; 1796 struct spdk_io_channel *channel; 1797 struct spdk_blob_opts opts; 1798 uint64_t cluster_size; 1799 1800 uint64_t payload_size; 1801 uint8_t *payload_read; 1802 uint8_t *payload_write; 1803 uint8_t *payload_pattern; 1804 1805 uint64_t page_size; 1806 uint64_t pages_per_cluster; 1807 uint64_t pages_per_payload; 1808 1809 struct iovec iov_read[2]; 1810 struct iovec iov_write[2]; 1811 1812 uint64_t i, j; 1813 1814 cluster_size = spdk_bs_get_cluster_size(bs); 1815 page_size = spdk_bs_get_page_size(bs); 1816 pages_per_cluster = cluster_size / page_size; 1817 pages_per_payload = pages_per_cluster * 5; 1818 payload_size = cluster_size * 5; 1819 1820 payload_read = malloc(payload_size); 1821 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 1822 1823 payload_write = malloc(payload_size); 1824 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 1825 1826 payload_pattern = malloc(payload_size); 1827 SPDK_CU_ASSERT_FATAL(payload_pattern != NULL); 1828 1829 /* Prepare random pattern to write */ 1830 for (i = 0; i < pages_per_payload; i++) { 1831 for (j = 0; j < page_size / sizeof(uint64_t); j++) { 1832 uint64_t *tmp; 1833 1834 tmp = (uint64_t *)payload_pattern; 1835 tmp += ((page_size * i) / sizeof(uint64_t)) + j; 1836 *tmp = i + 1; 1837 } 1838 } 1839 1840 channel = spdk_bs_alloc_io_channel(bs); 1841 SPDK_CU_ASSERT_FATAL(channel != NULL); 1842 1843 /* Create blob */ 1844 ut_spdk_blob_opts_init(&opts); 1845 opts.thin_provision = false; 1846 opts.num_clusters = 5; 1847 1848 blob = ut_blob_create_and_open(bs, &opts); 1849 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 1850 1851 /* Initial read should return zeroes payload */ 1852 memset(payload_read, 0xFF, payload_size); 1853 iov_read[0].iov_base = payload_read; 1854 iov_read[0].iov_len = cluster_size * 3; 1855 iov_read[1].iov_base = payload_read + cluster_size * 3; 1856 iov_read[1].iov_len = cluster_size * 2; 1857 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1858 poll_threads(); 1859 CU_ASSERT(g_bserrno == 0); 1860 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 1861 1862 /* First of iovs fills whole blob except last page and second of iovs writes last page 1863 * with a pattern. 
*/ 1864 iov_write[0].iov_base = payload_pattern; 1865 iov_write[0].iov_len = payload_size - page_size; 1866 iov_write[1].iov_base = payload_pattern; 1867 iov_write[1].iov_len = page_size; 1868 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1869 poll_threads(); 1870 CU_ASSERT(g_bserrno == 0); 1871 1872 /* Read whole blob and check consistency */ 1873 memset(payload_read, 0xFF, payload_size); 1874 iov_read[0].iov_base = payload_read; 1875 iov_read[0].iov_len = cluster_size * 2; 1876 iov_read[1].iov_base = payload_read + cluster_size * 2; 1877 iov_read[1].iov_len = cluster_size * 3; 1878 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1879 poll_threads(); 1880 CU_ASSERT(g_bserrno == 0); 1881 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0); 1882 CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0); 1883 1884 /* First of iovs fills only first page and second of iovs writes whole blob except 1885 * first page with a pattern. */ 1886 iov_write[0].iov_base = payload_pattern; 1887 iov_write[0].iov_len = page_size; 1888 iov_write[1].iov_base = payload_pattern; 1889 iov_write[1].iov_len = payload_size - page_size; 1890 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1891 poll_threads(); 1892 CU_ASSERT(g_bserrno == 0); 1893 1894 /* Read whole blob and check consistency */ 1895 memset(payload_read, 0xFF, payload_size); 1896 iov_read[0].iov_base = payload_read; 1897 iov_read[0].iov_len = cluster_size * 4; 1898 iov_read[1].iov_base = payload_read + cluster_size * 4; 1899 iov_read[1].iov_len = cluster_size; 1900 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1901 poll_threads(); 1902 CU_ASSERT(g_bserrno == 0); 1903 CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0); 1904 CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0); 1905 1906 1907 /* Fill whole blob with a pattern (5 clusters) */ 1908 1909 /* 1. Read test. */ 1910 _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload, 1911 blob_op_complete, NULL); 1912 poll_threads(); 1913 CU_ASSERT(g_bserrno == 0); 1914 1915 memset(payload_read, 0xFF, payload_size); 1916 iov_read[0].iov_base = payload_read; 1917 iov_read[0].iov_len = cluster_size; 1918 iov_read[1].iov_base = payload_read + cluster_size; 1919 iov_read[1].iov_len = cluster_size * 4; 1920 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1921 poll_threads(); 1922 CU_ASSERT(g_bserrno == 0); 1923 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1924 1925 /* 2. Write test. 
*/ 1926 iov_write[0].iov_base = payload_read; 1927 iov_write[0].iov_len = cluster_size * 2; 1928 iov_write[1].iov_base = payload_read + cluster_size * 2; 1929 iov_write[1].iov_len = cluster_size * 3; 1930 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1931 poll_threads(); 1932 CU_ASSERT(g_bserrno == 0); 1933 1934 memset(payload_read, 0xFF, payload_size); 1935 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1936 poll_threads(); 1937 CU_ASSERT(g_bserrno == 0); 1938 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1939 1940 spdk_bs_free_io_channel(channel); 1941 poll_threads(); 1942 1943 g_blob = NULL; 1944 g_blobid = 0; 1945 1946 free(payload_read); 1947 free(payload_write); 1948 free(payload_pattern); 1949 1950 ut_blob_close_and_delete(bs, blob); 1951 } 1952 1953 static void 1954 blob_unmap(void) 1955 { 1956 struct spdk_blob_store *bs = g_bs; 1957 struct spdk_blob *blob; 1958 struct spdk_io_channel *channel; 1959 struct spdk_blob_opts opts; 1960 uint8_t payload[4096]; 1961 int i; 1962 1963 channel = spdk_bs_alloc_io_channel(bs); 1964 CU_ASSERT(channel != NULL); 1965 1966 ut_spdk_blob_opts_init(&opts); 1967 opts.num_clusters = 10; 1968 1969 blob = ut_blob_create_and_open(bs, &opts); 1970 1971 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1972 poll_threads(); 1973 CU_ASSERT(g_bserrno == 0); 1974 1975 memset(payload, 0, sizeof(payload)); 1976 payload[0] = 0xFF; 1977 1978 /* 1979 * Set first byte of every cluster to 0xFF. 1980 * First cluster on device is reserved so let's start from cluster number 1 1981 */ 1982 for (i = 1; i < 11; i++) { 1983 g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF; 1984 } 1985 1986 /* Confirm writes */ 1987 for (i = 0; i < 10; i++) { 1988 payload[0] = 0; 1989 spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1, 1990 blob_op_complete, NULL); 1991 poll_threads(); 1992 CU_ASSERT(g_bserrno == 0); 1993 CU_ASSERT(payload[0] == 0xFF); 1994 } 1995 1996 /* Mark some clusters as unallocated */ 1997 blob->active.clusters[1] = 0; 1998 blob->active.clusters[2] = 0; 1999 blob->active.clusters[3] = 0; 2000 blob->active.clusters[6] = 0; 2001 blob->active.clusters[8] = 0; 2002 2003 /* Unmap clusters by resizing to 0 */ 2004 spdk_blob_resize(blob, 0, blob_op_complete, NULL); 2005 poll_threads(); 2006 CU_ASSERT(g_bserrno == 0); 2007 2008 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2009 poll_threads(); 2010 CU_ASSERT(g_bserrno == 0); 2011 2012 /* Confirm that only 'allocated' clusters were unmapped */ 2013 for (i = 1; i < 11; i++) { 2014 switch (i) { 2015 case 2: 2016 case 3: 2017 case 4: 2018 case 7: 2019 case 9: 2020 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF); 2021 break; 2022 default: 2023 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0); 2024 break; 2025 } 2026 } 2027 2028 spdk_bs_free_io_channel(channel); 2029 poll_threads(); 2030 2031 ut_blob_close_and_delete(bs, blob); 2032 } 2033 2034 static void 2035 blob_iter(void) 2036 { 2037 struct spdk_blob_store *bs = g_bs; 2038 struct spdk_blob *blob; 2039 spdk_blob_id blobid; 2040 struct spdk_blob_opts blob_opts; 2041 2042 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 2043 poll_threads(); 2044 CU_ASSERT(g_blob == NULL); 2045 CU_ASSERT(g_bserrno == -ENOENT); 2046 2047 ut_spdk_blob_opts_init(&blob_opts); 2048 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2049 poll_threads(); 2050 CU_ASSERT(g_bserrno == 0); 
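/*
 * Sketch (comment only) of the iteration pattern blob_iter() exercises
 * below: walk every blob by chaining spdk_bs_iter_next() until the
 * callback reports -ENOENT. The synchronous-looking shape assumes the
 * poll_threads()-based single-reactor harness used throughout this file.
 *
 *   spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
 *   poll_threads();
 *   while (g_bserrno == 0 && g_blob != NULL) {
 *           // inspect g_blob here, then advance the iterator
 *           spdk_bs_iter_next(bs, g_blob, blob_op_with_handle_complete, NULL);
 *           poll_threads();
 *   }
 *   // g_bserrno == -ENOENT once all blobs have been visited
 */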
2051 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2052 blobid = g_blobid; 2053 2054 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 2055 poll_threads(); 2056 CU_ASSERT(g_blob != NULL); 2057 CU_ASSERT(g_bserrno == 0); 2058 blob = g_blob; 2059 CU_ASSERT(spdk_blob_get_id(blob) == blobid); 2060 2061 spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL); 2062 poll_threads(); 2063 CU_ASSERT(g_blob == NULL); 2064 CU_ASSERT(g_bserrno == -ENOENT); 2065 } 2066 2067 static void 2068 blob_xattr(void) 2069 { 2070 struct spdk_blob_store *bs = g_bs; 2071 struct spdk_blob *blob = g_blob; 2072 spdk_blob_id blobid = spdk_blob_get_id(blob); 2073 uint64_t length; 2074 int rc; 2075 const char *name1, *name2; 2076 const void *value; 2077 size_t value_len; 2078 struct spdk_xattr_names *names; 2079 2080 /* Test that set_xattr fails if md_ro flag is set. */ 2081 blob->md_ro = true; 2082 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2083 CU_ASSERT(rc == -EPERM); 2084 2085 blob->md_ro = false; 2086 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2087 CU_ASSERT(rc == 0); 2088 2089 length = 2345; 2090 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2091 CU_ASSERT(rc == 0); 2092 2093 /* Overwrite "length" xattr. */ 2094 length = 3456; 2095 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2096 CU_ASSERT(rc == 0); 2097 2098 /* get_xattr should still work even if md_ro flag is set. */ 2099 value = NULL; 2100 blob->md_ro = true; 2101 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2102 CU_ASSERT(rc == 0); 2103 SPDK_CU_ASSERT_FATAL(value != NULL); 2104 CU_ASSERT(*(uint64_t *)value == length); 2105 CU_ASSERT(value_len == 8); 2106 blob->md_ro = false; 2107 2108 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2109 CU_ASSERT(rc == -ENOENT); 2110 2111 names = NULL; 2112 rc = spdk_blob_get_xattr_names(blob, &names); 2113 CU_ASSERT(rc == 0); 2114 SPDK_CU_ASSERT_FATAL(names != NULL); 2115 CU_ASSERT(spdk_xattr_names_get_count(names) == 2); 2116 name1 = spdk_xattr_names_get_name(names, 0); 2117 SPDK_CU_ASSERT_FATAL(name1 != NULL); 2118 CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length")); 2119 name2 = spdk_xattr_names_get_name(names, 1); 2120 SPDK_CU_ASSERT_FATAL(name2 != NULL); 2121 CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length")); 2122 CU_ASSERT(strcmp(name1, name2)); 2123 spdk_xattr_names_free(names); 2124 2125 /* Confirm that remove_xattr fails if md_ro is set to true. 
*/ 2126 blob->md_ro = true; 2127 rc = spdk_blob_remove_xattr(blob, "name"); 2128 CU_ASSERT(rc == -EPERM); 2129 2130 blob->md_ro = false; 2131 rc = spdk_blob_remove_xattr(blob, "name"); 2132 CU_ASSERT(rc == 0); 2133 2134 rc = spdk_blob_remove_xattr(blob, "foobar"); 2135 CU_ASSERT(rc == -ENOENT); 2136 2137 /* Set internal xattr */ 2138 length = 7898; 2139 rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true); 2140 CU_ASSERT(rc == 0); 2141 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2142 CU_ASSERT(rc == 0); 2143 CU_ASSERT(*(uint64_t *)value == length); 2144 /* try to get public xattr with same name */ 2145 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2146 CU_ASSERT(rc != 0); 2147 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false); 2148 CU_ASSERT(rc != 0); 2149 /* Check if SPDK_BLOB_INTERNAL_XATTR is set */ 2150 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 2151 SPDK_BLOB_INTERNAL_XATTR); 2152 2153 spdk_blob_close(blob, blob_op_complete, NULL); 2154 poll_threads(); 2155 2156 /* Check if xattrs are persisted */ 2157 ut_bs_reload(&bs, NULL); 2158 2159 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2160 poll_threads(); 2161 CU_ASSERT(g_bserrno == 0); 2162 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2163 blob = g_blob; 2164 2165 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2166 CU_ASSERT(rc == 0); 2167 CU_ASSERT(*(uint64_t *)value == length); 2168 2169 /* try to get internal xattr trough public call */ 2170 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2171 CU_ASSERT(rc != 0); 2172 2173 rc = blob_remove_xattr(blob, "internal", true); 2174 CU_ASSERT(rc == 0); 2175 2176 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0); 2177 } 2178 2179 static void 2180 blob_parse_md(void) 2181 { 2182 struct spdk_blob_store *bs = g_bs; 2183 struct spdk_blob *blob; 2184 int rc; 2185 uint32_t used_pages; 2186 size_t xattr_length; 2187 char *xattr; 2188 2189 used_pages = spdk_bit_array_count_set(bs->used_md_pages); 2190 blob = ut_blob_create_and_open(bs, NULL); 2191 2192 /* Create large extent to force more than 1 page of metadata. */ 2193 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 2194 strlen("large_xattr"); 2195 xattr = calloc(xattr_length, sizeof(char)); 2196 SPDK_CU_ASSERT_FATAL(xattr != NULL); 2197 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 2198 free(xattr); 2199 SPDK_CU_ASSERT_FATAL(rc == 0); 2200 2201 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2202 poll_threads(); 2203 2204 /* Delete the blob and verify that number of pages returned to before its creation. 
*/ 2205 SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages)); 2206 ut_blob_close_and_delete(bs, blob); 2207 SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages)); 2208 } 2209 2210 static void 2211 bs_load(void) 2212 { 2213 struct spdk_blob_store *bs; 2214 struct spdk_bs_dev *dev; 2215 spdk_blob_id blobid; 2216 struct spdk_blob *blob; 2217 struct spdk_bs_super_block *super_block; 2218 uint64_t length; 2219 int rc; 2220 const void *value; 2221 size_t value_len; 2222 struct spdk_bs_opts opts; 2223 struct spdk_blob_opts blob_opts; 2224 2225 dev = init_dev(); 2226 spdk_bs_opts_init(&opts, sizeof(opts)); 2227 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2228 2229 /* Initialize a new blob store */ 2230 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2231 poll_threads(); 2232 CU_ASSERT(g_bserrno == 0); 2233 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2234 bs = g_bs; 2235 2236 /* Try to open a blobid that does not exist */ 2237 spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL); 2238 poll_threads(); 2239 CU_ASSERT(g_bserrno == -ENOENT); 2240 CU_ASSERT(g_blob == NULL); 2241 2242 /* Create a blob */ 2243 blob = ut_blob_create_and_open(bs, NULL); 2244 blobid = spdk_blob_get_id(blob); 2245 2246 /* Try again to open valid blob but without the upper bit set */ 2247 spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL); 2248 poll_threads(); 2249 CU_ASSERT(g_bserrno == -ENOENT); 2250 CU_ASSERT(g_blob == NULL); 2251 2252 /* Set some xattrs */ 2253 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2254 CU_ASSERT(rc == 0); 2255 2256 length = 2345; 2257 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2258 CU_ASSERT(rc == 0); 2259 2260 /* Resize the blob */ 2261 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2262 poll_threads(); 2263 CU_ASSERT(g_bserrno == 0); 2264 2265 spdk_blob_close(blob, blob_op_complete, NULL); 2266 poll_threads(); 2267 CU_ASSERT(g_bserrno == 0); 2268 blob = NULL; 2269 g_blob = NULL; 2270 g_blobid = SPDK_BLOBID_INVALID; 2271 2272 /* Unload the blob store */ 2273 spdk_bs_unload(bs, bs_op_complete, NULL); 2274 poll_threads(); 2275 CU_ASSERT(g_bserrno == 0); 2276 g_bs = NULL; 2277 g_blob = NULL; 2278 g_blobid = 0; 2279 2280 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2281 CU_ASSERT(super_block->clean == 1); 2282 2283 /* Load should fail for device with an unsupported blocklen */ 2284 dev = init_dev(); 2285 dev->blocklen = SPDK_BS_PAGE_SIZE * 2; 2286 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2287 poll_threads(); 2288 CU_ASSERT(g_bserrno == -EINVAL); 2289 2290 /* Load should when max_md_ops is set to zero */ 2291 dev = init_dev(); 2292 spdk_bs_opts_init(&opts, sizeof(opts)); 2293 opts.max_md_ops = 0; 2294 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2295 poll_threads(); 2296 CU_ASSERT(g_bserrno == -EINVAL); 2297 2298 /* Load should when max_channel_ops is set to zero */ 2299 dev = init_dev(); 2300 spdk_bs_opts_init(&opts, sizeof(opts)); 2301 opts.max_channel_ops = 0; 2302 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2303 poll_threads(); 2304 CU_ASSERT(g_bserrno == -EINVAL); 2305 2306 /* Load an existing blob store */ 2307 dev = init_dev(); 2308 spdk_bs_opts_init(&opts, sizeof(opts)); 2309 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2310 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2311 poll_threads(); 2312 CU_ASSERT(g_bserrno == 0); 
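/*
 * Hedged note on the super block "clean" flag that the surrounding
 * assertions keep probing: it is only expected to read 1 after an orderly
 * spdk_bs_unload(), and to drop back to 0 once the loaded blobstore starts
 * writing metadata again. A minimal sketch of the pattern used here:
 *
 *   super_block = (struct spdk_bs_super_block *)g_dev_buffer;
 *   CU_ASSERT(super_block->clean == 1);   // right after a clean unload/load
 *   // ... create or modify a blob ...
 *   CU_ASSERT(super_block->clean == 0);   // store is marked dirty again
 */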
2313 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2314 bs = g_bs; 2315 2316 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2317 CU_ASSERT(super_block->clean == 1); 2318 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2319 2320 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2321 poll_threads(); 2322 CU_ASSERT(g_bserrno == 0); 2323 CU_ASSERT(g_blob != NULL); 2324 blob = g_blob; 2325 2326 /* Verify that blobstore is marked dirty after first metadata sync */ 2327 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2328 CU_ASSERT(super_block->clean == 1); 2329 2330 /* Get the xattrs */ 2331 value = NULL; 2332 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2333 CU_ASSERT(rc == 0); 2334 SPDK_CU_ASSERT_FATAL(value != NULL); 2335 CU_ASSERT(*(uint64_t *)value == length); 2336 CU_ASSERT(value_len == 8); 2337 2338 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2339 CU_ASSERT(rc == -ENOENT); 2340 2341 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 2342 2343 spdk_blob_close(blob, blob_op_complete, NULL); 2344 poll_threads(); 2345 CU_ASSERT(g_bserrno == 0); 2346 blob = NULL; 2347 g_blob = NULL; 2348 2349 spdk_bs_unload(bs, bs_op_complete, NULL); 2350 poll_threads(); 2351 CU_ASSERT(g_bserrno == 0); 2352 g_bs = NULL; 2353 2354 /* Load should fail: bdev size < saved size */ 2355 dev = init_dev(); 2356 dev->blockcnt /= 2; 2357 2358 spdk_bs_opts_init(&opts, sizeof(opts)); 2359 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2360 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2361 poll_threads(); 2362 2363 CU_ASSERT(g_bserrno == -EILSEQ); 2364 2365 /* Load should succeed: bdev size > saved size */ 2366 dev = init_dev(); 2367 dev->blockcnt *= 4; 2368 2369 spdk_bs_opts_init(&opts, sizeof(opts)); 2370 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2371 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2372 poll_threads(); 2373 CU_ASSERT(g_bserrno == 0); 2374 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2375 bs = g_bs; 2376 2377 CU_ASSERT(g_bserrno == 0); 2378 spdk_bs_unload(bs, bs_op_complete, NULL); 2379 poll_threads(); 2380 2381 2382 /* Test compatibility mode */ 2383 2384 dev = init_dev(); 2385 super_block->size = 0; 2386 super_block->crc = blob_md_page_calc_crc(super_block); 2387 2388 spdk_bs_opts_init(&opts, sizeof(opts)); 2389 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2390 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2391 poll_threads(); 2392 CU_ASSERT(g_bserrno == 0); 2393 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2394 bs = g_bs; 2395 2396 /* Create a blob */ 2397 ut_spdk_blob_opts_init(&blob_opts); 2398 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2399 poll_threads(); 2400 CU_ASSERT(g_bserrno == 0); 2401 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2402 2403 /* Blobstore should update number of blocks in super_block */ 2404 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2405 CU_ASSERT(super_block->clean == 0); 2406 2407 spdk_bs_unload(bs, bs_op_complete, NULL); 2408 poll_threads(); 2409 CU_ASSERT(g_bserrno == 0); 2410 CU_ASSERT(super_block->clean == 1); 2411 g_bs = NULL; 2412 2413 } 2414 2415 static void 2416 bs_load_pending_removal(void) 2417 { 2418 struct spdk_blob_store *bs = g_bs; 2419 struct spdk_blob_opts opts; 2420 struct spdk_blob *blob, *snapshot; 2421 spdk_blob_id blobid, snapshotid; 2422 const void *value; 2423 size_t value_len; 2424 int rc; 2425 2426 /* Create blob */ 2427 
ut_spdk_blob_opts_init(&opts); 2428 opts.num_clusters = 10; 2429 2430 blob = ut_blob_create_and_open(bs, &opts); 2431 blobid = spdk_blob_get_id(blob); 2432 2433 /* Create snapshot */ 2434 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 2435 poll_threads(); 2436 CU_ASSERT(g_bserrno == 0); 2437 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2438 snapshotid = g_blobid; 2439 2440 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2441 poll_threads(); 2442 CU_ASSERT(g_bserrno == 0); 2443 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2444 snapshot = g_blob; 2445 2446 /* Set SNAPSHOT_PENDING_REMOVAL xattr */ 2447 snapshot->md_ro = false; 2448 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2449 CU_ASSERT(rc == 0); 2450 snapshot->md_ro = true; 2451 2452 spdk_blob_close(snapshot, blob_op_complete, NULL); 2453 poll_threads(); 2454 CU_ASSERT(g_bserrno == 0); 2455 2456 spdk_blob_close(blob, blob_op_complete, NULL); 2457 poll_threads(); 2458 CU_ASSERT(g_bserrno == 0); 2459 2460 /* Reload blobstore */ 2461 ut_bs_reload(&bs, NULL); 2462 2463 /* Snapshot should not be removed as blob is still pointing to it */ 2464 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2465 poll_threads(); 2466 CU_ASSERT(g_bserrno == 0); 2467 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2468 snapshot = g_blob; 2469 2470 /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ 2471 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 2472 CU_ASSERT(rc != 0); 2473 2474 /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ 2475 snapshot->md_ro = false; 2476 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2477 CU_ASSERT(rc == 0); 2478 snapshot->md_ro = true; 2479 2480 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2481 poll_threads(); 2482 CU_ASSERT(g_bserrno == 0); 2483 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2484 blob = g_blob; 2485 2486 /* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ 2487 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 2488 2489 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2490 poll_threads(); 2491 CU_ASSERT(g_bserrno == 0); 2492 2493 spdk_blob_close(snapshot, blob_op_complete, NULL); 2494 poll_threads(); 2495 CU_ASSERT(g_bserrno == 0); 2496 2497 spdk_blob_close(blob, blob_op_complete, NULL); 2498 poll_threads(); 2499 CU_ASSERT(g_bserrno == 0); 2500 2501 /* Reload blobstore */ 2502 ut_bs_reload(&bs, NULL); 2503 2504 /* Snapshot should be removed as blob is not pointing to it anymore */ 2505 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2506 poll_threads(); 2507 CU_ASSERT(g_bserrno != 0); 2508 } 2509 2510 static void 2511 bs_load_custom_cluster_size(void) 2512 { 2513 struct spdk_blob_store *bs; 2514 struct spdk_bs_dev *dev; 2515 struct spdk_bs_super_block *super_block; 2516 struct spdk_bs_opts opts; 2517 uint32_t custom_cluster_size = 4194304; /* 4MiB */ 2518 uint32_t cluster_sz; 2519 uint64_t total_clusters; 2520 2521 dev = init_dev(); 2522 spdk_bs_opts_init(&opts, sizeof(opts)); 2523 opts.cluster_sz = custom_cluster_size; 2524 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2525 2526 /* Initialize a new blob store */ 2527 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2528 poll_threads(); 2529 CU_ASSERT(g_bserrno == 0); 2530 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2531 bs = g_bs; 2532 cluster_sz = bs->cluster_sz; 2533 total_clusters = 
bs->total_clusters; 2534 2535 /* Unload the blob store */ 2536 spdk_bs_unload(bs, bs_op_complete, NULL); 2537 poll_threads(); 2538 CU_ASSERT(g_bserrno == 0); 2539 g_bs = NULL; 2540 g_blob = NULL; 2541 g_blobid = 0; 2542 2543 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2544 CU_ASSERT(super_block->clean == 1); 2545 2546 /* Load an existing blob store */ 2547 dev = init_dev(); 2548 spdk_bs_opts_init(&opts, sizeof(opts)); 2549 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2550 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2551 poll_threads(); 2552 CU_ASSERT(g_bserrno == 0); 2553 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2554 bs = g_bs; 2555 /* Compare cluster size and number to one after initialization */ 2556 CU_ASSERT(cluster_sz == bs->cluster_sz); 2557 CU_ASSERT(total_clusters == bs->total_clusters); 2558 2559 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2560 CU_ASSERT(super_block->clean == 1); 2561 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2562 2563 spdk_bs_unload(bs, bs_op_complete, NULL); 2564 poll_threads(); 2565 CU_ASSERT(g_bserrno == 0); 2566 CU_ASSERT(super_block->clean == 1); 2567 g_bs = NULL; 2568 } 2569 2570 static void 2571 bs_type(void) 2572 { 2573 struct spdk_blob_store *bs; 2574 struct spdk_bs_dev *dev; 2575 struct spdk_bs_opts opts; 2576 2577 dev = init_dev(); 2578 spdk_bs_opts_init(&opts, sizeof(opts)); 2579 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2580 2581 /* Initialize a new blob store */ 2582 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2583 poll_threads(); 2584 CU_ASSERT(g_bserrno == 0); 2585 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2586 bs = g_bs; 2587 2588 /* Unload the blob store */ 2589 spdk_bs_unload(bs, bs_op_complete, NULL); 2590 poll_threads(); 2591 CU_ASSERT(g_bserrno == 0); 2592 g_bs = NULL; 2593 g_blob = NULL; 2594 g_blobid = 0; 2595 2596 /* Load non existing blobstore type */ 2597 dev = init_dev(); 2598 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2599 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2600 poll_threads(); 2601 CU_ASSERT(g_bserrno != 0); 2602 2603 /* Load with empty blobstore type */ 2604 dev = init_dev(); 2605 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2606 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2607 poll_threads(); 2608 CU_ASSERT(g_bserrno == 0); 2609 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2610 bs = g_bs; 2611 2612 spdk_bs_unload(bs, bs_op_complete, NULL); 2613 poll_threads(); 2614 CU_ASSERT(g_bserrno == 0); 2615 g_bs = NULL; 2616 2617 /* Initialize a new blob store with empty bstype */ 2618 dev = init_dev(); 2619 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2620 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2621 poll_threads(); 2622 CU_ASSERT(g_bserrno == 0); 2623 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2624 bs = g_bs; 2625 2626 spdk_bs_unload(bs, bs_op_complete, NULL); 2627 poll_threads(); 2628 CU_ASSERT(g_bserrno == 0); 2629 g_bs = NULL; 2630 2631 /* Load non existing blobstore type */ 2632 dev = init_dev(); 2633 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2634 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2635 poll_threads(); 2636 CU_ASSERT(g_bserrno != 0); 2637 2638 /* Load with empty blobstore type */ 2639 dev = init_dev(); 2640 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2641 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2642 poll_threads(); 2643 
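/*
 * Sketch of the bstype matching convention bs_type() is exercising
 * (comment only): the type requested in spdk_bs_opts at load time is
 * compared against the one recorded in the super block, and an all-zero
 * bstype behaves as a wildcard.
 *
 *   snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); // must match on-disk type
 *   memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));            // loads regardless of recorded type
 */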
CU_ASSERT(g_bserrno == 0); 2644 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2645 bs = g_bs; 2646 2647 spdk_bs_unload(bs, bs_op_complete, NULL); 2648 poll_threads(); 2649 CU_ASSERT(g_bserrno == 0); 2650 g_bs = NULL; 2651 } 2652 2653 static void 2654 bs_super_block(void) 2655 { 2656 struct spdk_blob_store *bs; 2657 struct spdk_bs_dev *dev; 2658 struct spdk_bs_super_block *super_block; 2659 struct spdk_bs_opts opts; 2660 struct spdk_bs_super_block_ver1 super_block_v1; 2661 2662 dev = init_dev(); 2663 spdk_bs_opts_init(&opts, sizeof(opts)); 2664 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2665 2666 /* Initialize a new blob store */ 2667 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2668 poll_threads(); 2669 CU_ASSERT(g_bserrno == 0); 2670 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2671 bs = g_bs; 2672 2673 /* Unload the blob store */ 2674 spdk_bs_unload(bs, bs_op_complete, NULL); 2675 poll_threads(); 2676 CU_ASSERT(g_bserrno == 0); 2677 g_bs = NULL; 2678 g_blob = NULL; 2679 g_blobid = 0; 2680 2681 /* Load an existing blob store with version newer than supported */ 2682 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2683 super_block->version++; 2684 2685 dev = init_dev(); 2686 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2687 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2688 poll_threads(); 2689 CU_ASSERT(g_bserrno != 0); 2690 2691 /* Create a new blob store with super block version 1 */ 2692 dev = init_dev(); 2693 super_block_v1.version = 1; 2694 memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); 2695 super_block_v1.length = 0x1000; 2696 super_block_v1.clean = 1; 2697 super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; 2698 super_block_v1.cluster_size = 0x100000; 2699 super_block_v1.used_page_mask_start = 0x01; 2700 super_block_v1.used_page_mask_len = 0x01; 2701 super_block_v1.used_cluster_mask_start = 0x02; 2702 super_block_v1.used_cluster_mask_len = 0x01; 2703 super_block_v1.md_start = 0x03; 2704 super_block_v1.md_len = 0x40; 2705 memset(super_block_v1.reserved, 0, 4036); 2706 super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1); 2707 memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); 2708 2709 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2710 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2711 poll_threads(); 2712 CU_ASSERT(g_bserrno == 0); 2713 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2714 bs = g_bs; 2715 2716 spdk_bs_unload(bs, bs_op_complete, NULL); 2717 poll_threads(); 2718 CU_ASSERT(g_bserrno == 0); 2719 g_bs = NULL; 2720 } 2721 2722 static void 2723 bs_test_recover_cluster_count(void) 2724 { 2725 struct spdk_blob_store *bs; 2726 struct spdk_bs_dev *dev; 2727 struct spdk_bs_super_block super_block; 2728 struct spdk_bs_opts opts; 2729 2730 dev = init_dev(); 2731 spdk_bs_opts_init(&opts, sizeof(opts)); 2732 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2733 2734 super_block.version = 3; 2735 memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature)); 2736 super_block.length = 0x1000; 2737 super_block.clean = 0; 2738 super_block.super_blob = 0xFFFFFFFFFFFFFFFF; 2739 super_block.cluster_size = 4096; 2740 super_block.used_page_mask_start = 0x01; 2741 super_block.used_page_mask_len = 0x01; 2742 super_block.used_cluster_mask_start = 0x02; 2743 super_block.used_cluster_mask_len = 0x01; 2744 super_block.used_blobid_mask_start = 0x03; 2745 super_block.used_blobid_mask_len = 0x01; 2746 super_block.md_start = 0x04; 
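/*
 * Worked example (a sketch based on the values fabricated in this block):
 * with cluster_size == 4096 the cluster size equals the metadata page
 * size, so every page reserved for metadata consumes a whole cluster.
 * After the dirty super block below is loaded and recovered, the expected
 * accounting is
 *
 *   reserved clusters = md_start + md_len = 0x04 + 0x40 = 68
 *   num_free_clusters = total_clusters - 68
 *
 * which is exactly what the assertion at the end of this test checks.
 */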
2747 super_block.md_len = 0x40; 2748 memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype)); 2749 super_block.size = dev->blockcnt * dev->blocklen; 2750 super_block.io_unit_size = 0x1000; 2751 memset(super_block.reserved, 0, 4000); 2752 super_block.crc = blob_md_page_calc_crc(&super_block); 2753 memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block)); 2754 2755 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2756 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2757 poll_threads(); 2758 CU_ASSERT(g_bserrno == 0); 2759 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2760 bs = g_bs; 2761 CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start + 2762 super_block.md_len)); 2763 2764 spdk_bs_unload(bs, bs_op_complete, NULL); 2765 poll_threads(); 2766 CU_ASSERT(g_bserrno == 0); 2767 g_bs = NULL; 2768 } 2769 2770 /* 2771 * Create a blobstore and then unload it. 2772 */ 2773 static void 2774 bs_unload(void) 2775 { 2776 struct spdk_blob_store *bs = g_bs; 2777 struct spdk_blob *blob; 2778 2779 /* Create a blob and open it. */ 2780 blob = ut_blob_create_and_open(bs, NULL); 2781 2782 /* Try to unload blobstore, should fail with open blob */ 2783 g_bserrno = -1; 2784 spdk_bs_unload(bs, bs_op_complete, NULL); 2785 poll_threads(); 2786 CU_ASSERT(g_bserrno == -EBUSY); 2787 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2788 2789 /* Close the blob, then successfully unload blobstore */ 2790 g_bserrno = -1; 2791 spdk_blob_close(blob, blob_op_complete, NULL); 2792 poll_threads(); 2793 CU_ASSERT(g_bserrno == 0); 2794 } 2795 2796 /* 2797 * Create a blobstore with a cluster size different than the default, and ensure it is 2798 * persisted. 2799 */ 2800 static void 2801 bs_cluster_sz(void) 2802 { 2803 struct spdk_blob_store *bs; 2804 struct spdk_bs_dev *dev; 2805 struct spdk_bs_opts opts; 2806 uint32_t cluster_sz; 2807 2808 /* Set cluster size to zero */ 2809 dev = init_dev(); 2810 spdk_bs_opts_init(&opts, sizeof(opts)); 2811 opts.cluster_sz = 0; 2812 2813 /* Initialize a new blob store */ 2814 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2815 poll_threads(); 2816 CU_ASSERT(g_bserrno == -EINVAL); 2817 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2818 2819 /* 2820 * Set cluster size to blobstore page size, 2821 * to work it is required to be at least twice the blobstore page size. 2822 */ 2823 dev = init_dev(); 2824 spdk_bs_opts_init(&opts, sizeof(opts)); 2825 opts.cluster_sz = SPDK_BS_PAGE_SIZE; 2826 2827 /* Initialize a new blob store */ 2828 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2829 poll_threads(); 2830 CU_ASSERT(g_bserrno == -ENOMEM); 2831 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2832 2833 /* 2834 * Set cluster size to lower than page size, 2835 * to work it is required to be at least twice the blobstore page size. 
2836 */ 2837 dev = init_dev(); 2838 spdk_bs_opts_init(&opts, sizeof(opts)); 2839 opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; 2840 2841 /* Initialize a new blob store */ 2842 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2843 poll_threads(); 2844 CU_ASSERT(g_bserrno == -EINVAL); 2845 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2846 2847 /* Set cluster size to twice the default */ 2848 dev = init_dev(); 2849 spdk_bs_opts_init(&opts, sizeof(opts)); 2850 opts.cluster_sz *= 2; 2851 cluster_sz = opts.cluster_sz; 2852 2853 /* Initialize a new blob store */ 2854 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2855 poll_threads(); 2856 CU_ASSERT(g_bserrno == 0); 2857 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2858 bs = g_bs; 2859 2860 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2861 2862 ut_bs_reload(&bs, &opts); 2863 2864 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2865 2866 spdk_bs_unload(bs, bs_op_complete, NULL); 2867 poll_threads(); 2868 CU_ASSERT(g_bserrno == 0); 2869 g_bs = NULL; 2870 } 2871 2872 /* 2873 * Create a blobstore, reload it and ensure total usable cluster count 2874 * stays the same. 2875 */ 2876 static void 2877 bs_usable_clusters(void) 2878 { 2879 struct spdk_blob_store *bs = g_bs; 2880 struct spdk_blob *blob; 2881 uint32_t clusters; 2882 int i; 2883 2884 2885 clusters = spdk_bs_total_data_cluster_count(bs); 2886 2887 ut_bs_reload(&bs, NULL); 2888 2889 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2890 2891 /* Create and resize blobs to make sure that useable cluster count won't change */ 2892 for (i = 0; i < 4; i++) { 2893 g_bserrno = -1; 2894 g_blobid = SPDK_BLOBID_INVALID; 2895 blob = ut_blob_create_and_open(bs, NULL); 2896 2897 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2898 poll_threads(); 2899 CU_ASSERT(g_bserrno == 0); 2900 2901 g_bserrno = -1; 2902 spdk_blob_close(blob, blob_op_complete, NULL); 2903 poll_threads(); 2904 CU_ASSERT(g_bserrno == 0); 2905 2906 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2907 } 2908 2909 /* Reload the blob store to make sure that nothing changed */ 2910 ut_bs_reload(&bs, NULL); 2911 2912 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2913 } 2914 2915 /* 2916 * Test resizing of the metadata blob. This requires creating enough blobs 2917 * so that one cluster is not enough to fit the metadata for those blobs. 2918 * To induce this condition to happen more quickly, we reduce the cluster 2919 * size to 16KB, which means only 4 4KB blob metadata pages can fit. 
2920 */ 2921 static void 2922 bs_resize_md(void) 2923 { 2924 struct spdk_blob_store *bs; 2925 const int CLUSTER_PAGE_COUNT = 4; 2926 const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; 2927 struct spdk_bs_dev *dev; 2928 struct spdk_bs_opts opts; 2929 struct spdk_blob *blob; 2930 struct spdk_blob_opts blob_opts; 2931 uint32_t cluster_sz; 2932 spdk_blob_id blobids[NUM_BLOBS]; 2933 int i; 2934 2935 2936 dev = init_dev(); 2937 spdk_bs_opts_init(&opts, sizeof(opts)); 2938 opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; 2939 cluster_sz = opts.cluster_sz; 2940 2941 /* Initialize a new blob store */ 2942 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2943 poll_threads(); 2944 CU_ASSERT(g_bserrno == 0); 2945 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2946 bs = g_bs; 2947 2948 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2949 2950 ut_spdk_blob_opts_init(&blob_opts); 2951 2952 for (i = 0; i < NUM_BLOBS; i++) { 2953 g_bserrno = -1; 2954 g_blobid = SPDK_BLOBID_INVALID; 2955 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2956 poll_threads(); 2957 CU_ASSERT(g_bserrno == 0); 2958 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2959 blobids[i] = g_blobid; 2960 } 2961 2962 ut_bs_reload(&bs, &opts); 2963 2964 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2965 2966 for (i = 0; i < NUM_BLOBS; i++) { 2967 g_bserrno = -1; 2968 g_blob = NULL; 2969 spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); 2970 poll_threads(); 2971 CU_ASSERT(g_bserrno == 0); 2972 CU_ASSERT(g_blob != NULL); 2973 blob = g_blob; 2974 g_bserrno = -1; 2975 spdk_blob_close(blob, blob_op_complete, NULL); 2976 poll_threads(); 2977 CU_ASSERT(g_bserrno == 0); 2978 } 2979 2980 spdk_bs_unload(bs, bs_op_complete, NULL); 2981 poll_threads(); 2982 CU_ASSERT(g_bserrno == 0); 2983 g_bs = NULL; 2984 } 2985 2986 static void 2987 bs_destroy(void) 2988 { 2989 struct spdk_blob_store *bs; 2990 struct spdk_bs_dev *dev; 2991 2992 /* Initialize a new blob store */ 2993 dev = init_dev(); 2994 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2995 poll_threads(); 2996 CU_ASSERT(g_bserrno == 0); 2997 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2998 bs = g_bs; 2999 3000 /* Destroy the blob store */ 3001 g_bserrno = -1; 3002 spdk_bs_destroy(bs, bs_op_complete, NULL); 3003 poll_threads(); 3004 CU_ASSERT(g_bserrno == 0); 3005 3006 /* Loading an non-existent blob store should fail. 
*/ 3007 g_bs = NULL; 3008 dev = init_dev(); 3009 3010 g_bserrno = 0; 3011 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3012 poll_threads(); 3013 CU_ASSERT(g_bserrno != 0); 3014 } 3015 3016 /* Try to hit all of the corner cases associated with serializing 3017 * a blob to disk 3018 */ 3019 static void 3020 blob_serialize_test(void) 3021 { 3022 struct spdk_bs_dev *dev; 3023 struct spdk_bs_opts opts; 3024 struct spdk_blob_store *bs; 3025 spdk_blob_id blobid[2]; 3026 struct spdk_blob *blob[2]; 3027 uint64_t i; 3028 char *value; 3029 int rc; 3030 3031 dev = init_dev(); 3032 3033 /* Initialize a new blobstore with very small clusters */ 3034 spdk_bs_opts_init(&opts, sizeof(opts)); 3035 opts.cluster_sz = dev->blocklen * 8; 3036 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 3037 poll_threads(); 3038 CU_ASSERT(g_bserrno == 0); 3039 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3040 bs = g_bs; 3041 3042 /* Create and open two blobs */ 3043 for (i = 0; i < 2; i++) { 3044 blob[i] = ut_blob_create_and_open(bs, NULL); 3045 blobid[i] = spdk_blob_get_id(blob[i]); 3046 3047 /* Set a fairly large xattr on both blobs to eat up 3048 * metadata space 3049 */ 3050 value = calloc(dev->blocklen - 64, sizeof(char)); 3051 SPDK_CU_ASSERT_FATAL(value != NULL); 3052 memset(value, i, dev->blocklen / 2); 3053 rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); 3054 CU_ASSERT(rc == 0); 3055 free(value); 3056 } 3057 3058 /* Resize the blobs, alternating 1 cluster at a time. 3059 * This thwarts run length encoding and will cause spill 3060 * over of the extents. 3061 */ 3062 for (i = 0; i < 6; i++) { 3063 spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); 3064 poll_threads(); 3065 CU_ASSERT(g_bserrno == 0); 3066 } 3067 3068 for (i = 0; i < 2; i++) { 3069 spdk_blob_sync_md(blob[i], blob_op_complete, NULL); 3070 poll_threads(); 3071 CU_ASSERT(g_bserrno == 0); 3072 } 3073 3074 /* Close the blobs */ 3075 for (i = 0; i < 2; i++) { 3076 spdk_blob_close(blob[i], blob_op_complete, NULL); 3077 poll_threads(); 3078 CU_ASSERT(g_bserrno == 0); 3079 } 3080 3081 ut_bs_reload(&bs, &opts); 3082 3083 for (i = 0; i < 2; i++) { 3084 blob[i] = NULL; 3085 3086 spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); 3087 poll_threads(); 3088 CU_ASSERT(g_bserrno == 0); 3089 CU_ASSERT(g_blob != NULL); 3090 blob[i] = g_blob; 3091 3092 CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); 3093 3094 spdk_blob_close(blob[i], blob_op_complete, NULL); 3095 poll_threads(); 3096 CU_ASSERT(g_bserrno == 0); 3097 } 3098 3099 spdk_bs_unload(bs, bs_op_complete, NULL); 3100 poll_threads(); 3101 CU_ASSERT(g_bserrno == 0); 3102 g_bs = NULL; 3103 } 3104 3105 static void 3106 blob_crc(void) 3107 { 3108 struct spdk_blob_store *bs = g_bs; 3109 struct spdk_blob *blob; 3110 spdk_blob_id blobid; 3111 uint32_t page_num; 3112 int index; 3113 struct spdk_blob_md_page *page; 3114 3115 blob = ut_blob_create_and_open(bs, NULL); 3116 blobid = spdk_blob_get_id(blob); 3117 3118 spdk_blob_close(blob, blob_op_complete, NULL); 3119 poll_threads(); 3120 CU_ASSERT(g_bserrno == 0); 3121 3122 page_num = bs_blobid_to_page(blobid); 3123 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3124 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3125 page->crc = 0; 3126 3127 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3128 poll_threads(); 3129 CU_ASSERT(g_bserrno == -EINVAL); 3130 CU_ASSERT(g_blob == NULL); 3131 g_bserrno = 0; 3132 3133 spdk_bs_delete_blob(bs, blobid, blob_op_complete, 
NULL); 3134 poll_threads(); 3135 CU_ASSERT(g_bserrno == -EINVAL); 3136 } 3137 3138 static void 3139 super_block_crc(void) 3140 { 3141 struct spdk_blob_store *bs; 3142 struct spdk_bs_dev *dev; 3143 struct spdk_bs_super_block *super_block; 3144 3145 dev = init_dev(); 3146 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 3147 poll_threads(); 3148 CU_ASSERT(g_bserrno == 0); 3149 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3150 bs = g_bs; 3151 3152 spdk_bs_unload(bs, bs_op_complete, NULL); 3153 poll_threads(); 3154 CU_ASSERT(g_bserrno == 0); 3155 g_bs = NULL; 3156 3157 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 3158 super_block->crc = 0; 3159 dev = init_dev(); 3160 3161 /* Load an existing blob store */ 3162 g_bserrno = 0; 3163 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3164 poll_threads(); 3165 CU_ASSERT(g_bserrno == -EILSEQ); 3166 } 3167 3168 /* For blob dirty shutdown test case we do the following sub-test cases: 3169 * 1 Initialize new blob store and create 1 super blob with some xattrs, then we 3170 * dirty shutdown and reload the blob store and verify the xattrs. 3171 * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown, 3172 * reload the blob store and verify the clusters number. 3173 * 3 Create the second blob and then dirty shutdown, reload the blob store 3174 * and verify the second blob. 3175 * 4 Delete the second blob and then dirty shutdown, reload the blob store 3176 * and verify the second blob is invalid. 3177 * 5 Create the second blob again and also create the third blob, modify the 3178 * md of second blob which makes the md invalid, and then dirty shutdown, 3179 * reload the blob store verify the second blob, it should invalid and also 3180 * verify the third blob, it should correct. 3181 */ 3182 static void 3183 blob_dirty_shutdown(void) 3184 { 3185 int rc; 3186 int index; 3187 struct spdk_blob_store *bs = g_bs; 3188 spdk_blob_id blobid1, blobid2, blobid3; 3189 struct spdk_blob *blob = g_blob; 3190 uint64_t length; 3191 uint64_t free_clusters; 3192 const void *value; 3193 size_t value_len; 3194 uint32_t page_num; 3195 struct spdk_blob_md_page *page; 3196 struct spdk_blob_opts blob_opts; 3197 3198 /* Create first blob */ 3199 blobid1 = spdk_blob_get_id(blob); 3200 3201 /* Set some xattrs */ 3202 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 3203 CU_ASSERT(rc == 0); 3204 3205 length = 2345; 3206 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3207 CU_ASSERT(rc == 0); 3208 3209 /* Put xattr that fits exactly single page. 3210 * This results in adding additional pages to MD. 3211 * First is flags and smaller xattr, second the large xattr, 3212 * third are just the extents. 
3213 */ 3214 size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - 3215 strlen("large_xattr"); 3216 char *xattr = calloc(xattr_length, sizeof(char)); 3217 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3218 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3219 free(xattr); 3220 SPDK_CU_ASSERT_FATAL(rc == 0); 3221 3222 /* Resize the blob */ 3223 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3224 poll_threads(); 3225 CU_ASSERT(g_bserrno == 0); 3226 3227 /* Set the blob as the super blob */ 3228 spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); 3229 poll_threads(); 3230 CU_ASSERT(g_bserrno == 0); 3231 3232 free_clusters = spdk_bs_free_cluster_count(bs); 3233 3234 spdk_blob_close(blob, blob_op_complete, NULL); 3235 poll_threads(); 3236 CU_ASSERT(g_bserrno == 0); 3237 blob = NULL; 3238 g_blob = NULL; 3239 g_blobid = SPDK_BLOBID_INVALID; 3240 3241 ut_bs_dirty_load(&bs, NULL); 3242 3243 /* Get the super blob */ 3244 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 3245 poll_threads(); 3246 CU_ASSERT(g_bserrno == 0); 3247 CU_ASSERT(blobid1 == g_blobid); 3248 3249 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3250 poll_threads(); 3251 CU_ASSERT(g_bserrno == 0); 3252 CU_ASSERT(g_blob != NULL); 3253 blob = g_blob; 3254 3255 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3256 3257 /* Get the xattrs */ 3258 value = NULL; 3259 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3260 CU_ASSERT(rc == 0); 3261 SPDK_CU_ASSERT_FATAL(value != NULL); 3262 CU_ASSERT(*(uint64_t *)value == length); 3263 CU_ASSERT(value_len == 8); 3264 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3265 3266 /* Resize the blob */ 3267 spdk_blob_resize(blob, 20, blob_op_complete, NULL); 3268 poll_threads(); 3269 CU_ASSERT(g_bserrno == 0); 3270 3271 free_clusters = spdk_bs_free_cluster_count(bs); 3272 3273 spdk_blob_close(blob, blob_op_complete, NULL); 3274 poll_threads(); 3275 CU_ASSERT(g_bserrno == 0); 3276 blob = NULL; 3277 g_blob = NULL; 3278 g_blobid = SPDK_BLOBID_INVALID; 3279 3280 ut_bs_dirty_load(&bs, NULL); 3281 3282 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3283 poll_threads(); 3284 CU_ASSERT(g_bserrno == 0); 3285 CU_ASSERT(g_blob != NULL); 3286 blob = g_blob; 3287 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); 3288 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3289 3290 spdk_blob_close(blob, blob_op_complete, NULL); 3291 poll_threads(); 3292 CU_ASSERT(g_bserrno == 0); 3293 blob = NULL; 3294 g_blob = NULL; 3295 g_blobid = SPDK_BLOBID_INVALID; 3296 3297 /* Create second blob */ 3298 blob = ut_blob_create_and_open(bs, NULL); 3299 blobid2 = spdk_blob_get_id(blob); 3300 3301 /* Set some xattrs */ 3302 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3303 CU_ASSERT(rc == 0); 3304 3305 length = 5432; 3306 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3307 CU_ASSERT(rc == 0); 3308 3309 /* Resize the blob */ 3310 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3311 poll_threads(); 3312 CU_ASSERT(g_bserrno == 0); 3313 3314 free_clusters = spdk_bs_free_cluster_count(bs); 3315 3316 spdk_blob_close(blob, blob_op_complete, NULL); 3317 poll_threads(); 3318 CU_ASSERT(g_bserrno == 0); 3319 blob = NULL; 3320 g_blob = NULL; 3321 g_blobid = SPDK_BLOBID_INVALID; 3322 3323 ut_bs_dirty_load(&bs, NULL); 3324 3325 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3326 poll_threads(); 3327 CU_ASSERT(g_bserrno == 0); 3328 
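/*
 * Hedged note: the ut_bs_dirty_load() helper used in this test presumably
 * reloads the blobstore without going through spdk_bs_unload(), so the
 * super block still reports clean == 0 and the load path has to
 * reconstruct the used page/cluster/blobid masks from the per-blob
 * metadata. The assertions that follow (xattrs, cluster counts,
 * free-cluster accounting) therefore exercise that recovery path rather
 * than the fast clean-shutdown path.
 */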
CU_ASSERT(g_blob != NULL); 3329 blob = g_blob; 3330 3331 /* Get the xattrs */ 3332 value = NULL; 3333 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3334 CU_ASSERT(rc == 0); 3335 SPDK_CU_ASSERT_FATAL(value != NULL); 3336 CU_ASSERT(*(uint64_t *)value == length); 3337 CU_ASSERT(value_len == 8); 3338 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3339 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3340 3341 ut_blob_close_and_delete(bs, blob); 3342 3343 free_clusters = spdk_bs_free_cluster_count(bs); 3344 3345 ut_bs_dirty_load(&bs, NULL); 3346 3347 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3348 poll_threads(); 3349 CU_ASSERT(g_bserrno != 0); 3350 CU_ASSERT(g_blob == NULL); 3351 3352 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3353 poll_threads(); 3354 CU_ASSERT(g_bserrno == 0); 3355 CU_ASSERT(g_blob != NULL); 3356 blob = g_blob; 3357 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3358 spdk_blob_close(blob, blob_op_complete, NULL); 3359 poll_threads(); 3360 CU_ASSERT(g_bserrno == 0); 3361 3362 ut_bs_reload(&bs, NULL); 3363 3364 /* Create second blob */ 3365 ut_spdk_blob_opts_init(&blob_opts); 3366 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3367 poll_threads(); 3368 CU_ASSERT(g_bserrno == 0); 3369 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3370 blobid2 = g_blobid; 3371 3372 /* Create third blob */ 3373 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3374 poll_threads(); 3375 CU_ASSERT(g_bserrno == 0); 3376 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3377 blobid3 = g_blobid; 3378 3379 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3380 poll_threads(); 3381 CU_ASSERT(g_bserrno == 0); 3382 CU_ASSERT(g_blob != NULL); 3383 blob = g_blob; 3384 3385 /* Set some xattrs for second blob */ 3386 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3387 CU_ASSERT(rc == 0); 3388 3389 length = 5432; 3390 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3391 CU_ASSERT(rc == 0); 3392 3393 spdk_blob_close(blob, blob_op_complete, NULL); 3394 poll_threads(); 3395 CU_ASSERT(g_bserrno == 0); 3396 blob = NULL; 3397 g_blob = NULL; 3398 g_blobid = SPDK_BLOBID_INVALID; 3399 3400 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3401 poll_threads(); 3402 CU_ASSERT(g_bserrno == 0); 3403 CU_ASSERT(g_blob != NULL); 3404 blob = g_blob; 3405 3406 /* Set some xattrs for third blob */ 3407 rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); 3408 CU_ASSERT(rc == 0); 3409 3410 length = 5432; 3411 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3412 CU_ASSERT(rc == 0); 3413 3414 spdk_blob_close(blob, blob_op_complete, NULL); 3415 poll_threads(); 3416 CU_ASSERT(g_bserrno == 0); 3417 blob = NULL; 3418 g_blob = NULL; 3419 g_blobid = SPDK_BLOBID_INVALID; 3420 3421 /* Mark second blob as invalid */ 3422 page_num = bs_blobid_to_page(blobid2); 3423 3424 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3425 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3426 page->sequence_num = 1; 3427 page->crc = blob_md_page_calc_crc(page); 3428 3429 free_clusters = spdk_bs_free_cluster_count(bs); 3430 3431 ut_bs_dirty_load(&bs, NULL); 3432 3433 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3434 poll_threads(); 3435 CU_ASSERT(g_bserrno != 0); 3436 CU_ASSERT(g_blob == NULL); 3437 3438 spdk_bs_open_blob(bs, blobid3, 
blob_op_with_handle_complete, NULL); 3439 poll_threads(); 3440 CU_ASSERT(g_bserrno == 0); 3441 CU_ASSERT(g_blob != NULL); 3442 blob = g_blob; 3443 3444 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3445 } 3446 3447 static void 3448 blob_flags(void) 3449 { 3450 struct spdk_blob_store *bs = g_bs; 3451 spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; 3452 struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; 3453 struct spdk_blob_opts blob_opts; 3454 int rc; 3455 3456 /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */ 3457 blob_invalid = ut_blob_create_and_open(bs, NULL); 3458 blobid_invalid = spdk_blob_get_id(blob_invalid); 3459 3460 blob_data_ro = ut_blob_create_and_open(bs, NULL); 3461 blobid_data_ro = spdk_blob_get_id(blob_data_ro); 3462 3463 ut_spdk_blob_opts_init(&blob_opts); 3464 blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; 3465 blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); 3466 blobid_md_ro = spdk_blob_get_id(blob_md_ro); 3467 CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); 3468 3469 /* Change the size of blob_data_ro to check if flags are serialized 3470 * when blob has non zero number of extents */ 3471 spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); 3472 poll_threads(); 3473 CU_ASSERT(g_bserrno == 0); 3474 3475 /* Set the xattr to check if flags are serialized 3476 * when blob has non zero number of xattrs */ 3477 rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); 3478 CU_ASSERT(rc == 0); 3479 3480 blob_invalid->invalid_flags = (1ULL << 63); 3481 blob_invalid->state = SPDK_BLOB_STATE_DIRTY; 3482 blob_data_ro->data_ro_flags = (1ULL << 62); 3483 blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; 3484 blob_md_ro->md_ro_flags = (1ULL << 61); 3485 blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; 3486 3487 g_bserrno = -1; 3488 spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); 3489 poll_threads(); 3490 CU_ASSERT(g_bserrno == 0); 3491 g_bserrno = -1; 3492 spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); 3493 poll_threads(); 3494 CU_ASSERT(g_bserrno == 0); 3495 g_bserrno = -1; 3496 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3497 poll_threads(); 3498 CU_ASSERT(g_bserrno == 0); 3499 3500 g_bserrno = -1; 3501 spdk_blob_close(blob_invalid, blob_op_complete, NULL); 3502 poll_threads(); 3503 CU_ASSERT(g_bserrno == 0); 3504 blob_invalid = NULL; 3505 g_bserrno = -1; 3506 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3507 poll_threads(); 3508 CU_ASSERT(g_bserrno == 0); 3509 blob_data_ro = NULL; 3510 g_bserrno = -1; 3511 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3512 poll_threads(); 3513 CU_ASSERT(g_bserrno == 0); 3514 blob_md_ro = NULL; 3515 3516 g_blob = NULL; 3517 g_blobid = SPDK_BLOBID_INVALID; 3518 3519 ut_bs_reload(&bs, NULL); 3520 3521 g_blob = NULL; 3522 g_bserrno = 0; 3523 spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); 3524 poll_threads(); 3525 CU_ASSERT(g_bserrno != 0); 3526 CU_ASSERT(g_blob == NULL); 3527 3528 g_blob = NULL; 3529 g_bserrno = -1; 3530 spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL); 3531 poll_threads(); 3532 CU_ASSERT(g_bserrno == 0); 3533 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3534 blob_data_ro = g_blob; 3535 /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. 
*/ 3536 CU_ASSERT(blob_data_ro->data_ro == true); 3537 CU_ASSERT(blob_data_ro->md_ro == true); 3538 CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); 3539 3540 g_blob = NULL; 3541 g_bserrno = -1; 3542 spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); 3543 poll_threads(); 3544 CU_ASSERT(g_bserrno == 0); 3545 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3546 blob_md_ro = g_blob; 3547 CU_ASSERT(blob_md_ro->data_ro == false); 3548 CU_ASSERT(blob_md_ro->md_ro == true); 3549 3550 g_bserrno = -1; 3551 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3552 poll_threads(); 3553 CU_ASSERT(g_bserrno == 0); 3554 3555 ut_blob_close_and_delete(bs, blob_data_ro); 3556 ut_blob_close_and_delete(bs, blob_md_ro); 3557 } 3558 3559 static void 3560 bs_version(void) 3561 { 3562 struct spdk_bs_super_block *super; 3563 struct spdk_blob_store *bs = g_bs; 3564 struct spdk_bs_dev *dev; 3565 struct spdk_blob *blob; 3566 struct spdk_blob_opts blob_opts; 3567 spdk_blob_id blobid; 3568 3569 /* Unload the blob store */ 3570 spdk_bs_unload(bs, bs_op_complete, NULL); 3571 poll_threads(); 3572 CU_ASSERT(g_bserrno == 0); 3573 g_bs = NULL; 3574 3575 /* 3576 * Change the bs version on disk. This will allow us to 3577 * test that the version does not get modified automatically 3578 * when loading and unloading the blobstore. 3579 */ 3580 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 3581 CU_ASSERT(super->version == SPDK_BS_VERSION); 3582 CU_ASSERT(super->clean == 1); 3583 super->version = 2; 3584 /* 3585 * Version 2 metadata does not have a used blobid mask, so clear 3586 * those fields in the super block and zero the corresponding 3587 * region on "disk". We will use this to ensure blob IDs are 3588 * correctly reconstructed. 3589 */ 3590 memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, 3591 super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); 3592 super->used_blobid_mask_start = 0; 3593 super->used_blobid_mask_len = 0; 3594 super->crc = blob_md_page_calc_crc(super); 3595 3596 /* Load an existing blob store */ 3597 dev = init_dev(); 3598 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3599 poll_threads(); 3600 CU_ASSERT(g_bserrno == 0); 3601 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3602 CU_ASSERT(super->clean == 1); 3603 bs = g_bs; 3604 3605 /* 3606 * Create a blob - just to make sure that when we unload it 3607 * results in writing the super block (since metadata pages 3608 * were allocated. 
3609 */ 3610 ut_spdk_blob_opts_init(&blob_opts); 3611 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3612 poll_threads(); 3613 CU_ASSERT(g_bserrno == 0); 3614 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3615 blobid = g_blobid; 3616 3617 /* Unload the blob store */ 3618 spdk_bs_unload(bs, bs_op_complete, NULL); 3619 poll_threads(); 3620 CU_ASSERT(g_bserrno == 0); 3621 g_bs = NULL; 3622 CU_ASSERT(super->version == 2); 3623 CU_ASSERT(super->used_blobid_mask_start == 0); 3624 CU_ASSERT(super->used_blobid_mask_len == 0); 3625 3626 dev = init_dev(); 3627 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3628 poll_threads(); 3629 CU_ASSERT(g_bserrno == 0); 3630 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3631 bs = g_bs; 3632 3633 g_blob = NULL; 3634 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3635 poll_threads(); 3636 CU_ASSERT(g_bserrno == 0); 3637 CU_ASSERT(g_blob != NULL); 3638 blob = g_blob; 3639 3640 ut_blob_close_and_delete(bs, blob); 3641 3642 CU_ASSERT(super->version == 2); 3643 CU_ASSERT(super->used_blobid_mask_start == 0); 3644 CU_ASSERT(super->used_blobid_mask_len == 0); 3645 } 3646 3647 static void 3648 blob_set_xattrs_test(void) 3649 { 3650 struct spdk_blob_store *bs = g_bs; 3651 struct spdk_blob *blob; 3652 struct spdk_blob_opts opts; 3653 const void *value; 3654 size_t value_len; 3655 char *xattr; 3656 size_t xattr_length; 3657 int rc; 3658 3659 /* Create blob with extra attributes */ 3660 ut_spdk_blob_opts_init(&opts); 3661 3662 opts.xattrs.names = g_xattr_names; 3663 opts.xattrs.get_value = _get_xattr_value; 3664 opts.xattrs.count = 3; 3665 opts.xattrs.ctx = &g_ctx; 3666 3667 blob = ut_blob_create_and_open(bs, &opts); 3668 3669 /* Get the xattrs */ 3670 value = NULL; 3671 3672 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 3673 CU_ASSERT(rc == 0); 3674 SPDK_CU_ASSERT_FATAL(value != NULL); 3675 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 3676 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 3677 3678 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 3679 CU_ASSERT(rc == 0); 3680 SPDK_CU_ASSERT_FATAL(value != NULL); 3681 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 3682 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 3683 3684 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 3685 CU_ASSERT(rc == 0); 3686 SPDK_CU_ASSERT_FATAL(value != NULL); 3687 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 3688 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 3689 3690 /* Try to get non existing attribute */ 3691 3692 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 3693 CU_ASSERT(rc == -ENOENT); 3694 3695 /* Try xattr exceeding maximum length of descriptor in single page */ 3696 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 3697 strlen("large_xattr") + 1; 3698 xattr = calloc(xattr_length, sizeof(char)); 3699 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3700 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3701 free(xattr); 3702 SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); 3703 3704 spdk_blob_close(blob, blob_op_complete, NULL); 3705 poll_threads(); 3706 CU_ASSERT(g_bserrno == 0); 3707 blob = NULL; 3708 g_blob = NULL; 3709 g_blobid = SPDK_BLOBID_INVALID; 3710 3711 /* NULL callback */ 3712 ut_spdk_blob_opts_init(&opts); 3713 opts.xattrs.names = g_xattr_names; 3714 opts.xattrs.get_value = NULL; 3715 opts.xattrs.count = 1; 3716 
opts.xattrs.ctx = &g_ctx; 3717 3718 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3719 poll_threads(); 3720 CU_ASSERT(g_bserrno == -EINVAL); 3721 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3722 3723 /* NULL values */ 3724 ut_spdk_blob_opts_init(&opts); 3725 opts.xattrs.names = g_xattr_names; 3726 opts.xattrs.get_value = _get_xattr_value_null; 3727 opts.xattrs.count = 1; 3728 opts.xattrs.ctx = NULL; 3729 3730 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3731 poll_threads(); 3732 CU_ASSERT(g_bserrno == -EINVAL); 3733 } 3734 3735 static void 3736 blob_thin_prov_alloc(void) 3737 { 3738 struct spdk_blob_store *bs = g_bs; 3739 struct spdk_blob *blob; 3740 struct spdk_blob_opts opts; 3741 spdk_blob_id blobid; 3742 uint64_t free_clusters; 3743 3744 free_clusters = spdk_bs_free_cluster_count(bs); 3745 3746 /* Set blob as thin provisioned */ 3747 ut_spdk_blob_opts_init(&opts); 3748 opts.thin_provision = true; 3749 3750 blob = ut_blob_create_and_open(bs, &opts); 3751 blobid = spdk_blob_get_id(blob); 3752 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3753 3754 CU_ASSERT(blob->active.num_clusters == 0); 3755 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 3756 3757 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3758 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3759 poll_threads(); 3760 CU_ASSERT(g_bserrno == 0); 3761 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3762 CU_ASSERT(blob->active.num_clusters == 5); 3763 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 3764 3765 /* Grow it to 1TB - still unallocated */ 3766 spdk_blob_resize(blob, 262144, blob_op_complete, NULL); 3767 poll_threads(); 3768 CU_ASSERT(g_bserrno == 0); 3769 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3770 CU_ASSERT(blob->active.num_clusters == 262144); 3771 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3772 3773 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3774 poll_threads(); 3775 CU_ASSERT(g_bserrno == 0); 3776 /* Sync must not change anything */ 3777 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3778 CU_ASSERT(blob->active.num_clusters == 262144); 3779 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3780 /* Since clusters are not allocated, 3781 * number of metadata pages is expected to be minimal. 
3782 */ 3783 CU_ASSERT(blob->active.num_pages == 1); 3784 3785 /* Shrink the blob to 3 clusters - still unallocated */ 3786 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 3787 poll_threads(); 3788 CU_ASSERT(g_bserrno == 0); 3789 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3790 CU_ASSERT(blob->active.num_clusters == 3); 3791 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3792 3793 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3794 poll_threads(); 3795 CU_ASSERT(g_bserrno == 0); 3796 /* Sync must not change anything */ 3797 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3798 CU_ASSERT(blob->active.num_clusters == 3); 3799 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3800 3801 spdk_blob_close(blob, blob_op_complete, NULL); 3802 poll_threads(); 3803 CU_ASSERT(g_bserrno == 0); 3804 3805 ut_bs_reload(&bs, NULL); 3806 3807 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3808 poll_threads(); 3809 CU_ASSERT(g_bserrno == 0); 3810 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3811 blob = g_blob; 3812 3813 /* Check that clusters allocation and size is still the same */ 3814 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3815 CU_ASSERT(blob->active.num_clusters == 3); 3816 3817 ut_blob_close_and_delete(bs, blob); 3818 } 3819 3820 static void 3821 blob_insert_cluster_msg_test(void) 3822 { 3823 struct spdk_blob_store *bs = g_bs; 3824 struct spdk_blob *blob; 3825 struct spdk_blob_opts opts; 3826 spdk_blob_id blobid; 3827 uint64_t free_clusters; 3828 uint64_t new_cluster = 0; 3829 uint32_t cluster_num = 3; 3830 uint32_t extent_page = 0; 3831 3832 free_clusters = spdk_bs_free_cluster_count(bs); 3833 3834 /* Set blob as thin provisioned */ 3835 ut_spdk_blob_opts_init(&opts); 3836 opts.thin_provision = true; 3837 opts.num_clusters = 4; 3838 3839 blob = ut_blob_create_and_open(bs, &opts); 3840 blobid = spdk_blob_get_id(blob); 3841 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3842 3843 CU_ASSERT(blob->active.num_clusters == 4); 3844 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); 3845 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3846 3847 /* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread. 3848 * This is to simulate behaviour when cluster is allocated after blob creation. 3849 * Such as _spdk_bs_allocate_and_copy_cluster(). 
*/ 3850 bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); 3851 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3852 3853 blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, 3854 blob_op_complete, NULL); 3855 poll_threads(); 3856 3857 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3858 3859 spdk_blob_close(blob, blob_op_complete, NULL); 3860 poll_threads(); 3861 CU_ASSERT(g_bserrno == 0); 3862 3863 ut_bs_reload(&bs, NULL); 3864 3865 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3866 poll_threads(); 3867 CU_ASSERT(g_bserrno == 0); 3868 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3869 blob = g_blob; 3870 3871 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3872 3873 ut_blob_close_and_delete(bs, blob); 3874 } 3875 3876 static void 3877 blob_thin_prov_rw(void) 3878 { 3879 static const uint8_t zero[10 * 4096] = { 0 }; 3880 struct spdk_blob_store *bs = g_bs; 3881 struct spdk_blob *blob, *blob_id0; 3882 struct spdk_io_channel *channel, *channel_thread1; 3883 struct spdk_blob_opts opts; 3884 uint64_t free_clusters; 3885 uint64_t page_size; 3886 uint8_t payload_read[10 * 4096]; 3887 uint8_t payload_write[10 * 4096]; 3888 uint64_t write_bytes; 3889 uint64_t read_bytes; 3890 3891 free_clusters = spdk_bs_free_cluster_count(bs); 3892 page_size = spdk_bs_get_page_size(bs); 3893 3894 channel = spdk_bs_alloc_io_channel(bs); 3895 CU_ASSERT(channel != NULL); 3896 3897 ut_spdk_blob_opts_init(&opts); 3898 opts.thin_provision = true; 3899 3900 /* Create and delete blob at md page 0, so that next md page allocation 3901 * for extent will use that. */ 3902 blob_id0 = ut_blob_create_and_open(bs, &opts); 3903 blob = ut_blob_create_and_open(bs, &opts); 3904 ut_blob_close_and_delete(bs, blob_id0); 3905 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3906 3907 CU_ASSERT(blob->active.num_clusters == 0); 3908 3909 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3910 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3911 poll_threads(); 3912 CU_ASSERT(g_bserrno == 0); 3913 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3914 CU_ASSERT(blob->active.num_clusters == 5); 3915 3916 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3917 poll_threads(); 3918 CU_ASSERT(g_bserrno == 0); 3919 /* Sync must not change anything */ 3920 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3921 CU_ASSERT(blob->active.num_clusters == 5); 3922 3923 /* Payload should be all zeros from unallocated clusters */ 3924 memset(payload_read, 0xFF, sizeof(payload_read)); 3925 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3926 poll_threads(); 3927 CU_ASSERT(g_bserrno == 0); 3928 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3929 3930 write_bytes = g_dev_write_bytes; 3931 read_bytes = g_dev_read_bytes; 3932 3933 /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ 3934 set_thread(1); 3935 channel_thread1 = spdk_bs_alloc_io_channel(bs); 3936 CU_ASSERT(channel_thread1 != NULL); 3937 memset(payload_write, 0xE5, sizeof(payload_write)); 3938 spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); 3939 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3940 /* Perform write on thread 0. That will try to allocate cluster, 3941 * but fail due to another thread issuing the cluster allocation first. 
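* The redundant cluster claimed by thread 0 is released once the race is detected, which is why the
* free count drops to free_clusters - 2 before polling but returns to free_clusters - 1 after poll_threads(), as asserted below.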
*/ 3942 set_thread(0); 3943 memset(payload_write, 0xE5, sizeof(payload_write)); 3944 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 3945 CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); 3946 poll_threads(); 3947 CU_ASSERT(g_bserrno == 0); 3948 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3949 /* For thin-provisioned blob we need to write 20 pages plus one page metadata and 3950 * read 0 bytes */ 3951 if (g_use_extent_table) { 3952 /* Add one more page for EXTENT_PAGE write */ 3953 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); 3954 } else { 3955 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); 3956 } 3957 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3958 3959 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3960 poll_threads(); 3961 CU_ASSERT(g_bserrno == 0); 3962 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3963 3964 ut_blob_close_and_delete(bs, blob); 3965 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3966 3967 set_thread(1); 3968 spdk_bs_free_io_channel(channel_thread1); 3969 set_thread(0); 3970 spdk_bs_free_io_channel(channel); 3971 poll_threads(); 3972 g_blob = NULL; 3973 g_blobid = 0; 3974 } 3975 3976 static void 3977 blob_thin_prov_write_count_io(void) 3978 { 3979 struct spdk_blob_store *bs; 3980 struct spdk_blob *blob; 3981 struct spdk_io_channel *ch; 3982 struct spdk_bs_dev *dev; 3983 struct spdk_bs_opts bs_opts; 3984 struct spdk_blob_opts opts; 3985 uint64_t free_clusters; 3986 uint64_t page_size; 3987 uint8_t payload_write[4096]; 3988 uint64_t write_bytes; 3989 uint64_t read_bytes; 3990 const uint32_t CLUSTER_SZ = 16384; 3991 uint32_t pages_per_cluster; 3992 uint32_t pages_per_extent_page; 3993 uint32_t i; 3994 3995 /* Use a very small cluster size for this test. This ensures we need multiple 3996 * extent pages to hold all of the clusters even for relatively small blobs like 3997 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB 3998 * buffers). 3999 */ 4000 dev = init_dev(); 4001 spdk_bs_opts_init(&bs_opts, sizeof(bs_opts)); 4002 bs_opts.cluster_sz = CLUSTER_SZ; 4003 4004 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4005 poll_threads(); 4006 CU_ASSERT(g_bserrno == 0); 4007 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4008 bs = g_bs; 4009 4010 free_clusters = spdk_bs_free_cluster_count(bs); 4011 page_size = spdk_bs_get_page_size(bs); 4012 pages_per_cluster = CLUSTER_SZ / page_size; 4013 pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster; 4014 4015 ch = spdk_bs_alloc_io_channel(bs); 4016 SPDK_CU_ASSERT_FATAL(ch != NULL); 4017 4018 ut_spdk_blob_opts_init(&opts); 4019 opts.thin_provision = true; 4020 4021 blob = ut_blob_create_and_open(bs, &opts); 4022 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4023 4024 /* Resize the blob so that it will require 8 extent pages to hold all of 4025 * the clusters. 
4026 */ 4027 g_bserrno = -1; 4028 spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL); 4029 poll_threads(); 4030 CU_ASSERT(g_bserrno == 0); 4031 4032 g_bserrno = -1; 4033 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4034 poll_threads(); 4035 CU_ASSERT(g_bserrno == 0); 4036 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4037 CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8); 4038 4039 memset(payload_write, 0, sizeof(payload_write)); 4040 for (i = 0; i < 8; i++) { 4041 write_bytes = g_dev_write_bytes; 4042 read_bytes = g_dev_read_bytes; 4043 4044 g_bserrno = -1; 4045 spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL); 4046 poll_threads(); 4047 CU_ASSERT(g_bserrno == 0); 4048 CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs)); 4049 4050 CU_ASSERT(g_dev_read_bytes == read_bytes); 4051 if (!g_use_extent_table) { 4052 /* For legacy metadata, we should have written two pages - one for the 4053 * write I/O itself, another for the blob's primary metadata. 4054 */ 4055 CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2); 4056 } else { 4057 /* For extent table metadata, we should have written three pages - one 4058 * for the write I/O, one for the extent page, one for the blob's primary 4059 * metadata. 4060 */ 4061 CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3); 4062 } 4063 4064 /* The write should have synced the metadata already. Do another sync here 4065 * just to confirm. 4066 */ 4067 write_bytes = g_dev_write_bytes; 4068 read_bytes = g_dev_read_bytes; 4069 4070 g_bserrno = -1; 4071 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4072 poll_threads(); 4073 CU_ASSERT(g_bserrno == 0); 4074 CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs)); 4075 4076 CU_ASSERT(g_dev_read_bytes == read_bytes); 4077 CU_ASSERT(g_dev_write_bytes == write_bytes); 4078 4079 /* Now write to another unallocated cluster that is part of the same extent page. */ 4080 g_bserrno = -1; 4081 spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster, 4082 1, blob_op_complete, NULL); 4083 poll_threads(); 4084 CU_ASSERT(g_bserrno == 0); 4085 CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs)); 4086 4087 CU_ASSERT(g_dev_read_bytes == read_bytes); 4088 /* 4089 * For legacy metadata, we should have written the I/O and the primary metadata page. 4090 * For extent table metadata, we should have written the I/O and the extent metadata page. 
4091 */ 4092 CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2); 4093 } 4094 4095 ut_blob_close_and_delete(bs, blob); 4096 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4097 4098 spdk_bs_free_io_channel(ch); 4099 poll_threads(); 4100 g_blob = NULL; 4101 g_blobid = 0; 4102 4103 spdk_bs_unload(bs, bs_op_complete, NULL); 4104 poll_threads(); 4105 CU_ASSERT(g_bserrno == 0); 4106 g_bs = NULL; 4107 } 4108 4109 static void 4110 blob_thin_prov_rle(void) 4111 { 4112 static const uint8_t zero[10 * 4096] = { 0 }; 4113 struct spdk_blob_store *bs = g_bs; 4114 struct spdk_blob *blob; 4115 struct spdk_io_channel *channel; 4116 struct spdk_blob_opts opts; 4117 spdk_blob_id blobid; 4118 uint64_t free_clusters; 4119 uint64_t page_size; 4120 uint8_t payload_read[10 * 4096]; 4121 uint8_t payload_write[10 * 4096]; 4122 uint64_t write_bytes; 4123 uint64_t read_bytes; 4124 uint64_t io_unit; 4125 4126 free_clusters = spdk_bs_free_cluster_count(bs); 4127 page_size = spdk_bs_get_page_size(bs); 4128 4129 ut_spdk_blob_opts_init(&opts); 4130 opts.thin_provision = true; 4131 opts.num_clusters = 5; 4132 4133 blob = ut_blob_create_and_open(bs, &opts); 4134 blobid = spdk_blob_get_id(blob); 4135 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4136 4137 channel = spdk_bs_alloc_io_channel(bs); 4138 CU_ASSERT(channel != NULL); 4139 4140 /* Target specifically second cluster in a blob as first allocation */ 4141 io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs); 4142 4143 /* Payload should be all zeros from unallocated clusters */ 4144 memset(payload_read, 0xFF, sizeof(payload_read)); 4145 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 4146 poll_threads(); 4147 CU_ASSERT(g_bserrno == 0); 4148 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4149 4150 write_bytes = g_dev_write_bytes; 4151 read_bytes = g_dev_read_bytes; 4152 4153 /* Issue write to second cluster in a blob */ 4154 memset(payload_write, 0xE5, sizeof(payload_write)); 4155 spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); 4156 poll_threads(); 4157 CU_ASSERT(g_bserrno == 0); 4158 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 4159 /* For thin-provisioned blob we need to write 10 pages plus one page metadata and 4160 * read 0 bytes */ 4161 if (g_use_extent_table) { 4162 /* Add one more page for EXTENT_PAGE write */ 4163 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); 4164 } else { 4165 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); 4166 } 4167 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 4168 4169 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 4170 poll_threads(); 4171 CU_ASSERT(g_bserrno == 0); 4172 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4173 4174 spdk_bs_free_io_channel(channel); 4175 poll_threads(); 4176 4177 spdk_blob_close(blob, blob_op_complete, NULL); 4178 poll_threads(); 4179 CU_ASSERT(g_bserrno == 0); 4180 4181 ut_bs_reload(&bs, NULL); 4182 4183 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4184 poll_threads(); 4185 CU_ASSERT(g_bserrno == 0); 4186 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4187 blob = g_blob; 4188 4189 channel = spdk_bs_alloc_io_channel(bs); 4190 CU_ASSERT(channel != NULL); 4191 4192 /* Read second cluster after blob reload to confirm data written */ 4193 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 4194 poll_threads(); 4195 CU_ASSERT(g_bserrno == 0); 
4196 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4197 4198 spdk_bs_free_io_channel(channel); 4199 poll_threads(); 4200 4201 ut_blob_close_and_delete(bs, blob); 4202 } 4203 4204 static void 4205 blob_thin_prov_rw_iov(void) 4206 { 4207 static const uint8_t zero[10 * 4096] = { 0 }; 4208 struct spdk_blob_store *bs = g_bs; 4209 struct spdk_blob *blob; 4210 struct spdk_io_channel *channel; 4211 struct spdk_blob_opts opts; 4212 uint64_t free_clusters; 4213 uint8_t payload_read[10 * 4096]; 4214 uint8_t payload_write[10 * 4096]; 4215 struct iovec iov_read[3]; 4216 struct iovec iov_write[3]; 4217 4218 free_clusters = spdk_bs_free_cluster_count(bs); 4219 4220 channel = spdk_bs_alloc_io_channel(bs); 4221 CU_ASSERT(channel != NULL); 4222 4223 ut_spdk_blob_opts_init(&opts); 4224 opts.thin_provision = true; 4225 4226 blob = ut_blob_create_and_open(bs, &opts); 4227 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4228 4229 CU_ASSERT(blob->active.num_clusters == 0); 4230 4231 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 4232 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 4233 poll_threads(); 4234 CU_ASSERT(g_bserrno == 0); 4235 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4236 CU_ASSERT(blob->active.num_clusters == 5); 4237 4238 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4239 poll_threads(); 4240 CU_ASSERT(g_bserrno == 0); 4241 /* Sync must not change anything */ 4242 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4243 CU_ASSERT(blob->active.num_clusters == 5); 4244 4245 /* Payload should be all zeros from unallocated clusters */ 4246 memset(payload_read, 0xAA, sizeof(payload_read)); 4247 iov_read[0].iov_base = payload_read; 4248 iov_read[0].iov_len = 3 * 4096; 4249 iov_read[1].iov_base = payload_read + 3 * 4096; 4250 iov_read[1].iov_len = 4 * 4096; 4251 iov_read[2].iov_base = payload_read + 7 * 4096; 4252 iov_read[2].iov_len = 3 * 4096; 4253 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4254 poll_threads(); 4255 CU_ASSERT(g_bserrno == 0); 4256 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4257 4258 memset(payload_write, 0xE5, sizeof(payload_write)); 4259 iov_write[0].iov_base = payload_write; 4260 iov_write[0].iov_len = 1 * 4096; 4261 iov_write[1].iov_base = payload_write + 1 * 4096; 4262 iov_write[1].iov_len = 5 * 4096; 4263 iov_write[2].iov_base = payload_write + 6 * 4096; 4264 iov_write[2].iov_len = 4 * 4096; 4265 4266 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4267 poll_threads(); 4268 CU_ASSERT(g_bserrno == 0); 4269 4270 memset(payload_read, 0xAA, sizeof(payload_read)); 4271 iov_read[0].iov_base = payload_read; 4272 iov_read[0].iov_len = 3 * 4096; 4273 iov_read[1].iov_base = payload_read + 3 * 4096; 4274 iov_read[1].iov_len = 4 * 4096; 4275 iov_read[2].iov_base = payload_read + 7 * 4096; 4276 iov_read[2].iov_len = 3 * 4096; 4277 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4278 poll_threads(); 4279 CU_ASSERT(g_bserrno == 0); 4280 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4281 4282 spdk_bs_free_io_channel(channel); 4283 poll_threads(); 4284 4285 ut_blob_close_and_delete(bs, blob); 4286 } 4287 4288 struct iter_ctx { 4289 int current_iter; 4290 spdk_blob_id blobid[4]; 4291 }; 4292 4293 static void 4294 test_iter(void *arg, struct spdk_blob *blob, int bserrno) 4295 { 4296 struct iter_ctx *iter_ctx = arg; 4297 spdk_blob_id blobid; 4298 4299 CU_ASSERT(bserrno == 
0); 4300 blobid = spdk_blob_get_id(blob); 4301 CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); 4302 } 4303 4304 static void 4305 bs_load_iter_test(void) 4306 { 4307 struct spdk_blob_store *bs; 4308 struct spdk_bs_dev *dev; 4309 struct iter_ctx iter_ctx = { 0 }; 4310 struct spdk_blob *blob; 4311 int i, rc; 4312 struct spdk_bs_opts opts; 4313 4314 dev = init_dev(); 4315 spdk_bs_opts_init(&opts, sizeof(opts)); 4316 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4317 4318 /* Initialize a new blob store */ 4319 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 4320 poll_threads(); 4321 CU_ASSERT(g_bserrno == 0); 4322 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4323 bs = g_bs; 4324 4325 for (i = 0; i < 4; i++) { 4326 blob = ut_blob_create_and_open(bs, NULL); 4327 iter_ctx.blobid[i] = spdk_blob_get_id(blob); 4328 4329 /* Just save the blobid as an xattr for testing purposes. */ 4330 rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); 4331 CU_ASSERT(rc == 0); 4332 4333 /* Resize the blob */ 4334 spdk_blob_resize(blob, i, blob_op_complete, NULL); 4335 poll_threads(); 4336 CU_ASSERT(g_bserrno == 0); 4337 4338 spdk_blob_close(blob, blob_op_complete, NULL); 4339 poll_threads(); 4340 CU_ASSERT(g_bserrno == 0); 4341 } 4342 4343 g_bserrno = -1; 4344 spdk_bs_unload(bs, bs_op_complete, NULL); 4345 poll_threads(); 4346 CU_ASSERT(g_bserrno == 0); 4347 4348 dev = init_dev(); 4349 spdk_bs_opts_init(&opts, sizeof(opts)); 4350 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4351 opts.iter_cb_fn = test_iter; 4352 opts.iter_cb_arg = &iter_ctx; 4353 4354 /* Test blob iteration during load after a clean shutdown. */ 4355 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4356 poll_threads(); 4357 CU_ASSERT(g_bserrno == 0); 4358 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4359 bs = g_bs; 4360 4361 /* Dirty shutdown */ 4362 bs_free(bs); 4363 4364 dev = init_dev(); 4365 spdk_bs_opts_init(&opts, sizeof(opts)); 4366 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4367 opts.iter_cb_fn = test_iter; 4368 iter_ctx.current_iter = 0; 4369 opts.iter_cb_arg = &iter_ctx; 4370 4371 /* Test blob iteration during load after a dirty shutdown. 
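* The iter callback is expected to observe the same four blob IDs, in creation order, as asserted in test_iter().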
*/ 4372 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4373 poll_threads(); 4374 CU_ASSERT(g_bserrno == 0); 4375 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4376 bs = g_bs; 4377 4378 spdk_bs_unload(bs, bs_op_complete, NULL); 4379 poll_threads(); 4380 CU_ASSERT(g_bserrno == 0); 4381 g_bs = NULL; 4382 } 4383 4384 static void 4385 blob_snapshot_rw(void) 4386 { 4387 static const uint8_t zero[10 * 4096] = { 0 }; 4388 struct spdk_blob_store *bs = g_bs; 4389 struct spdk_blob *blob, *snapshot; 4390 struct spdk_io_channel *channel; 4391 struct spdk_blob_opts opts; 4392 spdk_blob_id blobid, snapshotid; 4393 uint64_t free_clusters; 4394 uint64_t cluster_size; 4395 uint64_t page_size; 4396 uint8_t payload_read[10 * 4096]; 4397 uint8_t payload_write[10 * 4096]; 4398 uint64_t write_bytes; 4399 uint64_t read_bytes; 4400 4401 free_clusters = spdk_bs_free_cluster_count(bs); 4402 cluster_size = spdk_bs_get_cluster_size(bs); 4403 page_size = spdk_bs_get_page_size(bs); 4404 4405 channel = spdk_bs_alloc_io_channel(bs); 4406 CU_ASSERT(channel != NULL); 4407 4408 ut_spdk_blob_opts_init(&opts); 4409 opts.thin_provision = true; 4410 opts.num_clusters = 5; 4411 4412 blob = ut_blob_create_and_open(bs, &opts); 4413 blobid = spdk_blob_get_id(blob); 4414 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4415 4416 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4417 4418 memset(payload_read, 0xFF, sizeof(payload_read)); 4419 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4420 poll_threads(); 4421 CU_ASSERT(g_bserrno == 0); 4422 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4423 4424 memset(payload_write, 0xE5, sizeof(payload_write)); 4425 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4426 poll_threads(); 4427 CU_ASSERT(g_bserrno == 0); 4428 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4429 4430 /* Create snapshot from blob */ 4431 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4432 poll_threads(); 4433 CU_ASSERT(g_bserrno == 0); 4434 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4435 snapshotid = g_blobid; 4436 4437 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4438 poll_threads(); 4439 CU_ASSERT(g_bserrno == 0); 4440 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4441 snapshot = g_blob; 4442 CU_ASSERT(snapshot->data_ro == true); 4443 CU_ASSERT(snapshot->md_ro == true); 4444 4445 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4446 4447 write_bytes = g_dev_write_bytes; 4448 read_bytes = g_dev_read_bytes; 4449 4450 memset(payload_write, 0xAA, sizeof(payload_write)); 4451 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4452 poll_threads(); 4453 CU_ASSERT(g_bserrno == 0); 4454 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4455 4456 /* For a clone we need to allocate and copy one cluster, update one page of metadata 4457 * and then write 10 pages of payload. 
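* That is why the write-byte delta asserted below is cluster_size plus 11 pages (12 pages when extent tables are
* in use) and the read-byte delta is exactly one cluster_size for the copy-on-write read of the original cluster.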
4458 */ 4459 if (g_use_extent_table) { 4460 /* Add one more page for EXTENT_PAGE write */ 4461 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); 4462 } else { 4463 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); 4464 } 4465 CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); 4466 4467 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4468 poll_threads(); 4469 CU_ASSERT(g_bserrno == 0); 4470 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4471 4472 /* Data on snapshot should not change after write to clone */ 4473 memset(payload_write, 0xE5, sizeof(payload_write)); 4474 spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); 4475 poll_threads(); 4476 CU_ASSERT(g_bserrno == 0); 4477 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4478 4479 ut_blob_close_and_delete(bs, blob); 4480 ut_blob_close_and_delete(bs, snapshot); 4481 4482 spdk_bs_free_io_channel(channel); 4483 poll_threads(); 4484 g_blob = NULL; 4485 g_blobid = 0; 4486 } 4487 4488 static void 4489 blob_snapshot_rw_iov(void) 4490 { 4491 static const uint8_t zero[10 * 4096] = { 0 }; 4492 struct spdk_blob_store *bs = g_bs; 4493 struct spdk_blob *blob, *snapshot; 4494 struct spdk_io_channel *channel; 4495 struct spdk_blob_opts opts; 4496 spdk_blob_id blobid, snapshotid; 4497 uint64_t free_clusters; 4498 uint8_t payload_read[10 * 4096]; 4499 uint8_t payload_write[10 * 4096]; 4500 struct iovec iov_read[3]; 4501 struct iovec iov_write[3]; 4502 4503 free_clusters = spdk_bs_free_cluster_count(bs); 4504 4505 channel = spdk_bs_alloc_io_channel(bs); 4506 CU_ASSERT(channel != NULL); 4507 4508 ut_spdk_blob_opts_init(&opts); 4509 opts.thin_provision = true; 4510 opts.num_clusters = 5; 4511 4512 blob = ut_blob_create_and_open(bs, &opts); 4513 blobid = spdk_blob_get_id(blob); 4514 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4515 4516 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4517 4518 /* Create snapshot from blob */ 4519 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4520 poll_threads(); 4521 CU_ASSERT(g_bserrno == 0); 4522 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4523 snapshotid = g_blobid; 4524 4525 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4526 poll_threads(); 4527 CU_ASSERT(g_bserrno == 0); 4528 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4529 snapshot = g_blob; 4530 CU_ASSERT(snapshot->data_ro == true); 4531 CU_ASSERT(snapshot->md_ro == true); 4532 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4533 4534 /* Payload should be all zeros from unallocated clusters */ 4535 memset(payload_read, 0xAA, sizeof(payload_read)); 4536 iov_read[0].iov_base = payload_read; 4537 iov_read[0].iov_len = 3 * 4096; 4538 iov_read[1].iov_base = payload_read + 3 * 4096; 4539 iov_read[1].iov_len = 4 * 4096; 4540 iov_read[2].iov_base = payload_read + 7 * 4096; 4541 iov_read[2].iov_len = 3 * 4096; 4542 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4543 poll_threads(); 4544 CU_ASSERT(g_bserrno == 0); 4545 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4546 4547 memset(payload_write, 0xE5, sizeof(payload_write)); 4548 iov_write[0].iov_base = payload_write; 4549 iov_write[0].iov_len = 1 * 4096; 4550 iov_write[1].iov_base = payload_write + 1 * 4096; 4551 iov_write[1].iov_len = 5 * 4096; 4552 iov_write[2].iov_base = payload_write + 6 * 4096; 4553 iov_write[2].iov_len = 4 * 4096; 4554 4555 
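/* Write 10 pages at offset 250 through the 3-element iovec. The blob is a thin-provisioned clone of the snapshot, so this first write also allocates the backing cluster. */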
spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4556 poll_threads(); 4557 CU_ASSERT(g_bserrno == 0); 4558 4559 memset(payload_read, 0xAA, sizeof(payload_read)); 4560 iov_read[0].iov_base = payload_read; 4561 iov_read[0].iov_len = 3 * 4096; 4562 iov_read[1].iov_base = payload_read + 3 * 4096; 4563 iov_read[1].iov_len = 4 * 4096; 4564 iov_read[2].iov_base = payload_read + 7 * 4096; 4565 iov_read[2].iov_len = 3 * 4096; 4566 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4567 poll_threads(); 4568 CU_ASSERT(g_bserrno == 0); 4569 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4570 4571 spdk_bs_free_io_channel(channel); 4572 poll_threads(); 4573 4574 ut_blob_close_and_delete(bs, blob); 4575 ut_blob_close_and_delete(bs, snapshot); 4576 } 4577 4578 /** 4579 * Inflate / decouple parent rw unit tests. 4580 * 4581 * -------------- 4582 * original blob: 0 1 2 3 4 4583 * ,---------+---------+---------+---------+---------. 4584 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4585 * +---------+---------+---------+---------+---------+ 4586 * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | 4587 * +---------+---------+---------+---------+---------+ 4588 * blob | - |zzzzzzzzz| - | - | - | 4589 * '---------+---------+---------+---------+---------' 4590 * . . . . . . 4591 * -------- . . . . . . 4592 * inflate: . . . . . . 4593 * ,---------+---------+---------+---------+---------. 4594 * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| 4595 * '---------+---------+---------+---------+---------' 4596 * 4597 * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency 4598 * on snapshot2 and snapshot removed . . . 4599 * . . . . . . 4600 * ---------------- . . . . . . 4601 * decouple parent: . . . . . . 4602 * ,---------+---------+---------+---------+---------. 4603 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4604 * +---------+---------+---------+---------+---------+ 4605 * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | 4606 * '---------+---------+---------+---------+---------' 4607 * 4608 * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency 4609 * on snapshot2 removed and on snapshot still exists. Snapshot2 4610 * should remain a clone of snapshot. 
4611 */ 4612 static void 4613 _blob_inflate_rw(bool decouple_parent) 4614 { 4615 struct spdk_blob_store *bs = g_bs; 4616 struct spdk_blob *blob, *snapshot, *snapshot2; 4617 struct spdk_io_channel *channel; 4618 struct spdk_blob_opts opts; 4619 spdk_blob_id blobid, snapshotid, snapshot2id; 4620 uint64_t free_clusters; 4621 uint64_t cluster_size; 4622 4623 uint64_t payload_size; 4624 uint8_t *payload_read; 4625 uint8_t *payload_write; 4626 uint8_t *payload_clone; 4627 4628 uint64_t pages_per_cluster; 4629 uint64_t pages_per_payload; 4630 4631 int i; 4632 spdk_blob_id ids[2]; 4633 size_t count; 4634 4635 free_clusters = spdk_bs_free_cluster_count(bs); 4636 cluster_size = spdk_bs_get_cluster_size(bs); 4637 pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); 4638 pages_per_payload = pages_per_cluster * 5; 4639 4640 payload_size = cluster_size * 5; 4641 4642 payload_read = malloc(payload_size); 4643 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 4644 4645 payload_write = malloc(payload_size); 4646 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 4647 4648 payload_clone = malloc(payload_size); 4649 SPDK_CU_ASSERT_FATAL(payload_clone != NULL); 4650 4651 channel = spdk_bs_alloc_io_channel(bs); 4652 SPDK_CU_ASSERT_FATAL(channel != NULL); 4653 4654 /* Create blob */ 4655 ut_spdk_blob_opts_init(&opts); 4656 opts.thin_provision = true; 4657 opts.num_clusters = 5; 4658 4659 blob = ut_blob_create_and_open(bs, &opts); 4660 blobid = spdk_blob_get_id(blob); 4661 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4662 4663 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4664 4665 /* 1) Initial read should return zeroed payload */ 4666 memset(payload_read, 0xFF, payload_size); 4667 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4668 blob_op_complete, NULL); 4669 poll_threads(); 4670 CU_ASSERT(g_bserrno == 0); 4671 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 4672 4673 /* Fill whole blob with a pattern, except last cluster (to be sure it 4674 * isn't allocated) */ 4675 memset(payload_write, 0xE5, payload_size - cluster_size); 4676 spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - 4677 pages_per_cluster, blob_op_complete, NULL); 4678 poll_threads(); 4679 CU_ASSERT(g_bserrno == 0); 4680 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4681 4682 /* 2) Create snapshot from blob (first level) */ 4683 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4684 poll_threads(); 4685 CU_ASSERT(g_bserrno == 0); 4686 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4687 snapshotid = g_blobid; 4688 4689 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4690 poll_threads(); 4691 CU_ASSERT(g_bserrno == 0); 4692 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4693 snapshot = g_blob; 4694 CU_ASSERT(snapshot->data_ro == true); 4695 CU_ASSERT(snapshot->md_ro == true); 4696 4697 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4698 4699 /* Write every second cluster with a pattern. 4700 * 4701 * Last cluster shouldn't be written, to be sure that snapshot nor clone 4702 * doesn't allocate it. 4703 * 4704 * payload_clone stores expected result on "blob" read at the time and 4705 * is used only to check data consistency on clone before and after 4706 * inflation. Initially we fill it with a backing snapshots pattern 4707 * used before. 
4708 */ 4709 memset(payload_clone, 0xE5, payload_size - cluster_size); 4710 memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); 4711 memset(payload_write, 0xAA, payload_size); 4712 for (i = 1; i < 5; i += 2) { 4713 spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, 4714 pages_per_cluster, blob_op_complete, NULL); 4715 poll_threads(); 4716 CU_ASSERT(g_bserrno == 0); 4717 4718 /* Update expected result */ 4719 memcpy(payload_clone + (cluster_size * i), payload_write, 4720 cluster_size); 4721 } 4722 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4723 4724 /* Check data consistency on clone */ 4725 memset(payload_read, 0xFF, payload_size); 4726 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4727 blob_op_complete, NULL); 4728 poll_threads(); 4729 CU_ASSERT(g_bserrno == 0); 4730 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4731 4732 /* 3) Create second levels snapshot from blob */ 4733 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4734 poll_threads(); 4735 CU_ASSERT(g_bserrno == 0); 4736 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4737 snapshot2id = g_blobid; 4738 4739 spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); 4740 poll_threads(); 4741 CU_ASSERT(g_bserrno == 0); 4742 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4743 snapshot2 = g_blob; 4744 CU_ASSERT(snapshot2->data_ro == true); 4745 CU_ASSERT(snapshot2->md_ro == true); 4746 4747 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); 4748 4749 CU_ASSERT(snapshot2->parent_id == snapshotid); 4750 4751 /* Write one cluster on the top level blob. This cluster (1) covers 4752 * already allocated cluster in the snapshot2, so shouldn't be inflated 4753 * at all */ 4754 spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, 4755 pages_per_cluster, blob_op_complete, NULL); 4756 poll_threads(); 4757 CU_ASSERT(g_bserrno == 0); 4758 4759 /* Update expected result */ 4760 memcpy(payload_clone + cluster_size, payload_write, cluster_size); 4761 4762 /* Check data consistency on clone */ 4763 memset(payload_read, 0xFF, payload_size); 4764 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4765 blob_op_complete, NULL); 4766 poll_threads(); 4767 CU_ASSERT(g_bserrno == 0); 4768 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4769 4770 4771 /* Close all blobs */ 4772 spdk_blob_close(blob, blob_op_complete, NULL); 4773 poll_threads(); 4774 CU_ASSERT(g_bserrno == 0); 4775 4776 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4777 poll_threads(); 4778 CU_ASSERT(g_bserrno == 0); 4779 4780 spdk_blob_close(snapshot, blob_op_complete, NULL); 4781 poll_threads(); 4782 CU_ASSERT(g_bserrno == 0); 4783 4784 /* Check snapshot-clone relations */ 4785 count = 2; 4786 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4787 CU_ASSERT(count == 1); 4788 CU_ASSERT(ids[0] == snapshot2id); 4789 4790 count = 2; 4791 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4792 CU_ASSERT(count == 1); 4793 CU_ASSERT(ids[0] == blobid); 4794 4795 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); 4796 4797 free_clusters = spdk_bs_free_cluster_count(bs); 4798 if (!decouple_parent) { 4799 /* Do full blob inflation */ 4800 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 4801 poll_threads(); 4802 CU_ASSERT(g_bserrno == 0); 4803 4804 /* All clusters should be inflated (except one already allocated 4805 * in a top level blob) */ 4806 
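/* The blob already owns cluster 1 (written directly above), so full inflation materializes the remaining 4 of its 5 clusters. */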
CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4); 4807 4808 /* Check if relation tree updated correctly */ 4809 count = 2; 4810 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4811 4812 /* snapshotid have one clone */ 4813 CU_ASSERT(count == 1); 4814 CU_ASSERT(ids[0] == snapshot2id); 4815 4816 /* snapshot2id have no clones */ 4817 count = 2; 4818 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4819 CU_ASSERT(count == 0); 4820 4821 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4822 } else { 4823 /* Decouple parent of blob */ 4824 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 4825 poll_threads(); 4826 CU_ASSERT(g_bserrno == 0); 4827 4828 /* Only one cluster from a parent should be inflated (second one 4829 * is covered by a cluster written on a top level blob, and 4830 * already allocated) */ 4831 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1); 4832 4833 /* Check if relation tree updated correctly */ 4834 count = 2; 4835 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4836 4837 /* snapshotid have two clones now */ 4838 CU_ASSERT(count == 2); 4839 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4840 CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id); 4841 4842 /* snapshot2id have no clones */ 4843 count = 2; 4844 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4845 CU_ASSERT(count == 0); 4846 4847 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4848 } 4849 4850 /* Try to delete snapshot2 (should pass) */ 4851 spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL); 4852 poll_threads(); 4853 CU_ASSERT(g_bserrno == 0); 4854 4855 /* Try to delete base snapshot */ 4856 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4857 poll_threads(); 4858 CU_ASSERT(g_bserrno == 0); 4859 4860 /* Reopen blob after snapshot deletion */ 4861 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4862 poll_threads(); 4863 CU_ASSERT(g_bserrno == 0); 4864 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4865 blob = g_blob; 4866 4867 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4868 4869 /* Check data consistency on inflated blob */ 4870 memset(payload_read, 0xFF, payload_size); 4871 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4872 blob_op_complete, NULL); 4873 poll_threads(); 4874 CU_ASSERT(g_bserrno == 0); 4875 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4876 4877 spdk_bs_free_io_channel(channel); 4878 poll_threads(); 4879 4880 free(payload_read); 4881 free(payload_write); 4882 free(payload_clone); 4883 4884 ut_blob_close_and_delete(bs, blob); 4885 } 4886 4887 static void 4888 blob_inflate_rw(void) 4889 { 4890 _blob_inflate_rw(false); 4891 _blob_inflate_rw(true); 4892 } 4893 4894 /** 4895 * Snapshot-clones relation test 4896 * 4897 * snapshot 4898 * | 4899 * +-----+-----+ 4900 * | | 4901 * blob(ro) snapshot2 4902 * | | 4903 * clone2 clone 4904 */ 4905 static void 4906 blob_relations(void) 4907 { 4908 struct spdk_blob_store *bs; 4909 struct spdk_bs_dev *dev; 4910 struct spdk_bs_opts bs_opts; 4911 struct spdk_blob_opts opts; 4912 struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2; 4913 spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2; 4914 int rc; 4915 size_t count; 4916 spdk_blob_id ids[10] = {}; 4917 4918 dev = init_dev(); 4919 spdk_bs_opts_init(&bs_opts, sizeof(bs_opts)); 4920 snprintf(bs_opts.bstype.bstype, 
sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 4921 4922 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4923 poll_threads(); 4924 CU_ASSERT(g_bserrno == 0); 4925 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4926 bs = g_bs; 4927 4928 /* 1. Create blob with 10 clusters */ 4929 4930 ut_spdk_blob_opts_init(&opts); 4931 opts.num_clusters = 10; 4932 4933 blob = ut_blob_create_and_open(bs, &opts); 4934 blobid = spdk_blob_get_id(blob); 4935 4936 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4937 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4938 CU_ASSERT(!spdk_blob_is_clone(blob)); 4939 CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); 4940 4941 /* blob should not have underlying snapshot nor clones */ 4942 CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); 4943 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4944 count = SPDK_COUNTOF(ids); 4945 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4946 CU_ASSERT(rc == 0); 4947 CU_ASSERT(count == 0); 4948 4949 4950 /* 2. Create snapshot */ 4951 4952 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4953 poll_threads(); 4954 CU_ASSERT(g_bserrno == 0); 4955 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4956 snapshotid = g_blobid; 4957 4958 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4959 poll_threads(); 4960 CU_ASSERT(g_bserrno == 0); 4961 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4962 snapshot = g_blob; 4963 4964 CU_ASSERT(spdk_blob_is_read_only(snapshot)); 4965 CU_ASSERT(spdk_blob_is_snapshot(snapshot)); 4966 CU_ASSERT(!spdk_blob_is_clone(snapshot)); 4967 CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); 4968 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4969 4970 /* Check if original blob is converted to the clone of snapshot */ 4971 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4972 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4973 CU_ASSERT(spdk_blob_is_clone(blob)); 4974 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4975 CU_ASSERT(blob->parent_id == snapshotid); 4976 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4977 4978 count = SPDK_COUNTOF(ids); 4979 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4980 CU_ASSERT(rc == 0); 4981 CU_ASSERT(count == 1); 4982 CU_ASSERT(ids[0] == blobid); 4983 4984 4985 /* 3. Create clone from snapshot */ 4986 4987 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 4988 poll_threads(); 4989 CU_ASSERT(g_bserrno == 0); 4990 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4991 cloneid = g_blobid; 4992 4993 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4994 poll_threads(); 4995 CU_ASSERT(g_bserrno == 0); 4996 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4997 clone = g_blob; 4998 4999 CU_ASSERT(!spdk_blob_is_read_only(clone)); 5000 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 5001 CU_ASSERT(spdk_blob_is_clone(clone)); 5002 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 5003 CU_ASSERT(clone->parent_id == snapshotid); 5004 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); 5005 5006 count = SPDK_COUNTOF(ids); 5007 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5008 CU_ASSERT(rc == 0); 5009 CU_ASSERT(count == 0); 5010 5011 /* Check if clone is on the snapshot's list */ 5012 count = SPDK_COUNTOF(ids); 5013 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5014 CU_ASSERT(rc == 0); 5015 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5016 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5017 5018 5019 /* 4. 
Create snapshot of the clone */ 5020 5021 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5022 poll_threads(); 5023 CU_ASSERT(g_bserrno == 0); 5024 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5025 snapshotid2 = g_blobid; 5026 5027 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 5028 poll_threads(); 5029 CU_ASSERT(g_bserrno == 0); 5030 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5031 snapshot2 = g_blob; 5032 5033 CU_ASSERT(spdk_blob_is_read_only(snapshot2)); 5034 CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); 5035 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 5036 CU_ASSERT(snapshot2->parent_id == snapshotid); 5037 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 5038 5039 /* Check if clone is converted to the clone of snapshot2 and snapshot2 5040 * is a child of snapshot */ 5041 CU_ASSERT(!spdk_blob_is_read_only(clone)); 5042 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 5043 CU_ASSERT(spdk_blob_is_clone(clone)); 5044 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 5045 CU_ASSERT(clone->parent_id == snapshotid2); 5046 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5047 5048 count = SPDK_COUNTOF(ids); 5049 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5050 CU_ASSERT(rc == 0); 5051 CU_ASSERT(count == 1); 5052 CU_ASSERT(ids[0] == cloneid); 5053 5054 5055 /* 5. Try to create clone from read only blob */ 5056 5057 /* Mark blob as read only */ 5058 spdk_blob_set_read_only(blob); 5059 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5060 poll_threads(); 5061 CU_ASSERT(g_bserrno == 0); 5062 5063 /* Check if previously created blob is read only clone */ 5064 CU_ASSERT(spdk_blob_is_read_only(blob)); 5065 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 5066 CU_ASSERT(spdk_blob_is_clone(blob)); 5067 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 5068 5069 /* Create clone from read only blob */ 5070 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5071 poll_threads(); 5072 CU_ASSERT(g_bserrno == 0); 5073 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5074 cloneid2 = g_blobid; 5075 5076 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5077 poll_threads(); 5078 CU_ASSERT(g_bserrno == 0); 5079 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5080 clone2 = g_blob; 5081 5082 CU_ASSERT(!spdk_blob_is_read_only(clone2)); 5083 CU_ASSERT(!spdk_blob_is_snapshot(clone2)); 5084 CU_ASSERT(spdk_blob_is_clone(clone2)); 5085 CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); 5086 5087 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5088 5089 count = SPDK_COUNTOF(ids); 5090 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5091 CU_ASSERT(rc == 0); 5092 5093 CU_ASSERT(count == 1); 5094 CU_ASSERT(ids[0] == cloneid2); 5095 5096 /* Close blobs */ 5097 5098 spdk_blob_close(clone2, blob_op_complete, NULL); 5099 poll_threads(); 5100 CU_ASSERT(g_bserrno == 0); 5101 5102 spdk_blob_close(blob, blob_op_complete, NULL); 5103 poll_threads(); 5104 CU_ASSERT(g_bserrno == 0); 5105 5106 spdk_blob_close(clone, blob_op_complete, NULL); 5107 poll_threads(); 5108 CU_ASSERT(g_bserrno == 0); 5109 5110 spdk_blob_close(snapshot, blob_op_complete, NULL); 5111 poll_threads(); 5112 CU_ASSERT(g_bserrno == 0); 5113 5114 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5115 poll_threads(); 5116 CU_ASSERT(g_bserrno == 0); 5117 5118 /* Try to delete snapshot with more than 1 clone */ 5119 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5120 poll_threads(); 5121 CU_ASSERT(g_bserrno != 0); 5122 5123 
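/* Reload the blobstore so the snapshot-clone relations verified below are the ones rebuilt from persisted metadata. */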
ut_bs_reload(&bs, &bs_opts); 5124 5125 /* NULL ids array should return number of clones in count */ 5126 count = SPDK_COUNTOF(ids); 5127 rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); 5128 CU_ASSERT(rc == -ENOMEM); 5129 CU_ASSERT(count == 2); 5130 5131 /* incorrect array size */ 5132 count = 1; 5133 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5134 CU_ASSERT(rc == -ENOMEM); 5135 CU_ASSERT(count == 2); 5136 5137 5138 /* Verify structure of loaded blob store */ 5139 5140 /* snapshot */ 5141 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 5142 5143 count = SPDK_COUNTOF(ids); 5144 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5145 CU_ASSERT(rc == 0); 5146 CU_ASSERT(count == 2); 5147 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5148 CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); 5149 5150 /* blob */ 5151 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5152 count = SPDK_COUNTOF(ids); 5153 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5154 CU_ASSERT(rc == 0); 5155 CU_ASSERT(count == 1); 5156 CU_ASSERT(ids[0] == cloneid2); 5157 5158 /* clone */ 5159 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5160 count = SPDK_COUNTOF(ids); 5161 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5162 CU_ASSERT(rc == 0); 5163 CU_ASSERT(count == 0); 5164 5165 /* snapshot2 */ 5166 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 5167 count = SPDK_COUNTOF(ids); 5168 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5169 CU_ASSERT(rc == 0); 5170 CU_ASSERT(count == 1); 5171 CU_ASSERT(ids[0] == cloneid); 5172 5173 /* clone2 */ 5174 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5175 count = SPDK_COUNTOF(ids); 5176 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5177 CU_ASSERT(rc == 0); 5178 CU_ASSERT(count == 0); 5179 5180 /* Try to delete blob that user should not be able to remove */ 5181 5182 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5183 poll_threads(); 5184 CU_ASSERT(g_bserrno != 0); 5185 5186 /* Remove all blobs */ 5187 5188 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5189 poll_threads(); 5190 CU_ASSERT(g_bserrno == 0); 5191 5192 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5193 poll_threads(); 5194 CU_ASSERT(g_bserrno == 0); 5195 5196 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5197 poll_threads(); 5198 CU_ASSERT(g_bserrno == 0); 5199 5200 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5201 poll_threads(); 5202 CU_ASSERT(g_bserrno == 0); 5203 5204 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5205 poll_threads(); 5206 CU_ASSERT(g_bserrno == 0); 5207 5208 spdk_bs_unload(bs, bs_op_complete, NULL); 5209 poll_threads(); 5210 CU_ASSERT(g_bserrno == 0); 5211 5212 g_bs = NULL; 5213 } 5214 5215 /** 5216 * Snapshot-clones relation test 2 5217 * 5218 * snapshot1 5219 * | 5220 * snapshot2 5221 * | 5222 * +-----+-----+ 5223 * | | 5224 * blob(ro) snapshot3 5225 * | | 5226 * | snapshot4 5227 * | | | 5228 * clone2 clone clone3 5229 */ 5230 static void 5231 blob_relations2(void) 5232 { 5233 struct spdk_blob_store *bs; 5234 struct spdk_bs_dev *dev; 5235 struct spdk_bs_opts bs_opts; 5236 struct spdk_blob_opts opts; 5237 struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; 5238 spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, 5239 cloneid3; 5240 int rc; 5241 size_t count; 5242 
spdk_blob_id ids[10] = {}; 5243 5244 dev = init_dev(); 5245 spdk_bs_opts_init(&bs_opts, sizeof(bs_opts)); 5246 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 5247 5248 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 5249 poll_threads(); 5250 CU_ASSERT(g_bserrno == 0); 5251 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5252 bs = g_bs; 5253 5254 /* 1. Create blob with 10 clusters */ 5255 5256 ut_spdk_blob_opts_init(&opts); 5257 opts.num_clusters = 10; 5258 5259 blob = ut_blob_create_and_open(bs, &opts); 5260 blobid = spdk_blob_get_id(blob); 5261 5262 /* 2. Create snapshot1 */ 5263 5264 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5265 poll_threads(); 5266 CU_ASSERT(g_bserrno == 0); 5267 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5268 snapshotid1 = g_blobid; 5269 5270 spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); 5271 poll_threads(); 5272 CU_ASSERT(g_bserrno == 0); 5273 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5274 snapshot1 = g_blob; 5275 5276 CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); 5277 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); 5278 5279 CU_ASSERT(blob->parent_id == snapshotid1); 5280 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5281 5282 /* Check if blob is the clone of snapshot1 */ 5283 CU_ASSERT(blob->parent_id == snapshotid1); 5284 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5285 5286 count = SPDK_COUNTOF(ids); 5287 rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); 5288 CU_ASSERT(rc == 0); 5289 CU_ASSERT(count == 1); 5290 CU_ASSERT(ids[0] == blobid); 5291 5292 /* 3. Create another snapshot */ 5293 5294 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5295 poll_threads(); 5296 CU_ASSERT(g_bserrno == 0); 5297 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5298 snapshotid2 = g_blobid; 5299 5300 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 5301 poll_threads(); 5302 CU_ASSERT(g_bserrno == 0); 5303 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5304 snapshot2 = g_blob; 5305 5306 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 5307 CU_ASSERT(snapshot2->parent_id == snapshotid1); 5308 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); 5309 5310 /* Check if snapshot2 is the clone of snapshot1 and blob 5311 * is a child of snapshot2 */ 5312 CU_ASSERT(blob->parent_id == snapshotid2); 5313 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5314 5315 count = SPDK_COUNTOF(ids); 5316 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5317 CU_ASSERT(rc == 0); 5318 CU_ASSERT(count == 1); 5319 CU_ASSERT(ids[0] == blobid); 5320 5321 /* 4. 
Create clone from snapshot */ 5322 5323 spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); 5324 poll_threads(); 5325 CU_ASSERT(g_bserrno == 0); 5326 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5327 cloneid = g_blobid; 5328 5329 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 5330 poll_threads(); 5331 CU_ASSERT(g_bserrno == 0); 5332 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5333 clone = g_blob; 5334 5335 CU_ASSERT(clone->parent_id == snapshotid2); 5336 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5337 5338 /* Check if clone is on the snapshot's list */ 5339 count = SPDK_COUNTOF(ids); 5340 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5341 CU_ASSERT(rc == 0); 5342 CU_ASSERT(count == 2); 5343 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5344 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5345 5346 /* 5. Create snapshot of the clone */ 5347 5348 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5349 poll_threads(); 5350 CU_ASSERT(g_bserrno == 0); 5351 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5352 snapshotid3 = g_blobid; 5353 5354 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5355 poll_threads(); 5356 CU_ASSERT(g_bserrno == 0); 5357 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5358 snapshot3 = g_blob; 5359 5360 CU_ASSERT(snapshot3->parent_id == snapshotid2); 5361 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5362 5363 /* Check if clone is converted to the clone of snapshot3 and snapshot3 5364 * is a child of snapshot2 */ 5365 CU_ASSERT(clone->parent_id == snapshotid3); 5366 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5367 5368 count = SPDK_COUNTOF(ids); 5369 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5370 CU_ASSERT(rc == 0); 5371 CU_ASSERT(count == 1); 5372 CU_ASSERT(ids[0] == cloneid); 5373 5374 /* 6. Create another snapshot of the clone */ 5375 5376 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5377 poll_threads(); 5378 CU_ASSERT(g_bserrno == 0); 5379 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5380 snapshotid4 = g_blobid; 5381 5382 spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); 5383 poll_threads(); 5384 CU_ASSERT(g_bserrno == 0); 5385 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5386 snapshot4 = g_blob; 5387 5388 CU_ASSERT(snapshot4->parent_id == snapshotid3); 5389 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); 5390 5391 /* Check if clone is converted to the clone of snapshot4 and snapshot4 5392 * is a child of snapshot3 */ 5393 CU_ASSERT(clone->parent_id == snapshotid4); 5394 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); 5395 5396 count = SPDK_COUNTOF(ids); 5397 rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); 5398 CU_ASSERT(rc == 0); 5399 CU_ASSERT(count == 1); 5400 CU_ASSERT(ids[0] == cloneid); 5401 5402 /* 7. Remove snapshot 4 */ 5403 5404 ut_blob_close_and_delete(bs, snapshot4); 5405 5406 /* Check if relations are back to state from before creating snapshot 4 */ 5407 CU_ASSERT(clone->parent_id == snapshotid3); 5408 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5409 5410 count = SPDK_COUNTOF(ids); 5411 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5412 CU_ASSERT(rc == 0); 5413 CU_ASSERT(count == 1); 5414 CU_ASSERT(ids[0] == cloneid); 5415 5416 /* 8. 
Create second clone of snapshot 3 and try to remove snapshot 3 */ 5417 5418 spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); 5419 poll_threads(); 5420 CU_ASSERT(g_bserrno == 0); 5421 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5422 cloneid3 = g_blobid; 5423 5424 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5425 poll_threads(); 5426 CU_ASSERT(g_bserrno != 0); 5427 5428 /* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */ 5429 5430 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5431 poll_threads(); 5432 CU_ASSERT(g_bserrno == 0); 5433 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5434 snapshot3 = g_blob; 5435 5436 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5437 poll_threads(); 5438 CU_ASSERT(g_bserrno != 0); 5439 5440 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5441 poll_threads(); 5442 CU_ASSERT(g_bserrno == 0); 5443 5444 spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); 5445 poll_threads(); 5446 CU_ASSERT(g_bserrno == 0); 5447 5448 /* 10. Remove snapshot 1 */ 5449 5450 ut_blob_close_and_delete(bs, snapshot1); 5451 5452 /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ 5453 CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); 5454 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5455 5456 count = SPDK_COUNTOF(ids); 5457 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5458 CU_ASSERT(rc == 0); 5459 CU_ASSERT(count == 2); 5460 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5461 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5462 5463 /* 11. Try to create clone from read only blob */ 5464 5465 /* Mark blob as read only */ 5466 spdk_blob_set_read_only(blob); 5467 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5468 poll_threads(); 5469 CU_ASSERT(g_bserrno == 0); 5470 5471 /* Create clone from read only blob */ 5472 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5473 poll_threads(); 5474 CU_ASSERT(g_bserrno == 0); 5475 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5476 cloneid2 = g_blobid; 5477 5478 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5479 poll_threads(); 5480 CU_ASSERT(g_bserrno == 0); 5481 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5482 clone2 = g_blob; 5483 5484 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5485 5486 count = SPDK_COUNTOF(ids); 5487 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5488 CU_ASSERT(rc == 0); 5489 CU_ASSERT(count == 1); 5490 CU_ASSERT(ids[0] == cloneid2); 5491 5492 /* Close blobs */ 5493 5494 spdk_blob_close(clone2, blob_op_complete, NULL); 5495 poll_threads(); 5496 CU_ASSERT(g_bserrno == 0); 5497 5498 spdk_blob_close(blob, blob_op_complete, NULL); 5499 poll_threads(); 5500 CU_ASSERT(g_bserrno == 0); 5501 5502 spdk_blob_close(clone, blob_op_complete, NULL); 5503 poll_threads(); 5504 CU_ASSERT(g_bserrno == 0); 5505 5506 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5507 poll_threads(); 5508 CU_ASSERT(g_bserrno == 0); 5509 5510 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5511 poll_threads(); 5512 CU_ASSERT(g_bserrno == 0); 5513 5514 ut_bs_reload(&bs, &bs_opts); 5515 5516 /* Verify structure of loaded blob store */ 5517 5518 /* snapshot2 */ 5519 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5520 5521 count = SPDK_COUNTOF(ids); 5522 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5523 CU_ASSERT(rc == 0); 5524 
CU_ASSERT(count == 2); 5525 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5526 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5527 5528 /* blob */ 5529 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5530 count = SPDK_COUNTOF(ids); 5531 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5532 CU_ASSERT(rc == 0); 5533 CU_ASSERT(count == 1); 5534 CU_ASSERT(ids[0] == cloneid2); 5535 5536 /* clone */ 5537 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5538 count = SPDK_COUNTOF(ids); 5539 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5540 CU_ASSERT(rc == 0); 5541 CU_ASSERT(count == 0); 5542 5543 /* snapshot3 */ 5544 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5545 count = SPDK_COUNTOF(ids); 5546 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5547 CU_ASSERT(rc == 0); 5548 CU_ASSERT(count == 1); 5549 CU_ASSERT(ids[0] == cloneid); 5550 5551 /* clone2 */ 5552 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5553 count = SPDK_COUNTOF(ids); 5554 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5555 CU_ASSERT(rc == 0); 5556 CU_ASSERT(count == 0); 5557 5558 /* Try to delete all blobs in the worse possible order */ 5559 5560 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5561 poll_threads(); 5562 CU_ASSERT(g_bserrno != 0); 5563 5564 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5565 poll_threads(); 5566 CU_ASSERT(g_bserrno == 0); 5567 5568 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5569 poll_threads(); 5570 CU_ASSERT(g_bserrno != 0); 5571 5572 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5573 poll_threads(); 5574 CU_ASSERT(g_bserrno == 0); 5575 5576 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5577 poll_threads(); 5578 CU_ASSERT(g_bserrno == 0); 5579 5580 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5581 poll_threads(); 5582 CU_ASSERT(g_bserrno == 0); 5583 5584 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5585 poll_threads(); 5586 CU_ASSERT(g_bserrno == 0); 5587 5588 spdk_bs_unload(bs, bs_op_complete, NULL); 5589 poll_threads(); 5590 CU_ASSERT(g_bserrno == 0); 5591 5592 g_bs = NULL; 5593 } 5594 5595 /** 5596 * Snapshot-clones relation test 3 5597 * 5598 * snapshot0 5599 * | 5600 * snapshot1 5601 * | 5602 * snapshot2 5603 * | 5604 * blob 5605 */ 5606 static void 5607 blob_relations3(void) 5608 { 5609 struct spdk_blob_store *bs; 5610 struct spdk_bs_dev *dev; 5611 struct spdk_io_channel *channel; 5612 struct spdk_bs_opts bs_opts; 5613 struct spdk_blob_opts opts; 5614 struct spdk_blob *blob; 5615 spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2; 5616 5617 dev = init_dev(); 5618 spdk_bs_opts_init(&bs_opts, sizeof(bs_opts)); 5619 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 5620 5621 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 5622 poll_threads(); 5623 CU_ASSERT(g_bserrno == 0); 5624 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5625 bs = g_bs; 5626 5627 channel = spdk_bs_alloc_io_channel(bs); 5628 SPDK_CU_ASSERT_FATAL(channel != NULL); 5629 5630 /* 1. Create blob with 10 clusters */ 5631 ut_spdk_blob_opts_init(&opts); 5632 opts.num_clusters = 10; 5633 5634 blob = ut_blob_create_and_open(bs, &opts); 5635 blobid = spdk_blob_get_id(blob); 5636 5637 /* 2. 
Create snapshot0 */ 5638 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5639 poll_threads(); 5640 CU_ASSERT(g_bserrno == 0); 5641 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5642 snapshotid0 = g_blobid; 5643 5644 /* 3. Create snapshot1 */ 5645 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5646 poll_threads(); 5647 CU_ASSERT(g_bserrno == 0); 5648 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5649 snapshotid1 = g_blobid; 5650 5651 /* 4. Create snapshot2 */ 5652 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5653 poll_threads(); 5654 CU_ASSERT(g_bserrno == 0); 5655 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5656 snapshotid2 = g_blobid; 5657 5658 /* 5. Decouple blob */ 5659 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 5660 poll_threads(); 5661 CU_ASSERT(g_bserrno == 0); 5662 5663 /* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */ 5664 spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL); 5665 poll_threads(); 5666 CU_ASSERT(g_bserrno == 0); 5667 5668 /* 7. Delete blob */ 5669 spdk_blob_close(blob, blob_op_complete, NULL); 5670 poll_threads(); 5671 CU_ASSERT(g_bserrno == 0); 5672 5673 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5674 poll_threads(); 5675 CU_ASSERT(g_bserrno == 0); 5676 5677 /* 8. Delete snapshot2. 5678 * If md of snapshot 2 was updated, it should be possible to delete it */ 5679 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5680 poll_threads(); 5681 CU_ASSERT(g_bserrno == 0); 5682 5683 /* Remove remaining blobs and unload bs */ 5684 spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL); 5685 poll_threads(); 5686 CU_ASSERT(g_bserrno == 0); 5687 5688 spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL); 5689 poll_threads(); 5690 CU_ASSERT(g_bserrno == 0); 5691 5692 spdk_bs_free_io_channel(channel); 5693 poll_threads(); 5694 5695 spdk_bs_unload(bs, bs_op_complete, NULL); 5696 poll_threads(); 5697 CU_ASSERT(g_bserrno == 0); 5698 5699 g_bs = NULL; 5700 } 5701 5702 static void 5703 blobstore_clean_power_failure(void) 5704 { 5705 struct spdk_blob_store *bs; 5706 struct spdk_blob *blob; 5707 struct spdk_power_failure_thresholds thresholds = {}; 5708 bool clean = false; 5709 struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 5710 struct spdk_bs_super_block super_copy = {}; 5711 5712 thresholds.general_threshold = 1; 5713 while (!clean) { 5714 /* Create bs and blob */ 5715 suite_blob_setup(); 5716 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5717 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5718 bs = g_bs; 5719 blob = g_blob; 5720 5721 /* Super block should not change for rest of the UT, 5722 * save it and compare later. */ 5723 memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block)); 5724 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5725 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5726 5727 /* Force bs/super block in a clean state. 5728 * Along with marking blob dirty, to cause blob persist. */ 5729 blob->state = SPDK_BLOB_STATE_DIRTY; 5730 bs->clean = 1; 5731 super->clean = 1; 5732 super->crc = blob_md_page_calc_crc(super); 5733 5734 g_bserrno = -1; 5735 dev_set_power_failure_thresholds(thresholds); 5736 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5737 poll_threads(); 5738 dev_reset_power_failure_event(); 5739 5740 if (g_bserrno == 0) { 5741 /* After successful md sync, both bs and super block 5742 * should be marked as not clean. 
*/ 5743 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5744 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5745 clean = true; 5746 } 5747 5748 /* Depending on the point of failure, the super block was either updated or not. */ 5749 super_copy.clean = super->clean; 5750 super_copy.crc = blob_md_page_calc_crc(&super_copy); 5751 /* Compare that the values in the super block remained unchanged. */ 5752 SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block))); 5753 5754 /* Delete blob and unload bs */ 5755 suite_blob_cleanup(); 5756 5757 thresholds.general_threshold++; 5758 } 5759 } 5760 5761 static void 5762 blob_delete_snapshot_power_failure(void) 5763 { 5764 struct spdk_bs_dev *dev; 5765 struct spdk_blob_store *bs; 5766 struct spdk_blob_opts opts; 5767 struct spdk_blob *blob, *snapshot; 5768 struct spdk_power_failure_thresholds thresholds = {}; 5769 spdk_blob_id blobid, snapshotid; 5770 const void *value; 5771 size_t value_len; 5772 size_t count; 5773 spdk_blob_id ids[3] = {}; 5774 int rc; 5775 bool deleted = false; 5776 int delete_snapshot_bserrno = -1; 5777 5778 thresholds.general_threshold = 1; 5779 while (!deleted) { 5780 dev = init_dev(); 5781 5782 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5783 poll_threads(); 5784 CU_ASSERT(g_bserrno == 0); 5785 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5786 bs = g_bs; 5787 5788 /* Create blob */ 5789 ut_spdk_blob_opts_init(&opts); 5790 opts.num_clusters = 10; 5791 5792 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5793 poll_threads(); 5794 CU_ASSERT(g_bserrno == 0); 5795 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5796 blobid = g_blobid; 5797 5798 /* Create snapshot */ 5799 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5800 poll_threads(); 5801 CU_ASSERT(g_bserrno == 0); 5802 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5803 snapshotid = g_blobid; 5804 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5805 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5806 5807 dev_set_power_failure_thresholds(thresholds); 5808 5809 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5810 poll_threads(); 5811 delete_snapshot_bserrno = g_bserrno; 5812 5813 /* Do not shut down cleanly. Assumption is that after snapshot deletion 5814 * reports success, changes to both blobs should already be persisted.
*/ 5815 dev_reset_power_failure_event(); 5816 ut_bs_dirty_load(&bs, NULL); 5817 5818 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5819 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5820 5821 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5822 poll_threads(); 5823 CU_ASSERT(g_bserrno == 0); 5824 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5825 blob = g_blob; 5826 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5827 5828 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5829 poll_threads(); 5830 5831 if (g_bserrno == 0) { 5832 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5833 snapshot = g_blob; 5834 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5835 count = SPDK_COUNTOF(ids); 5836 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5837 CU_ASSERT(rc == 0); 5838 CU_ASSERT(count == 1); 5839 CU_ASSERT(ids[0] == blobid); 5840 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 5841 CU_ASSERT(rc != 0); 5842 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5843 5844 spdk_blob_close(snapshot, blob_op_complete, NULL); 5845 poll_threads(); 5846 CU_ASSERT(g_bserrno == 0); 5847 } else { 5848 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5849 /* Snapshot might have been left in unrecoverable state, so it does not open. 5850 * Yet delete might perform further changes to the clone after that. 5851 * This UT should test until snapshot is deleted and delete call succeeds. */ 5852 if (delete_snapshot_bserrno == 0) { 5853 deleted = true; 5854 } 5855 } 5856 5857 spdk_blob_close(blob, blob_op_complete, NULL); 5858 poll_threads(); 5859 CU_ASSERT(g_bserrno == 0); 5860 5861 spdk_bs_unload(bs, bs_op_complete, NULL); 5862 poll_threads(); 5863 CU_ASSERT(g_bserrno == 0); 5864 5865 thresholds.general_threshold++; 5866 } 5867 } 5868 5869 static void 5870 blob_create_snapshot_power_failure(void) 5871 { 5872 struct spdk_blob_store *bs = g_bs; 5873 struct spdk_bs_dev *dev; 5874 struct spdk_blob_opts opts; 5875 struct spdk_blob *blob, *snapshot; 5876 struct spdk_power_failure_thresholds thresholds = {}; 5877 spdk_blob_id blobid, snapshotid; 5878 const void *value; 5879 size_t value_len; 5880 size_t count; 5881 spdk_blob_id ids[3] = {}; 5882 int rc; 5883 bool created = false; 5884 int create_snapshot_bserrno = -1; 5885 5886 thresholds.general_threshold = 1; 5887 while (!created) { 5888 dev = init_dev(); 5889 5890 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5891 poll_threads(); 5892 CU_ASSERT(g_bserrno == 0); 5893 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5894 bs = g_bs; 5895 5896 /* Create blob */ 5897 ut_spdk_blob_opts_init(&opts); 5898 opts.num_clusters = 10; 5899 5900 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5901 poll_threads(); 5902 CU_ASSERT(g_bserrno == 0); 5903 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5904 blobid = g_blobid; 5905 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5906 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5907 5908 dev_set_power_failure_thresholds(thresholds); 5909 5910 /* Create snapshot */ 5911 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5912 poll_threads(); 5913 create_snapshot_bserrno = g_bserrno; 5914 snapshotid = g_blobid; 5915 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5916 
SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5917 5918 /* Do not shut down cleanly. Assumption is that after create snapshot 5919 * reports success, both blobs should be power-fail safe. */ 5920 dev_reset_power_failure_event(); 5921 ut_bs_dirty_load(&bs, NULL); 5922 5923 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5924 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5925 5926 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5927 poll_threads(); 5928 CU_ASSERT(g_bserrno == 0); 5929 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5930 blob = g_blob; 5931 5932 if (snapshotid != SPDK_BLOBID_INVALID) { 5933 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5934 poll_threads(); 5935 } 5936 5937 if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { 5938 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5939 snapshot = g_blob; 5940 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5941 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5942 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5943 count = SPDK_COUNTOF(ids); 5944 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5945 CU_ASSERT(rc == 0); 5946 CU_ASSERT(count == 1); 5947 CU_ASSERT(ids[0] == blobid); 5948 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); 5949 CU_ASSERT(rc != 0); 5950 5951 spdk_blob_close(snapshot, blob_op_complete, NULL); 5952 poll_threads(); 5953 CU_ASSERT(g_bserrno == 0); 5954 if (create_snapshot_bserrno == 0) { 5955 created = true; 5956 } 5957 } else { 5958 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5959 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); 5960 } 5961 5962 spdk_blob_close(blob, blob_op_complete, NULL); 5963 poll_threads(); 5964 CU_ASSERT(g_bserrno == 0); 5965 5966 spdk_bs_unload(bs, bs_op_complete, NULL); 5967 poll_threads(); 5968 CU_ASSERT(g_bserrno == 0); 5969 5970 thresholds.general_threshold++; 5971 } 5972 } 5973 5974 static void 5975 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5976 { 5977 uint8_t payload_ff[64 * 512]; 5978 uint8_t payload_aa[64 * 512]; 5979 uint8_t payload_00[64 * 512]; 5980 uint8_t *cluster0, *cluster1; 5981 5982 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5983 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5984 memset(payload_00, 0x00, sizeof(payload_00)); 5985 5986 /* Try to perform I/O with io unit = 512 */ 5987 spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); 5988 poll_threads(); 5989 CU_ASSERT(g_bserrno == 0); 5990 5991 /* If thin provisioned is set cluster should be allocated now */ 5992 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5993 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5994 5995 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5996 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). 
*/ 5997 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5998 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5999 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 6000 6001 /* Verify write with offset on first page */ 6002 spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); 6003 poll_threads(); 6004 CU_ASSERT(g_bserrno == 0); 6005 6006 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6007 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6008 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6009 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6010 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6011 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 6012 6013 /* Verify write with offset on first page */ 6014 spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); 6015 poll_threads(); 6016 6017 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6018 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6019 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6020 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6021 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6022 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 6023 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 6024 6025 /* Verify write with offset on second page */ 6026 spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); 6027 poll_threads(); 6028 6029 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 6030 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6031 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6032 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6033 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6034 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 6035 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 6036 6037 /* Verify write across multiple pages */ 6038 spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); 6039 poll_threads(); 6040 6041 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ 6042 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6043 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6044 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6045 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6046 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6047 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 6048 6049 /* Verify write across multiple clusters */ 6050 spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); 6051 poll_threads(); 6052 6053 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 6054 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6055 6056 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6057 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6058 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6059 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6060 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6061 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6062 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6063 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 
4 * 512) == 0); 6064 6065 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6066 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 6067 6068 /* Verify write to second cluster */ 6069 spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); 6070 poll_threads(); 6071 6072 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 6073 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6074 6075 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6076 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 6077 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6078 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6079 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6080 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6081 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6082 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 6083 6084 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6085 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 6086 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 6087 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 6088 } 6089 6090 static void 6091 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6092 { 6093 uint8_t payload_read[64 * 512]; 6094 uint8_t payload_ff[64 * 512]; 6095 uint8_t payload_aa[64 * 512]; 6096 uint8_t payload_00[64 * 512]; 6097 6098 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6099 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6100 memset(payload_00, 0x00, sizeof(payload_00)); 6101 6102 /* Read only first io unit */ 6103 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6104 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6105 * payload_read: F000 0000 | 0000 0000 ... */ 6106 memset(payload_read, 0x00, sizeof(payload_read)); 6107 spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); 6108 poll_threads(); 6109 CU_ASSERT(g_bserrno == 0); 6110 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6111 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 6112 6113 /* Read four io_units starting from offset = 2 6114 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6115 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6116 * payload_read: F0AA 0000 | 0000 0000 ... */ 6117 6118 memset(payload_read, 0x00, sizeof(payload_read)); 6119 spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); 6120 poll_threads(); 6121 CU_ASSERT(g_bserrno == 0); 6122 6123 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6124 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6125 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 6126 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 6127 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6128 6129 /* Read eight io_units across multiple pages 6130 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6131 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6132 * payload_read: AAAA AAAA | 0000 0000 ... 
*/ 6133 memset(payload_read, 0x00, sizeof(payload_read)); 6134 spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); 6135 poll_threads(); 6136 CU_ASSERT(g_bserrno == 0); 6137 6138 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 6139 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6140 6141 /* Read eight io_units across multiple clusters 6142 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 6143 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6144 * payload_read: FFFF FFFF | 0000 0000 ... */ 6145 memset(payload_read, 0x00, sizeof(payload_read)); 6146 spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); 6147 poll_threads(); 6148 CU_ASSERT(g_bserrno == 0); 6149 6150 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 6151 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6152 6153 /* Read four io_units from second cluster 6154 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6155 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 6156 * payload_read: 00FF 0000 | 0000 0000 ... */ 6157 memset(payload_read, 0x00, sizeof(payload_read)); 6158 spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); 6159 poll_threads(); 6160 CU_ASSERT(g_bserrno == 0); 6161 6162 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 6163 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 6164 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6165 6166 /* Read second cluster 6167 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6168 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6169 * payload_read: FFFF 0000 | 0000 FF00 ... 
*/ 6170 memset(payload_read, 0x00, sizeof(payload_read)); 6171 spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); 6172 poll_threads(); 6173 CU_ASSERT(g_bserrno == 0); 6174 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6175 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6176 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6177 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6178 6179 /* Read whole two clusters 6180 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6181 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6182 memset(payload_read, 0x00, sizeof(payload_read)); 6183 spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); 6184 poll_threads(); 6185 CU_ASSERT(g_bserrno == 0); 6186 6187 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6188 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6189 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6190 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6191 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6192 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6193 6194 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6195 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6196 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6197 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6198 } 6199 6200 6201 static void 6202 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6203 { 6204 uint8_t payload_ff[64 * 512]; 6205 uint8_t payload_aa[64 * 512]; 6206 uint8_t payload_00[64 * 512]; 6207 uint8_t *cluster0, *cluster1; 6208 6209 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6210 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6211 memset(payload_00, 0x00, sizeof(payload_00)); 6212 6213 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 6214 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6215 6216 /* Unmap */ 6217 spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); 6218 poll_threads(); 6219 6220 CU_ASSERT(g_bserrno == 0); 6221 6222 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 6223 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 6224 } 6225 6226 static void 6227 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6228 { 6229 uint8_t payload_ff[64 * 512]; 6230 uint8_t payload_aa[64 * 512]; 6231 uint8_t payload_00[64 * 512]; 6232 uint8_t *cluster0, *cluster1; 6233 6234 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6235 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6236 memset(payload_00, 0x00, sizeof(payload_00)); 6237 6238 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 6239 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6240 6241 /* Write zeroes */ 6242 spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); 6243 poll_threads(); 6244 6245 CU_ASSERT(g_bserrno == 0); 6246 6247 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 6248 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 6249 } 6250 6251 6252 static void 6253 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 
6254 { 6255 uint8_t payload_ff[64 * 512]; 6256 uint8_t payload_aa[64 * 512]; 6257 uint8_t payload_00[64 * 512]; 6258 uint8_t *cluster0, *cluster1; 6259 struct iovec iov[4]; 6260 6261 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6262 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6263 memset(payload_00, 0x00, sizeof(payload_00)); 6264 6265 /* Try to perform I/O with io unit = 512 */ 6266 iov[0].iov_base = payload_ff; 6267 iov[0].iov_len = 1 * 512; 6268 spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 6269 poll_threads(); 6270 CU_ASSERT(g_bserrno == 0); 6271 6272 /* If thin provisioned is set cluster should be allocated now */ 6273 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 6274 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 6275 6276 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 6277 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ 6278 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6279 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6280 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 6281 6282 /* Verify write with offset on first page */ 6283 iov[0].iov_base = payload_ff; 6284 iov[0].iov_len = 1 * 512; 6285 spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); 6286 poll_threads(); 6287 CU_ASSERT(g_bserrno == 0); 6288 6289 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6290 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6291 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6292 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6293 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6294 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 6295 6296 /* Verify write with offset on first page */ 6297 iov[0].iov_base = payload_ff; 6298 iov[0].iov_len = 4 * 512; 6299 spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); 6300 poll_threads(); 6301 6302 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6303 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6304 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6305 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6306 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6307 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 6308 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 6309 6310 /* Verify write with offset on second page */ 6311 iov[0].iov_base = payload_ff; 6312 iov[0].iov_len = 4 * 512; 6313 spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); 6314 poll_threads(); 6315 6316 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 6317 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6318 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6319 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6320 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6321 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 6322 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 6323 6324 /* Verify write across multiple pages */ 6325 iov[0].iov_base = payload_aa; 6326 iov[0].iov_len = 8 * 512; 6327 spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); 6328 poll_threads(); 6329 6330 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 
0000 | 0000 0000 ] */ 6331 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6332 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6333 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6334 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6335 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6336 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 6337 6338 /* Verify write across multiple clusters */ 6339 6340 iov[0].iov_base = payload_ff; 6341 iov[0].iov_len = 8 * 512; 6342 spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); 6343 poll_threads(); 6344 6345 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 6346 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6347 6348 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6349 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6350 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6351 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6352 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6353 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6354 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6355 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); 6356 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 6357 6358 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6359 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 6360 6361 /* Verify write to second cluster */ 6362 6363 iov[0].iov_base = payload_ff; 6364 iov[0].iov_len = 2 * 512; 6365 spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); 6366 poll_threads(); 6367 6368 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 6369 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6370 6371 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6372 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 6373 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6374 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6375 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6376 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6377 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6378 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 6379 6380 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6381 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 6382 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 6383 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 6384 } 6385 6386 static void 6387 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6388 { 6389 uint8_t payload_read[64 * 512]; 6390 uint8_t payload_ff[64 * 512]; 6391 uint8_t payload_aa[64 * 512]; 6392 uint8_t payload_00[64 * 512]; 6393 struct iovec iov[4]; 6394 6395 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6396 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6397 memset(payload_00, 0x00, sizeof(payload_00)); 6398 6399 /* Read only first io unit */ 6400 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6401 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6402 * payload_read: F000 0000 | 0000 0000 ... 
*/ 6403 memset(payload_read, 0x00, sizeof(payload_read)); 6404 iov[0].iov_base = payload_read; 6405 iov[0].iov_len = 1 * 512; 6406 spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 6407 poll_threads(); 6408 6409 CU_ASSERT(g_bserrno == 0); 6410 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6411 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 6412 6413 /* Read four io_units starting from offset = 2 6414 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6415 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6416 * payload_read: F0AA 0000 | 0000 0000 ... */ 6417 6418 memset(payload_read, 0x00, sizeof(payload_read)); 6419 iov[0].iov_base = payload_read; 6420 iov[0].iov_len = 4 * 512; 6421 spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); 6422 poll_threads(); 6423 CU_ASSERT(g_bserrno == 0); 6424 6425 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6426 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6427 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 6428 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 6429 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6430 6431 /* Read eight io_units across multiple pages 6432 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6433 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6434 * payload_read: AAAA AAAA | 0000 0000 ... */ 6435 memset(payload_read, 0x00, sizeof(payload_read)); 6436 iov[0].iov_base = payload_read; 6437 iov[0].iov_len = 4 * 512; 6438 iov[1].iov_base = payload_read + 4 * 512; 6439 iov[1].iov_len = 4 * 512; 6440 spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); 6441 poll_threads(); 6442 CU_ASSERT(g_bserrno == 0); 6443 6444 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 6445 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6446 6447 /* Read eight io_units across multiple clusters 6448 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 6449 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6450 * payload_read: FFFF FFFF | 0000 0000 ... */ 6451 memset(payload_read, 0x00, sizeof(payload_read)); 6452 iov[0].iov_base = payload_read; 6453 iov[0].iov_len = 2 * 512; 6454 iov[1].iov_base = payload_read + 2 * 512; 6455 iov[1].iov_len = 2 * 512; 6456 iov[2].iov_base = payload_read + 4 * 512; 6457 iov[2].iov_len = 2 * 512; 6458 iov[3].iov_base = payload_read + 6 * 512; 6459 iov[3].iov_len = 2 * 512; 6460 spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); 6461 poll_threads(); 6462 CU_ASSERT(g_bserrno == 0); 6463 6464 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 6465 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6466 6467 /* Read four io_units from second cluster 6468 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6469 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 6470 * payload_read: 00FF 0000 | 0000 0000 ... 
*/ 6471 memset(payload_read, 0x00, sizeof(payload_read)); 6472 iov[0].iov_base = payload_read; 6473 iov[0].iov_len = 1 * 512; 6474 iov[1].iov_base = payload_read + 1 * 512; 6475 iov[1].iov_len = 3 * 512; 6476 spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); 6477 poll_threads(); 6478 CU_ASSERT(g_bserrno == 0); 6479 6480 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 6481 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 6482 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6483 6484 /* Read second cluster 6485 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6486 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6487 * payload_read: FFFF 0000 | 0000 FF00 ... */ 6488 memset(payload_read, 0x00, sizeof(payload_read)); 6489 iov[0].iov_base = payload_read; 6490 iov[0].iov_len = 1 * 512; 6491 iov[1].iov_base = payload_read + 1 * 512; 6492 iov[1].iov_len = 2 * 512; 6493 iov[2].iov_base = payload_read + 3 * 512; 6494 iov[2].iov_len = 4 * 512; 6495 iov[3].iov_base = payload_read + 7 * 512; 6496 iov[3].iov_len = 25 * 512; 6497 spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); 6498 poll_threads(); 6499 CU_ASSERT(g_bserrno == 0); 6500 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6501 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6502 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6503 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6504 6505 /* Read whole two clusters 6506 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6507 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6508 memset(payload_read, 0x00, sizeof(payload_read)); 6509 iov[0].iov_base = payload_read; 6510 iov[0].iov_len = 1 * 512; 6511 iov[1].iov_base = payload_read + 1 * 512; 6512 iov[1].iov_len = 8 * 512; 6513 iov[2].iov_base = payload_read + 9 * 512; 6514 iov[2].iov_len = 16 * 512; 6515 iov[3].iov_base = payload_read + 25 * 512; 6516 iov[3].iov_len = 39 * 512; 6517 spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); 6518 poll_threads(); 6519 CU_ASSERT(g_bserrno == 0); 6520 6521 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6522 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6523 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6524 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6525 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6526 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6527 6528 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6529 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6530 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6531 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6532 } 6533 6534 static void 6535 blob_io_unit(void) 6536 { 6537 struct spdk_bs_opts bsopts; 6538 struct spdk_blob_opts opts; 6539 struct spdk_blob_store *bs; 6540 struct spdk_bs_dev *dev; 6541 struct spdk_blob *blob, *snapshot, *clone; 6542 spdk_blob_id blobid; 6543 struct spdk_io_channel *channel; 6544 6545 /* Create dev with 512 bytes io unit size */ 6546 6547 spdk_bs_opts_init(&bsopts, sizeof(bsopts)); 6548 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6549 snprintf(bsopts.bstype.bstype, 
sizeof(bsopts.bstype.bstype), "TESTTYPE"); 6550 6551 /* Try to initialize a new blob store with unsupported io_unit */ 6552 dev = init_dev(); 6553 dev->blocklen = 512; 6554 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6555 6556 /* Initialize a new blob store */ 6557 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6558 poll_threads(); 6559 CU_ASSERT(g_bserrno == 0); 6560 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6561 bs = g_bs; 6562 6563 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6564 channel = spdk_bs_alloc_io_channel(bs); 6565 6566 /* Create thick provisioned blob */ 6567 ut_spdk_blob_opts_init(&opts); 6568 opts.thin_provision = false; 6569 opts.num_clusters = 32; 6570 6571 blob = ut_blob_create_and_open(bs, &opts); 6572 blobid = spdk_blob_get_id(blob); 6573 6574 test_io_write(dev, blob, channel); 6575 test_io_read(dev, blob, channel); 6576 test_io_zeroes(dev, blob, channel); 6577 6578 test_iov_write(dev, blob, channel); 6579 test_iov_read(dev, blob, channel); 6580 6581 test_io_unmap(dev, blob, channel); 6582 6583 spdk_blob_close(blob, blob_op_complete, NULL); 6584 poll_threads(); 6585 CU_ASSERT(g_bserrno == 0); 6586 blob = NULL; 6587 g_blob = NULL; 6588 6589 /* Create thin provisioned blob */ 6590 6591 ut_spdk_blob_opts_init(&opts); 6592 opts.thin_provision = true; 6593 opts.num_clusters = 32; 6594 6595 blob = ut_blob_create_and_open(bs, &opts); 6596 blobid = spdk_blob_get_id(blob); 6597 6598 test_io_write(dev, blob, channel); 6599 test_io_read(dev, blob, channel); 6600 6601 test_io_zeroes(dev, blob, channel); 6602 6603 test_iov_write(dev, blob, channel); 6604 test_iov_read(dev, blob, channel); 6605 6606 /* Create snapshot */ 6607 6608 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6609 poll_threads(); 6610 CU_ASSERT(g_bserrno == 0); 6611 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6612 blobid = g_blobid; 6613 6614 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6615 poll_threads(); 6616 CU_ASSERT(g_bserrno == 0); 6617 CU_ASSERT(g_blob != NULL); 6618 snapshot = g_blob; 6619 6620 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6621 poll_threads(); 6622 CU_ASSERT(g_bserrno == 0); 6623 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6624 blobid = g_blobid; 6625 6626 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6627 poll_threads(); 6628 CU_ASSERT(g_bserrno == 0); 6629 CU_ASSERT(g_blob != NULL); 6630 clone = g_blob; 6631 6632 test_io_read(dev, blob, channel); 6633 test_io_read(dev, snapshot, channel); 6634 test_io_read(dev, clone, channel); 6635 6636 test_iov_read(dev, blob, channel); 6637 test_iov_read(dev, snapshot, channel); 6638 test_iov_read(dev, clone, channel); 6639 6640 /* Inflate clone */ 6641 6642 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6643 poll_threads(); 6644 6645 CU_ASSERT(g_bserrno == 0); 6646 6647 test_io_read(dev, clone, channel); 6648 6649 test_io_unmap(dev, clone, channel); 6650 6651 test_iov_write(dev, clone, channel); 6652 test_iov_read(dev, clone, channel); 6653 6654 spdk_blob_close(blob, blob_op_complete, NULL); 6655 spdk_blob_close(snapshot, blob_op_complete, NULL); 6656 spdk_blob_close(clone, blob_op_complete, NULL); 6657 poll_threads(); 6658 CU_ASSERT(g_bserrno == 0); 6659 blob = NULL; 6660 g_blob = NULL; 6661 6662 spdk_bs_free_io_channel(channel); 6663 poll_threads(); 6664 6665 /* Unload the blob store */ 6666 spdk_bs_unload(bs, bs_op_complete, NULL); 6667 poll_threads(); 6668 CU_ASSERT(g_bserrno == 0); 6669 g_bs = NULL; 6670 g_blob = 
NULL; 6671 g_blobid = 0; 6672 } 6673 6674 static void 6675 blob_io_unit_compatibility(void) 6676 { 6677 struct spdk_bs_opts bsopts; 6678 struct spdk_blob_store *bs; 6679 struct spdk_bs_dev *dev; 6680 struct spdk_bs_super_block *super; 6681 6682 /* Create dev with 512 bytes io unit size */ 6683 6684 spdk_bs_opts_init(&bsopts, sizeof(bsopts)); 6685 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6686 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE"); 6687 6688 /* Try to initialize a new blob store with unsupported io_unit */ 6689 dev = init_dev(); 6690 dev->blocklen = 512; 6691 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6692 6693 /* Initialize a new blob store */ 6694 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6695 poll_threads(); 6696 CU_ASSERT(g_bserrno == 0); 6697 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6698 bs = g_bs; 6699 6700 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6701 6702 /* Unload the blob store */ 6703 spdk_bs_unload(bs, bs_op_complete, NULL); 6704 poll_threads(); 6705 CU_ASSERT(g_bserrno == 0); 6706 6707 /* Modify super block to behave like older version. 6708 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */ 6709 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 6710 super->io_unit_size = 0; 6711 super->crc = blob_md_page_calc_crc(super); 6712 6713 dev = init_dev(); 6714 dev->blocklen = 512; 6715 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6716 6717 spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL); 6718 poll_threads(); 6719 CU_ASSERT(g_bserrno == 0); 6720 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6721 bs = g_bs; 6722 6723 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE); 6724 6725 /* Unload the blob store */ 6726 spdk_bs_unload(bs, bs_op_complete, NULL); 6727 poll_threads(); 6728 CU_ASSERT(g_bserrno == 0); 6729 6730 g_bs = NULL; 6731 g_blob = NULL; 6732 g_blobid = 0; 6733 } 6734 6735 static void 6736 first_sync_complete(void *cb_arg, int bserrno) 6737 { 6738 struct spdk_blob *blob = cb_arg; 6739 int rc; 6740 6741 CU_ASSERT(bserrno == 0); 6742 rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1); 6743 CU_ASSERT(rc == 0); 6744 CU_ASSERT(g_bserrno == -1); 6745 6746 /* Keep g_bserrno at -1, only the 6747 * second sync completion should set it at 0. 

static void
first_sync_complete(void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	int rc;

	CU_ASSERT(bserrno == 0);
	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bserrno == -1);

	/* Keep g_bserrno at -1; only the
	 * second sync completion should set it to 0. */
}

static void
second_sync_complete(void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	const void *value;
	size_t value_len;
	int rc;

	CU_ASSERT(bserrno == 0);

	/* Verify that the first sync completion had a chance to execute */
	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen("second") + 1);
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);

	CU_ASSERT(g_bserrno == -1);
	g_bserrno = bserrno;
}

static void
blob_simultaneous_operations(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	int rc;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create a snapshot and try to remove the blob at the same time:
	 * - the snapshot should be created successfully
	 * - the delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Snapshot creation success */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Inflate the blob and try to remove it at the same time:
	 * - the blob should be inflated successfully
	 * - the delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Inflation success */
	CU_ASSERT(g_bserrno == 0);

	/* Clone the snapshot and try to remove the snapshot at the same time:
	 * - the snapshot should be cloned successfully
	 * - the delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Clone created */
	CU_ASSERT(g_bserrno == 0);

	/* Resize the blob and try to remove it at the same time:
	 * - the blob should be resized successfully
	 * - the delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Blob resized successfully */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Issue two consecutive blob syncs; neither should fail.
	 * Force each sync to actually occur by marking the blob dirty each time.
	 * Merely issuing the sync is not enough to complete the operation,
	 * since disk I/O is required to complete it. */
	g_bserrno = -1;

	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
	CU_ASSERT(rc == 0);
	spdk_blob_sync_md(blob, first_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	spdk_blob_sync_md(blob, second_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, snapshot);
	ut_blob_close_and_delete(bs, blob);
}
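
/* Usage sketch (hypothetical, not registered as a test): given the -EBUSY behavior
 * exercised above, a caller that races a delete against another locked operation can
 * simply retry the delete once the conflicting operation has had a chance to finish. */
static void __attribute__((unused))
ut_delete_blob_retry_on_busy(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	g_bserrno = -1;
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	if (g_bserrno == -EBUSY) {
		/* Another locked operation owns the blob; let it finish, then retry once. */
		poll_threads();
		g_bserrno = -1;
		spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	}
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}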

static void
blob_persist_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;
	char *xattr;
	size_t xattr_length;
	int rc;
	uint32_t page_count_clear, page_count_xattr;
	uint64_t poller_iterations;
	bool run_poller;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Save the number of md pages used right after creation of the blob.
	 * It should be the same again after the xattr is removed. */
	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);

	/* Add an xattr with the maximum descriptor length, so the metadata exceeds a single page. */
	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
		       strlen("large_xattr");
	xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);

	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);

	/* Save the number of md pages used after adding the large xattr */
	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);

	/* Add the xattr to the blob and sync it. While that sync is still in flight, remove the
	 * xattr and sync again. The first sync is interrupted after a number of poller iterations
	 * that grows on each attempt, until it eventually completes.
	 * The expectation is that after the second sync completes, no xattr is saved in metadata. */
	poller_iterations = 1;
	run_poller = true;
	while (run_poller) {
		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == 0);
		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_thread_times(0, poller_iterations);
		if (g_bserrno == 0) {
			/* The poller iteration count was high enough for the first sync to complete.
			 * Verify that the blob takes up enough md pages to store the xattr. */
			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
			run_poller = false;
		}
		rc = spdk_blob_remove_xattr(blob, "large_xattr");
		SPDK_CU_ASSERT_FATAL(rc == 0);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);

		/* Reload the blob store and re-open the blob to verify that the xattr was not persisted. */
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		ut_bs_reload(&bs, NULL);

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);

		poller_iterations++;
		/* Stop at a high iteration count to prevent an infinite loop.
		 * This value should be enough for the first md sync to complete in any case. */
		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
	}

	free(xattr);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
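
/* Sketch (hypothetical helper, unused): blob_persist_test() above repeatedly compares the
 * blob-side metadata page accounting against the blobstore-wide used_md_pages bitmap.
 * A helper like this names that blob-side count; the active and clean counters are
 * expected to match once a metadata sync has completed. */
static uint32_t __attribute__((unused))
ut_blob_md_page_count(const struct spdk_blob *blob)
{
	return blob->active.num_pages + blob->active.num_extent_pages;
}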

static void
blob_decouple_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot1, *snapshot2;
	struct spdk_io_channel *channel;
	spdk_blob_id blobid, snapshotid;
	uint64_t cluster;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create first snapshot */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot1 = g_blob;

	/* Create the second one */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);

	/* Now decouple the second snapshot, forcing it to copy the written clusters */
	spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that the snapshot has been decoupled and that the clusters have been copied */
	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
	for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
				    snapshot1->active.clusters[cluster]);
	}

	spdk_bs_free_io_channel(channel);

	ut_blob_close_and_delete(bs, snapshot2);
	ut_blob_close_and_delete(bs, snapshot1);
	ut_blob_close_and_delete(bs, blob);
	poll_threads();
}
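
/* Sketch (hypothetical helper, unused): blob_decouple_snapshot() above verifies cluster
 * by cluster that the decoupled snapshot no longer shares allocations with its former
 * parent. The same check, factored out; it relies on the internal active.clusters table,
 * which is visible here only because blobstore.c is #included into this test. */
static void __attribute__((unused))
ut_assert_no_shared_clusters(struct spdk_blob *a, struct spdk_blob *b)
{
	uint64_t i;
	uint64_t num = a->active.num_clusters < b->active.num_clusters ?
		       a->active.num_clusters : b->active.num_clusters;

	for (i = 0; i < num; i++) {
		/* Unallocated (zero) entries are allowed; allocated entries must differ. */
		CU_ASSERT(a->active.clusters[i] == 0 ||
			  a->active.clusters[i] != b->active.clusters[i]);
	}
}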

static void
suite_bs_setup(void)
{
	struct spdk_bs_dev *dev;

	dev = init_dev();
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bs != NULL);
}

static void
suite_bs_cleanup(void)
{
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
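
/* suite_bs_setup() and suite_bs_cleanup() are registered in main() as the per-test setup
 * and teardown of the "blob_bs" suite, so every test in that suite runs against a freshly
 * initialized blob store on a zeroed g_dev_buffer. The ut_blob_* helpers below wrap the
 * common create/open and close/delete sequences around the global completion state used
 * throughout this file. */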

static struct spdk_blob *
ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
{
	struct spdk_blob *blob;
	struct spdk_blob_opts create_blob_opts;
	spdk_blob_id blobid;

	if (blob_opts == NULL) {
		ut_spdk_blob_opts_init(&create_blob_opts);
		blob_opts = &create_blob_opts;
	}

	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;
	g_blobid = -1;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	g_blob = NULL;
	g_bserrno = -1;

	return blob;
}

static void
ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
{
	spdk_blob_id blobid = spdk_blob_get_id(blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
}

static void
suite_blob_setup(void)
{
	suite_bs_setup();
	CU_ASSERT(g_bs != NULL);

	g_blob = ut_blob_create_and_open(g_bs, NULL);
	CU_ASSERT(g_blob != NULL);
}

static void
suite_blob_cleanup(void)
{
	ut_blob_close_and_delete(g_bs, g_blob);
	CU_ASSERT(g_blob == NULL);

	suite_bs_cleanup();
	CU_ASSERT(g_bs == NULL);
}
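
/* main() registers three suites: "blob" (tests that manage their own blob store),
 * "blob_bs" (per-test blob store via suite_bs_setup/suite_bs_cleanup), and "blob_blob"
 * (per-test blob store plus one open blob via suite_blob_setup/suite_blob_cleanup).
 * The whole registry is then run twice, once with extent tables disabled and once
 * enabled, and the failure counts of both passes are summed. */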

int main(int argc, char **argv)
{
	CU_pSuite	suite, suite_bs, suite_blob;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("blob", NULL, NULL);
	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
			suite_bs_setup, suite_bs_cleanup);
	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
			suite_blob_setup, suite_blob_cleanup);

	CU_ADD_TEST(suite, blob_init);
	CU_ADD_TEST(suite_bs, blob_open);
	CU_ADD_TEST(suite_bs, blob_create);
	CU_ADD_TEST(suite_bs, blob_create_loop);
	CU_ADD_TEST(suite_bs, blob_create_fail);
	CU_ADD_TEST(suite_bs, blob_create_internal);
	CU_ADD_TEST(suite_bs, blob_create_zero_extent);
	CU_ADD_TEST(suite, blob_thin_provision);
	CU_ADD_TEST(suite_bs, blob_snapshot);
	CU_ADD_TEST(suite_bs, blob_clone);
	CU_ADD_TEST(suite_bs, blob_inflate);
	CU_ADD_TEST(suite_bs, blob_delete);
	CU_ADD_TEST(suite_bs, blob_resize_test);
	CU_ADD_TEST(suite, blob_read_only);
	CU_ADD_TEST(suite_bs, channel_ops);
	CU_ADD_TEST(suite_bs, blob_super);
	CU_ADD_TEST(suite_blob, blob_write);
	CU_ADD_TEST(suite_blob, blob_read);
	CU_ADD_TEST(suite_blob, blob_rw_verify);
	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
	CU_ADD_TEST(suite_bs, blob_unmap);
	CU_ADD_TEST(suite_bs, blob_iter);
	CU_ADD_TEST(suite_blob, blob_xattr);
	CU_ADD_TEST(suite_bs, blob_parse_md);
	CU_ADD_TEST(suite, bs_load);
	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
	CU_ADD_TEST(suite_bs, bs_unload);
	CU_ADD_TEST(suite, bs_cluster_sz);
	CU_ADD_TEST(suite_bs, bs_usable_clusters);
	CU_ADD_TEST(suite, bs_resize_md);
	CU_ADD_TEST(suite, bs_destroy);
	CU_ADD_TEST(suite, bs_type);
	CU_ADD_TEST(suite, bs_super_block);
	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
	CU_ADD_TEST(suite, blob_serialize_test);
	CU_ADD_TEST(suite_bs, blob_crc);
	CU_ADD_TEST(suite, super_block_crc);
	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
	CU_ADD_TEST(suite_bs, blob_flags);
	CU_ADD_TEST(suite_bs, bs_version);
	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
	CU_ADD_TEST(suite, bs_load_iter_test);
	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
	CU_ADD_TEST(suite, blob_relations);
	CU_ADD_TEST(suite, blob_relations2);
	CU_ADD_TEST(suite, blob_relations3);
	CU_ADD_TEST(suite, blobstore_clean_power_failure);
	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
	CU_ADD_TEST(suite_bs, blob_inflate_rw);
	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
	CU_ADD_TEST(suite, blob_io_unit);
	CU_ADD_TEST(suite, blob_io_unit_compatibility);
	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
	CU_ADD_TEST(suite_bs, blob_persist_test);
	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);

	allocate_threads(2);
	set_thread(0);

	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	g_use_extent_table = false;
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	g_use_extent_table = true;
	CU_basic_run_tests();
	num_failures += CU_get_number_of_failures();
	CU_cleanup_registry();

	free(g_dev_buffer);

	free_threads();

	return num_failures;
}