1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk_cunit.h" 37 #include "spdk/blob.h" 38 #include "spdk/string.h" 39 40 #include "common/lib/ut_multithread.c" 41 #include "../bs_dev_common.c" 42 #include "blob/blobstore.c" 43 #include "blob/request.c" 44 #include "blob/zeroes.c" 45 #include "blob/blob_bs_dev.c" 46 47 struct spdk_blob_store *g_bs; 48 spdk_blob_id g_blobid; 49 struct spdk_blob *g_blob, *g_blob2; 50 int g_bserrno, g_bserrno2; 51 struct spdk_xattr_names *g_names; 52 int g_done; 53 char *g_xattr_names[] = {"first", "second", "third"}; 54 char *g_xattr_values[] = {"one", "two", "three"}; 55 uint64_t g_ctx = 1729; 56 bool g_use_extent_table = false; 57 58 struct spdk_bs_super_block_ver1 { 59 uint8_t signature[8]; 60 uint32_t version; 61 uint32_t length; 62 uint32_t clean; /* If there was a clean shutdown, this is 1. 
*/ 63 spdk_blob_id super_blob; 64 65 uint32_t cluster_size; /* In bytes */ 66 67 uint32_t used_page_mask_start; /* Offset from beginning of disk, in pages */ 68 uint32_t used_page_mask_len; /* Count, in pages */ 69 70 uint32_t used_cluster_mask_start; /* Offset from beginning of disk, in pages */ 71 uint32_t used_cluster_mask_len; /* Count, in pages */ 72 73 uint32_t md_start; /* Offset from beginning of disk, in pages */ 74 uint32_t md_len; /* Count, in pages */ 75 76 uint8_t reserved[4036]; 77 uint32_t crc; 78 } __attribute__((packed)); 79 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size"); 80 81 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs, 82 struct spdk_blob_opts *blob_opts); 83 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob); 84 static void suite_blob_setup(void); 85 static void suite_blob_cleanup(void); 86 87 static void 88 _get_xattr_value(void *arg, const char *name, 89 const void **value, size_t *value_len) 90 { 91 uint64_t i; 92 93 SPDK_CU_ASSERT_FATAL(value_len != NULL); 94 SPDK_CU_ASSERT_FATAL(value != NULL); 95 CU_ASSERT(arg == &g_ctx); 96 97 for (i = 0; i < sizeof(g_xattr_names); i++) { 98 if (!strcmp(name, g_xattr_names[i])) { 99 *value_len = strlen(g_xattr_values[i]); 100 *value = g_xattr_values[i]; 101 break; 102 } 103 } 104 } 105 106 static void 107 _get_xattr_value_null(void *arg, const char *name, 108 const void **value, size_t *value_len) 109 { 110 SPDK_CU_ASSERT_FATAL(value_len != NULL); 111 SPDK_CU_ASSERT_FATAL(value != NULL); 112 CU_ASSERT(arg == NULL); 113 114 *value_len = 0; 115 *value = NULL; 116 } 117 118 static int 119 _get_snapshots_count(struct spdk_blob_store *bs) 120 { 121 struct spdk_blob_list *snapshot = NULL; 122 int count = 0; 123 124 TAILQ_FOREACH(snapshot, &bs->snapshots, link) { 125 count += 1; 126 } 127 128 return count; 129 } 130 131 static void 132 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts) 133 { 134 spdk_blob_opts_init(opts, sizeof(*opts)); 135 opts->use_extent_table = g_use_extent_table; 136 } 137 138 static void 139 bs_op_complete(void *cb_arg, int bserrno) 140 { 141 g_bserrno = bserrno; 142 } 143 144 static void 145 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs, 146 int bserrno) 147 { 148 g_bs = bs; 149 g_bserrno = bserrno; 150 } 151 152 static void 153 blob_op_complete(void *cb_arg, int bserrno) 154 { 155 g_bserrno = bserrno; 156 } 157 158 static void 159 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno) 160 { 161 g_blobid = blobid; 162 g_bserrno = bserrno; 163 } 164 165 static void 166 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno) 167 { 168 g_blob = blb; 169 g_bserrno = bserrno; 170 } 171 172 static void 173 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno) 174 { 175 if (g_blob == NULL) { 176 g_blob = blob; 177 g_bserrno = bserrno; 178 } else { 179 g_blob2 = blob; 180 g_bserrno2 = bserrno; 181 } 182 } 183 184 static void 185 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts) 186 { 187 struct spdk_bs_dev *dev; 188 189 /* Unload the blob store */ 190 spdk_bs_unload(*bs, bs_op_complete, NULL); 191 poll_threads(); 192 CU_ASSERT(g_bserrno == 0); 193 194 dev = init_dev(); 195 /* Load an existing blob store */ 196 spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL); 197 poll_threads(); 198 CU_ASSERT(g_bserrno == 0); 199 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 200 *bs = g_bs; 201 202 g_bserrno = 
-1; 203 } 204 205 static void 206 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts) 207 { 208 struct spdk_bs_dev *dev; 209 210 /* Dirty shutdown */ 211 bs_free(*bs); 212 213 dev = init_dev(); 214 /* Load an existing blob store */ 215 spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL); 216 poll_threads(); 217 CU_ASSERT(g_bserrno == 0); 218 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 219 *bs = g_bs; 220 221 g_bserrno = -1; 222 } 223 224 static void 225 blob_init(void) 226 { 227 struct spdk_blob_store *bs; 228 struct spdk_bs_dev *dev; 229 230 dev = init_dev(); 231 232 /* should fail for an unsupported blocklen */ 233 dev->blocklen = 500; 234 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 235 poll_threads(); 236 CU_ASSERT(g_bserrno == -EINVAL); 237 238 dev = init_dev(); 239 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 240 poll_threads(); 241 CU_ASSERT(g_bserrno == 0); 242 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 243 bs = g_bs; 244 245 spdk_bs_unload(bs, bs_op_complete, NULL); 246 poll_threads(); 247 CU_ASSERT(g_bserrno == 0); 248 g_bs = NULL; 249 } 250 251 static void 252 blob_super(void) 253 { 254 struct spdk_blob_store *bs = g_bs; 255 spdk_blob_id blobid; 256 struct spdk_blob_opts blob_opts; 257 258 /* Get the super blob without having set one */ 259 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 260 poll_threads(); 261 CU_ASSERT(g_bserrno == -ENOENT); 262 CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID); 263 264 /* Create a blob */ 265 ut_spdk_blob_opts_init(&blob_opts); 266 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 267 poll_threads(); 268 CU_ASSERT(g_bserrno == 0); 269 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 270 blobid = g_blobid; 271 272 /* Set the blob as the super blob */ 273 spdk_bs_set_super(bs, blobid, blob_op_complete, NULL); 274 poll_threads(); 275 CU_ASSERT(g_bserrno == 0); 276 277 /* Get the super blob */ 278 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 279 poll_threads(); 280 CU_ASSERT(g_bserrno == 0); 281 CU_ASSERT(blobid == g_blobid); 282 } 283 284 static void 285 blob_open(void) 286 { 287 struct spdk_blob_store *bs = g_bs; 288 struct spdk_blob *blob; 289 struct spdk_blob_opts blob_opts; 290 spdk_blob_id blobid, blobid2; 291 292 ut_spdk_blob_opts_init(&blob_opts); 293 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 294 poll_threads(); 295 CU_ASSERT(g_bserrno == 0); 296 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 297 blobid = g_blobid; 298 299 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 300 poll_threads(); 301 CU_ASSERT(g_bserrno == 0); 302 CU_ASSERT(g_blob != NULL); 303 blob = g_blob; 304 305 blobid2 = spdk_blob_get_id(blob); 306 CU_ASSERT(blobid == blobid2); 307 308 /* Try to open file again. It should return success. */ 309 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 310 poll_threads(); 311 CU_ASSERT(g_bserrno == 0); 312 CU_ASSERT(blob == g_blob); 313 314 spdk_blob_close(blob, blob_op_complete, NULL); 315 poll_threads(); 316 CU_ASSERT(g_bserrno == 0); 317 318 /* 319 * Close the file a second time, releasing the second reference. This 320 * should succeed. 321 */ 322 blob = g_blob; 323 spdk_blob_close(blob, blob_op_complete, NULL); 324 poll_threads(); 325 CU_ASSERT(g_bserrno == 0); 326 327 /* 328 * Try to open file again. It should succeed. This tests the case 329 * where the file is opened, closed, then re-opened again. 
330 */ 331 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 332 poll_threads(); 333 CU_ASSERT(g_bserrno == 0); 334 CU_ASSERT(g_blob != NULL); 335 blob = g_blob; 336 spdk_blob_close(blob, blob_op_complete, NULL); 337 poll_threads(); 338 CU_ASSERT(g_bserrno == 0); 339 340 /* Try to open file twice in succession. This should return the same 341 * blob object. 342 */ 343 g_blob = NULL; 344 g_blob2 = NULL; 345 g_bserrno = -1; 346 g_bserrno2 = -1; 347 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL); 348 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL); 349 poll_threads(); 350 CU_ASSERT(g_bserrno == 0); 351 CU_ASSERT(g_bserrno2 == 0); 352 CU_ASSERT(g_blob != NULL); 353 CU_ASSERT(g_blob2 != NULL); 354 CU_ASSERT(g_blob == g_blob2); 355 356 g_bserrno = -1; 357 spdk_blob_close(g_blob, blob_op_complete, NULL); 358 poll_threads(); 359 CU_ASSERT(g_bserrno == 0); 360 361 ut_blob_close_and_delete(bs, g_blob); 362 } 363 364 static void 365 blob_create(void) 366 { 367 struct spdk_blob_store *bs = g_bs; 368 struct spdk_blob *blob; 369 struct spdk_blob_opts opts; 370 spdk_blob_id blobid; 371 372 /* Create blob with 10 clusters */ 373 374 ut_spdk_blob_opts_init(&opts); 375 opts.num_clusters = 10; 376 377 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 378 poll_threads(); 379 CU_ASSERT(g_bserrno == 0); 380 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 381 blobid = g_blobid; 382 383 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 384 poll_threads(); 385 CU_ASSERT(g_bserrno == 0); 386 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 387 blob = g_blob; 388 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 389 390 spdk_blob_close(blob, blob_op_complete, NULL); 391 poll_threads(); 392 CU_ASSERT(g_bserrno == 0); 393 394 /* Create blob with 0 clusters */ 395 396 ut_spdk_blob_opts_init(&opts); 397 opts.num_clusters = 0; 398 399 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 400 poll_threads(); 401 CU_ASSERT(g_bserrno == 0); 402 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 403 blobid = g_blobid; 404 405 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 406 poll_threads(); 407 CU_ASSERT(g_bserrno == 0); 408 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 409 blob = g_blob; 410 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 411 412 spdk_blob_close(blob, blob_op_complete, NULL); 413 poll_threads(); 414 CU_ASSERT(g_bserrno == 0); 415 416 /* Create blob with default options (opts == NULL) */ 417 418 spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL); 419 poll_threads(); 420 CU_ASSERT(g_bserrno == 0); 421 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 422 blobid = g_blobid; 423 424 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 425 poll_threads(); 426 CU_ASSERT(g_bserrno == 0); 427 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 428 blob = g_blob; 429 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 430 431 spdk_blob_close(blob, blob_op_complete, NULL); 432 poll_threads(); 433 CU_ASSERT(g_bserrno == 0); 434 435 /* Try to create blob with size larger than blobstore */ 436 437 ut_spdk_blob_opts_init(&opts); 438 opts.num_clusters = bs->total_clusters + 1; 439 440 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 441 poll_threads(); 442 CU_ASSERT(g_bserrno == -ENOSPC); 443 } 444 445 static void 446 blob_create_zero_extent(void) 447 { 448 struct spdk_blob_store *bs = g_bs; 449 struct spdk_blob *blob; 450 spdk_blob_id blobid; 451 452 /* Create blob with default options (opts == 
NULL) */ 453 spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL); 454 poll_threads(); 455 CU_ASSERT(g_bserrno == 0); 456 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 457 blobid = g_blobid; 458 459 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 460 poll_threads(); 461 CU_ASSERT(g_bserrno == 0); 462 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 463 blob = g_blob; 464 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 465 CU_ASSERT(blob->extent_table_found == true); 466 CU_ASSERT(blob->active.extent_pages_array_size == 0); 467 CU_ASSERT(blob->active.extent_pages == NULL); 468 469 spdk_blob_close(blob, blob_op_complete, NULL); 470 poll_threads(); 471 CU_ASSERT(g_bserrno == 0); 472 473 /* Create blob with NULL internal options */ 474 bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL); 475 poll_threads(); 476 CU_ASSERT(g_bserrno == 0); 477 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 478 blobid = g_blobid; 479 480 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 481 poll_threads(); 482 CU_ASSERT(g_bserrno == 0); 483 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 484 blob = g_blob; 485 CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL); 486 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 487 CU_ASSERT(blob->extent_table_found == true); 488 CU_ASSERT(blob->active.extent_pages_array_size == 0); 489 CU_ASSERT(blob->active.extent_pages == NULL); 490 491 spdk_blob_close(blob, blob_op_complete, NULL); 492 poll_threads(); 493 CU_ASSERT(g_bserrno == 0); 494 } 495 496 /* 497 * Create and delete one blob in a loop over and over again. This helps ensure 498 * that the internal bit masks tracking used clusters and md_pages are being 499 * tracked correctly. 500 */ 501 static void 502 blob_create_loop(void) 503 { 504 struct spdk_blob_store *bs = g_bs; 505 struct spdk_blob_opts opts; 506 uint32_t i, loop_count; 507 508 loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages), 509 spdk_bit_pool_capacity(bs->used_clusters)); 510 511 for (i = 0; i < loop_count; i++) { 512 ut_spdk_blob_opts_init(&opts); 513 opts.num_clusters = 1; 514 g_bserrno = -1; 515 g_blobid = SPDK_BLOBID_INVALID; 516 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 517 poll_threads(); 518 CU_ASSERT(g_bserrno == 0); 519 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 520 spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL); 521 poll_threads(); 522 CU_ASSERT(g_bserrno == 0); 523 } 524 } 525 526 static void 527 blob_create_fail(void) 528 { 529 struct spdk_blob_store *bs = g_bs; 530 struct spdk_blob_opts opts; 531 spdk_blob_id blobid; 532 uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids); 533 uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages); 534 535 /* NULL callback */ 536 ut_spdk_blob_opts_init(&opts); 537 opts.xattrs.names = g_xattr_names; 538 opts.xattrs.get_value = NULL; 539 opts.xattrs.count = 1; 540 opts.xattrs.ctx = &g_ctx; 541 542 blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0); 543 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 544 poll_threads(); 545 CU_ASSERT(g_bserrno == -EINVAL); 546 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 547 CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count); 548 CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count); 549 550 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 551 poll_threads(); 552 CU_ASSERT(g_bserrno == -ENOENT); 553 SPDK_CU_ASSERT_FATAL(g_blob == NULL); 554 555 
ut_bs_reload(&bs, NULL); 556 CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count); 557 CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count); 558 559 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 560 poll_threads(); 561 CU_ASSERT(g_blob == NULL); 562 CU_ASSERT(g_bserrno == -ENOENT); 563 } 564 565 static void 566 blob_create_internal(void) 567 { 568 struct spdk_blob_store *bs = g_bs; 569 struct spdk_blob *blob; 570 struct spdk_blob_opts opts; 571 struct spdk_blob_xattr_opts internal_xattrs; 572 const void *value; 573 size_t value_len; 574 spdk_blob_id blobid; 575 int rc; 576 577 /* Create blob with custom xattrs */ 578 579 ut_spdk_blob_opts_init(&opts); 580 blob_xattrs_init(&internal_xattrs); 581 internal_xattrs.count = 3; 582 internal_xattrs.names = g_xattr_names; 583 internal_xattrs.get_value = _get_xattr_value; 584 internal_xattrs.ctx = &g_ctx; 585 586 bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL); 587 poll_threads(); 588 CU_ASSERT(g_bserrno == 0); 589 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 590 blobid = g_blobid; 591 592 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 593 poll_threads(); 594 CU_ASSERT(g_bserrno == 0); 595 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 596 blob = g_blob; 597 598 rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true); 599 CU_ASSERT(rc == 0); 600 SPDK_CU_ASSERT_FATAL(value != NULL); 601 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 602 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 603 604 rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true); 605 CU_ASSERT(rc == 0); 606 SPDK_CU_ASSERT_FATAL(value != NULL); 607 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 608 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 609 610 rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true); 611 CU_ASSERT(rc == 0); 612 SPDK_CU_ASSERT_FATAL(value != NULL); 613 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 614 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 615 616 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 617 CU_ASSERT(rc != 0); 618 619 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 620 CU_ASSERT(rc != 0); 621 622 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 623 CU_ASSERT(rc != 0); 624 625 spdk_blob_close(blob, blob_op_complete, NULL); 626 poll_threads(); 627 CU_ASSERT(g_bserrno == 0); 628 629 /* Create blob with NULL internal options */ 630 631 bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL); 632 poll_threads(); 633 CU_ASSERT(g_bserrno == 0); 634 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 635 blobid = g_blobid; 636 637 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 638 poll_threads(); 639 CU_ASSERT(g_bserrno == 0); 640 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 641 CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL); 642 CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0); 643 644 blob = g_blob; 645 646 spdk_blob_close(blob, blob_op_complete, NULL); 647 poll_threads(); 648 CU_ASSERT(g_bserrno == 0); 649 } 650 651 static void 652 blob_thin_provision(void) 653 { 654 struct spdk_blob_store *bs; 655 struct spdk_bs_dev *dev; 656 struct spdk_blob *blob; 657 struct spdk_blob_opts opts; 658 struct spdk_bs_opts bs_opts; 659 spdk_blob_id blobid; 660 661 dev = init_dev(); 662 spdk_bs_opts_init(&bs_opts, 
sizeof(bs_opts)); 663 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 664 665 /* Initialize a new blob store */ 666 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 667 poll_threads(); 668 CU_ASSERT(g_bserrno == 0); 669 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 670 671 bs = g_bs; 672 673 /* Create blob with thin provisioning enabled */ 674 675 ut_spdk_blob_opts_init(&opts); 676 opts.thin_provision = true; 677 opts.num_clusters = 10; 678 679 blob = ut_blob_create_and_open(bs, &opts); 680 blobid = spdk_blob_get_id(blob); 681 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 682 /* In thin provisioning with num_clusters is set, if not using the 683 * extent table, there is no allocation. If extent table is used, 684 * there is related allocation happened. */ 685 if (blob->extent_table_found == true) { 686 CU_ASSERT(blob->active.extent_pages_array_size > 0); 687 CU_ASSERT(blob->active.extent_pages != NULL); 688 } else { 689 CU_ASSERT(blob->active.extent_pages_array_size == 0); 690 CU_ASSERT(blob->active.extent_pages == NULL); 691 } 692 693 spdk_blob_close(blob, blob_op_complete, NULL); 694 CU_ASSERT(g_bserrno == 0); 695 696 /* Do not shut down cleanly. This makes sure that when we load again 697 * and try to recover a valid used_cluster map, that blobstore will 698 * ignore clusters with index 0 since these are unallocated clusters. 699 */ 700 ut_bs_dirty_load(&bs, &bs_opts); 701 702 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 703 poll_threads(); 704 CU_ASSERT(g_bserrno == 0); 705 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 706 blob = g_blob; 707 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 708 709 ut_blob_close_and_delete(bs, blob); 710 711 spdk_bs_unload(bs, bs_op_complete, NULL); 712 poll_threads(); 713 CU_ASSERT(g_bserrno == 0); 714 g_bs = NULL; 715 } 716 717 static void 718 blob_snapshot(void) 719 { 720 struct spdk_blob_store *bs = g_bs; 721 struct spdk_blob *blob; 722 struct spdk_blob *snapshot, *snapshot2; 723 struct spdk_blob_bs_dev *blob_bs_dev; 724 struct spdk_blob_opts opts; 725 struct spdk_blob_xattr_opts xattrs; 726 spdk_blob_id blobid; 727 spdk_blob_id snapshotid; 728 spdk_blob_id snapshotid2; 729 const void *value; 730 size_t value_len; 731 int rc; 732 spdk_blob_id ids[2]; 733 size_t count; 734 735 /* Create blob with 10 clusters */ 736 ut_spdk_blob_opts_init(&opts); 737 opts.num_clusters = 10; 738 739 blob = ut_blob_create_and_open(bs, &opts); 740 blobid = spdk_blob_get_id(blob); 741 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 742 743 /* Create snapshot from blob */ 744 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0); 745 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 746 poll_threads(); 747 CU_ASSERT(g_bserrno == 0); 748 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 749 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1); 750 snapshotid = g_blobid; 751 752 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 753 poll_threads(); 754 CU_ASSERT(g_bserrno == 0); 755 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 756 snapshot = g_blob; 757 CU_ASSERT(snapshot->data_ro == true); 758 CU_ASSERT(snapshot->md_ro == true); 759 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); 760 761 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 762 CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV); 763 CU_ASSERT(spdk_mem_all_zero(blob->active.clusters, 764 blob->active.num_clusters * sizeof(blob->active.clusters[0]))); 765 766 /* Try to create snapshot from clone with xattrs */ 767 xattrs.names 
= g_xattr_names; 768 xattrs.get_value = _get_xattr_value; 769 xattrs.count = 3; 770 xattrs.ctx = &g_ctx; 771 spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL); 772 poll_threads(); 773 CU_ASSERT(g_bserrno == 0); 774 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 775 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2); 776 snapshotid2 = g_blobid; 777 778 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 779 CU_ASSERT(g_bserrno == 0); 780 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 781 snapshot2 = g_blob; 782 CU_ASSERT(snapshot2->data_ro == true); 783 CU_ASSERT(snapshot2->md_ro == true); 784 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10); 785 786 /* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */ 787 CU_ASSERT(snapshot->back_bs_dev == NULL); 788 SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL); 789 SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL); 790 791 blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev; 792 CU_ASSERT(blob_bs_dev->blob == snapshot2); 793 794 blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev; 795 CU_ASSERT(blob_bs_dev->blob == snapshot); 796 797 rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len); 798 CU_ASSERT(rc == 0); 799 SPDK_CU_ASSERT_FATAL(value != NULL); 800 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 801 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 802 803 rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len); 804 CU_ASSERT(rc == 0); 805 SPDK_CU_ASSERT_FATAL(value != NULL); 806 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 807 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 808 809 rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len); 810 CU_ASSERT(rc == 0); 811 SPDK_CU_ASSERT_FATAL(value != NULL); 812 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 813 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 814 815 /* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */ 816 count = 2; 817 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0); 818 CU_ASSERT(count == 1); 819 CU_ASSERT(ids[0] == blobid); 820 821 count = 2; 822 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 823 CU_ASSERT(count == 1); 824 CU_ASSERT(ids[0] == snapshotid2); 825 826 /* Try to create snapshot from snapshot */ 827 spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 828 poll_threads(); 829 CU_ASSERT(g_bserrno == -EINVAL); 830 CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID); 831 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2); 832 833 /* Delete blob and confirm that it is no longer on snapshot2 clone list */ 834 ut_blob_close_and_delete(bs, blob); 835 count = 2; 836 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0); 837 CU_ASSERT(count == 0); 838 839 /* Delete snapshot2 and confirm that it is no longer on snapshot clone list */ 840 ut_blob_close_and_delete(bs, snapshot2); 841 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1); 842 count = 2; 843 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0); 844 CU_ASSERT(count == 0); 845 846 ut_blob_close_and_delete(bs, snapshot); 847 CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0); 848 } 849 850 static void 851 blob_snapshot_freeze_io(void) 852 { 853 struct spdk_io_channel *channel; 854 struct spdk_bs_channel *bs_channel; 855 struct spdk_blob_store *bs = g_bs; 856 struct spdk_blob *blob; 857 struct 
spdk_blob_opts opts; 858 spdk_blob_id blobid; 859 uint32_t num_of_pages = 10; 860 uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE]; 861 uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE]; 862 uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE]; 863 864 memset(payload_write, 0xE5, sizeof(payload_write)); 865 memset(payload_read, 0x00, sizeof(payload_read)); 866 memset(payload_zero, 0x00, sizeof(payload_zero)); 867 868 /* Test freeze I/O during snapshot */ 869 channel = spdk_bs_alloc_io_channel(bs); 870 bs_channel = spdk_io_channel_get_ctx(channel); 871 872 /* Create blob with 10 clusters */ 873 ut_spdk_blob_opts_init(&opts); 874 opts.num_clusters = 10; 875 opts.thin_provision = false; 876 877 blob = ut_blob_create_and_open(bs, &opts); 878 blobid = spdk_blob_get_id(blob); 879 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 880 881 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 882 883 /* This is implementation specific. 884 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback. 885 * Four async I/O operations happen before that. */ 886 poll_thread_times(0, 5); 887 888 CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io)); 889 890 /* Blob I/O should be frozen here */ 891 CU_ASSERT(blob->frozen_refcnt == 1); 892 893 /* Write to the blob */ 894 spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL); 895 896 /* Verify that I/O is queued */ 897 CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io)); 898 /* Verify that payload is not written to disk, at this point the blobs already switched */ 899 CU_ASSERT(blob->active.clusters[0] == 0); 900 901 /* Finish all operations including spdk_bs_create_snapshot */ 902 poll_threads(); 903 904 /* Verify snapshot */ 905 CU_ASSERT(g_bserrno == 0); 906 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 907 908 /* Verify that blob has unset frozen_io */ 909 CU_ASSERT(blob->frozen_refcnt == 0); 910 911 /* Verify that postponed I/O completed successfully by comparing payload */ 912 spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL); 913 poll_threads(); 914 CU_ASSERT(g_bserrno == 0); 915 CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0); 916 917 spdk_bs_free_io_channel(channel); 918 poll_threads(); 919 920 ut_blob_close_and_delete(bs, blob); 921 } 922 923 static void 924 blob_clone(void) 925 { 926 struct spdk_blob_store *bs = g_bs; 927 struct spdk_blob_opts opts; 928 struct spdk_blob *blob, *snapshot, *clone; 929 spdk_blob_id blobid, cloneid, snapshotid; 930 struct spdk_blob_xattr_opts xattrs; 931 const void *value; 932 size_t value_len; 933 int rc; 934 935 /* Create blob with 10 clusters */ 936 937 ut_spdk_blob_opts_init(&opts); 938 opts.num_clusters = 10; 939 940 blob = ut_blob_create_and_open(bs, &opts); 941 blobid = spdk_blob_get_id(blob); 942 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 943 944 /* Create snapshot */ 945 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 946 poll_threads(); 947 CU_ASSERT(g_bserrno == 0); 948 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 949 snapshotid = g_blobid; 950 951 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 952 poll_threads(); 953 CU_ASSERT(g_bserrno == 0); 954 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 955 snapshot = g_blob; 956 CU_ASSERT(snapshot->data_ro == true); 957 CU_ASSERT(snapshot->md_ro == true); 958 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); 959 960 spdk_blob_close(snapshot, blob_op_complete, NULL); 961 
poll_threads(); 962 CU_ASSERT(g_bserrno == 0); 963 964 /* Create clone from snapshot with xattrs */ 965 xattrs.names = g_xattr_names; 966 xattrs.get_value = _get_xattr_value; 967 xattrs.count = 3; 968 xattrs.ctx = &g_ctx; 969 970 spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL); 971 poll_threads(); 972 CU_ASSERT(g_bserrno == 0); 973 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 974 cloneid = g_blobid; 975 976 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 977 poll_threads(); 978 CU_ASSERT(g_bserrno == 0); 979 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 980 clone = g_blob; 981 CU_ASSERT(clone->data_ro == false); 982 CU_ASSERT(clone->md_ro == false); 983 CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10); 984 985 rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len); 986 CU_ASSERT(rc == 0); 987 SPDK_CU_ASSERT_FATAL(value != NULL); 988 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 989 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 990 991 rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len); 992 CU_ASSERT(rc == 0); 993 SPDK_CU_ASSERT_FATAL(value != NULL); 994 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 995 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 996 997 rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len); 998 CU_ASSERT(rc == 0); 999 SPDK_CU_ASSERT_FATAL(value != NULL); 1000 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 1001 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 1002 1003 1004 spdk_blob_close(clone, blob_op_complete, NULL); 1005 poll_threads(); 1006 CU_ASSERT(g_bserrno == 0); 1007 1008 /* Try to create clone from not read only blob */ 1009 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 1010 poll_threads(); 1011 CU_ASSERT(g_bserrno == -EINVAL); 1012 CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID); 1013 1014 /* Mark blob as read only */ 1015 spdk_blob_set_read_only(blob); 1016 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1017 poll_threads(); 1018 CU_ASSERT(g_bserrno == 0); 1019 1020 /* Create clone from read only blob */ 1021 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 1022 poll_threads(); 1023 CU_ASSERT(g_bserrno == 0); 1024 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1025 cloneid = g_blobid; 1026 1027 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 1028 poll_threads(); 1029 CU_ASSERT(g_bserrno == 0); 1030 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 1031 clone = g_blob; 1032 CU_ASSERT(clone->data_ro == false); 1033 CU_ASSERT(clone->md_ro == false); 1034 CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10); 1035 1036 ut_blob_close_and_delete(bs, clone); 1037 ut_blob_close_and_delete(bs, blob); 1038 } 1039 1040 static void 1041 _blob_inflate(bool decouple_parent) 1042 { 1043 struct spdk_blob_store *bs = g_bs; 1044 struct spdk_blob_opts opts; 1045 struct spdk_blob *blob, *snapshot; 1046 spdk_blob_id blobid, snapshotid; 1047 struct spdk_io_channel *channel; 1048 uint64_t free_clusters; 1049 1050 channel = spdk_bs_alloc_io_channel(bs); 1051 SPDK_CU_ASSERT_FATAL(channel != NULL); 1052 1053 /* Create blob with 10 clusters */ 1054 1055 ut_spdk_blob_opts_init(&opts); 1056 opts.num_clusters = 10; 1057 opts.thin_provision = true; 1058 1059 blob = ut_blob_create_and_open(bs, &opts); 1060 blobid = spdk_blob_get_id(blob); 1061 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 1062 CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true); 
1063 1064 /* 1) Blob with no parent */ 1065 if (decouple_parent) { 1066 /* Decouple parent of blob with no parent (should fail) */ 1067 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 1068 poll_threads(); 1069 CU_ASSERT(g_bserrno != 0); 1070 } else { 1071 /* Inflate of thin blob with no parent should made it thick */ 1072 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 1073 poll_threads(); 1074 CU_ASSERT(g_bserrno == 0); 1075 CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false); 1076 } 1077 1078 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 1079 poll_threads(); 1080 CU_ASSERT(g_bserrno == 0); 1081 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 1082 snapshotid = g_blobid; 1083 1084 CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true); 1085 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 1086 1087 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 1088 poll_threads(); 1089 CU_ASSERT(g_bserrno == 0); 1090 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 1091 snapshot = g_blob; 1092 CU_ASSERT(snapshot->data_ro == true); 1093 CU_ASSERT(snapshot->md_ro == true); 1094 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10); 1095 1096 spdk_blob_close(snapshot, blob_op_complete, NULL); 1097 poll_threads(); 1098 CU_ASSERT(g_bserrno == 0); 1099 1100 free_clusters = spdk_bs_free_cluster_count(bs); 1101 1102 /* 2) Blob with parent */ 1103 if (!decouple_parent) { 1104 /* Do full blob inflation */ 1105 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 1106 poll_threads(); 1107 CU_ASSERT(g_bserrno == 0); 1108 /* all 10 clusters should be allocated */ 1109 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10); 1110 } else { 1111 /* Decouple parent of blob */ 1112 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 1113 poll_threads(); 1114 CU_ASSERT(g_bserrno == 0); 1115 /* when only parent is removed, none of the clusters should be allocated */ 1116 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters); 1117 } 1118 1119 /* Now, it should be possible to delete snapshot */ 1120 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 1121 poll_threads(); 1122 CU_ASSERT(g_bserrno == 0); 1123 1124 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 1125 CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent); 1126 1127 spdk_bs_free_io_channel(channel); 1128 poll_threads(); 1129 1130 ut_blob_close_and_delete(bs, blob); 1131 } 1132 1133 static void 1134 blob_inflate(void) 1135 { 1136 _blob_inflate(false); 1137 _blob_inflate(true); 1138 } 1139 1140 static void 1141 blob_delete(void) 1142 { 1143 struct spdk_blob_store *bs = g_bs; 1144 struct spdk_blob_opts blob_opts; 1145 spdk_blob_id blobid; 1146 1147 /* Create a blob and then delete it. 
*/ 1148 ut_spdk_blob_opts_init(&blob_opts); 1149 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 1150 poll_threads(); 1151 CU_ASSERT(g_bserrno == 0); 1152 CU_ASSERT(g_blobid > 0); 1153 blobid = g_blobid; 1154 1155 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 1156 poll_threads(); 1157 CU_ASSERT(g_bserrno == 0); 1158 1159 /* Try to open the blob */ 1160 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 1161 poll_threads(); 1162 CU_ASSERT(g_bserrno == -ENOENT); 1163 } 1164 1165 static void 1166 blob_resize_test(void) 1167 { 1168 struct spdk_blob_store *bs = g_bs; 1169 struct spdk_blob *blob; 1170 uint64_t free_clusters; 1171 1172 free_clusters = spdk_bs_free_cluster_count(bs); 1173 1174 blob = ut_blob_create_and_open(bs, NULL); 1175 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 1176 1177 /* Confirm that resize fails if blob is marked read-only. */ 1178 blob->md_ro = true; 1179 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 1180 poll_threads(); 1181 CU_ASSERT(g_bserrno == -EPERM); 1182 blob->md_ro = false; 1183 1184 /* The blob started at 0 clusters. Resize it to be 5. */ 1185 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 1186 poll_threads(); 1187 CU_ASSERT(g_bserrno == 0); 1188 CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs)); 1189 1190 /* Shrink the blob to 3 clusters. This will not actually release 1191 * the old clusters until the blob is synced. 1192 */ 1193 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 1194 poll_threads(); 1195 CU_ASSERT(g_bserrno == 0); 1196 /* Verify there are still 5 clusters in use */ 1197 CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs)); 1198 1199 spdk_blob_sync_md(blob, blob_op_complete, NULL); 1200 poll_threads(); 1201 CU_ASSERT(g_bserrno == 0); 1202 /* Now there are only 3 clusters in use */ 1203 CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs)); 1204 1205 /* Resize the blob to be 10 clusters. Growth takes effect immediately. */ 1206 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1207 poll_threads(); 1208 CU_ASSERT(g_bserrno == 0); 1209 CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs)); 1210 1211 /* Try to resize the blob to size larger than blobstore. 
*/ 1212 spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL); 1213 poll_threads(); 1214 CU_ASSERT(g_bserrno == -ENOSPC); 1215 1216 ut_blob_close_and_delete(bs, blob); 1217 } 1218 1219 static void 1220 blob_read_only(void) 1221 { 1222 struct spdk_blob_store *bs; 1223 struct spdk_bs_dev *dev; 1224 struct spdk_blob *blob; 1225 struct spdk_bs_opts opts; 1226 spdk_blob_id blobid; 1227 int rc; 1228 1229 dev = init_dev(); 1230 spdk_bs_opts_init(&opts, sizeof(opts)); 1231 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 1232 1233 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 1234 poll_threads(); 1235 CU_ASSERT(g_bserrno == 0); 1236 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 1237 bs = g_bs; 1238 1239 blob = ut_blob_create_and_open(bs, NULL); 1240 blobid = spdk_blob_get_id(blob); 1241 1242 rc = spdk_blob_set_read_only(blob); 1243 CU_ASSERT(rc == 0); 1244 1245 CU_ASSERT(blob->data_ro == false); 1246 CU_ASSERT(blob->md_ro == false); 1247 1248 spdk_blob_sync_md(blob, bs_op_complete, NULL); 1249 poll_threads(); 1250 1251 CU_ASSERT(blob->data_ro == true); 1252 CU_ASSERT(blob->md_ro == true); 1253 CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY); 1254 1255 spdk_blob_close(blob, blob_op_complete, NULL); 1256 poll_threads(); 1257 CU_ASSERT(g_bserrno == 0); 1258 1259 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 1260 poll_threads(); 1261 CU_ASSERT(g_bserrno == 0); 1262 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 1263 blob = g_blob; 1264 1265 CU_ASSERT(blob->data_ro == true); 1266 CU_ASSERT(blob->md_ro == true); 1267 CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY); 1268 1269 spdk_blob_close(blob, blob_op_complete, NULL); 1270 poll_threads(); 1271 CU_ASSERT(g_bserrno == 0); 1272 1273 ut_bs_reload(&bs, &opts); 1274 1275 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 1276 poll_threads(); 1277 CU_ASSERT(g_bserrno == 0); 1278 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 1279 blob = g_blob; 1280 1281 CU_ASSERT(blob->data_ro == true); 1282 CU_ASSERT(blob->md_ro == true); 1283 CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY); 1284 1285 ut_blob_close_and_delete(bs, blob); 1286 1287 spdk_bs_unload(bs, bs_op_complete, NULL); 1288 poll_threads(); 1289 CU_ASSERT(g_bserrno == 0); 1290 } 1291 1292 static void 1293 channel_ops(void) 1294 { 1295 struct spdk_blob_store *bs = g_bs; 1296 struct spdk_io_channel *channel; 1297 1298 channel = spdk_bs_alloc_io_channel(bs); 1299 CU_ASSERT(channel != NULL); 1300 1301 spdk_bs_free_io_channel(channel); 1302 poll_threads(); 1303 } 1304 1305 static void 1306 blob_write(void) 1307 { 1308 struct spdk_blob_store *bs = g_bs; 1309 struct spdk_blob *blob = g_blob; 1310 struct spdk_io_channel *channel; 1311 uint64_t pages_per_cluster; 1312 uint8_t payload[10 * 4096]; 1313 1314 pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs); 1315 1316 channel = spdk_bs_alloc_io_channel(bs); 1317 CU_ASSERT(channel != NULL); 1318 1319 /* Write to a blob with 0 size */ 1320 spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1321 poll_threads(); 1322 CU_ASSERT(g_bserrno == -EINVAL); 1323 1324 /* Resize the blob */ 1325 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 1326 poll_threads(); 1327 CU_ASSERT(g_bserrno == 0); 1328 1329 /* Confirm that write fails if blob is marked read-only. 
*/ 1330 blob->data_ro = true; 1331 spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1332 poll_threads(); 1333 CU_ASSERT(g_bserrno == -EPERM); 1334 blob->data_ro = false; 1335 1336 /* Write to the blob */ 1337 spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1338 poll_threads(); 1339 CU_ASSERT(g_bserrno == 0); 1340 1341 /* Write starting beyond the end */ 1342 spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete, 1343 NULL); 1344 poll_threads(); 1345 CU_ASSERT(g_bserrno == -EINVAL); 1346 1347 /* Write starting at a valid location but going off the end */ 1348 spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1, 1349 blob_op_complete, NULL); 1350 poll_threads(); 1351 CU_ASSERT(g_bserrno == -EINVAL); 1352 1353 spdk_bs_free_io_channel(channel); 1354 poll_threads(); 1355 } 1356 1357 static void 1358 blob_read(void) 1359 { 1360 struct spdk_blob_store *bs = g_bs; 1361 struct spdk_blob *blob = g_blob; 1362 struct spdk_io_channel *channel; 1363 uint64_t pages_per_cluster; 1364 uint8_t payload[10 * 4096]; 1365 1366 pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs); 1367 1368 channel = spdk_bs_alloc_io_channel(bs); 1369 CU_ASSERT(channel != NULL); 1370 1371 /* Read from a blob with 0 size */ 1372 spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1373 poll_threads(); 1374 CU_ASSERT(g_bserrno == -EINVAL); 1375 1376 /* Resize the blob */ 1377 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 1378 poll_threads(); 1379 CU_ASSERT(g_bserrno == 0); 1380 1381 /* Confirm that read passes if blob is marked read-only. */ 1382 blob->data_ro = true; 1383 spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1384 poll_threads(); 1385 CU_ASSERT(g_bserrno == 0); 1386 blob->data_ro = false; 1387 1388 /* Read from the blob */ 1389 spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL); 1390 poll_threads(); 1391 CU_ASSERT(g_bserrno == 0); 1392 1393 /* Read starting beyond the end */ 1394 spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete, 1395 NULL); 1396 poll_threads(); 1397 CU_ASSERT(g_bserrno == -EINVAL); 1398 1399 /* Read starting at a valid location but going off the end */ 1400 spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1, 1401 blob_op_complete, NULL); 1402 poll_threads(); 1403 CU_ASSERT(g_bserrno == -EINVAL); 1404 1405 spdk_bs_free_io_channel(channel); 1406 poll_threads(); 1407 } 1408 1409 static void 1410 blob_rw_verify(void) 1411 { 1412 struct spdk_blob_store *bs = g_bs; 1413 struct spdk_blob *blob = g_blob; 1414 struct spdk_io_channel *channel; 1415 uint8_t payload_read[10 * 4096]; 1416 uint8_t payload_write[10 * 4096]; 1417 1418 channel = spdk_bs_alloc_io_channel(bs); 1419 CU_ASSERT(channel != NULL); 1420 1421 spdk_blob_resize(blob, 32, blob_op_complete, NULL); 1422 poll_threads(); 1423 CU_ASSERT(g_bserrno == 0); 1424 1425 memset(payload_write, 0xE5, sizeof(payload_write)); 1426 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 1427 poll_threads(); 1428 CU_ASSERT(g_bserrno == 0); 1429 1430 memset(payload_read, 0x00, sizeof(payload_read)); 1431 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 1432 poll_threads(); 1433 CU_ASSERT(g_bserrno == 0); 1434 CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0); 1435 1436 spdk_bs_free_io_channel(channel); 1437 
poll_threads(); 1438 } 1439 1440 static void 1441 blob_rw_verify_iov(void) 1442 { 1443 struct spdk_blob_store *bs = g_bs; 1444 struct spdk_blob *blob; 1445 struct spdk_io_channel *channel; 1446 uint8_t payload_read[10 * 4096]; 1447 uint8_t payload_write[10 * 4096]; 1448 struct iovec iov_read[3]; 1449 struct iovec iov_write[3]; 1450 void *buf; 1451 1452 channel = spdk_bs_alloc_io_channel(bs); 1453 CU_ASSERT(channel != NULL); 1454 1455 blob = ut_blob_create_and_open(bs, NULL); 1456 1457 spdk_blob_resize(blob, 2, blob_op_complete, NULL); 1458 poll_threads(); 1459 CU_ASSERT(g_bserrno == 0); 1460 1461 /* 1462 * Manually adjust the offset of the blob's second cluster. This allows 1463 * us to make sure that the readv/write code correctly accounts for I/O 1464 * that cross cluster boundaries. Start by asserting that the allocated 1465 * clusters are where we expect before modifying the second cluster. 1466 */ 1467 CU_ASSERT(blob->active.clusters[0] == 1 * 256); 1468 CU_ASSERT(blob->active.clusters[1] == 2 * 256); 1469 blob->active.clusters[1] = 3 * 256; 1470 1471 memset(payload_write, 0xE5, sizeof(payload_write)); 1472 iov_write[0].iov_base = payload_write; 1473 iov_write[0].iov_len = 1 * 4096; 1474 iov_write[1].iov_base = payload_write + 1 * 4096; 1475 iov_write[1].iov_len = 5 * 4096; 1476 iov_write[2].iov_base = payload_write + 6 * 4096; 1477 iov_write[2].iov_len = 4 * 4096; 1478 /* 1479 * Choose a page offset just before the cluster boundary. The first 6 pages of payload 1480 * will get written to the first cluster, the last 4 to the second cluster. 1481 */ 1482 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 1483 poll_threads(); 1484 CU_ASSERT(g_bserrno == 0); 1485 1486 memset(payload_read, 0xAA, sizeof(payload_read)); 1487 iov_read[0].iov_base = payload_read; 1488 iov_read[0].iov_len = 3 * 4096; 1489 iov_read[1].iov_base = payload_read + 3 * 4096; 1490 iov_read[1].iov_len = 4 * 4096; 1491 iov_read[2].iov_base = payload_read + 7 * 4096; 1492 iov_read[2].iov_len = 3 * 4096; 1493 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 1494 poll_threads(); 1495 CU_ASSERT(g_bserrno == 0); 1496 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 1497 1498 buf = calloc(1, 256 * 4096); 1499 SPDK_CU_ASSERT_FATAL(buf != NULL); 1500 /* Check that cluster 2 on "disk" was not modified. */ 1501 CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0); 1502 free(buf); 1503 1504 spdk_blob_close(blob, blob_op_complete, NULL); 1505 poll_threads(); 1506 CU_ASSERT(g_bserrno == 0); 1507 1508 spdk_bs_free_io_channel(channel); 1509 poll_threads(); 1510 } 1511 1512 static uint32_t 1513 bs_channel_get_req_count(struct spdk_io_channel *_channel) 1514 { 1515 struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel); 1516 struct spdk_bs_request_set *set; 1517 uint32_t count = 0; 1518 1519 TAILQ_FOREACH(set, &channel->reqs, link) { 1520 count++; 1521 } 1522 1523 return count; 1524 } 1525 1526 static void 1527 blob_rw_verify_iov_nomem(void) 1528 { 1529 struct spdk_blob_store *bs = g_bs; 1530 struct spdk_blob *blob = g_blob; 1531 struct spdk_io_channel *channel; 1532 uint8_t payload_write[10 * 4096]; 1533 struct iovec iov_write[3]; 1534 uint32_t req_count; 1535 1536 channel = spdk_bs_alloc_io_channel(bs); 1537 CU_ASSERT(channel != NULL); 1538 1539 spdk_blob_resize(blob, 2, blob_op_complete, NULL); 1540 poll_threads(); 1541 CU_ASSERT(g_bserrno == 0); 1542 1543 /* 1544 * Choose a page offset just before the cluster boundary. 
The first 6 pages of payload 1545 * will get written to the first cluster, the last 4 to the second cluster. 1546 */ 1547 iov_write[0].iov_base = payload_write; 1548 iov_write[0].iov_len = 1 * 4096; 1549 iov_write[1].iov_base = payload_write + 1 * 4096; 1550 iov_write[1].iov_len = 5 * 4096; 1551 iov_write[2].iov_base = payload_write + 6 * 4096; 1552 iov_write[2].iov_len = 4 * 4096; 1553 MOCK_SET(calloc, NULL); 1554 req_count = bs_channel_get_req_count(channel); 1555 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 1556 poll_threads(); 1557 CU_ASSERT(g_bserrno = -ENOMEM); 1558 CU_ASSERT(req_count == bs_channel_get_req_count(channel)); 1559 MOCK_CLEAR(calloc); 1560 1561 spdk_bs_free_io_channel(channel); 1562 poll_threads(); 1563 } 1564 1565 static void 1566 blob_rw_iov_read_only(void) 1567 { 1568 struct spdk_blob_store *bs = g_bs; 1569 struct spdk_blob *blob = g_blob; 1570 struct spdk_io_channel *channel; 1571 uint8_t payload_read[4096]; 1572 uint8_t payload_write[4096]; 1573 struct iovec iov_read; 1574 struct iovec iov_write; 1575 1576 channel = spdk_bs_alloc_io_channel(bs); 1577 CU_ASSERT(channel != NULL); 1578 1579 spdk_blob_resize(blob, 2, blob_op_complete, NULL); 1580 poll_threads(); 1581 CU_ASSERT(g_bserrno == 0); 1582 1583 /* Verify that writev failed if read_only flag is set. */ 1584 blob->data_ro = true; 1585 iov_write.iov_base = payload_write; 1586 iov_write.iov_len = sizeof(payload_write); 1587 spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL); 1588 poll_threads(); 1589 CU_ASSERT(g_bserrno == -EPERM); 1590 1591 /* Verify that reads pass if data_ro flag is set. */ 1592 iov_read.iov_base = payload_read; 1593 iov_read.iov_len = sizeof(payload_read); 1594 spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL); 1595 poll_threads(); 1596 CU_ASSERT(g_bserrno == 0); 1597 1598 spdk_bs_free_io_channel(channel); 1599 poll_threads(); 1600 } 1601 1602 static void 1603 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel, 1604 uint8_t *payload, uint64_t offset, uint64_t length, 1605 spdk_blob_op_complete cb_fn, void *cb_arg) 1606 { 1607 uint64_t i; 1608 uint8_t *buf; 1609 uint64_t page_size = spdk_bs_get_page_size(blob->bs); 1610 1611 /* To be sure that operation is NOT splitted, read one page at the time */ 1612 buf = payload; 1613 for (i = 0; i < length; i++) { 1614 spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL); 1615 poll_threads(); 1616 if (g_bserrno != 0) { 1617 /* Pass the error code up */ 1618 break; 1619 } 1620 buf += page_size; 1621 } 1622 1623 cb_fn(cb_arg, g_bserrno); 1624 } 1625 1626 static void 1627 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel, 1628 uint8_t *payload, uint64_t offset, uint64_t length, 1629 spdk_blob_op_complete cb_fn, void *cb_arg) 1630 { 1631 uint64_t i; 1632 uint8_t *buf; 1633 uint64_t page_size = spdk_bs_get_page_size(blob->bs); 1634 1635 /* To be sure that operation is NOT splitted, write one page at the time */ 1636 buf = payload; 1637 for (i = 0; i < length; i++) { 1638 spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL); 1639 poll_threads(); 1640 if (g_bserrno != 0) { 1641 /* Pass the error code up */ 1642 break; 1643 } 1644 buf += page_size; 1645 } 1646 1647 cb_fn(cb_arg, g_bserrno); 1648 } 1649 1650 static void 1651 blob_operation_split_rw(void) 1652 { 1653 struct spdk_blob_store *bs = g_bs; 1654 struct spdk_blob *blob; 1655 struct spdk_io_channel *channel; 
1656 struct spdk_blob_opts opts; 1657 uint64_t cluster_size; 1658 1659 uint64_t payload_size; 1660 uint8_t *payload_read; 1661 uint8_t *payload_write; 1662 uint8_t *payload_pattern; 1663 1664 uint64_t page_size; 1665 uint64_t pages_per_cluster; 1666 uint64_t pages_per_payload; 1667 1668 uint64_t i; 1669 1670 cluster_size = spdk_bs_get_cluster_size(bs); 1671 page_size = spdk_bs_get_page_size(bs); 1672 pages_per_cluster = cluster_size / page_size; 1673 pages_per_payload = pages_per_cluster * 5; 1674 payload_size = cluster_size * 5; 1675 1676 payload_read = malloc(payload_size); 1677 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 1678 1679 payload_write = malloc(payload_size); 1680 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 1681 1682 payload_pattern = malloc(payload_size); 1683 SPDK_CU_ASSERT_FATAL(payload_pattern != NULL); 1684 1685 /* Prepare random pattern to write */ 1686 memset(payload_pattern, 0xFF, payload_size); 1687 for (i = 0; i < pages_per_payload; i++) { 1688 *((uint64_t *)(payload_pattern + page_size * i)) = (i + 1); 1689 } 1690 1691 channel = spdk_bs_alloc_io_channel(bs); 1692 SPDK_CU_ASSERT_FATAL(channel != NULL); 1693 1694 /* Create blob */ 1695 ut_spdk_blob_opts_init(&opts); 1696 opts.thin_provision = false; 1697 opts.num_clusters = 5; 1698 1699 blob = ut_blob_create_and_open(bs, &opts); 1700 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 1701 1702 /* Initial read should return zeroed payload */ 1703 memset(payload_read, 0xFF, payload_size); 1704 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1705 poll_threads(); 1706 CU_ASSERT(g_bserrno == 0); 1707 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 1708 1709 /* Fill whole blob except last page */ 1710 spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1, 1711 blob_op_complete, NULL); 1712 poll_threads(); 1713 CU_ASSERT(g_bserrno == 0); 1714 1715 /* Write last page with a pattern */ 1716 spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1, 1717 blob_op_complete, NULL); 1718 poll_threads(); 1719 CU_ASSERT(g_bserrno == 0); 1720 1721 /* Read whole blob and check consistency */ 1722 memset(payload_read, 0xFF, payload_size); 1723 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1724 poll_threads(); 1725 CU_ASSERT(g_bserrno == 0); 1726 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0); 1727 CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0); 1728 1729 /* Fill whole blob except first page */ 1730 spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1, 1731 blob_op_complete, NULL); 1732 poll_threads(); 1733 CU_ASSERT(g_bserrno == 0); 1734 1735 /* Write first page with a pattern */ 1736 spdk_blob_io_write(blob, channel, payload_pattern, 0, 1, 1737 blob_op_complete, NULL); 1738 poll_threads(); 1739 CU_ASSERT(g_bserrno == 0); 1740 1741 /* Read whole blob and check consistency */ 1742 memset(payload_read, 0xFF, payload_size); 1743 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1744 poll_threads(); 1745 CU_ASSERT(g_bserrno == 0); 1746 CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0); 1747 CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0); 1748 1749 1750 /* Fill whole blob with a pattern (5 clusters) */ 1751 1752 /* 1. Read test. 
*/ 1753 _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload, 1754 blob_op_complete, NULL); 1755 poll_threads(); 1756 CU_ASSERT(g_bserrno == 0); 1757 1758 memset(payload_read, 0xFF, payload_size); 1759 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1760 poll_threads(); 1761 poll_threads(); 1762 CU_ASSERT(g_bserrno == 0); 1763 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1764 1765 /* 2. Write test. */ 1766 spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload, 1767 blob_op_complete, NULL); 1768 poll_threads(); 1769 CU_ASSERT(g_bserrno == 0); 1770 1771 memset(payload_read, 0xFF, payload_size); 1772 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1773 poll_threads(); 1774 CU_ASSERT(g_bserrno == 0); 1775 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1776 1777 spdk_bs_free_io_channel(channel); 1778 poll_threads(); 1779 1780 g_blob = NULL; 1781 g_blobid = 0; 1782 1783 free(payload_read); 1784 free(payload_write); 1785 free(payload_pattern); 1786 1787 ut_blob_close_and_delete(bs, blob); 1788 } 1789 1790 static void 1791 blob_operation_split_rw_iov(void) 1792 { 1793 struct spdk_blob_store *bs = g_bs; 1794 struct spdk_blob *blob; 1795 struct spdk_io_channel *channel; 1796 struct spdk_blob_opts opts; 1797 uint64_t cluster_size; 1798 1799 uint64_t payload_size; 1800 uint8_t *payload_read; 1801 uint8_t *payload_write; 1802 uint8_t *payload_pattern; 1803 1804 uint64_t page_size; 1805 uint64_t pages_per_cluster; 1806 uint64_t pages_per_payload; 1807 1808 struct iovec iov_read[2]; 1809 struct iovec iov_write[2]; 1810 1811 uint64_t i, j; 1812 1813 cluster_size = spdk_bs_get_cluster_size(bs); 1814 page_size = spdk_bs_get_page_size(bs); 1815 pages_per_cluster = cluster_size / page_size; 1816 pages_per_payload = pages_per_cluster * 5; 1817 payload_size = cluster_size * 5; 1818 1819 payload_read = malloc(payload_size); 1820 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 1821 1822 payload_write = malloc(payload_size); 1823 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 1824 1825 payload_pattern = malloc(payload_size); 1826 SPDK_CU_ASSERT_FATAL(payload_pattern != NULL); 1827 1828 /* Prepare random pattern to write */ 1829 for (i = 0; i < pages_per_payload; i++) { 1830 for (j = 0; j < page_size / sizeof(uint64_t); j++) { 1831 uint64_t *tmp; 1832 1833 tmp = (uint64_t *)payload_pattern; 1834 tmp += ((page_size * i) / sizeof(uint64_t)) + j; 1835 *tmp = i + 1; 1836 } 1837 } 1838 1839 channel = spdk_bs_alloc_io_channel(bs); 1840 SPDK_CU_ASSERT_FATAL(channel != NULL); 1841 1842 /* Create blob */ 1843 ut_spdk_blob_opts_init(&opts); 1844 opts.thin_provision = false; 1845 opts.num_clusters = 5; 1846 1847 blob = ut_blob_create_and_open(bs, &opts); 1848 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 1849 1850 /* Initial read should return a zeroed payload */ 1851 memset(payload_read, 0xFF, payload_size); 1852 iov_read[0].iov_base = payload_read; 1853 iov_read[0].iov_len = cluster_size * 3; 1854 iov_read[1].iov_base = payload_read + cluster_size * 3; 1855 iov_read[1].iov_len = cluster_size * 2; 1856 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1857 poll_threads(); 1858 CU_ASSERT(g_bserrno == 0); 1859 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 1860 1861 /* First of iovs fills whole blob except last page and second of iovs writes last page 1862 * with a pattern.
*/ 1863 iov_write[0].iov_base = payload_pattern; 1864 iov_write[0].iov_len = payload_size - page_size; 1865 iov_write[1].iov_base = payload_pattern; 1866 iov_write[1].iov_len = page_size; 1867 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1868 poll_threads(); 1869 CU_ASSERT(g_bserrno == 0); 1870 1871 /* Read whole blob and check consistency */ 1872 memset(payload_read, 0xFF, payload_size); 1873 iov_read[0].iov_base = payload_read; 1874 iov_read[0].iov_len = cluster_size * 2; 1875 iov_read[1].iov_base = payload_read + cluster_size * 2; 1876 iov_read[1].iov_len = cluster_size * 3; 1877 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1878 poll_threads(); 1879 CU_ASSERT(g_bserrno == 0); 1880 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0); 1881 CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0); 1882 1883 /* First of iovs fills only first page and second of iovs writes whole blob except 1884 * first page with a pattern. */ 1885 iov_write[0].iov_base = payload_pattern; 1886 iov_write[0].iov_len = page_size; 1887 iov_write[1].iov_base = payload_pattern; 1888 iov_write[1].iov_len = payload_size - page_size; 1889 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1890 poll_threads(); 1891 CU_ASSERT(g_bserrno == 0); 1892 1893 /* Read whole blob and check consistency */ 1894 memset(payload_read, 0xFF, payload_size); 1895 iov_read[0].iov_base = payload_read; 1896 iov_read[0].iov_len = cluster_size * 4; 1897 iov_read[1].iov_base = payload_read + cluster_size * 4; 1898 iov_read[1].iov_len = cluster_size; 1899 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1900 poll_threads(); 1901 CU_ASSERT(g_bserrno == 0); 1902 CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0); 1903 CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0); 1904 1905 1906 /* Fill whole blob with a pattern (5 clusters) */ 1907 1908 /* 1. Read test. */ 1909 _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload, 1910 blob_op_complete, NULL); 1911 poll_threads(); 1912 CU_ASSERT(g_bserrno == 0); 1913 1914 memset(payload_read, 0xFF, payload_size); 1915 iov_read[0].iov_base = payload_read; 1916 iov_read[0].iov_len = cluster_size; 1917 iov_read[1].iov_base = payload_read + cluster_size; 1918 iov_read[1].iov_len = cluster_size * 4; 1919 spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL); 1920 poll_threads(); 1921 CU_ASSERT(g_bserrno == 0); 1922 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1923 1924 /* 2. Write test. 
*/ 1925 iov_write[0].iov_base = payload_read; 1926 iov_write[0].iov_len = cluster_size * 2; 1927 iov_write[1].iov_base = payload_read + cluster_size * 2; 1928 iov_write[1].iov_len = cluster_size * 3; 1929 spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL); 1930 poll_threads(); 1931 CU_ASSERT(g_bserrno == 0); 1932 1933 memset(payload_read, 0xFF, payload_size); 1934 _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL); 1935 poll_threads(); 1936 CU_ASSERT(g_bserrno == 0); 1937 CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0); 1938 1939 spdk_bs_free_io_channel(channel); 1940 poll_threads(); 1941 1942 g_blob = NULL; 1943 g_blobid = 0; 1944 1945 free(payload_read); 1946 free(payload_write); 1947 free(payload_pattern); 1948 1949 ut_blob_close_and_delete(bs, blob); 1950 } 1951 1952 static void 1953 blob_unmap(void) 1954 { 1955 struct spdk_blob_store *bs = g_bs; 1956 struct spdk_blob *blob; 1957 struct spdk_io_channel *channel; 1958 struct spdk_blob_opts opts; 1959 uint8_t payload[4096]; 1960 int i; 1961 1962 channel = spdk_bs_alloc_io_channel(bs); 1963 CU_ASSERT(channel != NULL); 1964 1965 ut_spdk_blob_opts_init(&opts); 1966 opts.num_clusters = 10; 1967 1968 blob = ut_blob_create_and_open(bs, &opts); 1969 1970 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 1971 poll_threads(); 1972 CU_ASSERT(g_bserrno == 0); 1973 1974 memset(payload, 0, sizeof(payload)); 1975 payload[0] = 0xFF; 1976 1977 /* 1978 * Set first byte of every cluster to 0xFF. 1979 * First cluster on device is reserved so let's start from cluster number 1 1980 */ 1981 for (i = 1; i < 11; i++) { 1982 g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF; 1983 } 1984 1985 /* Confirm writes */ 1986 for (i = 0; i < 10; i++) { 1987 payload[0] = 0; 1988 spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1, 1989 blob_op_complete, NULL); 1990 poll_threads(); 1991 CU_ASSERT(g_bserrno == 0); 1992 CU_ASSERT(payload[0] == 0xFF); 1993 } 1994 1995 /* Mark some clusters as unallocated */ 1996 blob->active.clusters[1] = 0; 1997 blob->active.clusters[2] = 0; 1998 blob->active.clusters[3] = 0; 1999 blob->active.clusters[6] = 0; 2000 blob->active.clusters[8] = 0; 2001 2002 /* Unmap clusters by resizing to 0 */ 2003 spdk_blob_resize(blob, 0, blob_op_complete, NULL); 2004 poll_threads(); 2005 CU_ASSERT(g_bserrno == 0); 2006 2007 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2008 poll_threads(); 2009 CU_ASSERT(g_bserrno == 0); 2010 2011 /* Confirm that only 'allocated' clusters were unmapped */ 2012 for (i = 1; i < 11; i++) { 2013 switch (i) { 2014 case 2: 2015 case 3: 2016 case 4: 2017 case 7: 2018 case 9: 2019 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF); 2020 break; 2021 default: 2022 CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0); 2023 break; 2024 } 2025 } 2026 2027 spdk_bs_free_io_channel(channel); 2028 poll_threads(); 2029 2030 ut_blob_close_and_delete(bs, blob); 2031 } 2032 2033 static void 2034 blob_iter(void) 2035 { 2036 struct spdk_blob_store *bs = g_bs; 2037 struct spdk_blob *blob; 2038 spdk_blob_id blobid; 2039 struct spdk_blob_opts blob_opts; 2040 2041 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 2042 poll_threads(); 2043 CU_ASSERT(g_blob == NULL); 2044 CU_ASSERT(g_bserrno == -ENOENT); 2045 2046 ut_spdk_blob_opts_init(&blob_opts); 2047 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2048 poll_threads(); 2049 CU_ASSERT(g_bserrno == 0); 
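/*
 * For reference, a sketch of the full iteration pattern exercised by blob_iter()
 * (using the callback/globals conventions of this file):
 *
 *     spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
 *     poll_threads();
 *     while (g_bserrno == 0 && g_blob != NULL) {
 *             // ... inspect g_blob ...
 *             spdk_bs_iter_next(bs, g_blob, blob_op_with_handle_complete, NULL);
 *             poll_threads();
 *     }
 *     // the walk terminates with g_bserrno == -ENOENT
 *
 * The assertions below cover exactly this first/next sequence for a store that
 * holds a single blob.
 */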
2050 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2051 blobid = g_blobid; 2052 2053 spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL); 2054 poll_threads(); 2055 CU_ASSERT(g_blob != NULL); 2056 CU_ASSERT(g_bserrno == 0); 2057 blob = g_blob; 2058 CU_ASSERT(spdk_blob_get_id(blob) == blobid); 2059 2060 spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL); 2061 poll_threads(); 2062 CU_ASSERT(g_blob == NULL); 2063 CU_ASSERT(g_bserrno == -ENOENT); 2064 } 2065 2066 static void 2067 blob_xattr(void) 2068 { 2069 struct spdk_blob_store *bs = g_bs; 2070 struct spdk_blob *blob = g_blob; 2071 spdk_blob_id blobid = spdk_blob_get_id(blob); 2072 uint64_t length; 2073 int rc; 2074 const char *name1, *name2; 2075 const void *value; 2076 size_t value_len; 2077 struct spdk_xattr_names *names; 2078 2079 /* Test that set_xattr fails if md_ro flag is set. */ 2080 blob->md_ro = true; 2081 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2082 CU_ASSERT(rc == -EPERM); 2083 2084 blob->md_ro = false; 2085 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2086 CU_ASSERT(rc == 0); 2087 2088 length = 2345; 2089 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2090 CU_ASSERT(rc == 0); 2091 2092 /* Overwrite "length" xattr. */ 2093 length = 3456; 2094 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2095 CU_ASSERT(rc == 0); 2096 2097 /* get_xattr should still work even if md_ro flag is set. */ 2098 value = NULL; 2099 blob->md_ro = true; 2100 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2101 CU_ASSERT(rc == 0); 2102 SPDK_CU_ASSERT_FATAL(value != NULL); 2103 CU_ASSERT(*(uint64_t *)value == length); 2104 CU_ASSERT(value_len == 8); 2105 blob->md_ro = false; 2106 2107 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2108 CU_ASSERT(rc == -ENOENT); 2109 2110 names = NULL; 2111 rc = spdk_blob_get_xattr_names(blob, &names); 2112 CU_ASSERT(rc == 0); 2113 SPDK_CU_ASSERT_FATAL(names != NULL); 2114 CU_ASSERT(spdk_xattr_names_get_count(names) == 2); 2115 name1 = spdk_xattr_names_get_name(names, 0); 2116 SPDK_CU_ASSERT_FATAL(name1 != NULL); 2117 CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length")); 2118 name2 = spdk_xattr_names_get_name(names, 1); 2119 SPDK_CU_ASSERT_FATAL(name2 != NULL); 2120 CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length")); 2121 CU_ASSERT(strcmp(name1, name2)); 2122 spdk_xattr_names_free(names); 2123 2124 /* Confirm that remove_xattr fails if md_ro is set to true. 
*/ 2125 blob->md_ro = true; 2126 rc = spdk_blob_remove_xattr(blob, "name"); 2127 CU_ASSERT(rc == -EPERM); 2128 2129 blob->md_ro = false; 2130 rc = spdk_blob_remove_xattr(blob, "name"); 2131 CU_ASSERT(rc == 0); 2132 2133 rc = spdk_blob_remove_xattr(blob, "foobar"); 2134 CU_ASSERT(rc == -ENOENT); 2135 2136 /* Set internal xattr */ 2137 length = 7898; 2138 rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true); 2139 CU_ASSERT(rc == 0); 2140 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2141 CU_ASSERT(rc == 0); 2142 CU_ASSERT(*(uint64_t *)value == length); 2143 /* Try to get a public xattr with the same name */ 2144 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2145 CU_ASSERT(rc != 0); 2146 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false); 2147 CU_ASSERT(rc != 0); 2148 /* Check if SPDK_BLOB_INTERNAL_XATTR is set */ 2149 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 2150 SPDK_BLOB_INTERNAL_XATTR); 2151 2152 spdk_blob_close(blob, blob_op_complete, NULL); 2153 poll_threads(); 2154 2155 /* Check if xattrs are persisted */ 2156 ut_bs_reload(&bs, NULL); 2157 2158 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2159 poll_threads(); 2160 CU_ASSERT(g_bserrno == 0); 2161 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2162 blob = g_blob; 2163 2164 rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true); 2165 CU_ASSERT(rc == 0); 2166 CU_ASSERT(*(uint64_t *)value == length); 2167 2168 /* Try to get the internal xattr through the public call */ 2169 rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len); 2170 CU_ASSERT(rc != 0); 2171 2172 rc = blob_remove_xattr(blob, "internal", true); 2173 CU_ASSERT(rc == 0); 2174 2175 CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0); 2176 } 2177 2178 static void 2179 blob_parse_md(void) 2180 { 2181 struct spdk_blob_store *bs = g_bs; 2182 struct spdk_blob *blob; 2183 int rc; 2184 uint32_t used_pages; 2185 size_t xattr_length; 2186 char *xattr; 2187 2188 used_pages = spdk_bit_array_count_set(bs->used_md_pages); 2189 blob = ut_blob_create_and_open(bs, NULL); 2190 2191 /* Set a large xattr to force more than 1 page of metadata. */ 2192 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 2193 strlen("large_xattr"); 2194 xattr = calloc(xattr_length, sizeof(char)); 2195 SPDK_CU_ASSERT_FATAL(xattr != NULL); 2196 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 2197 free(xattr); 2198 SPDK_CU_ASSERT_FATAL(rc == 0); 2199 2200 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2201 poll_threads(); 2202 2203 /* Delete the blob and verify that the number of used metadata pages returns to the count from before its creation.
*/ 2204 SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages)); 2205 ut_blob_close_and_delete(bs, blob); 2206 SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages)); 2207 } 2208 2209 static void 2210 bs_load(void) 2211 { 2212 struct spdk_blob_store *bs; 2213 struct spdk_bs_dev *dev; 2214 spdk_blob_id blobid; 2215 struct spdk_blob *blob; 2216 struct spdk_bs_super_block *super_block; 2217 uint64_t length; 2218 int rc; 2219 const void *value; 2220 size_t value_len; 2221 struct spdk_bs_opts opts; 2222 struct spdk_blob_opts blob_opts; 2223 2224 dev = init_dev(); 2225 spdk_bs_opts_init(&opts, sizeof(opts)); 2226 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2227 2228 /* Initialize a new blob store */ 2229 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2230 poll_threads(); 2231 CU_ASSERT(g_bserrno == 0); 2232 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2233 bs = g_bs; 2234 2235 /* Try to open a blobid that does not exist */ 2236 spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL); 2237 poll_threads(); 2238 CU_ASSERT(g_bserrno == -ENOENT); 2239 CU_ASSERT(g_blob == NULL); 2240 2241 /* Create a blob */ 2242 blob = ut_blob_create_and_open(bs, NULL); 2243 blobid = spdk_blob_get_id(blob); 2244 2245 /* Try again to open a valid blob but without the upper bit set */ 2246 spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL); 2247 poll_threads(); 2248 CU_ASSERT(g_bserrno == -ENOENT); 2249 CU_ASSERT(g_blob == NULL); 2250 2251 /* Set some xattrs */ 2252 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 2253 CU_ASSERT(rc == 0); 2254 2255 length = 2345; 2256 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 2257 CU_ASSERT(rc == 0); 2258 2259 /* Resize the blob */ 2260 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2261 poll_threads(); 2262 CU_ASSERT(g_bserrno == 0); 2263 2264 spdk_blob_close(blob, blob_op_complete, NULL); 2265 poll_threads(); 2266 CU_ASSERT(g_bserrno == 0); 2267 blob = NULL; 2268 g_blob = NULL; 2269 g_blobid = SPDK_BLOBID_INVALID; 2270 2271 /* Unload the blob store */ 2272 spdk_bs_unload(bs, bs_op_complete, NULL); 2273 poll_threads(); 2274 CU_ASSERT(g_bserrno == 0); 2275 g_bs = NULL; 2276 g_blob = NULL; 2277 g_blobid = 0; 2278 2279 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2280 CU_ASSERT(super_block->clean == 1); 2281 2282 /* Load should fail for device with an unsupported blocklen */ 2283 dev = init_dev(); 2284 dev->blocklen = SPDK_BS_PAGE_SIZE * 2; 2285 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 2286 poll_threads(); 2287 CU_ASSERT(g_bserrno == -EINVAL); 2288 2289 /* Load should fail when max_md_ops is set to zero */ 2290 dev = init_dev(); 2291 spdk_bs_opts_init(&opts, sizeof(opts)); 2292 opts.max_md_ops = 0; 2293 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2294 poll_threads(); 2295 CU_ASSERT(g_bserrno == -EINVAL); 2296 2297 /* Load should fail when max_channel_ops is set to zero */ 2298 dev = init_dev(); 2299 spdk_bs_opts_init(&opts, sizeof(opts)); 2300 opts.max_channel_ops = 0; 2301 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2302 poll_threads(); 2303 CU_ASSERT(g_bserrno == -EINVAL); 2304 2305 /* Load an existing blob store */ 2306 dev = init_dev(); 2307 spdk_bs_opts_init(&opts, sizeof(opts)); 2308 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2309 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2310 poll_threads(); 2311 CU_ASSERT(g_bserrno == 0);
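/*
 * Sketch of the super block 'clean' flag lifecycle that the surrounding
 * assertions rely on (an assumption distilled from the checks in this file,
 * not a full description of the on-disk protocol):
 *
 *     spdk_bs_init(dev, &opts, ...);   // create a new store
 *     ...                              // metadata updates leave clean == 0
 *     spdk_bs_unload(bs, ...);         // a clean shutdown persists clean == 1
 *     spdk_bs_load(dev, &opts, ...);   // a clean store loads without recovery
 *
 * A store left with clean == 0 instead takes the recovery path, which is
 * exercised by bs_test_recover_cluster_count() and blob_dirty_shutdown().
 */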
2312 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2313 bs = g_bs; 2314 2315 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2316 CU_ASSERT(super_block->clean == 1); 2317 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2318 2319 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2320 poll_threads(); 2321 CU_ASSERT(g_bserrno == 0); 2322 CU_ASSERT(g_blob != NULL); 2323 blob = g_blob; 2324 2325 /* Verify that blobstore is marked dirty after first metadata sync */ 2326 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2327 CU_ASSERT(super_block->clean == 1); 2328 2329 /* Get the xattrs */ 2330 value = NULL; 2331 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 2332 CU_ASSERT(rc == 0); 2333 SPDK_CU_ASSERT_FATAL(value != NULL); 2334 CU_ASSERT(*(uint64_t *)value == length); 2335 CU_ASSERT(value_len == 8); 2336 2337 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 2338 CU_ASSERT(rc == -ENOENT); 2339 2340 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 2341 2342 spdk_blob_close(blob, blob_op_complete, NULL); 2343 poll_threads(); 2344 CU_ASSERT(g_bserrno == 0); 2345 blob = NULL; 2346 g_blob = NULL; 2347 2348 spdk_bs_unload(bs, bs_op_complete, NULL); 2349 poll_threads(); 2350 CU_ASSERT(g_bserrno == 0); 2351 g_bs = NULL; 2352 2353 /* Load should fail: bdev size < saved size */ 2354 dev = init_dev(); 2355 dev->blockcnt /= 2; 2356 2357 spdk_bs_opts_init(&opts, sizeof(opts)); 2358 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2359 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2360 poll_threads(); 2361 2362 CU_ASSERT(g_bserrno == -EILSEQ); 2363 2364 /* Load should succeed: bdev size > saved size */ 2365 dev = init_dev(); 2366 dev->blockcnt *= 4; 2367 2368 spdk_bs_opts_init(&opts, sizeof(opts)); 2369 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2370 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2371 poll_threads(); 2372 CU_ASSERT(g_bserrno == 0); 2373 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2374 bs = g_bs; 2375 2376 CU_ASSERT(g_bserrno == 0); 2377 spdk_bs_unload(bs, bs_op_complete, NULL); 2378 poll_threads(); 2379 2380 2381 /* Test compatibility mode */ 2382 2383 dev = init_dev(); 2384 super_block->size = 0; 2385 super_block->crc = blob_md_page_calc_crc(super_block); 2386 2387 spdk_bs_opts_init(&opts, sizeof(opts)); 2388 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2389 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2390 poll_threads(); 2391 CU_ASSERT(g_bserrno == 0); 2392 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2393 bs = g_bs; 2394 2395 /* Create a blob */ 2396 ut_spdk_blob_opts_init(&blob_opts); 2397 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2398 poll_threads(); 2399 CU_ASSERT(g_bserrno == 0); 2400 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2401 2402 /* Blobstore should update number of blocks in super_block */ 2403 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2404 CU_ASSERT(super_block->clean == 0); 2405 2406 spdk_bs_unload(bs, bs_op_complete, NULL); 2407 poll_threads(); 2408 CU_ASSERT(g_bserrno == 0); 2409 CU_ASSERT(super_block->clean == 1); 2410 g_bs = NULL; 2411 2412 } 2413 2414 static void 2415 bs_load_pending_removal(void) 2416 { 2417 struct spdk_blob_store *bs = g_bs; 2418 struct spdk_blob_opts opts; 2419 struct spdk_blob *blob, *snapshot; 2420 spdk_blob_id blobid, snapshotid; 2421 const void *value; 2422 size_t value_len; 2423 int rc; 2424 2425 /* Create blob */ 2426 
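/*
 * This test drives the SNAPSHOT_PENDING_REMOVAL machinery by setting the
 * internal xattr directly.  A rough sketch of what it stands in for (an
 * assumption drawn from the checks below, not a spec of the delete path):
 *
 *     spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
 *     // a snapshot removal that is interrupted part-way would leave
 *     // SNAPSHOT_PENDING_REMOVAL set in the snapshot's metadata; on the next
 *     // load the blobstore either clears the xattr (a blob still references
 *     // the snapshot) or finishes removing the snapshot (nothing references it)
 *
 * Writing the xattr by hand with blob_set_xattr(..., true) lets both reload
 * outcomes be checked without simulating a half-finished delete.
 */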
ut_spdk_blob_opts_init(&opts); 2427 opts.num_clusters = 10; 2428 2429 blob = ut_blob_create_and_open(bs, &opts); 2430 blobid = spdk_blob_get_id(blob); 2431 2432 /* Create snapshot */ 2433 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 2434 poll_threads(); 2435 CU_ASSERT(g_bserrno == 0); 2436 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2437 snapshotid = g_blobid; 2438 2439 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2440 poll_threads(); 2441 CU_ASSERT(g_bserrno == 0); 2442 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2443 snapshot = g_blob; 2444 2445 /* Set SNAPSHOT_PENDING_REMOVAL xattr */ 2446 snapshot->md_ro = false; 2447 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2448 CU_ASSERT(rc == 0); 2449 snapshot->md_ro = true; 2450 2451 spdk_blob_close(snapshot, blob_op_complete, NULL); 2452 poll_threads(); 2453 CU_ASSERT(g_bserrno == 0); 2454 2455 spdk_blob_close(blob, blob_op_complete, NULL); 2456 poll_threads(); 2457 CU_ASSERT(g_bserrno == 0); 2458 2459 /* Reload blobstore */ 2460 ut_bs_reload(&bs, NULL); 2461 2462 /* Snapshot should not be removed as blob is still pointing to it */ 2463 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2464 poll_threads(); 2465 CU_ASSERT(g_bserrno == 0); 2466 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2467 snapshot = g_blob; 2468 2469 /* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */ 2470 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 2471 CU_ASSERT(rc != 0); 2472 2473 /* Set SNAPSHOT_PENDING_REMOVAL xattr again */ 2474 snapshot->md_ro = false; 2475 rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true); 2476 CU_ASSERT(rc == 0); 2477 snapshot->md_ro = true; 2478 2479 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 2480 poll_threads(); 2481 CU_ASSERT(g_bserrno == 0); 2482 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 2483 blob = g_blob; 2484 2485 /* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */ 2486 blob_remove_xattr(blob, BLOB_SNAPSHOT, true); 2487 2488 spdk_blob_sync_md(blob, blob_op_complete, NULL); 2489 poll_threads(); 2490 CU_ASSERT(g_bserrno == 0); 2491 2492 spdk_blob_close(snapshot, blob_op_complete, NULL); 2493 poll_threads(); 2494 CU_ASSERT(g_bserrno == 0); 2495 2496 spdk_blob_close(blob, blob_op_complete, NULL); 2497 poll_threads(); 2498 CU_ASSERT(g_bserrno == 0); 2499 2500 /* Reload blobstore */ 2501 ut_bs_reload(&bs, NULL); 2502 2503 /* Snapshot should be removed as blob is not pointing to it anymore */ 2504 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 2505 poll_threads(); 2506 CU_ASSERT(g_bserrno != 0); 2507 } 2508 2509 static void 2510 bs_load_custom_cluster_size(void) 2511 { 2512 struct spdk_blob_store *bs; 2513 struct spdk_bs_dev *dev; 2514 struct spdk_bs_super_block *super_block; 2515 struct spdk_bs_opts opts; 2516 uint32_t custom_cluster_size = 4194304; /* 4MiB */ 2517 uint32_t cluster_sz; 2518 uint64_t total_clusters; 2519 2520 dev = init_dev(); 2521 spdk_bs_opts_init(&opts, sizeof(opts)); 2522 opts.cluster_sz = custom_cluster_size; 2523 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2524 2525 /* Initialize a new blob store */ 2526 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2527 poll_threads(); 2528 CU_ASSERT(g_bserrno == 0); 2529 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2530 bs = g_bs; 2531 cluster_sz = bs->cluster_sz; 2532 total_clusters = 
bs->total_clusters; 2533 2534 /* Unload the blob store */ 2535 spdk_bs_unload(bs, bs_op_complete, NULL); 2536 poll_threads(); 2537 CU_ASSERT(g_bserrno == 0); 2538 g_bs = NULL; 2539 g_blob = NULL; 2540 g_blobid = 0; 2541 2542 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2543 CU_ASSERT(super_block->clean == 1); 2544 2545 /* Load an existing blob store */ 2546 dev = init_dev(); 2547 spdk_bs_opts_init(&opts, sizeof(opts)); 2548 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2549 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2550 poll_threads(); 2551 CU_ASSERT(g_bserrno == 0); 2552 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2553 bs = g_bs; 2554 /* Compare cluster size and number to one after initialization */ 2555 CU_ASSERT(cluster_sz == bs->cluster_sz); 2556 CU_ASSERT(total_clusters == bs->total_clusters); 2557 2558 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2559 CU_ASSERT(super_block->clean == 1); 2560 CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen); 2561 2562 spdk_bs_unload(bs, bs_op_complete, NULL); 2563 poll_threads(); 2564 CU_ASSERT(g_bserrno == 0); 2565 CU_ASSERT(super_block->clean == 1); 2566 g_bs = NULL; 2567 } 2568 2569 static void 2570 bs_type(void) 2571 { 2572 struct spdk_blob_store *bs; 2573 struct spdk_bs_dev *dev; 2574 struct spdk_bs_opts opts; 2575 2576 dev = init_dev(); 2577 spdk_bs_opts_init(&opts, sizeof(opts)); 2578 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2579 2580 /* Initialize a new blob store */ 2581 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2582 poll_threads(); 2583 CU_ASSERT(g_bserrno == 0); 2584 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2585 bs = g_bs; 2586 2587 /* Unload the blob store */ 2588 spdk_bs_unload(bs, bs_op_complete, NULL); 2589 poll_threads(); 2590 CU_ASSERT(g_bserrno == 0); 2591 g_bs = NULL; 2592 g_blob = NULL; 2593 g_blobid = 0; 2594 2595 /* Load non existing blobstore type */ 2596 dev = init_dev(); 2597 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2598 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2599 poll_threads(); 2600 CU_ASSERT(g_bserrno != 0); 2601 2602 /* Load with empty blobstore type */ 2603 dev = init_dev(); 2604 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2605 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2606 poll_threads(); 2607 CU_ASSERT(g_bserrno == 0); 2608 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2609 bs = g_bs; 2610 2611 spdk_bs_unload(bs, bs_op_complete, NULL); 2612 poll_threads(); 2613 CU_ASSERT(g_bserrno == 0); 2614 g_bs = NULL; 2615 2616 /* Initialize a new blob store with empty bstype */ 2617 dev = init_dev(); 2618 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2619 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2620 poll_threads(); 2621 CU_ASSERT(g_bserrno == 0); 2622 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2623 bs = g_bs; 2624 2625 spdk_bs_unload(bs, bs_op_complete, NULL); 2626 poll_threads(); 2627 CU_ASSERT(g_bserrno == 0); 2628 g_bs = NULL; 2629 2630 /* Load non existing blobstore type */ 2631 dev = init_dev(); 2632 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING"); 2633 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2634 poll_threads(); 2635 CU_ASSERT(g_bserrno != 0); 2636 2637 /* Load with empty blobstore type */ 2638 dev = init_dev(); 2639 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2640 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2641 poll_threads(); 2642 
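/*
 * (Sketch, for reference.)  The pattern verified throughout bs_type(): a
 * consumer tags its store at init time and presents the same tag on load, so
 * it never attaches to a store created by someone else.  "MYAPP" below is only
 * an illustrative tag, not one used by this test:
 *
 *     spdk_bs_opts_init(&opts, sizeof(opts));
 *     snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "MYAPP");
 *     spdk_bs_init(dev, &opts, cb_fn, cb_arg);
 *     ...
 *     spdk_bs_load(dev, &opts, cb_fn, cb_arg);  // a mismatching tag fails to load
 *
 * An all-zero bstype, as used in the load completing below, acts as a wildcard
 * and matches any on-disk type.
 */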
CU_ASSERT(g_bserrno == 0); 2643 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2644 bs = g_bs; 2645 2646 spdk_bs_unload(bs, bs_op_complete, NULL); 2647 poll_threads(); 2648 CU_ASSERT(g_bserrno == 0); 2649 g_bs = NULL; 2650 } 2651 2652 static void 2653 bs_super_block(void) 2654 { 2655 struct spdk_blob_store *bs; 2656 struct spdk_bs_dev *dev; 2657 struct spdk_bs_super_block *super_block; 2658 struct spdk_bs_opts opts; 2659 struct spdk_bs_super_block_ver1 super_block_v1; 2660 2661 dev = init_dev(); 2662 spdk_bs_opts_init(&opts, sizeof(opts)); 2663 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2664 2665 /* Initialize a new blob store */ 2666 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2667 poll_threads(); 2668 CU_ASSERT(g_bserrno == 0); 2669 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2670 bs = g_bs; 2671 2672 /* Unload the blob store */ 2673 spdk_bs_unload(bs, bs_op_complete, NULL); 2674 poll_threads(); 2675 CU_ASSERT(g_bserrno == 0); 2676 g_bs = NULL; 2677 g_blob = NULL; 2678 g_blobid = 0; 2679 2680 /* Load an existing blob store with version newer than supported */ 2681 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 2682 super_block->version++; 2683 2684 dev = init_dev(); 2685 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2686 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2687 poll_threads(); 2688 CU_ASSERT(g_bserrno != 0); 2689 2690 /* Create a new blob store with super block version 1 */ 2691 dev = init_dev(); 2692 super_block_v1.version = 1; 2693 memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature)); 2694 super_block_v1.length = 0x1000; 2695 super_block_v1.clean = 1; 2696 super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF; 2697 super_block_v1.cluster_size = 0x100000; 2698 super_block_v1.used_page_mask_start = 0x01; 2699 super_block_v1.used_page_mask_len = 0x01; 2700 super_block_v1.used_cluster_mask_start = 0x02; 2701 super_block_v1.used_cluster_mask_len = 0x01; 2702 super_block_v1.md_start = 0x03; 2703 super_block_v1.md_len = 0x40; 2704 memset(super_block_v1.reserved, 0, 4036); 2705 super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1); 2706 memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1)); 2707 2708 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2709 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2710 poll_threads(); 2711 CU_ASSERT(g_bserrno == 0); 2712 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2713 bs = g_bs; 2714 2715 spdk_bs_unload(bs, bs_op_complete, NULL); 2716 poll_threads(); 2717 CU_ASSERT(g_bserrno == 0); 2718 g_bs = NULL; 2719 } 2720 2721 static void 2722 bs_test_recover_cluster_count(void) 2723 { 2724 struct spdk_blob_store *bs; 2725 struct spdk_bs_dev *dev; 2726 struct spdk_bs_super_block super_block; 2727 struct spdk_bs_opts opts; 2728 2729 dev = init_dev(); 2730 spdk_bs_opts_init(&opts, sizeof(opts)); 2731 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 2732 2733 super_block.version = 3; 2734 memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature)); 2735 super_block.length = 0x1000; 2736 super_block.clean = 0; 2737 super_block.super_blob = 0xFFFFFFFFFFFFFFFF; 2738 super_block.cluster_size = 4096; 2739 super_block.used_page_mask_start = 0x01; 2740 super_block.used_page_mask_len = 0x01; 2741 super_block.used_cluster_mask_start = 0x02; 2742 super_block.used_cluster_mask_len = 0x01; 2743 super_block.used_blobid_mask_start = 0x03; 2744 super_block.used_blobid_mask_len = 0x01; 2745 super_block.md_start = 0x04; 
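/*
 * Note on the hand-built super block (a sketch of the arithmetic asserted
 * below): cluster_size equals the 4096-byte metadata page size, so each
 * metadata page occupies exactly one cluster.  Because clean == 0, the load
 * goes through recovery and rebuilds the used-cluster count from the metadata
 * region, which is why the test expects
 *
 *     bs->num_free_clusters == bs->total_clusters - (md_start + md_len)
 *
 * i.e. only the super block, mask and metadata pages end up marked as used.
 */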
2746 super_block.md_len = 0x40; 2747 memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype)); 2748 super_block.size = dev->blockcnt * dev->blocklen; 2749 super_block.io_unit_size = 0x1000; 2750 memset(super_block.reserved, 0, 4000); 2751 super_block.crc = blob_md_page_calc_crc(&super_block); 2752 memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block)); 2753 2754 memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype)); 2755 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 2756 poll_threads(); 2757 CU_ASSERT(g_bserrno == 0); 2758 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2759 bs = g_bs; 2760 CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start + 2761 super_block.md_len)); 2762 2763 spdk_bs_unload(bs, bs_op_complete, NULL); 2764 poll_threads(); 2765 CU_ASSERT(g_bserrno == 0); 2766 g_bs = NULL; 2767 } 2768 2769 /* 2770 * Create a blobstore and then unload it. 2771 */ 2772 static void 2773 bs_unload(void) 2774 { 2775 struct spdk_blob_store *bs = g_bs; 2776 struct spdk_blob *blob; 2777 2778 /* Create a blob and open it. */ 2779 blob = ut_blob_create_and_open(bs, NULL); 2780 2781 /* Try to unload the blobstore; it should fail while a blob is still open */ 2782 g_bserrno = -1; 2783 spdk_bs_unload(bs, bs_op_complete, NULL); 2784 poll_threads(); 2785 CU_ASSERT(g_bserrno == -EBUSY); 2786 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2787 2788 /* Close the blob, then successfully unload the blobstore */ 2789 g_bserrno = -1; 2790 spdk_blob_close(blob, blob_op_complete, NULL); 2791 poll_threads(); 2792 CU_ASSERT(g_bserrno == 0); 2793 } 2794 2795 /* 2796 * Create a blobstore with a cluster size different from the default, and ensure it is 2797 * persisted. 2798 */ 2799 static void 2800 bs_cluster_sz(void) 2801 { 2802 struct spdk_blob_store *bs; 2803 struct spdk_bs_dev *dev; 2804 struct spdk_bs_opts opts; 2805 uint32_t cluster_sz; 2806 2807 /* Set cluster size to zero */ 2808 dev = init_dev(); 2809 spdk_bs_opts_init(&opts, sizeof(opts)); 2810 opts.cluster_sz = 0; 2811 2812 /* Initialize a new blob store */ 2813 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2814 poll_threads(); 2815 CU_ASSERT(g_bserrno == -EINVAL); 2816 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2817 2818 /* 2819 * Set the cluster size equal to the blobstore page size; 2820 * to work, the cluster size must be at least twice the blobstore page size. 2821 */ 2822 dev = init_dev(); 2823 spdk_bs_opts_init(&opts, sizeof(opts)); 2824 opts.cluster_sz = SPDK_BS_PAGE_SIZE; 2825 2826 /* Initialize a new blob store */ 2827 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2828 poll_threads(); 2829 CU_ASSERT(g_bserrno == -ENOMEM); 2830 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2831 2832 /* 2833 * Set the cluster size lower than the page size; 2834 * to work, the cluster size must be at least twice the blobstore page size.
2835 */ 2836 dev = init_dev(); 2837 spdk_bs_opts_init(&opts, sizeof(opts)); 2838 opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1; 2839 2840 /* Initialize a new blob store */ 2841 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2842 poll_threads(); 2843 CU_ASSERT(g_bserrno == -EINVAL); 2844 SPDK_CU_ASSERT_FATAL(g_bs == NULL); 2845 2846 /* Set cluster size to twice the default */ 2847 dev = init_dev(); 2848 spdk_bs_opts_init(&opts, sizeof(opts)); 2849 opts.cluster_sz *= 2; 2850 cluster_sz = opts.cluster_sz; 2851 2852 /* Initialize a new blob store */ 2853 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2854 poll_threads(); 2855 CU_ASSERT(g_bserrno == 0); 2856 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2857 bs = g_bs; 2858 2859 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2860 2861 ut_bs_reload(&bs, &opts); 2862 2863 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2864 2865 spdk_bs_unload(bs, bs_op_complete, NULL); 2866 poll_threads(); 2867 CU_ASSERT(g_bserrno == 0); 2868 g_bs = NULL; 2869 } 2870 2871 /* 2872 * Create a blobstore, reload it and ensure total usable cluster count 2873 * stays the same. 2874 */ 2875 static void 2876 bs_usable_clusters(void) 2877 { 2878 struct spdk_blob_store *bs = g_bs; 2879 struct spdk_blob *blob; 2880 uint32_t clusters; 2881 int i; 2882 2883 2884 clusters = spdk_bs_total_data_cluster_count(bs); 2885 2886 ut_bs_reload(&bs, NULL); 2887 2888 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2889 2890 /* Create and resize blobs to make sure that useable cluster count won't change */ 2891 for (i = 0; i < 4; i++) { 2892 g_bserrno = -1; 2893 g_blobid = SPDK_BLOBID_INVALID; 2894 blob = ut_blob_create_and_open(bs, NULL); 2895 2896 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 2897 poll_threads(); 2898 CU_ASSERT(g_bserrno == 0); 2899 2900 g_bserrno = -1; 2901 spdk_blob_close(blob, blob_op_complete, NULL); 2902 poll_threads(); 2903 CU_ASSERT(g_bserrno == 0); 2904 2905 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2906 } 2907 2908 /* Reload the blob store to make sure that nothing changed */ 2909 ut_bs_reload(&bs, NULL); 2910 2911 CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters); 2912 } 2913 2914 /* 2915 * Test resizing of the metadata blob. This requires creating enough blobs 2916 * so that one cluster is not enough to fit the metadata for those blobs. 2917 * To induce this condition to happen more quickly, we reduce the cluster 2918 * size to 16KB, which means only 4 4KB blob metadata pages can fit. 
2919 */ 2920 static void 2921 bs_resize_md(void) 2922 { 2923 struct spdk_blob_store *bs; 2924 const int CLUSTER_PAGE_COUNT = 4; 2925 const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4; 2926 struct spdk_bs_dev *dev; 2927 struct spdk_bs_opts opts; 2928 struct spdk_blob *blob; 2929 struct spdk_blob_opts blob_opts; 2930 uint32_t cluster_sz; 2931 spdk_blob_id blobids[NUM_BLOBS]; 2932 int i; 2933 2934 2935 dev = init_dev(); 2936 spdk_bs_opts_init(&opts, sizeof(opts)); 2937 opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096; 2938 cluster_sz = opts.cluster_sz; 2939 2940 /* Initialize a new blob store */ 2941 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 2942 poll_threads(); 2943 CU_ASSERT(g_bserrno == 0); 2944 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2945 bs = g_bs; 2946 2947 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2948 2949 ut_spdk_blob_opts_init(&blob_opts); 2950 2951 for (i = 0; i < NUM_BLOBS; i++) { 2952 g_bserrno = -1; 2953 g_blobid = SPDK_BLOBID_INVALID; 2954 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 2955 poll_threads(); 2956 CU_ASSERT(g_bserrno == 0); 2957 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 2958 blobids[i] = g_blobid; 2959 } 2960 2961 ut_bs_reload(&bs, &opts); 2962 2963 CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz); 2964 2965 for (i = 0; i < NUM_BLOBS; i++) { 2966 g_bserrno = -1; 2967 g_blob = NULL; 2968 spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL); 2969 poll_threads(); 2970 CU_ASSERT(g_bserrno == 0); 2971 CU_ASSERT(g_blob != NULL); 2972 blob = g_blob; 2973 g_bserrno = -1; 2974 spdk_blob_close(blob, blob_op_complete, NULL); 2975 poll_threads(); 2976 CU_ASSERT(g_bserrno == 0); 2977 } 2978 2979 spdk_bs_unload(bs, bs_op_complete, NULL); 2980 poll_threads(); 2981 CU_ASSERT(g_bserrno == 0); 2982 g_bs = NULL; 2983 } 2984 2985 static void 2986 bs_destroy(void) 2987 { 2988 struct spdk_blob_store *bs; 2989 struct spdk_bs_dev *dev; 2990 2991 /* Initialize a new blob store */ 2992 dev = init_dev(); 2993 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 2994 poll_threads(); 2995 CU_ASSERT(g_bserrno == 0); 2996 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 2997 bs = g_bs; 2998 2999 /* Destroy the blob store */ 3000 g_bserrno = -1; 3001 spdk_bs_destroy(bs, bs_op_complete, NULL); 3002 poll_threads(); 3003 CU_ASSERT(g_bserrno == 0); 3004 3005 /* Loading an non-existent blob store should fail. 
*/ 3006 g_bs = NULL; 3007 dev = init_dev(); 3008 3009 g_bserrno = 0; 3010 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3011 poll_threads(); 3012 CU_ASSERT(g_bserrno != 0); 3013 } 3014 3015 /* Try to hit all of the corner cases associated with serializing 3016 * a blob to disk 3017 */ 3018 static void 3019 blob_serialize_test(void) 3020 { 3021 struct spdk_bs_dev *dev; 3022 struct spdk_bs_opts opts; 3023 struct spdk_blob_store *bs; 3024 spdk_blob_id blobid[2]; 3025 struct spdk_blob *blob[2]; 3026 uint64_t i; 3027 char *value; 3028 int rc; 3029 3030 dev = init_dev(); 3031 3032 /* Initialize a new blobstore with very small clusters */ 3033 spdk_bs_opts_init(&opts, sizeof(opts)); 3034 opts.cluster_sz = dev->blocklen * 8; 3035 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 3036 poll_threads(); 3037 CU_ASSERT(g_bserrno == 0); 3038 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3039 bs = g_bs; 3040 3041 /* Create and open two blobs */ 3042 for (i = 0; i < 2; i++) { 3043 blob[i] = ut_blob_create_and_open(bs, NULL); 3044 blobid[i] = spdk_blob_get_id(blob[i]); 3045 3046 /* Set a fairly large xattr on both blobs to eat up 3047 * metadata space 3048 */ 3049 value = calloc(dev->blocklen - 64, sizeof(char)); 3050 SPDK_CU_ASSERT_FATAL(value != NULL); 3051 memset(value, i, dev->blocklen / 2); 3052 rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64); 3053 CU_ASSERT(rc == 0); 3054 free(value); 3055 } 3056 3057 /* Resize the blobs, alternating 1 cluster at a time. 3058 * This thwarts run length encoding and will cause spill 3059 * over of the extents. 3060 */ 3061 for (i = 0; i < 6; i++) { 3062 spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL); 3063 poll_threads(); 3064 CU_ASSERT(g_bserrno == 0); 3065 } 3066 3067 for (i = 0; i < 2; i++) { 3068 spdk_blob_sync_md(blob[i], blob_op_complete, NULL); 3069 poll_threads(); 3070 CU_ASSERT(g_bserrno == 0); 3071 } 3072 3073 /* Close the blobs */ 3074 for (i = 0; i < 2; i++) { 3075 spdk_blob_close(blob[i], blob_op_complete, NULL); 3076 poll_threads(); 3077 CU_ASSERT(g_bserrno == 0); 3078 } 3079 3080 ut_bs_reload(&bs, &opts); 3081 3082 for (i = 0; i < 2; i++) { 3083 blob[i] = NULL; 3084 3085 spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL); 3086 poll_threads(); 3087 CU_ASSERT(g_bserrno == 0); 3088 CU_ASSERT(g_blob != NULL); 3089 blob[i] = g_blob; 3090 3091 CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3); 3092 3093 spdk_blob_close(blob[i], blob_op_complete, NULL); 3094 poll_threads(); 3095 CU_ASSERT(g_bserrno == 0); 3096 } 3097 3098 spdk_bs_unload(bs, bs_op_complete, NULL); 3099 poll_threads(); 3100 CU_ASSERT(g_bserrno == 0); 3101 g_bs = NULL; 3102 } 3103 3104 static void 3105 blob_crc(void) 3106 { 3107 struct spdk_blob_store *bs = g_bs; 3108 struct spdk_blob *blob; 3109 spdk_blob_id blobid; 3110 uint32_t page_num; 3111 int index; 3112 struct spdk_blob_md_page *page; 3113 3114 blob = ut_blob_create_and_open(bs, NULL); 3115 blobid = spdk_blob_get_id(blob); 3116 3117 spdk_blob_close(blob, blob_op_complete, NULL); 3118 poll_threads(); 3119 CU_ASSERT(g_bserrno == 0); 3120 3121 page_num = bs_blobid_to_page(blobid); 3122 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3123 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3124 page->crc = 0; 3125 3126 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3127 poll_threads(); 3128 CU_ASSERT(g_bserrno == -EINVAL); 3129 CU_ASSERT(g_blob == NULL); 3130 g_bserrno = 0; 3131 3132 spdk_bs_delete_blob(bs, blobid, blob_op_complete, 
NULL); 3133 poll_threads(); 3134 CU_ASSERT(g_bserrno == -EINVAL); 3135 } 3136 3137 static void 3138 super_block_crc(void) 3139 { 3140 struct spdk_blob_store *bs; 3141 struct spdk_bs_dev *dev; 3142 struct spdk_bs_super_block *super_block; 3143 3144 dev = init_dev(); 3145 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 3146 poll_threads(); 3147 CU_ASSERT(g_bserrno == 0); 3148 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3149 bs = g_bs; 3150 3151 spdk_bs_unload(bs, bs_op_complete, NULL); 3152 poll_threads(); 3153 CU_ASSERT(g_bserrno == 0); 3154 g_bs = NULL; 3155 3156 super_block = (struct spdk_bs_super_block *)g_dev_buffer; 3157 super_block->crc = 0; 3158 dev = init_dev(); 3159 3160 /* Load an existing blob store */ 3161 g_bserrno = 0; 3162 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3163 poll_threads(); 3164 CU_ASSERT(g_bserrno == -EILSEQ); 3165 } 3166 3167 /* For the blob dirty shutdown test case we run the following sub-test cases: 3168 * 1 Initialize a new blob store and create 1 super blob with some xattrs, then 3169 * dirty shutdown, reload the blob store and verify the xattrs. 3170 * 2 Resize the blob from 10 clusters to 20 clusters, then dirty shutdown, 3171 * reload the blob store and verify the cluster count. 3172 * 3 Create a second blob, then dirty shutdown, reload the blob store 3173 * and verify the second blob. 3174 * 4 Delete the second blob, then dirty shutdown, reload the blob store 3175 * and verify that the second blob is invalid. 3176 * 5 Create the second blob again and also create a third blob, corrupt the 3177 * md of the second blob so that it becomes invalid, then dirty shutdown, 3178 * reload the blob store and verify that the second blob is invalid and 3179 * that the third blob is correct. 3180 */ 3181 static void 3182 blob_dirty_shutdown(void) 3183 { 3184 int rc; 3185 int index; 3186 struct spdk_blob_store *bs = g_bs; 3187 spdk_blob_id blobid1, blobid2, blobid3; 3188 struct spdk_blob *blob = g_blob; 3189 uint64_t length; 3190 uint64_t free_clusters; 3191 const void *value; 3192 size_t value_len; 3193 uint32_t page_num; 3194 struct spdk_blob_md_page *page; 3195 struct spdk_blob_opts blob_opts; 3196 3197 /* Create first blob */ 3198 blobid1 = spdk_blob_get_id(blob); 3199 3200 /* Set some xattrs */ 3201 rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1); 3202 CU_ASSERT(rc == 0); 3203 3204 length = 2345; 3205 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3206 CU_ASSERT(rc == 0); 3207 3208 /* Put an xattr that exactly fills a single page. 3209 * This results in additional pages being added to the MD: 3210 * the first holds the flags and the smaller xattrs, the second the large xattr, 3211 * and the third just the extents.
3212 */ 3213 size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) - 3214 strlen("large_xattr"); 3215 char *xattr = calloc(xattr_length, sizeof(char)); 3216 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3217 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3218 free(xattr); 3219 SPDK_CU_ASSERT_FATAL(rc == 0); 3220 3221 /* Resize the blob */ 3222 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3223 poll_threads(); 3224 CU_ASSERT(g_bserrno == 0); 3225 3226 /* Set the blob as the super blob */ 3227 spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL); 3228 poll_threads(); 3229 CU_ASSERT(g_bserrno == 0); 3230 3231 free_clusters = spdk_bs_free_cluster_count(bs); 3232 3233 spdk_blob_close(blob, blob_op_complete, NULL); 3234 poll_threads(); 3235 CU_ASSERT(g_bserrno == 0); 3236 blob = NULL; 3237 g_blob = NULL; 3238 g_blobid = SPDK_BLOBID_INVALID; 3239 3240 ut_bs_dirty_load(&bs, NULL); 3241 3242 /* Get the super blob */ 3243 spdk_bs_get_super(bs, blob_op_with_id_complete, NULL); 3244 poll_threads(); 3245 CU_ASSERT(g_bserrno == 0); 3246 CU_ASSERT(blobid1 == g_blobid); 3247 3248 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3249 poll_threads(); 3250 CU_ASSERT(g_bserrno == 0); 3251 CU_ASSERT(g_blob != NULL); 3252 blob = g_blob; 3253 3254 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3255 3256 /* Get the xattrs */ 3257 value = NULL; 3258 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3259 CU_ASSERT(rc == 0); 3260 SPDK_CU_ASSERT_FATAL(value != NULL); 3261 CU_ASSERT(*(uint64_t *)value == length); 3262 CU_ASSERT(value_len == 8); 3263 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3264 3265 /* Resize the blob */ 3266 spdk_blob_resize(blob, 20, blob_op_complete, NULL); 3267 poll_threads(); 3268 CU_ASSERT(g_bserrno == 0); 3269 3270 free_clusters = spdk_bs_free_cluster_count(bs); 3271 3272 spdk_blob_close(blob, blob_op_complete, NULL); 3273 poll_threads(); 3274 CU_ASSERT(g_bserrno == 0); 3275 blob = NULL; 3276 g_blob = NULL; 3277 g_blobid = SPDK_BLOBID_INVALID; 3278 3279 ut_bs_dirty_load(&bs, NULL); 3280 3281 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3282 poll_threads(); 3283 CU_ASSERT(g_bserrno == 0); 3284 CU_ASSERT(g_blob != NULL); 3285 blob = g_blob; 3286 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20); 3287 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3288 3289 spdk_blob_close(blob, blob_op_complete, NULL); 3290 poll_threads(); 3291 CU_ASSERT(g_bserrno == 0); 3292 blob = NULL; 3293 g_blob = NULL; 3294 g_blobid = SPDK_BLOBID_INVALID; 3295 3296 /* Create second blob */ 3297 blob = ut_blob_create_and_open(bs, NULL); 3298 blobid2 = spdk_blob_get_id(blob); 3299 3300 /* Set some xattrs */ 3301 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3302 CU_ASSERT(rc == 0); 3303 3304 length = 5432; 3305 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3306 CU_ASSERT(rc == 0); 3307 3308 /* Resize the blob */ 3309 spdk_blob_resize(blob, 10, blob_op_complete, NULL); 3310 poll_threads(); 3311 CU_ASSERT(g_bserrno == 0); 3312 3313 free_clusters = spdk_bs_free_cluster_count(bs); 3314 3315 spdk_blob_close(blob, blob_op_complete, NULL); 3316 poll_threads(); 3317 CU_ASSERT(g_bserrno == 0); 3318 blob = NULL; 3319 g_blob = NULL; 3320 g_blobid = SPDK_BLOBID_INVALID; 3321 3322 ut_bs_dirty_load(&bs, NULL); 3323 3324 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3325 poll_threads(); 3326 CU_ASSERT(g_bserrno == 0); 3327 
CU_ASSERT(g_blob != NULL); 3328 blob = g_blob; 3329 3330 /* Get the xattrs */ 3331 value = NULL; 3332 rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len); 3333 CU_ASSERT(rc == 0); 3334 SPDK_CU_ASSERT_FATAL(value != NULL); 3335 CU_ASSERT(*(uint64_t *)value == length); 3336 CU_ASSERT(value_len == 8); 3337 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10); 3338 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3339 3340 ut_blob_close_and_delete(bs, blob); 3341 3342 free_clusters = spdk_bs_free_cluster_count(bs); 3343 3344 ut_bs_dirty_load(&bs, NULL); 3345 3346 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3347 poll_threads(); 3348 CU_ASSERT(g_bserrno != 0); 3349 CU_ASSERT(g_blob == NULL); 3350 3351 spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL); 3352 poll_threads(); 3353 CU_ASSERT(g_bserrno == 0); 3354 CU_ASSERT(g_blob != NULL); 3355 blob = g_blob; 3356 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3357 spdk_blob_close(blob, blob_op_complete, NULL); 3358 poll_threads(); 3359 CU_ASSERT(g_bserrno == 0); 3360 3361 ut_bs_reload(&bs, NULL); 3362 3363 /* Create second blob */ 3364 ut_spdk_blob_opts_init(&blob_opts); 3365 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3366 poll_threads(); 3367 CU_ASSERT(g_bserrno == 0); 3368 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3369 blobid2 = g_blobid; 3370 3371 /* Create third blob */ 3372 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3373 poll_threads(); 3374 CU_ASSERT(g_bserrno == 0); 3375 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3376 blobid3 = g_blobid; 3377 3378 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3379 poll_threads(); 3380 CU_ASSERT(g_bserrno == 0); 3381 CU_ASSERT(g_blob != NULL); 3382 blob = g_blob; 3383 3384 /* Set some xattrs for second blob */ 3385 rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1); 3386 CU_ASSERT(rc == 0); 3387 3388 length = 5432; 3389 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3390 CU_ASSERT(rc == 0); 3391 3392 spdk_blob_close(blob, blob_op_complete, NULL); 3393 poll_threads(); 3394 CU_ASSERT(g_bserrno == 0); 3395 blob = NULL; 3396 g_blob = NULL; 3397 g_blobid = SPDK_BLOBID_INVALID; 3398 3399 spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL); 3400 poll_threads(); 3401 CU_ASSERT(g_bserrno == 0); 3402 CU_ASSERT(g_blob != NULL); 3403 blob = g_blob; 3404 3405 /* Set some xattrs for third blob */ 3406 rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1); 3407 CU_ASSERT(rc == 0); 3408 3409 length = 5432; 3410 rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length)); 3411 CU_ASSERT(rc == 0); 3412 3413 spdk_blob_close(blob, blob_op_complete, NULL); 3414 poll_threads(); 3415 CU_ASSERT(g_bserrno == 0); 3416 blob = NULL; 3417 g_blob = NULL; 3418 g_blobid = SPDK_BLOBID_INVALID; 3419 3420 /* Mark second blob as invalid */ 3421 page_num = bs_blobid_to_page(blobid2); 3422 3423 index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num); 3424 page = (struct spdk_blob_md_page *)&g_dev_buffer[index]; 3425 page->sequence_num = 1; 3426 page->crc = blob_md_page_calc_crc(page); 3427 3428 free_clusters = spdk_bs_free_cluster_count(bs); 3429 3430 ut_bs_dirty_load(&bs, NULL); 3431 3432 spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL); 3433 poll_threads(); 3434 CU_ASSERT(g_bserrno != 0); 3435 CU_ASSERT(g_blob == NULL); 3436 3437 spdk_bs_open_blob(bs, blobid3, 
blob_op_with_handle_complete, NULL); 3438 poll_threads(); 3439 CU_ASSERT(g_bserrno == 0); 3440 CU_ASSERT(g_blob != NULL); 3441 blob = g_blob; 3442 3443 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3444 } 3445 3446 static void 3447 blob_flags(void) 3448 { 3449 struct spdk_blob_store *bs = g_bs; 3450 spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro; 3451 struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro; 3452 struct spdk_blob_opts blob_opts; 3453 int rc; 3454 3455 /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */ 3456 blob_invalid = ut_blob_create_and_open(bs, NULL); 3457 blobid_invalid = spdk_blob_get_id(blob_invalid); 3458 3459 blob_data_ro = ut_blob_create_and_open(bs, NULL); 3460 blobid_data_ro = spdk_blob_get_id(blob_data_ro); 3461 3462 ut_spdk_blob_opts_init(&blob_opts); 3463 blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES; 3464 blob_md_ro = ut_blob_create_and_open(bs, &blob_opts); 3465 blobid_md_ro = spdk_blob_get_id(blob_md_ro); 3466 CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES); 3467 3468 /* Change the size of blob_data_ro to check if flags are serialized 3469 * when blob has non zero number of extents */ 3470 spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL); 3471 poll_threads(); 3472 CU_ASSERT(g_bserrno == 0); 3473 3474 /* Set the xattr to check if flags are serialized 3475 * when blob has non zero number of xattrs */ 3476 rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1); 3477 CU_ASSERT(rc == 0); 3478 3479 blob_invalid->invalid_flags = (1ULL << 63); 3480 blob_invalid->state = SPDK_BLOB_STATE_DIRTY; 3481 blob_data_ro->data_ro_flags = (1ULL << 62); 3482 blob_data_ro->state = SPDK_BLOB_STATE_DIRTY; 3483 blob_md_ro->md_ro_flags = (1ULL << 61); 3484 blob_md_ro->state = SPDK_BLOB_STATE_DIRTY; 3485 3486 g_bserrno = -1; 3487 spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL); 3488 poll_threads(); 3489 CU_ASSERT(g_bserrno == 0); 3490 g_bserrno = -1; 3491 spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL); 3492 poll_threads(); 3493 CU_ASSERT(g_bserrno == 0); 3494 g_bserrno = -1; 3495 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3496 poll_threads(); 3497 CU_ASSERT(g_bserrno == 0); 3498 3499 g_bserrno = -1; 3500 spdk_blob_close(blob_invalid, blob_op_complete, NULL); 3501 poll_threads(); 3502 CU_ASSERT(g_bserrno == 0); 3503 blob_invalid = NULL; 3504 g_bserrno = -1; 3505 spdk_blob_close(blob_data_ro, blob_op_complete, NULL); 3506 poll_threads(); 3507 CU_ASSERT(g_bserrno == 0); 3508 blob_data_ro = NULL; 3509 g_bserrno = -1; 3510 spdk_blob_close(blob_md_ro, blob_op_complete, NULL); 3511 poll_threads(); 3512 CU_ASSERT(g_bserrno == 0); 3513 blob_md_ro = NULL; 3514 3515 g_blob = NULL; 3516 g_blobid = SPDK_BLOBID_INVALID; 3517 3518 ut_bs_reload(&bs, NULL); 3519 3520 g_blob = NULL; 3521 g_bserrno = 0; 3522 spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL); 3523 poll_threads(); 3524 CU_ASSERT(g_bserrno != 0); 3525 CU_ASSERT(g_blob == NULL); 3526 3527 g_blob = NULL; 3528 g_bserrno = -1; 3529 spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL); 3530 poll_threads(); 3531 CU_ASSERT(g_bserrno == 0); 3532 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3533 blob_data_ro = g_blob; 3534 /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. 
*/ 3535 CU_ASSERT(blob_data_ro->data_ro == true); 3536 CU_ASSERT(blob_data_ro->md_ro == true); 3537 CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10); 3538 3539 g_blob = NULL; 3540 g_bserrno = -1; 3541 spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL); 3542 poll_threads(); 3543 CU_ASSERT(g_bserrno == 0); 3544 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3545 blob_md_ro = g_blob; 3546 CU_ASSERT(blob_md_ro->data_ro == false); 3547 CU_ASSERT(blob_md_ro->md_ro == true); 3548 3549 g_bserrno = -1; 3550 spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL); 3551 poll_threads(); 3552 CU_ASSERT(g_bserrno == 0); 3553 3554 ut_blob_close_and_delete(bs, blob_data_ro); 3555 ut_blob_close_and_delete(bs, blob_md_ro); 3556 } 3557 3558 static void 3559 bs_version(void) 3560 { 3561 struct spdk_bs_super_block *super; 3562 struct spdk_blob_store *bs = g_bs; 3563 struct spdk_bs_dev *dev; 3564 struct spdk_blob *blob; 3565 struct spdk_blob_opts blob_opts; 3566 spdk_blob_id blobid; 3567 3568 /* Unload the blob store */ 3569 spdk_bs_unload(bs, bs_op_complete, NULL); 3570 poll_threads(); 3571 CU_ASSERT(g_bserrno == 0); 3572 g_bs = NULL; 3573 3574 /* 3575 * Change the bs version on disk. This will allow us to 3576 * test that the version does not get modified automatically 3577 * when loading and unloading the blobstore. 3578 */ 3579 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 3580 CU_ASSERT(super->version == SPDK_BS_VERSION); 3581 CU_ASSERT(super->clean == 1); 3582 super->version = 2; 3583 /* 3584 * Version 2 metadata does not have a used blobid mask, so clear 3585 * those fields in the super block and zero the corresponding 3586 * region on "disk". We will use this to ensure blob IDs are 3587 * correctly reconstructed. 3588 */ 3589 memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0, 3590 super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE); 3591 super->used_blobid_mask_start = 0; 3592 super->used_blobid_mask_len = 0; 3593 super->crc = blob_md_page_calc_crc(super); 3594 3595 /* Load an existing blob store */ 3596 dev = init_dev(); 3597 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3598 poll_threads(); 3599 CU_ASSERT(g_bserrno == 0); 3600 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3601 CU_ASSERT(super->clean == 1); 3602 bs = g_bs; 3603 3604 /* 3605 * Create a blob - just to make sure that unloading the blobstore 3606 * results in writing out the super block (since metadata pages 3607 * were allocated).
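 * The asserts after that unload then confirm the version was preserved:
 * super->version must still be 2 and the zeroed used_blobid_mask_start/len
 * fields must still be 0.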
3608 */ 3609 ut_spdk_blob_opts_init(&blob_opts); 3610 spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL); 3611 poll_threads(); 3612 CU_ASSERT(g_bserrno == 0); 3613 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3614 blobid = g_blobid; 3615 3616 /* Unload the blob store */ 3617 spdk_bs_unload(bs, bs_op_complete, NULL); 3618 poll_threads(); 3619 CU_ASSERT(g_bserrno == 0); 3620 g_bs = NULL; 3621 CU_ASSERT(super->version == 2); 3622 CU_ASSERT(super->used_blobid_mask_start == 0); 3623 CU_ASSERT(super->used_blobid_mask_len == 0); 3624 3625 dev = init_dev(); 3626 spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL); 3627 poll_threads(); 3628 CU_ASSERT(g_bserrno == 0); 3629 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 3630 bs = g_bs; 3631 3632 g_blob = NULL; 3633 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3634 poll_threads(); 3635 CU_ASSERT(g_bserrno == 0); 3636 CU_ASSERT(g_blob != NULL); 3637 blob = g_blob; 3638 3639 ut_blob_close_and_delete(bs, blob); 3640 3641 CU_ASSERT(super->version == 2); 3642 CU_ASSERT(super->used_blobid_mask_start == 0); 3643 CU_ASSERT(super->used_blobid_mask_len == 0); 3644 } 3645 3646 static void 3647 blob_set_xattrs_test(void) 3648 { 3649 struct spdk_blob_store *bs = g_bs; 3650 struct spdk_blob *blob; 3651 struct spdk_blob_opts opts; 3652 const void *value; 3653 size_t value_len; 3654 char *xattr; 3655 size_t xattr_length; 3656 int rc; 3657 3658 /* Create blob with extra attributes */ 3659 ut_spdk_blob_opts_init(&opts); 3660 3661 opts.xattrs.names = g_xattr_names; 3662 opts.xattrs.get_value = _get_xattr_value; 3663 opts.xattrs.count = 3; 3664 opts.xattrs.ctx = &g_ctx; 3665 3666 blob = ut_blob_create_and_open(bs, &opts); 3667 3668 /* Get the xattrs */ 3669 value = NULL; 3670 3671 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len); 3672 CU_ASSERT(rc == 0); 3673 SPDK_CU_ASSERT_FATAL(value != NULL); 3674 CU_ASSERT(value_len == strlen(g_xattr_values[0])); 3675 CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len); 3676 3677 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len); 3678 CU_ASSERT(rc == 0); 3679 SPDK_CU_ASSERT_FATAL(value != NULL); 3680 CU_ASSERT(value_len == strlen(g_xattr_values[1])); 3681 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len); 3682 3683 rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len); 3684 CU_ASSERT(rc == 0); 3685 SPDK_CU_ASSERT_FATAL(value != NULL); 3686 CU_ASSERT(value_len == strlen(g_xattr_values[2])); 3687 CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len); 3688 3689 /* Try to get non existing attribute */ 3690 3691 rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len); 3692 CU_ASSERT(rc == -ENOENT); 3693 3694 /* Try xattr exceeding maximum length of descriptor in single page */ 3695 xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) - 3696 strlen("large_xattr") + 1; 3697 xattr = calloc(xattr_length, sizeof(char)); 3698 SPDK_CU_ASSERT_FATAL(xattr != NULL); 3699 rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length); 3700 free(xattr); 3701 SPDK_CU_ASSERT_FATAL(rc == -ENOMEM); 3702 3703 spdk_blob_close(blob, blob_op_complete, NULL); 3704 poll_threads(); 3705 CU_ASSERT(g_bserrno == 0); 3706 blob = NULL; 3707 g_blob = NULL; 3708 g_blobid = SPDK_BLOBID_INVALID; 3709 3710 /* NULL callback */ 3711 ut_spdk_blob_opts_init(&opts); 3712 opts.xattrs.names = g_xattr_names; 3713 opts.xattrs.get_value = NULL; 3714 opts.xattrs.count = 1; 3715 
opts.xattrs.ctx = &g_ctx; 3716 3717 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3718 poll_threads(); 3719 CU_ASSERT(g_bserrno == -EINVAL); 3720 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 3721 3722 /* NULL values */ 3723 ut_spdk_blob_opts_init(&opts); 3724 opts.xattrs.names = g_xattr_names; 3725 opts.xattrs.get_value = _get_xattr_value_null; 3726 opts.xattrs.count = 1; 3727 opts.xattrs.ctx = NULL; 3728 3729 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 3730 poll_threads(); 3731 CU_ASSERT(g_bserrno == -EINVAL); 3732 } 3733 3734 static void 3735 blob_thin_prov_alloc(void) 3736 { 3737 struct spdk_blob_store *bs = g_bs; 3738 struct spdk_blob *blob; 3739 struct spdk_blob_opts opts; 3740 spdk_blob_id blobid; 3741 uint64_t free_clusters; 3742 3743 free_clusters = spdk_bs_free_cluster_count(bs); 3744 3745 /* Set blob as thin provisioned */ 3746 ut_spdk_blob_opts_init(&opts); 3747 opts.thin_provision = true; 3748 3749 blob = ut_blob_create_and_open(bs, &opts); 3750 blobid = spdk_blob_get_id(blob); 3751 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3752 3753 CU_ASSERT(blob->active.num_clusters == 0); 3754 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0); 3755 3756 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3757 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3758 poll_threads(); 3759 CU_ASSERT(g_bserrno == 0); 3760 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3761 CU_ASSERT(blob->active.num_clusters == 5); 3762 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 3763 3764 /* Grow it to 1TB - still unallocated */ 3765 spdk_blob_resize(blob, 262144, blob_op_complete, NULL); 3766 poll_threads(); 3767 CU_ASSERT(g_bserrno == 0); 3768 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3769 CU_ASSERT(blob->active.num_clusters == 262144); 3770 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3771 3772 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3773 poll_threads(); 3774 CU_ASSERT(g_bserrno == 0); 3775 /* Sync must not change anything */ 3776 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3777 CU_ASSERT(blob->active.num_clusters == 262144); 3778 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144); 3779 /* Since clusters are not allocated, 3780 * number of metadata pages is expected to be minimal. 
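 * With nothing allocated there are no extents to record, so even at 262144
 * clusters of logical size the blob's metadata should still fit in a single
 * page - which is what the num_pages assert below verifies.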
3781 */ 3782 CU_ASSERT(blob->active.num_pages == 1); 3783 3784 /* Shrink the blob to 3 clusters - still unallocated */ 3785 spdk_blob_resize(blob, 3, blob_op_complete, NULL); 3786 poll_threads(); 3787 CU_ASSERT(g_bserrno == 0); 3788 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3789 CU_ASSERT(blob->active.num_clusters == 3); 3790 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3791 3792 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3793 poll_threads(); 3794 CU_ASSERT(g_bserrno == 0); 3795 /* Sync must not change anything */ 3796 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3797 CU_ASSERT(blob->active.num_clusters == 3); 3798 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3); 3799 3800 spdk_blob_close(blob, blob_op_complete, NULL); 3801 poll_threads(); 3802 CU_ASSERT(g_bserrno == 0); 3803 3804 ut_bs_reload(&bs, NULL); 3805 3806 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3807 poll_threads(); 3808 CU_ASSERT(g_bserrno == 0); 3809 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3810 blob = g_blob; 3811 3812 /* Check that clusters allocation and size is still the same */ 3813 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3814 CU_ASSERT(blob->active.num_clusters == 3); 3815 3816 ut_blob_close_and_delete(bs, blob); 3817 } 3818 3819 static void 3820 blob_insert_cluster_msg_test(void) 3821 { 3822 struct spdk_blob_store *bs = g_bs; 3823 struct spdk_blob *blob; 3824 struct spdk_blob_opts opts; 3825 spdk_blob_id blobid; 3826 uint64_t free_clusters; 3827 uint64_t new_cluster = 0; 3828 uint32_t cluster_num = 3; 3829 uint32_t extent_page = 0; 3830 3831 free_clusters = spdk_bs_free_cluster_count(bs); 3832 3833 /* Set blob as thin provisioned */ 3834 ut_spdk_blob_opts_init(&opts); 3835 opts.thin_provision = true; 3836 opts.num_clusters = 4; 3837 3838 blob = ut_blob_create_and_open(bs, &opts); 3839 blobid = spdk_blob_get_id(blob); 3840 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3841 3842 CU_ASSERT(blob->active.num_clusters == 4); 3843 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4); 3844 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3845 3846 /* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread. 3847 * This is to simulate behaviour when cluster is allocated after blob creation. 3848 * Such as _spdk_bs_allocate_and_copy_cluster(). 
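 * bs_allocate_cluster() below claims a free cluster for the blob but does not
 * touch the blob's cluster map yet (clusters[cluster_num] stays 0); it is the
 * follow-up blob_insert_cluster_on_md_thread() call that publishes the new
 * cluster into the blob's metadata on the metadata thread.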
*/ 3849 bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false); 3850 CU_ASSERT(blob->active.clusters[cluster_num] == 0); 3851 3852 blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, 3853 blob_op_complete, NULL); 3854 poll_threads(); 3855 3856 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3857 3858 spdk_blob_close(blob, blob_op_complete, NULL); 3859 poll_threads(); 3860 CU_ASSERT(g_bserrno == 0); 3861 3862 ut_bs_reload(&bs, NULL); 3863 3864 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 3865 poll_threads(); 3866 CU_ASSERT(g_bserrno == 0); 3867 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 3868 blob = g_blob; 3869 3870 CU_ASSERT(blob->active.clusters[cluster_num] != 0); 3871 3872 ut_blob_close_and_delete(bs, blob); 3873 } 3874 3875 static void 3876 blob_thin_prov_rw(void) 3877 { 3878 static const uint8_t zero[10 * 4096] = { 0 }; 3879 struct spdk_blob_store *bs = g_bs; 3880 struct spdk_blob *blob, *blob_id0; 3881 struct spdk_io_channel *channel, *channel_thread1; 3882 struct spdk_blob_opts opts; 3883 uint64_t free_clusters; 3884 uint64_t page_size; 3885 uint8_t payload_read[10 * 4096]; 3886 uint8_t payload_write[10 * 4096]; 3887 uint64_t write_bytes; 3888 uint64_t read_bytes; 3889 3890 free_clusters = spdk_bs_free_cluster_count(bs); 3891 page_size = spdk_bs_get_page_size(bs); 3892 3893 channel = spdk_bs_alloc_io_channel(bs); 3894 CU_ASSERT(channel != NULL); 3895 3896 ut_spdk_blob_opts_init(&opts); 3897 opts.thin_provision = true; 3898 3899 /* Create and delete blob at md page 0, so that next md page allocation 3900 * for extent will use that. */ 3901 blob_id0 = ut_blob_create_and_open(bs, &opts); 3902 blob = ut_blob_create_and_open(bs, &opts); 3903 ut_blob_close_and_delete(bs, blob_id0); 3904 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3905 3906 CU_ASSERT(blob->active.num_clusters == 0); 3907 3908 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 3909 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 3910 poll_threads(); 3911 CU_ASSERT(g_bserrno == 0); 3912 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3913 CU_ASSERT(blob->active.num_clusters == 5); 3914 3915 spdk_blob_sync_md(blob, blob_op_complete, NULL); 3916 poll_threads(); 3917 CU_ASSERT(g_bserrno == 0); 3918 /* Sync must not change anything */ 3919 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3920 CU_ASSERT(blob->active.num_clusters == 5); 3921 3922 /* Payload should be all zeros from unallocated clusters */ 3923 memset(payload_read, 0xFF, sizeof(payload_read)); 3924 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3925 poll_threads(); 3926 CU_ASSERT(g_bserrno == 0); 3927 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 3928 3929 write_bytes = g_dev_write_bytes; 3930 read_bytes = g_dev_read_bytes; 3931 3932 /* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */ 3933 set_thread(1); 3934 channel_thread1 = spdk_bs_alloc_io_channel(bs); 3935 CU_ASSERT(channel_thread1 != NULL); 3936 memset(payload_write, 0xE5, sizeof(payload_write)); 3937 spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL); 3938 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3939 /* Perform write on thread 0. That will try to allocate cluster, 3940 * but fail due to another thread issuing the cluster allocation first. 
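 * Both writes therefore claim a cluster up front (free count drops by 2 before
 * polling), but once the messages are processed on the md thread the losing
 * allocation should be released again, leaving only one cluster allocated
 * (free count back at free_clusters - 1 below).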
*/ 3941 set_thread(0); 3942 memset(payload_write, 0xE5, sizeof(payload_write)); 3943 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 3944 CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs)); 3945 poll_threads(); 3946 CU_ASSERT(g_bserrno == 0); 3947 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 3948 /* For thin-provisioned blob we need to write 20 pages plus one page metadata and 3949 * read 0 bytes */ 3950 if (g_use_extent_table) { 3951 /* Add one more page for EXTENT_PAGE write */ 3952 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22); 3953 } else { 3954 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21); 3955 } 3956 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 3957 3958 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 3959 poll_threads(); 3960 CU_ASSERT(g_bserrno == 0); 3961 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 3962 3963 ut_blob_close_and_delete(bs, blob); 3964 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 3965 3966 set_thread(1); 3967 spdk_bs_free_io_channel(channel_thread1); 3968 set_thread(0); 3969 spdk_bs_free_io_channel(channel); 3970 poll_threads(); 3971 g_blob = NULL; 3972 g_blobid = 0; 3973 } 3974 3975 static void 3976 blob_thin_prov_write_count_io(void) 3977 { 3978 struct spdk_blob_store *bs; 3979 struct spdk_blob *blob; 3980 struct spdk_io_channel *ch; 3981 struct spdk_bs_dev *dev; 3982 struct spdk_bs_opts bs_opts; 3983 struct spdk_blob_opts opts; 3984 uint64_t free_clusters; 3985 uint64_t page_size; 3986 uint8_t payload_write[4096]; 3987 uint64_t write_bytes; 3988 uint64_t read_bytes; 3989 const uint32_t CLUSTER_SZ = 16384; 3990 uint32_t pages_per_cluster; 3991 uint32_t pages_per_extent_page; 3992 uint32_t i; 3993 3994 /* Use a very small cluster size for this test. This ensures we need multiple 3995 * extent pages to hold all of the clusters even for relatively small blobs like 3996 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB 3997 * buffers). 3998 */ 3999 dev = init_dev(); 4000 spdk_bs_opts_init(&bs_opts, sizeof(bs_opts)); 4001 bs_opts.cluster_sz = CLUSTER_SZ; 4002 4003 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4004 poll_threads(); 4005 CU_ASSERT(g_bserrno == 0); 4006 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4007 bs = g_bs; 4008 4009 free_clusters = spdk_bs_free_cluster_count(bs); 4010 page_size = spdk_bs_get_page_size(bs); 4011 pages_per_cluster = CLUSTER_SZ / page_size; 4012 pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster; 4013 4014 ch = spdk_bs_alloc_io_channel(bs); 4015 SPDK_CU_ASSERT_FATAL(ch != NULL); 4016 4017 ut_spdk_blob_opts_init(&opts); 4018 opts.thin_provision = true; 4019 4020 blob = ut_blob_create_and_open(bs, &opts); 4021 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4022 4023 /* Resize the blob so that it will require 8 extent pages to hold all of 4024 * the clusters. 
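 * (SPDK_EXTENTS_PER_EP clusters are covered by one extent page, so a size of
 * SPDK_EXTENTS_PER_EP * 8 clusters spans exactly 8 extent pages; the loop
 * below then writes to the first and second cluster covered by each of those
 * extent pages in turn.)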
4025 */ 4026 g_bserrno = -1; 4027 spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL); 4028 poll_threads(); 4029 CU_ASSERT(g_bserrno == 0); 4030 4031 g_bserrno = -1; 4032 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4033 poll_threads(); 4034 CU_ASSERT(g_bserrno == 0); 4035 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4036 CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8); 4037 4038 memset(payload_write, 0, sizeof(payload_write)); 4039 for (i = 0; i < 8; i++) { 4040 write_bytes = g_dev_write_bytes; 4041 read_bytes = g_dev_read_bytes; 4042 4043 g_bserrno = -1; 4044 spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL); 4045 poll_threads(); 4046 CU_ASSERT(g_bserrno == 0); 4047 CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs)); 4048 4049 CU_ASSERT(g_dev_read_bytes == read_bytes); 4050 if (!g_use_extent_table) { 4051 /* For legacy metadata, we should have written two pages - one for the 4052 * write I/O itself, another for the blob's primary metadata. 4053 */ 4054 CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2); 4055 } else { 4056 /* For extent table metadata, we should have written three pages - one 4057 * for the write I/O, one for the extent page, one for the blob's primary 4058 * metadata. 4059 */ 4060 CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3); 4061 } 4062 4063 /* The write should have synced the metadata already. Do another sync here 4064 * just to confirm. 4065 */ 4066 write_bytes = g_dev_write_bytes; 4067 read_bytes = g_dev_read_bytes; 4068 4069 g_bserrno = -1; 4070 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4071 poll_threads(); 4072 CU_ASSERT(g_bserrno == 0); 4073 CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs)); 4074 4075 CU_ASSERT(g_dev_read_bytes == read_bytes); 4076 CU_ASSERT(g_dev_write_bytes == write_bytes); 4077 4078 /* Now write to another unallocated cluster that is part of the same extent page. */ 4079 g_bserrno = -1; 4080 spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster, 4081 1, blob_op_complete, NULL); 4082 poll_threads(); 4083 CU_ASSERT(g_bserrno == 0); 4084 CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs)); 4085 4086 CU_ASSERT(g_dev_read_bytes == read_bytes); 4087 /* 4088 * For legacy metadata, we should have written the I/O and the primary metadata page. 4089 * For extent table metadata, we should have written the I/O and the extent metadata page. 
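 * Either way the expected delta checked below is exactly 2 pages: the payload
 * page plus a single metadata page (the blob's primary md page with legacy
 * metadata, or just the already-allocated extent page with the extent table).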
4090 */ 4091 CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2); 4092 } 4093 4094 ut_blob_close_and_delete(bs, blob); 4095 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4096 4097 spdk_bs_free_io_channel(ch); 4098 poll_threads(); 4099 g_blob = NULL; 4100 g_blobid = 0; 4101 4102 spdk_bs_unload(bs, bs_op_complete, NULL); 4103 poll_threads(); 4104 CU_ASSERT(g_bserrno == 0); 4105 g_bs = NULL; 4106 } 4107 4108 static void 4109 blob_thin_prov_rle(void) 4110 { 4111 static const uint8_t zero[10 * 4096] = { 0 }; 4112 struct spdk_blob_store *bs = g_bs; 4113 struct spdk_blob *blob; 4114 struct spdk_io_channel *channel; 4115 struct spdk_blob_opts opts; 4116 spdk_blob_id blobid; 4117 uint64_t free_clusters; 4118 uint64_t page_size; 4119 uint8_t payload_read[10 * 4096]; 4120 uint8_t payload_write[10 * 4096]; 4121 uint64_t write_bytes; 4122 uint64_t read_bytes; 4123 uint64_t io_unit; 4124 4125 free_clusters = spdk_bs_free_cluster_count(bs); 4126 page_size = spdk_bs_get_page_size(bs); 4127 4128 ut_spdk_blob_opts_init(&opts); 4129 opts.thin_provision = true; 4130 opts.num_clusters = 5; 4131 4132 blob = ut_blob_create_and_open(bs, &opts); 4133 blobid = spdk_blob_get_id(blob); 4134 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4135 4136 channel = spdk_bs_alloc_io_channel(bs); 4137 CU_ASSERT(channel != NULL); 4138 4139 /* Target specifically second cluster in a blob as first allocation */ 4140 io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs); 4141 4142 /* Payload should be all zeros from unallocated clusters */ 4143 memset(payload_read, 0xFF, sizeof(payload_read)); 4144 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 4145 poll_threads(); 4146 CU_ASSERT(g_bserrno == 0); 4147 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4148 4149 write_bytes = g_dev_write_bytes; 4150 read_bytes = g_dev_read_bytes; 4151 4152 /* Issue write to second cluster in a blob */ 4153 memset(payload_write, 0xE5, sizeof(payload_write)); 4154 spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL); 4155 poll_threads(); 4156 CU_ASSERT(g_bserrno == 0); 4157 CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs)); 4158 /* For thin-provisioned blob we need to write 10 pages plus one page metadata and 4159 * read 0 bytes */ 4160 if (g_use_extent_table) { 4161 /* Add one more page for EXTENT_PAGE write */ 4162 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12); 4163 } else { 4164 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11); 4165 } 4166 CU_ASSERT(g_dev_read_bytes - read_bytes == 0); 4167 4168 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 4169 poll_threads(); 4170 CU_ASSERT(g_bserrno == 0); 4171 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4172 4173 spdk_bs_free_io_channel(channel); 4174 poll_threads(); 4175 4176 spdk_blob_close(blob, blob_op_complete, NULL); 4177 poll_threads(); 4178 CU_ASSERT(g_bserrno == 0); 4179 4180 ut_bs_reload(&bs, NULL); 4181 4182 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4183 poll_threads(); 4184 CU_ASSERT(g_bserrno == 0); 4185 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4186 blob = g_blob; 4187 4188 channel = spdk_bs_alloc_io_channel(bs); 4189 CU_ASSERT(channel != NULL); 4190 4191 /* Read second cluster after blob reload to confirm data written */ 4192 spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL); 4193 poll_threads(); 4194 CU_ASSERT(g_bserrno == 0); 
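/* Only the middle (second) cluster of the five was ever written, so the blob's
 * cluster map has unallocated clusters on both sides of the single allocated
 * one; matching data here shows that this sparse layout survived the reload.
 */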
4195 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4196 4197 spdk_bs_free_io_channel(channel); 4198 poll_threads(); 4199 4200 ut_blob_close_and_delete(bs, blob); 4201 } 4202 4203 static void 4204 blob_thin_prov_rw_iov(void) 4205 { 4206 static const uint8_t zero[10 * 4096] = { 0 }; 4207 struct spdk_blob_store *bs = g_bs; 4208 struct spdk_blob *blob; 4209 struct spdk_io_channel *channel; 4210 struct spdk_blob_opts opts; 4211 uint64_t free_clusters; 4212 uint8_t payload_read[10 * 4096]; 4213 uint8_t payload_write[10 * 4096]; 4214 struct iovec iov_read[3]; 4215 struct iovec iov_write[3]; 4216 4217 free_clusters = spdk_bs_free_cluster_count(bs); 4218 4219 channel = spdk_bs_alloc_io_channel(bs); 4220 CU_ASSERT(channel != NULL); 4221 4222 ut_spdk_blob_opts_init(&opts); 4223 opts.thin_provision = true; 4224 4225 blob = ut_blob_create_and_open(bs, &opts); 4226 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4227 4228 CU_ASSERT(blob->active.num_clusters == 0); 4229 4230 /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */ 4231 spdk_blob_resize(blob, 5, blob_op_complete, NULL); 4232 poll_threads(); 4233 CU_ASSERT(g_bserrno == 0); 4234 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4235 CU_ASSERT(blob->active.num_clusters == 5); 4236 4237 spdk_blob_sync_md(blob, blob_op_complete, NULL); 4238 poll_threads(); 4239 CU_ASSERT(g_bserrno == 0); 4240 /* Sync must not change anything */ 4241 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4242 CU_ASSERT(blob->active.num_clusters == 5); 4243 4244 /* Payload should be all zeros from unallocated clusters */ 4245 memset(payload_read, 0xAA, sizeof(payload_read)); 4246 iov_read[0].iov_base = payload_read; 4247 iov_read[0].iov_len = 3 * 4096; 4248 iov_read[1].iov_base = payload_read + 3 * 4096; 4249 iov_read[1].iov_len = 4 * 4096; 4250 iov_read[2].iov_base = payload_read + 7 * 4096; 4251 iov_read[2].iov_len = 3 * 4096; 4252 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4253 poll_threads(); 4254 CU_ASSERT(g_bserrno == 0); 4255 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4256 4257 memset(payload_write, 0xE5, sizeof(payload_write)); 4258 iov_write[0].iov_base = payload_write; 4259 iov_write[0].iov_len = 1 * 4096; 4260 iov_write[1].iov_base = payload_write + 1 * 4096; 4261 iov_write[1].iov_len = 5 * 4096; 4262 iov_write[2].iov_base = payload_write + 6 * 4096; 4263 iov_write[2].iov_len = 4 * 4096; 4264 4265 spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4266 poll_threads(); 4267 CU_ASSERT(g_bserrno == 0); 4268 4269 memset(payload_read, 0xAA, sizeof(payload_read)); 4270 iov_read[0].iov_base = payload_read; 4271 iov_read[0].iov_len = 3 * 4096; 4272 iov_read[1].iov_base = payload_read + 3 * 4096; 4273 iov_read[1].iov_len = 4 * 4096; 4274 iov_read[2].iov_base = payload_read + 7 * 4096; 4275 iov_read[2].iov_len = 3 * 4096; 4276 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4277 poll_threads(); 4278 CU_ASSERT(g_bserrno == 0); 4279 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4280 4281 spdk_bs_free_io_channel(channel); 4282 poll_threads(); 4283 4284 ut_blob_close_and_delete(bs, blob); 4285 } 4286 4287 struct iter_ctx { 4288 int current_iter; 4289 spdk_blob_id blobid[4]; 4290 }; 4291 4292 static void 4293 test_iter(void *arg, struct spdk_blob *blob, int bserrno) 4294 { 4295 struct iter_ctx *iter_ctx = arg; 4296 spdk_blob_id blobid; 4297 4298 CU_ASSERT(bserrno == 
0); 4299 blobid = spdk_blob_get_id(blob); 4300 CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]); 4301 } 4302 4303 static void 4304 bs_load_iter_test(void) 4305 { 4306 struct spdk_blob_store *bs; 4307 struct spdk_bs_dev *dev; 4308 struct iter_ctx iter_ctx = { 0 }; 4309 struct spdk_blob *blob; 4310 int i, rc; 4311 struct spdk_bs_opts opts; 4312 4313 dev = init_dev(); 4314 spdk_bs_opts_init(&opts, sizeof(opts)); 4315 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4316 4317 /* Initialize a new blob store */ 4318 spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL); 4319 poll_threads(); 4320 CU_ASSERT(g_bserrno == 0); 4321 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4322 bs = g_bs; 4323 4324 for (i = 0; i < 4; i++) { 4325 blob = ut_blob_create_and_open(bs, NULL); 4326 iter_ctx.blobid[i] = spdk_blob_get_id(blob); 4327 4328 /* Just save the blobid as an xattr for testing purposes. */ 4329 rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id)); 4330 CU_ASSERT(rc == 0); 4331 4332 /* Resize the blob */ 4333 spdk_blob_resize(blob, i, blob_op_complete, NULL); 4334 poll_threads(); 4335 CU_ASSERT(g_bserrno == 0); 4336 4337 spdk_blob_close(blob, blob_op_complete, NULL); 4338 poll_threads(); 4339 CU_ASSERT(g_bserrno == 0); 4340 } 4341 4342 g_bserrno = -1; 4343 spdk_bs_unload(bs, bs_op_complete, NULL); 4344 poll_threads(); 4345 CU_ASSERT(g_bserrno == 0); 4346 4347 dev = init_dev(); 4348 spdk_bs_opts_init(&opts, sizeof(opts)); 4349 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4350 opts.iter_cb_fn = test_iter; 4351 opts.iter_cb_arg = &iter_ctx; 4352 4353 /* Test blob iteration during load after a clean shutdown. */ 4354 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4355 poll_threads(); 4356 CU_ASSERT(g_bserrno == 0); 4357 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4358 bs = g_bs; 4359 4360 /* Dirty shutdown */ 4361 bs_free(bs); 4362 4363 dev = init_dev(); 4364 spdk_bs_opts_init(&opts, sizeof(opts)); 4365 snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE"); 4366 opts.iter_cb_fn = test_iter; 4367 iter_ctx.current_iter = 0; 4368 opts.iter_cb_arg = &iter_ctx; 4369 4370 /* Test blob iteration during load after a dirty shutdown. 
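 * The same iter_ctx is reused with current_iter reset to 0, so test_iter()
 * must again be called once per blob, in creation order, this time on the
 * dirty-load (recovery) path.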
*/ 4371 spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL); 4372 poll_threads(); 4373 CU_ASSERT(g_bserrno == 0); 4374 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4375 bs = g_bs; 4376 4377 spdk_bs_unload(bs, bs_op_complete, NULL); 4378 poll_threads(); 4379 CU_ASSERT(g_bserrno == 0); 4380 g_bs = NULL; 4381 } 4382 4383 static void 4384 blob_snapshot_rw(void) 4385 { 4386 static const uint8_t zero[10 * 4096] = { 0 }; 4387 struct spdk_blob_store *bs = g_bs; 4388 struct spdk_blob *blob, *snapshot; 4389 struct spdk_io_channel *channel; 4390 struct spdk_blob_opts opts; 4391 spdk_blob_id blobid, snapshotid; 4392 uint64_t free_clusters; 4393 uint64_t cluster_size; 4394 uint64_t page_size; 4395 uint8_t payload_read[10 * 4096]; 4396 uint8_t payload_write[10 * 4096]; 4397 uint64_t write_bytes; 4398 uint64_t read_bytes; 4399 4400 free_clusters = spdk_bs_free_cluster_count(bs); 4401 cluster_size = spdk_bs_get_cluster_size(bs); 4402 page_size = spdk_bs_get_page_size(bs); 4403 4404 channel = spdk_bs_alloc_io_channel(bs); 4405 CU_ASSERT(channel != NULL); 4406 4407 ut_spdk_blob_opts_init(&opts); 4408 opts.thin_provision = true; 4409 opts.num_clusters = 5; 4410 4411 blob = ut_blob_create_and_open(bs, &opts); 4412 blobid = spdk_blob_get_id(blob); 4413 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4414 4415 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4416 4417 memset(payload_read, 0xFF, sizeof(payload_read)); 4418 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4419 poll_threads(); 4420 CU_ASSERT(g_bserrno == 0); 4421 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4422 4423 memset(payload_write, 0xE5, sizeof(payload_write)); 4424 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4425 poll_threads(); 4426 CU_ASSERT(g_bserrno == 0); 4427 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4428 4429 /* Create snapshot from blob */ 4430 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4431 poll_threads(); 4432 CU_ASSERT(g_bserrno == 0); 4433 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4434 snapshotid = g_blobid; 4435 4436 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4437 poll_threads(); 4438 CU_ASSERT(g_bserrno == 0); 4439 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4440 snapshot = g_blob; 4441 CU_ASSERT(snapshot->data_ro == true); 4442 CU_ASSERT(snapshot->md_ro == true); 4443 4444 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4445 4446 write_bytes = g_dev_write_bytes; 4447 read_bytes = g_dev_read_bytes; 4448 4449 memset(payload_write, 0xAA, sizeof(payload_write)); 4450 spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL); 4451 poll_threads(); 4452 CU_ASSERT(g_bserrno == 0); 4453 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4454 4455 /* For a clone we need to allocate and copy one cluster, update one page of metadata 4456 * and then write 10 pages of payload. 
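 * Roughly, for the write below:
 *   write delta = cluster_size     (copy-on-write of the backing cluster)
 *               + 10 * page_size   (the new payload)
 *               + 1 * page_size    (blob md page)
 *               [+ 1 * page_size]  (extent page, extent table case only)
 *   read delta  = cluster_size     (old cluster data read from the snapshot)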
4457 */ 4458 if (g_use_extent_table) { 4459 /* Add one more page for EXTENT_PAGE write */ 4460 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size); 4461 } else { 4462 CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size); 4463 } 4464 CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size); 4465 4466 spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL); 4467 poll_threads(); 4468 CU_ASSERT(g_bserrno == 0); 4469 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4470 4471 /* Data on snapshot should not change after write to clone */ 4472 memset(payload_write, 0xE5, sizeof(payload_write)); 4473 spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL); 4474 poll_threads(); 4475 CU_ASSERT(g_bserrno == 0); 4476 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4477 4478 ut_blob_close_and_delete(bs, blob); 4479 ut_blob_close_and_delete(bs, snapshot); 4480 4481 spdk_bs_free_io_channel(channel); 4482 poll_threads(); 4483 g_blob = NULL; 4484 g_blobid = 0; 4485 } 4486 4487 static void 4488 blob_snapshot_rw_iov(void) 4489 { 4490 static const uint8_t zero[10 * 4096] = { 0 }; 4491 struct spdk_blob_store *bs = g_bs; 4492 struct spdk_blob *blob, *snapshot; 4493 struct spdk_io_channel *channel; 4494 struct spdk_blob_opts opts; 4495 spdk_blob_id blobid, snapshotid; 4496 uint64_t free_clusters; 4497 uint8_t payload_read[10 * 4096]; 4498 uint8_t payload_write[10 * 4096]; 4499 struct iovec iov_read[3]; 4500 struct iovec iov_write[3]; 4501 4502 free_clusters = spdk_bs_free_cluster_count(bs); 4503 4504 channel = spdk_bs_alloc_io_channel(bs); 4505 CU_ASSERT(channel != NULL); 4506 4507 ut_spdk_blob_opts_init(&opts); 4508 opts.thin_provision = true; 4509 opts.num_clusters = 5; 4510 4511 blob = ut_blob_create_and_open(bs, &opts); 4512 blobid = spdk_blob_get_id(blob); 4513 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4514 4515 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4516 4517 /* Create snapshot from blob */ 4518 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4519 poll_threads(); 4520 CU_ASSERT(g_bserrno == 0); 4521 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4522 snapshotid = g_blobid; 4523 4524 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4525 poll_threads(); 4526 CU_ASSERT(g_bserrno == 0); 4527 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4528 snapshot = g_blob; 4529 CU_ASSERT(snapshot->data_ro == true); 4530 CU_ASSERT(snapshot->md_ro == true); 4531 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4532 4533 /* Payload should be all zeros from unallocated clusters */ 4534 memset(payload_read, 0xAA, sizeof(payload_read)); 4535 iov_read[0].iov_base = payload_read; 4536 iov_read[0].iov_len = 3 * 4096; 4537 iov_read[1].iov_base = payload_read + 3 * 4096; 4538 iov_read[1].iov_len = 4 * 4096; 4539 iov_read[2].iov_base = payload_read + 7 * 4096; 4540 iov_read[2].iov_len = 3 * 4096; 4541 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4542 poll_threads(); 4543 CU_ASSERT(g_bserrno == 0); 4544 CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0); 4545 4546 memset(payload_write, 0xE5, sizeof(payload_write)); 4547 iov_write[0].iov_base = payload_write; 4548 iov_write[0].iov_len = 1 * 4096; 4549 iov_write[1].iov_base = payload_write + 1 * 4096; 4550 iov_write[1].iov_len = 5 * 4096; 4551 iov_write[2].iov_base = payload_write + 6 * 4096; 4552 iov_write[2].iov_len = 4 * 4096; 4553 4554 
spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL); 4555 poll_threads(); 4556 CU_ASSERT(g_bserrno == 0); 4557 4558 memset(payload_read, 0xAA, sizeof(payload_read)); 4559 iov_read[0].iov_base = payload_read; 4560 iov_read[0].iov_len = 3 * 4096; 4561 iov_read[1].iov_base = payload_read + 3 * 4096; 4562 iov_read[1].iov_len = 4 * 4096; 4563 iov_read[2].iov_base = payload_read + 7 * 4096; 4564 iov_read[2].iov_len = 3 * 4096; 4565 spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL); 4566 poll_threads(); 4567 CU_ASSERT(g_bserrno == 0); 4568 CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0); 4569 4570 spdk_bs_free_io_channel(channel); 4571 poll_threads(); 4572 4573 ut_blob_close_and_delete(bs, blob); 4574 ut_blob_close_and_delete(bs, snapshot); 4575 } 4576 4577 /** 4578 * Inflate / decouple parent rw unit tests. 4579 * 4580 * -------------- 4581 * original blob: 0 1 2 3 4 4582 * ,---------+---------+---------+---------+---------. 4583 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4584 * +---------+---------+---------+---------+---------+ 4585 * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - | 4586 * +---------+---------+---------+---------+---------+ 4587 * blob | - |zzzzzzzzz| - | - | - | 4588 * '---------+---------+---------+---------+---------' 4589 * . . . . . . 4590 * -------- . . . . . . 4591 * inflate: . . . . . . 4592 * ,---------+---------+---------+---------+---------. 4593 * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000| 4594 * '---------+---------+---------+---------+---------' 4595 * 4596 * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency 4597 * on snapshot2 and snapshot removed . . . 4598 * . . . . . . 4599 * ---------------- . . . . . . 4600 * decouple parent: . . . . . . 4601 * ,---------+---------+---------+---------+---------. 4602 * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - | 4603 * +---------+---------+---------+---------+---------+ 4604 * blob | - |zzzzzzzzz| - |yyyyyyyyy| - | 4605 * '---------+---------+---------+---------+---------' 4606 * 4607 * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency 4608 * on snapshot2 removed and on snapshot still exists. Snapshot2 4609 * should remain a clone of snapshot. 
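 * In terms of the diagram: inflate has to populate clusters 0, 2, 3 and 4 in
 * the blob (cluster 1 is already allocated there), i.e. 4 new clusters, while
 * decouple parent only copies cluster 3 from snapshot2 - clusters 0 and 2 keep
 * coming from snapshot and cluster 4 stays unallocated.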
4610 */ 4611 static void 4612 _blob_inflate_rw(bool decouple_parent) 4613 { 4614 struct spdk_blob_store *bs = g_bs; 4615 struct spdk_blob *blob, *snapshot, *snapshot2; 4616 struct spdk_io_channel *channel; 4617 struct spdk_blob_opts opts; 4618 spdk_blob_id blobid, snapshotid, snapshot2id; 4619 uint64_t free_clusters; 4620 uint64_t cluster_size; 4621 4622 uint64_t payload_size; 4623 uint8_t *payload_read; 4624 uint8_t *payload_write; 4625 uint8_t *payload_clone; 4626 4627 uint64_t pages_per_cluster; 4628 uint64_t pages_per_payload; 4629 4630 int i; 4631 spdk_blob_id ids[2]; 4632 size_t count; 4633 4634 free_clusters = spdk_bs_free_cluster_count(bs); 4635 cluster_size = spdk_bs_get_cluster_size(bs); 4636 pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs); 4637 pages_per_payload = pages_per_cluster * 5; 4638 4639 payload_size = cluster_size * 5; 4640 4641 payload_read = malloc(payload_size); 4642 SPDK_CU_ASSERT_FATAL(payload_read != NULL); 4643 4644 payload_write = malloc(payload_size); 4645 SPDK_CU_ASSERT_FATAL(payload_write != NULL); 4646 4647 payload_clone = malloc(payload_size); 4648 SPDK_CU_ASSERT_FATAL(payload_clone != NULL); 4649 4650 channel = spdk_bs_alloc_io_channel(bs); 4651 SPDK_CU_ASSERT_FATAL(channel != NULL); 4652 4653 /* Create blob */ 4654 ut_spdk_blob_opts_init(&opts); 4655 opts.thin_provision = true; 4656 opts.num_clusters = 5; 4657 4658 blob = ut_blob_create_and_open(bs, &opts); 4659 blobid = spdk_blob_get_id(blob); 4660 CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs)); 4661 4662 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4663 4664 /* 1) Initial read should return zeroed payload */ 4665 memset(payload_read, 0xFF, payload_size); 4666 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4667 blob_op_complete, NULL); 4668 poll_threads(); 4669 CU_ASSERT(g_bserrno == 0); 4670 CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size)); 4671 4672 /* Fill whole blob with a pattern, except last cluster (to be sure it 4673 * isn't allocated) */ 4674 memset(payload_write, 0xE5, payload_size - cluster_size); 4675 spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload - 4676 pages_per_cluster, blob_op_complete, NULL); 4677 poll_threads(); 4678 CU_ASSERT(g_bserrno == 0); 4679 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4680 4681 /* 2) Create snapshot from blob (first level) */ 4682 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4683 poll_threads(); 4684 CU_ASSERT(g_bserrno == 0); 4685 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4686 snapshotid = g_blobid; 4687 4688 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4689 poll_threads(); 4690 CU_ASSERT(g_bserrno == 0); 4691 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4692 snapshot = g_blob; 4693 CU_ASSERT(snapshot->data_ro == true); 4694 CU_ASSERT(snapshot->md_ro == true); 4695 4696 CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5); 4697 4698 /* Write every second cluster with a pattern. 4699 * 4700 * The last cluster shouldn't be written, to be sure that neither the 4701 * snapshot nor the clone allocates it. 4702 * 4703 * payload_clone stores the expected result of a "blob" read at the time and 4704 * is used only to check data consistency on the clone before and after 4705 * inflation. Initially we fill it with the backing snapshot's pattern 4706 * used before.
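 * After this loop the expected blob content is: clusters 1 and 3 hold the new
 * 0xAA pattern, clusters 0 and 2 still show the snapshot's 0xE5 pattern, and
 * cluster 4 remains unallocated (reads back as zeroes).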
4707 */ 4708 memset(payload_clone, 0xE5, payload_size - cluster_size); 4709 memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size); 4710 memset(payload_write, 0xAA, payload_size); 4711 for (i = 1; i < 5; i += 2) { 4712 spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster, 4713 pages_per_cluster, blob_op_complete, NULL); 4714 poll_threads(); 4715 CU_ASSERT(g_bserrno == 0); 4716 4717 /* Update expected result */ 4718 memcpy(payload_clone + (cluster_size * i), payload_write, 4719 cluster_size); 4720 } 4721 CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs)); 4722 4723 /* Check data consistency on clone */ 4724 memset(payload_read, 0xFF, payload_size); 4725 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4726 blob_op_complete, NULL); 4727 poll_threads(); 4728 CU_ASSERT(g_bserrno == 0); 4729 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4730 4731 /* 3) Create second levels snapshot from blob */ 4732 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4733 poll_threads(); 4734 CU_ASSERT(g_bserrno == 0); 4735 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4736 snapshot2id = g_blobid; 4737 4738 spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL); 4739 poll_threads(); 4740 CU_ASSERT(g_bserrno == 0); 4741 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4742 snapshot2 = g_blob; 4743 CU_ASSERT(snapshot2->data_ro == true); 4744 CU_ASSERT(snapshot2->md_ro == true); 4745 4746 CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5); 4747 4748 CU_ASSERT(snapshot2->parent_id == snapshotid); 4749 4750 /* Write one cluster on the top level blob. This cluster (1) covers 4751 * already allocated cluster in the snapshot2, so shouldn't be inflated 4752 * at all */ 4753 spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster, 4754 pages_per_cluster, blob_op_complete, NULL); 4755 poll_threads(); 4756 CU_ASSERT(g_bserrno == 0); 4757 4758 /* Update expected result */ 4759 memcpy(payload_clone + cluster_size, payload_write, cluster_size); 4760 4761 /* Check data consistency on clone */ 4762 memset(payload_read, 0xFF, payload_size); 4763 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4764 blob_op_complete, NULL); 4765 poll_threads(); 4766 CU_ASSERT(g_bserrno == 0); 4767 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4768 4769 4770 /* Close all blobs */ 4771 spdk_blob_close(blob, blob_op_complete, NULL); 4772 poll_threads(); 4773 CU_ASSERT(g_bserrno == 0); 4774 4775 spdk_blob_close(snapshot2, blob_op_complete, NULL); 4776 poll_threads(); 4777 CU_ASSERT(g_bserrno == 0); 4778 4779 spdk_blob_close(snapshot, blob_op_complete, NULL); 4780 poll_threads(); 4781 CU_ASSERT(g_bserrno == 0); 4782 4783 /* Check snapshot-clone relations */ 4784 count = 2; 4785 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4786 CU_ASSERT(count == 1); 4787 CU_ASSERT(ids[0] == snapshot2id); 4788 4789 count = 2; 4790 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4791 CU_ASSERT(count == 1); 4792 CU_ASSERT(ids[0] == blobid); 4793 4794 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id); 4795 4796 free_clusters = spdk_bs_free_cluster_count(bs); 4797 if (!decouple_parent) { 4798 /* Do full blob inflation */ 4799 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 4800 poll_threads(); 4801 CU_ASSERT(g_bserrno == 0); 4802 4803 /* All clusters should be inflated (except one already allocated 4804 * in a top level blob) */ 4805 
CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4); 4806 4807 /* Check if relation tree updated correctly */ 4808 count = 2; 4809 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4810 4811 /* snapshotid has one clone */ 4812 CU_ASSERT(count == 1); 4813 CU_ASSERT(ids[0] == snapshot2id); 4814 4815 /* snapshot2id has no clones */ 4816 count = 2; 4817 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4818 CU_ASSERT(count == 0); 4819 4820 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4821 } else { 4822 /* Decouple parent of blob */ 4823 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 4824 poll_threads(); 4825 CU_ASSERT(g_bserrno == 0); 4826 4827 /* Only one cluster from the parent should be inflated (the second one 4828 * is covered by a cluster already written, and thus allocated, on the 4829 * top level blob) */ 4830 CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1); 4831 4832 /* Check if relation tree updated correctly */ 4833 count = 2; 4834 CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0); 4835 4836 /* snapshotid has two clones now */ 4837 CU_ASSERT(count == 2); 4838 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 4839 CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id); 4840 4841 /* snapshot2id has no clones */ 4842 count = 2; 4843 CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0); 4844 CU_ASSERT(count == 0); 4845 4846 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4847 } 4848 4849 /* Try to delete snapshot2 (should pass) */ 4850 spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL); 4851 poll_threads(); 4852 CU_ASSERT(g_bserrno == 0); 4853 4854 /* Try to delete base snapshot */ 4855 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 4856 poll_threads(); 4857 CU_ASSERT(g_bserrno == 0); 4858 4859 /* Reopen blob after snapshot deletion */ 4860 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 4861 poll_threads(); 4862 CU_ASSERT(g_bserrno == 0); 4863 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4864 blob = g_blob; 4865 4866 CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5); 4867 4868 /* Check data consistency on inflated blob */ 4869 memset(payload_read, 0xFF, payload_size); 4870 spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, 4871 blob_op_complete, NULL); 4872 poll_threads(); 4873 CU_ASSERT(g_bserrno == 0); 4874 CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0); 4875 4876 spdk_bs_free_io_channel(channel); 4877 poll_threads(); 4878 4879 free(payload_read); 4880 free(payload_write); 4881 free(payload_clone); 4882 4883 ut_blob_close_and_delete(bs, blob); 4884 } 4885 4886 static void 4887 blob_inflate_rw(void) 4888 { 4889 _blob_inflate_rw(false); 4890 _blob_inflate_rw(true); 4891 } 4892 4893 /** 4894 * Snapshot-clones relation test 4895 * 4896 * snapshot 4897 * | 4898 * +-----+-----+ 4899 * | | 4900 * blob(ro) snapshot2 4901 * | | 4902 * clone2 clone 4903 */ 4904 static void 4905 blob_relations(void) 4906 { 4907 struct spdk_blob_store *bs; 4908 struct spdk_bs_dev *dev; 4909 struct spdk_bs_opts bs_opts; 4910 struct spdk_blob_opts opts; 4911 struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2; 4912 spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2; 4913 int rc; 4914 size_t count; 4915 spdk_blob_id ids[10] = {}; 4916 4917 dev = init_dev(); 4918 spdk_bs_opts_init(&bs_opts, sizeof(bs_opts)); 4919 snprintf(bs_opts.bstype.bstype,
sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 4920 4921 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 4922 poll_threads(); 4923 CU_ASSERT(g_bserrno == 0); 4924 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 4925 bs = g_bs; 4926 4927 /* 1. Create blob with 10 clusters */ 4928 4929 ut_spdk_blob_opts_init(&opts); 4930 opts.num_clusters = 10; 4931 4932 blob = ut_blob_create_and_open(bs, &opts); 4933 blobid = spdk_blob_get_id(blob); 4934 4935 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4936 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4937 CU_ASSERT(!spdk_blob_is_clone(blob)); 4938 CU_ASSERT(!spdk_blob_is_thin_provisioned(blob)); 4939 4940 /* blob should not have underlying snapshot nor clones */ 4941 CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID); 4942 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 4943 count = SPDK_COUNTOF(ids); 4944 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 4945 CU_ASSERT(rc == 0); 4946 CU_ASSERT(count == 0); 4947 4948 4949 /* 2. Create snapshot */ 4950 4951 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 4952 poll_threads(); 4953 CU_ASSERT(g_bserrno == 0); 4954 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4955 snapshotid = g_blobid; 4956 4957 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 4958 poll_threads(); 4959 CU_ASSERT(g_bserrno == 0); 4960 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4961 snapshot = g_blob; 4962 4963 CU_ASSERT(spdk_blob_is_read_only(snapshot)); 4964 CU_ASSERT(spdk_blob_is_snapshot(snapshot)); 4965 CU_ASSERT(!spdk_blob_is_clone(snapshot)); 4966 CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID); 4967 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 4968 4969 /* Check if original blob is converted to the clone of snapshot */ 4970 CU_ASSERT(!spdk_blob_is_read_only(blob)); 4971 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 4972 CU_ASSERT(spdk_blob_is_clone(blob)); 4973 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 4974 CU_ASSERT(blob->parent_id == snapshotid); 4975 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 4976 4977 count = SPDK_COUNTOF(ids); 4978 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 4979 CU_ASSERT(rc == 0); 4980 CU_ASSERT(count == 1); 4981 CU_ASSERT(ids[0] == blobid); 4982 4983 4984 /* 3. Create clone from snapshot */ 4985 4986 spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL); 4987 poll_threads(); 4988 CU_ASSERT(g_bserrno == 0); 4989 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 4990 cloneid = g_blobid; 4991 4992 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 4993 poll_threads(); 4994 CU_ASSERT(g_bserrno == 0); 4995 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 4996 clone = g_blob; 4997 4998 CU_ASSERT(!spdk_blob_is_read_only(clone)); 4999 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 5000 CU_ASSERT(spdk_blob_is_clone(clone)); 5001 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 5002 CU_ASSERT(clone->parent_id == snapshotid); 5003 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid); 5004 5005 count = SPDK_COUNTOF(ids); 5006 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5007 CU_ASSERT(rc == 0); 5008 CU_ASSERT(count == 0); 5009 5010 /* Check if clone is on the snapshot's list */ 5011 count = SPDK_COUNTOF(ids); 5012 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5013 CU_ASSERT(rc == 0); 5014 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5015 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5016 5017 5018 /* 4. 
Create snapshot of the clone */ 5019 5020 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5021 poll_threads(); 5022 CU_ASSERT(g_bserrno == 0); 5023 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5024 snapshotid2 = g_blobid; 5025 5026 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 5027 poll_threads(); 5028 CU_ASSERT(g_bserrno == 0); 5029 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5030 snapshot2 = g_blob; 5031 5032 CU_ASSERT(spdk_blob_is_read_only(snapshot2)); 5033 CU_ASSERT(spdk_blob_is_snapshot(snapshot2)); 5034 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 5035 CU_ASSERT(snapshot2->parent_id == snapshotid); 5036 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 5037 5038 /* Check if clone is converted to the clone of snapshot2 and snapshot2 5039 * is a child of snapshot */ 5040 CU_ASSERT(!spdk_blob_is_read_only(clone)); 5041 CU_ASSERT(!spdk_blob_is_snapshot(clone)); 5042 CU_ASSERT(spdk_blob_is_clone(clone)); 5043 CU_ASSERT(spdk_blob_is_thin_provisioned(clone)); 5044 CU_ASSERT(clone->parent_id == snapshotid2); 5045 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5046 5047 count = SPDK_COUNTOF(ids); 5048 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5049 CU_ASSERT(rc == 0); 5050 CU_ASSERT(count == 1); 5051 CU_ASSERT(ids[0] == cloneid); 5052 5053 5054 /* 5. Try to create clone from read only blob */ 5055 5056 /* Mark blob as read only */ 5057 spdk_blob_set_read_only(blob); 5058 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5059 poll_threads(); 5060 CU_ASSERT(g_bserrno == 0); 5061 5062 /* Check if previously created blob is read only clone */ 5063 CU_ASSERT(spdk_blob_is_read_only(blob)); 5064 CU_ASSERT(!spdk_blob_is_snapshot(blob)); 5065 CU_ASSERT(spdk_blob_is_clone(blob)); 5066 CU_ASSERT(spdk_blob_is_thin_provisioned(blob)); 5067 5068 /* Create clone from read only blob */ 5069 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5070 poll_threads(); 5071 CU_ASSERT(g_bserrno == 0); 5072 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5073 cloneid2 = g_blobid; 5074 5075 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5076 poll_threads(); 5077 CU_ASSERT(g_bserrno == 0); 5078 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5079 clone2 = g_blob; 5080 5081 CU_ASSERT(!spdk_blob_is_read_only(clone2)); 5082 CU_ASSERT(!spdk_blob_is_snapshot(clone2)); 5083 CU_ASSERT(spdk_blob_is_clone(clone2)); 5084 CU_ASSERT(spdk_blob_is_thin_provisioned(clone2)); 5085 5086 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5087 5088 count = SPDK_COUNTOF(ids); 5089 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5090 CU_ASSERT(rc == 0); 5091 5092 CU_ASSERT(count == 1); 5093 CU_ASSERT(ids[0] == cloneid2); 5094 5095 /* Close blobs */ 5096 5097 spdk_blob_close(clone2, blob_op_complete, NULL); 5098 poll_threads(); 5099 CU_ASSERT(g_bserrno == 0); 5100 5101 spdk_blob_close(blob, blob_op_complete, NULL); 5102 poll_threads(); 5103 CU_ASSERT(g_bserrno == 0); 5104 5105 spdk_blob_close(clone, blob_op_complete, NULL); 5106 poll_threads(); 5107 CU_ASSERT(g_bserrno == 0); 5108 5109 spdk_blob_close(snapshot, blob_op_complete, NULL); 5110 poll_threads(); 5111 CU_ASSERT(g_bserrno == 0); 5112 5113 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5114 poll_threads(); 5115 CU_ASSERT(g_bserrno == 0); 5116 5117 /* Try to delete snapshot with more than 1 clone */ 5118 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5119 poll_threads(); 5120 CU_ASSERT(g_bserrno != 0); 5121 5122 
ut_bs_reload(&bs, &bs_opts); 5123 5124 /* NULL ids array should return number of clones in count */ 5125 count = SPDK_COUNTOF(ids); 5126 rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count); 5127 CU_ASSERT(rc == -ENOMEM); 5128 CU_ASSERT(count == 2); 5129 5130 /* incorrect array size */ 5131 count = 1; 5132 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5133 CU_ASSERT(rc == -ENOMEM); 5134 CU_ASSERT(count == 2); 5135 5136 5137 /* Verify structure of loaded blob store */ 5138 5139 /* snapshot */ 5140 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID); 5141 5142 count = SPDK_COUNTOF(ids); 5143 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5144 CU_ASSERT(rc == 0); 5145 CU_ASSERT(count == 2); 5146 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5147 CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2); 5148 5149 /* blob */ 5150 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5151 count = SPDK_COUNTOF(ids); 5152 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5153 CU_ASSERT(rc == 0); 5154 CU_ASSERT(count == 1); 5155 CU_ASSERT(ids[0] == cloneid2); 5156 5157 /* clone */ 5158 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5159 count = SPDK_COUNTOF(ids); 5160 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5161 CU_ASSERT(rc == 0); 5162 CU_ASSERT(count == 0); 5163 5164 /* snapshot2 */ 5165 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid); 5166 count = SPDK_COUNTOF(ids); 5167 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5168 CU_ASSERT(rc == 0); 5169 CU_ASSERT(count == 1); 5170 CU_ASSERT(ids[0] == cloneid); 5171 5172 /* clone2 */ 5173 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5174 count = SPDK_COUNTOF(ids); 5175 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5176 CU_ASSERT(rc == 0); 5177 CU_ASSERT(count == 0); 5178 5179 /* Try to delete blob that user should not be able to remove */ 5180 5181 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5182 poll_threads(); 5183 CU_ASSERT(g_bserrno != 0); 5184 5185 /* Remove all blobs */ 5186 5187 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5188 poll_threads(); 5189 CU_ASSERT(g_bserrno == 0); 5190 5191 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5192 poll_threads(); 5193 CU_ASSERT(g_bserrno == 0); 5194 5195 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5196 poll_threads(); 5197 CU_ASSERT(g_bserrno == 0); 5198 5199 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5200 poll_threads(); 5201 CU_ASSERT(g_bserrno == 0); 5202 5203 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5204 poll_threads(); 5205 CU_ASSERT(g_bserrno == 0); 5206 5207 spdk_bs_unload(bs, bs_op_complete, NULL); 5208 poll_threads(); 5209 CU_ASSERT(g_bserrno == 0); 5210 5211 g_bs = NULL; 5212 } 5213 5214 /** 5215 * Snapshot-clones relation test 2 5216 * 5217 * snapshot1 5218 * | 5219 * snapshot2 5220 * | 5221 * +-----+-----+ 5222 * | | 5223 * blob(ro) snapshot3 5224 * | | 5225 * | snapshot4 5226 * | | | 5227 * clone2 clone clone3 5228 */ 5229 static void 5230 blob_relations2(void) 5231 { 5232 struct spdk_blob_store *bs; 5233 struct spdk_bs_dev *dev; 5234 struct spdk_bs_opts bs_opts; 5235 struct spdk_blob_opts opts; 5236 struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2; 5237 spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2, 5238 cloneid3; 5239 int rc; 5240 size_t count; 5241 
spdk_blob_id ids[10] = {}; 5242 5243 dev = init_dev(); 5244 spdk_bs_opts_init(&bs_opts, sizeof(bs_opts)); 5245 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 5246 5247 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 5248 poll_threads(); 5249 CU_ASSERT(g_bserrno == 0); 5250 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5251 bs = g_bs; 5252 5253 /* 1. Create blob with 10 clusters */ 5254 5255 ut_spdk_blob_opts_init(&opts); 5256 opts.num_clusters = 10; 5257 5258 blob = ut_blob_create_and_open(bs, &opts); 5259 blobid = spdk_blob_get_id(blob); 5260 5261 /* 2. Create snapshot1 */ 5262 5263 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5264 poll_threads(); 5265 CU_ASSERT(g_bserrno == 0); 5266 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5267 snapshotid1 = g_blobid; 5268 5269 spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL); 5270 poll_threads(); 5271 CU_ASSERT(g_bserrno == 0); 5272 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5273 snapshot1 = g_blob; 5274 5275 CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID); 5276 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID); 5277 5278 CU_ASSERT(blob->parent_id == snapshotid1); 5279 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5280 5281 /* Check if blob is the clone of snapshot1 */ 5282 CU_ASSERT(blob->parent_id == snapshotid1); 5283 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1); 5284 5285 count = SPDK_COUNTOF(ids); 5286 rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count); 5287 CU_ASSERT(rc == 0); 5288 CU_ASSERT(count == 1); 5289 CU_ASSERT(ids[0] == blobid); 5290 5291 /* 3. Create another snapshot */ 5292 5293 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5294 poll_threads(); 5295 CU_ASSERT(g_bserrno == 0); 5296 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5297 snapshotid2 = g_blobid; 5298 5299 spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL); 5300 poll_threads(); 5301 CU_ASSERT(g_bserrno == 0); 5302 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5303 snapshot2 = g_blob; 5304 5305 CU_ASSERT(spdk_blob_is_clone(snapshot2)); 5306 CU_ASSERT(snapshot2->parent_id == snapshotid1); 5307 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1); 5308 5309 /* Check if snapshot2 is the clone of snapshot1 and blob 5310 * is a child of snapshot2 */ 5311 CU_ASSERT(blob->parent_id == snapshotid2); 5312 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5313 5314 count = SPDK_COUNTOF(ids); 5315 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5316 CU_ASSERT(rc == 0); 5317 CU_ASSERT(count == 1); 5318 CU_ASSERT(ids[0] == blobid); 5319 5320 /* 4. 
Create clone from snapshot */ 5321 5322 spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL); 5323 poll_threads(); 5324 CU_ASSERT(g_bserrno == 0); 5325 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5326 cloneid = g_blobid; 5327 5328 spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL); 5329 poll_threads(); 5330 CU_ASSERT(g_bserrno == 0); 5331 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5332 clone = g_blob; 5333 5334 CU_ASSERT(clone->parent_id == snapshotid2); 5335 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2); 5336 5337 /* Check if clone is on the snapshot's list */ 5338 count = SPDK_COUNTOF(ids); 5339 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5340 CU_ASSERT(rc == 0); 5341 CU_ASSERT(count == 2); 5342 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5343 CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid); 5344 5345 /* 5. Create snapshot of the clone */ 5346 5347 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5348 poll_threads(); 5349 CU_ASSERT(g_bserrno == 0); 5350 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5351 snapshotid3 = g_blobid; 5352 5353 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5354 poll_threads(); 5355 CU_ASSERT(g_bserrno == 0); 5356 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5357 snapshot3 = g_blob; 5358 5359 CU_ASSERT(snapshot3->parent_id == snapshotid2); 5360 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5361 5362 /* Check if clone is converted to the clone of snapshot3 and snapshot3 5363 * is a child of snapshot2 */ 5364 CU_ASSERT(clone->parent_id == snapshotid3); 5365 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5366 5367 count = SPDK_COUNTOF(ids); 5368 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5369 CU_ASSERT(rc == 0); 5370 CU_ASSERT(count == 1); 5371 CU_ASSERT(ids[0] == cloneid); 5372 5373 /* 6. Create another snapshot of the clone */ 5374 5375 spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL); 5376 poll_threads(); 5377 CU_ASSERT(g_bserrno == 0); 5378 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5379 snapshotid4 = g_blobid; 5380 5381 spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL); 5382 poll_threads(); 5383 CU_ASSERT(g_bserrno == 0); 5384 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5385 snapshot4 = g_blob; 5386 5387 CU_ASSERT(snapshot4->parent_id == snapshotid3); 5388 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3); 5389 5390 /* Check if clone is converted to the clone of snapshot4 and snapshot4 5391 * is a child of snapshot3 */ 5392 CU_ASSERT(clone->parent_id == snapshotid4); 5393 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4); 5394 5395 count = SPDK_COUNTOF(ids); 5396 rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count); 5397 CU_ASSERT(rc == 0); 5398 CU_ASSERT(count == 1); 5399 CU_ASSERT(ids[0] == cloneid); 5400 5401 /* 7. Remove snapshot 4 */ 5402 5403 ut_blob_close_and_delete(bs, snapshot4); 5404 5405 /* Check if relations are back to state from before creating snapshot 4 */ 5406 CU_ASSERT(clone->parent_id == snapshotid3); 5407 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5408 5409 count = SPDK_COUNTOF(ids); 5410 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5411 CU_ASSERT(rc == 0); 5412 CU_ASSERT(count == 1); 5413 CU_ASSERT(ids[0] == cloneid); 5414 5415 /* 8. 
Create second clone of snapshot 3 and try to remove snapshot 3 */ 5416 5417 spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL); 5418 poll_threads(); 5419 CU_ASSERT(g_bserrno == 0); 5420 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5421 cloneid3 = g_blobid; 5422 5423 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5424 poll_threads(); 5425 CU_ASSERT(g_bserrno != 0); 5426 5427 /* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */ 5428 5429 spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL); 5430 poll_threads(); 5431 CU_ASSERT(g_bserrno == 0); 5432 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5433 snapshot3 = g_blob; 5434 5435 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5436 poll_threads(); 5437 CU_ASSERT(g_bserrno != 0); 5438 5439 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5440 poll_threads(); 5441 CU_ASSERT(g_bserrno == 0); 5442 5443 spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL); 5444 poll_threads(); 5445 CU_ASSERT(g_bserrno == 0); 5446 5447 /* 10. Remove snapshot 1 */ 5448 5449 ut_blob_close_and_delete(bs, snapshot1); 5450 5451 /* Check if relations are back to state from before creating snapshot 4 (before step 6) */ 5452 CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID); 5453 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5454 5455 count = SPDK_COUNTOF(ids); 5456 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5457 CU_ASSERT(rc == 0); 5458 CU_ASSERT(count == 2); 5459 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5460 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5461 5462 /* 11. Try to create clone from read only blob */ 5463 5464 /* Mark blob as read only */ 5465 spdk_blob_set_read_only(blob); 5466 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5467 poll_threads(); 5468 CU_ASSERT(g_bserrno == 0); 5469 5470 /* Create clone from read only blob */ 5471 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5472 poll_threads(); 5473 CU_ASSERT(g_bserrno == 0); 5474 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5475 cloneid2 = g_blobid; 5476 5477 spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL); 5478 poll_threads(); 5479 CU_ASSERT(g_bserrno == 0); 5480 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5481 clone2 = g_blob; 5482 5483 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5484 5485 count = SPDK_COUNTOF(ids); 5486 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5487 CU_ASSERT(rc == 0); 5488 CU_ASSERT(count == 1); 5489 CU_ASSERT(ids[0] == cloneid2); 5490 5491 /* Close blobs */ 5492 5493 spdk_blob_close(clone2, blob_op_complete, NULL); 5494 poll_threads(); 5495 CU_ASSERT(g_bserrno == 0); 5496 5497 spdk_blob_close(blob, blob_op_complete, NULL); 5498 poll_threads(); 5499 CU_ASSERT(g_bserrno == 0); 5500 5501 spdk_blob_close(clone, blob_op_complete, NULL); 5502 poll_threads(); 5503 CU_ASSERT(g_bserrno == 0); 5504 5505 spdk_blob_close(snapshot2, blob_op_complete, NULL); 5506 poll_threads(); 5507 CU_ASSERT(g_bserrno == 0); 5508 5509 spdk_blob_close(snapshot3, blob_op_complete, NULL); 5510 poll_threads(); 5511 CU_ASSERT(g_bserrno == 0); 5512 5513 ut_bs_reload(&bs, &bs_opts); 5514 5515 /* Verify structure of loaded blob store */ 5516 5517 /* snapshot2 */ 5518 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID); 5519 5520 count = SPDK_COUNTOF(ids); 5521 rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count); 5522 CU_ASSERT(rc == 0); 5523 
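/* After the reload, snapshot2 should still report exactly two clones: blob and snapshot3 */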
CU_ASSERT(count == 2); 5524 CU_ASSERT(ids[0] == blobid || ids[1] == blobid); 5525 CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3); 5526 5527 /* blob */ 5528 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2); 5529 count = SPDK_COUNTOF(ids); 5530 rc = spdk_blob_get_clones(bs, blobid, ids, &count); 5531 CU_ASSERT(rc == 0); 5532 CU_ASSERT(count == 1); 5533 CU_ASSERT(ids[0] == cloneid2); 5534 5535 /* clone */ 5536 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3); 5537 count = SPDK_COUNTOF(ids); 5538 rc = spdk_blob_get_clones(bs, cloneid, ids, &count); 5539 CU_ASSERT(rc == 0); 5540 CU_ASSERT(count == 0); 5541 5542 /* snapshot3 */ 5543 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2); 5544 count = SPDK_COUNTOF(ids); 5545 rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count); 5546 CU_ASSERT(rc == 0); 5547 CU_ASSERT(count == 1); 5548 CU_ASSERT(ids[0] == cloneid); 5549 5550 /* clone2 */ 5551 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid); 5552 count = SPDK_COUNTOF(ids); 5553 rc = spdk_blob_get_clones(bs, cloneid2, ids, &count); 5554 CU_ASSERT(rc == 0); 5555 CU_ASSERT(count == 0); 5556 5557 /* Try to delete all blobs in the worst possible order */ 5558 5559 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5560 poll_threads(); 5561 CU_ASSERT(g_bserrno != 0); 5562 5563 spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL); 5564 poll_threads(); 5565 CU_ASSERT(g_bserrno == 0); 5566 5567 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5568 poll_threads(); 5569 CU_ASSERT(g_bserrno != 0); 5570 5571 spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL); 5572 poll_threads(); 5573 CU_ASSERT(g_bserrno == 0); 5574 5575 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5576 poll_threads(); 5577 CU_ASSERT(g_bserrno == 0); 5578 5579 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5580 poll_threads(); 5581 CU_ASSERT(g_bserrno == 0); 5582 5583 spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL); 5584 poll_threads(); 5585 CU_ASSERT(g_bserrno == 0); 5586 5587 spdk_bs_unload(bs, bs_op_complete, NULL); 5588 poll_threads(); 5589 CU_ASSERT(g_bserrno == 0); 5590 5591 g_bs = NULL; 5592 } 5593 5594 /** 5595 * Snapshot-clones relation test 3 5596 * 5597 * snapshot0 5598 * | 5599 * snapshot1 5600 * | 5601 * snapshot2 5602 * | 5603 * blob 5604 */ 5605 static void 5606 blob_relations3(void) 5607 { 5608 struct spdk_blob_store *bs; 5609 struct spdk_bs_dev *dev; 5610 struct spdk_io_channel *channel; 5611 struct spdk_bs_opts bs_opts; 5612 struct spdk_blob_opts opts; 5613 struct spdk_blob *blob; 5614 spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2; 5615 5616 dev = init_dev(); 5617 spdk_bs_opts_init(&bs_opts, sizeof(bs_opts)); 5618 snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE"); 5619 5620 spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL); 5621 poll_threads(); 5622 CU_ASSERT(g_bserrno == 0); 5623 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5624 bs = g_bs; 5625 5626 channel = spdk_bs_alloc_io_channel(bs); 5627 SPDK_CU_ASSERT_FATAL(channel != NULL); 5628 5629 /* 1. Create blob with 10 clusters */ 5630 ut_spdk_blob_opts_init(&opts); 5631 opts.num_clusters = 10; 5632 5633 blob = ut_blob_create_and_open(bs, &opts); 5634 blobid = spdk_blob_get_id(blob); 5635 5636 /* 2.
Create snapshot0 */ 5637 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5638 poll_threads(); 5639 CU_ASSERT(g_bserrno == 0); 5640 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5641 snapshotid0 = g_blobid; 5642 5643 /* 3. Create snapshot1 */ 5644 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5645 poll_threads(); 5646 CU_ASSERT(g_bserrno == 0); 5647 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5648 snapshotid1 = g_blobid; 5649 5650 /* 4. Create snapshot2 */ 5651 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5652 poll_threads(); 5653 CU_ASSERT(g_bserrno == 0); 5654 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5655 snapshotid2 = g_blobid; 5656 5657 /* 5. Decouple blob */ 5658 spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL); 5659 poll_threads(); 5660 CU_ASSERT(g_bserrno == 0); 5661 5662 /* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */ 5663 spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL); 5664 poll_threads(); 5665 CU_ASSERT(g_bserrno == 0); 5666 5667 /* 7. Delete blob */ 5668 spdk_blob_close(blob, blob_op_complete, NULL); 5669 poll_threads(); 5670 CU_ASSERT(g_bserrno == 0); 5671 5672 spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL); 5673 poll_threads(); 5674 CU_ASSERT(g_bserrno == 0); 5675 5676 /* 8. Delete snapshot2. 5677 * If md of snapshot 2 was updated, it should be possible to delete it */ 5678 spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL); 5679 poll_threads(); 5680 CU_ASSERT(g_bserrno == 0); 5681 5682 /* Remove remaining blobs and unload bs */ 5683 spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL); 5684 poll_threads(); 5685 CU_ASSERT(g_bserrno == 0); 5686 5687 spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL); 5688 poll_threads(); 5689 CU_ASSERT(g_bserrno == 0); 5690 5691 spdk_bs_free_io_channel(channel); 5692 poll_threads(); 5693 5694 spdk_bs_unload(bs, bs_op_complete, NULL); 5695 poll_threads(); 5696 CU_ASSERT(g_bserrno == 0); 5697 5698 g_bs = NULL; 5699 } 5700 5701 static void 5702 blobstore_clean_power_failure(void) 5703 { 5704 struct spdk_blob_store *bs; 5705 struct spdk_blob *blob; 5706 struct spdk_power_failure_thresholds thresholds = {}; 5707 bool clean = false; 5708 struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 5709 struct spdk_bs_super_block super_copy = {}; 5710 5711 thresholds.general_threshold = 1; 5712 while (!clean) { 5713 /* Create bs and blob */ 5714 suite_blob_setup(); 5715 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5716 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5717 bs = g_bs; 5718 blob = g_blob; 5719 5720 /* Super block should not change for rest of the UT, 5721 * save it and compare later. */ 5722 memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block)); 5723 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5724 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5725 5726 /* Force bs/super block in a clean state. 5727 * Along with marking blob dirty, to cause blob persist. */ 5728 blob->state = SPDK_BLOB_STATE_DIRTY; 5729 bs->clean = 1; 5730 super->clean = 1; 5731 super->crc = blob_md_page_calc_crc(super); 5732 5733 g_bserrno = -1; 5734 dev_set_power_failure_thresholds(thresholds); 5735 spdk_blob_sync_md(blob, blob_op_complete, NULL); 5736 poll_threads(); 5737 dev_reset_power_failure_event(); 5738 5739 if (g_bserrno == 0) { 5740 /* After successful md sync, both bs and super block 5741 * should be marked as not clean. 
*/ 5742 SPDK_CU_ASSERT_FATAL(bs->clean == 0); 5743 SPDK_CU_ASSERT_FATAL(super->clean == 0); 5744 clean = true; 5745 } 5746 5747 /* Depending on the point of failure, super block was either updated or not. */ 5748 super_copy.clean = super->clean; 5749 super_copy.crc = blob_md_page_calc_crc(&super_copy); 5750 /* Compare that the values in super block remained unchanged. */ 5751 SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block))); 5752 5753 /* Delete blob and unload bs */ 5754 suite_blob_cleanup(); 5755 5756 thresholds.general_threshold++; 5757 } 5758 } 5759 5760 static void 5761 blob_delete_snapshot_power_failure(void) 5762 { 5763 struct spdk_bs_dev *dev; 5764 struct spdk_blob_store *bs; 5765 struct spdk_blob_opts opts; 5766 struct spdk_blob *blob, *snapshot; 5767 struct spdk_power_failure_thresholds thresholds = {}; 5768 spdk_blob_id blobid, snapshotid; 5769 const void *value; 5770 size_t value_len; 5771 size_t count; 5772 spdk_blob_id ids[3] = {}; 5773 int rc; 5774 bool deleted = false; 5775 int delete_snapshot_bserrno = -1; 5776 5777 thresholds.general_threshold = 1; 5778 while (!deleted) { 5779 dev = init_dev(); 5780 5781 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5782 poll_threads(); 5783 CU_ASSERT(g_bserrno == 0); 5784 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5785 bs = g_bs; 5786 5787 /* Create blob */ 5788 ut_spdk_blob_opts_init(&opts); 5789 opts.num_clusters = 10; 5790 5791 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5792 poll_threads(); 5793 CU_ASSERT(g_bserrno == 0); 5794 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5795 blobid = g_blobid; 5796 5797 /* Create snapshot */ 5798 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5799 poll_threads(); 5800 CU_ASSERT(g_bserrno == 0); 5801 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5802 snapshotid = g_blobid; 5803 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5804 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5805 5806 dev_set_power_failure_thresholds(thresholds); 5807 5808 spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL); 5809 poll_threads(); 5810 delete_snapshot_bserrno = g_bserrno; 5811 5812 /* Do not shut down cleanly. The assumption is that after snapshot deletion 5813 * reports success, changes to both blobs should already be persisted.
*/ 5814 dev_reset_power_failure_event(); 5815 ut_bs_dirty_load(&bs, NULL); 5816 5817 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5818 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5819 5820 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5821 poll_threads(); 5822 CU_ASSERT(g_bserrno == 0); 5823 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5824 blob = g_blob; 5825 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5826 5827 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5828 poll_threads(); 5829 5830 if (g_bserrno == 0) { 5831 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5832 snapshot = g_blob; 5833 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5834 count = SPDK_COUNTOF(ids); 5835 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5836 CU_ASSERT(rc == 0); 5837 CU_ASSERT(count == 1); 5838 CU_ASSERT(ids[0] == blobid); 5839 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len); 5840 CU_ASSERT(rc != 0); 5841 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5842 5843 spdk_blob_close(snapshot, blob_op_complete, NULL); 5844 poll_threads(); 5845 CU_ASSERT(g_bserrno == 0); 5846 } else { 5847 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5848 /* Snapshot might have been left in unrecoverable state, so it does not open. 5849 * Yet delete might perform further changes to the clone after that. 5850 * This UT should test until snapshot is deleted and delete call succeeds. */ 5851 if (delete_snapshot_bserrno == 0) { 5852 deleted = true; 5853 } 5854 } 5855 5856 spdk_blob_close(blob, blob_op_complete, NULL); 5857 poll_threads(); 5858 CU_ASSERT(g_bserrno == 0); 5859 5860 spdk_bs_unload(bs, bs_op_complete, NULL); 5861 poll_threads(); 5862 CU_ASSERT(g_bserrno == 0); 5863 5864 thresholds.general_threshold++; 5865 } 5866 } 5867 5868 static void 5869 blob_create_snapshot_power_failure(void) 5870 { 5871 struct spdk_blob_store *bs = g_bs; 5872 struct spdk_bs_dev *dev; 5873 struct spdk_blob_opts opts; 5874 struct spdk_blob *blob, *snapshot; 5875 struct spdk_power_failure_thresholds thresholds = {}; 5876 spdk_blob_id blobid, snapshotid; 5877 const void *value; 5878 size_t value_len; 5879 size_t count; 5880 spdk_blob_id ids[3] = {}; 5881 int rc; 5882 bool created = false; 5883 int create_snapshot_bserrno = -1; 5884 5885 thresholds.general_threshold = 1; 5886 while (!created) { 5887 dev = init_dev(); 5888 5889 spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL); 5890 poll_threads(); 5891 CU_ASSERT(g_bserrno == 0); 5892 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 5893 bs = g_bs; 5894 5895 /* Create blob */ 5896 ut_spdk_blob_opts_init(&opts); 5897 opts.num_clusters = 10; 5898 5899 spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL); 5900 poll_threads(); 5901 CU_ASSERT(g_bserrno == 0); 5902 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 5903 blobid = g_blobid; 5904 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5905 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5906 5907 dev_set_power_failure_thresholds(thresholds); 5908 5909 /* Create snapshot */ 5910 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 5911 poll_threads(); 5912 create_snapshot_bserrno = g_bserrno; 5913 snapshotid = g_blobid; 5914 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5915 
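/* Creating the snapshot transfers the blob's existing clusters to the snapshot instead of
 * copying them, so even an attempt interrupted by the simulated power failure must not
 * allocate anything beyond the original 10 data clusters (bit 11 has to stay clear). */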
SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5916 5917 /* Do not shut down cleanly. Assumption is that after create snapshot 5918 * reports success, both blobs should be power-fail safe. */ 5919 dev_reset_power_failure_event(); 5920 ut_bs_dirty_load(&bs, NULL); 5921 5922 SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1)); 5923 SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11)); 5924 5925 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 5926 poll_threads(); 5927 CU_ASSERT(g_bserrno == 0); 5928 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5929 blob = g_blob; 5930 5931 if (snapshotid != SPDK_BLOBID_INVALID) { 5932 spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL); 5933 poll_threads(); 5934 } 5935 5936 if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) { 5937 SPDK_CU_ASSERT_FATAL(g_blob != NULL); 5938 snapshot = g_blob; 5939 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true); 5940 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false); 5941 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid); 5942 count = SPDK_COUNTOF(ids); 5943 rc = spdk_blob_get_clones(bs, snapshotid, ids, &count); 5944 CU_ASSERT(rc == 0); 5945 CU_ASSERT(count == 1); 5946 CU_ASSERT(ids[0] == blobid); 5947 rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len); 5948 CU_ASSERT(rc != 0); 5949 5950 spdk_blob_close(snapshot, blob_op_complete, NULL); 5951 poll_threads(); 5952 CU_ASSERT(g_bserrno == 0); 5953 if (create_snapshot_bserrno == 0) { 5954 created = true; 5955 } 5956 } else { 5957 CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID); 5958 SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false); 5959 } 5960 5961 spdk_blob_close(blob, blob_op_complete, NULL); 5962 poll_threads(); 5963 CU_ASSERT(g_bserrno == 0); 5964 5965 spdk_bs_unload(bs, bs_op_complete, NULL); 5966 poll_threads(); 5967 CU_ASSERT(g_bserrno == 0); 5968 5969 thresholds.general_threshold++; 5970 } 5971 } 5972 5973 static void 5974 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 5975 { 5976 uint8_t payload_ff[64 * 512]; 5977 uint8_t payload_aa[64 * 512]; 5978 uint8_t payload_00[64 * 512]; 5979 uint8_t *cluster0, *cluster1; 5980 5981 memset(payload_ff, 0xFF, sizeof(payload_ff)); 5982 memset(payload_aa, 0xAA, sizeof(payload_aa)); 5983 memset(payload_00, 0x00, sizeof(payload_00)); 5984 5985 /* Try to perform I/O with io unit = 512 */ 5986 spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL); 5987 poll_threads(); 5988 CU_ASSERT(g_bserrno == 0); 5989 5990 /* If thin provisioned is set cluster should be allocated now */ 5991 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 5992 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 5993 5994 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 5995 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). 
*/ 5996 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 5997 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 5998 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 5999 6000 /* Verify write with offset on first page */ 6001 spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL); 6002 poll_threads(); 6003 CU_ASSERT(g_bserrno == 0); 6004 6005 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6006 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6007 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6008 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6009 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6010 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 6011 6012 /* Verify write with offset on first page */ 6013 spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL); 6014 poll_threads(); 6015 6016 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6017 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6018 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6019 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6020 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6021 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 6022 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 6023 6024 /* Verify write with offset on second page */ 6025 spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL); 6026 poll_threads(); 6027 6028 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 6029 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6030 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6031 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6032 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6033 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 6034 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 6035 6036 /* Verify write across multiple pages */ 6037 spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL); 6038 poll_threads(); 6039 6040 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */ 6041 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6042 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6043 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6044 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6045 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6046 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 6047 6048 /* Verify write across multiple clusters */ 6049 spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL); 6050 poll_threads(); 6051 6052 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 6053 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6054 6055 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6056 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6057 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6058 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6059 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6060 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6061 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6062 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 
4 * 512) == 0); 6063 6064 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6065 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 6066 6067 /* Verify write to second cluster */ 6068 spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL); 6069 poll_threads(); 6070 6071 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 6072 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6073 6074 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6075 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 6076 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6077 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6078 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6079 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6080 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6081 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 6082 6083 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6084 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 6085 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 6086 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 6087 } 6088 6089 static void 6090 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6091 { 6092 uint8_t payload_read[64 * 512]; 6093 uint8_t payload_ff[64 * 512]; 6094 uint8_t payload_aa[64 * 512]; 6095 uint8_t payload_00[64 * 512]; 6096 6097 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6098 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6099 memset(payload_00, 0x00, sizeof(payload_00)); 6100 6101 /* Read only first io unit */ 6102 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6103 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6104 * payload_read: F000 0000 | 0000 0000 ... */ 6105 memset(payload_read, 0x00, sizeof(payload_read)); 6106 spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL); 6107 poll_threads(); 6108 CU_ASSERT(g_bserrno == 0); 6109 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6110 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 6111 6112 /* Read four io_units starting from offset = 2 6113 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6114 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6115 * payload_read: F0AA 0000 | 0000 0000 ... */ 6116 6117 memset(payload_read, 0x00, sizeof(payload_read)); 6118 spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL); 6119 poll_threads(); 6120 CU_ASSERT(g_bserrno == 0); 6121 6122 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6123 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6124 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 6125 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 6126 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6127 6128 /* Read eight io_units across multiple pages 6129 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6130 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6131 * payload_read: AAAA AAAA | 0000 0000 ... 
*/ 6132 memset(payload_read, 0x00, sizeof(payload_read)); 6133 spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL); 6134 poll_threads(); 6135 CU_ASSERT(g_bserrno == 0); 6136 6137 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 6138 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6139 6140 /* Read eight io_units across multiple clusters 6141 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 6142 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6143 * payload_read: FFFF FFFF | 0000 0000 ... */ 6144 memset(payload_read, 0x00, sizeof(payload_read)); 6145 spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL); 6146 poll_threads(); 6147 CU_ASSERT(g_bserrno == 0); 6148 6149 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 6150 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6151 6152 /* Read four io_units from second cluster 6153 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6154 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 6155 * payload_read: 00FF 0000 | 0000 0000 ... */ 6156 memset(payload_read, 0x00, sizeof(payload_read)); 6157 spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL); 6158 poll_threads(); 6159 CU_ASSERT(g_bserrno == 0); 6160 6161 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 6162 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 6163 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6164 6165 /* Read second cluster 6166 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6167 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6168 * payload_read: FFFF 0000 | 0000 FF00 ... 
*/ 6169 memset(payload_read, 0x00, sizeof(payload_read)); 6170 spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL); 6171 poll_threads(); 6172 CU_ASSERT(g_bserrno == 0); 6173 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6174 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6175 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6176 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6177 6178 /* Read whole two clusters 6179 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6180 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6181 memset(payload_read, 0x00, sizeof(payload_read)); 6182 spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL); 6183 poll_threads(); 6184 CU_ASSERT(g_bserrno == 0); 6185 6186 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6187 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6188 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6189 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6190 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6191 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6192 6193 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6194 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6195 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6196 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6197 } 6198 6199 6200 static void 6201 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6202 { 6203 uint8_t payload_ff[64 * 512]; 6204 uint8_t payload_aa[64 * 512]; 6205 uint8_t payload_00[64 * 512]; 6206 uint8_t *cluster0, *cluster1; 6207 6208 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6209 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6210 memset(payload_00, 0x00, sizeof(payload_00)); 6211 6212 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 6213 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6214 6215 /* Unmap */ 6216 spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL); 6217 poll_threads(); 6218 6219 CU_ASSERT(g_bserrno == 0); 6220 6221 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 6222 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 6223 } 6224 6225 static void 6226 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6227 { 6228 uint8_t payload_ff[64 * 512]; 6229 uint8_t payload_aa[64 * 512]; 6230 uint8_t payload_00[64 * 512]; 6231 uint8_t *cluster0, *cluster1; 6232 6233 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6234 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6235 memset(payload_00, 0x00, sizeof(payload_00)); 6236 6237 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 6238 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6239 6240 /* Write zeroes */ 6241 spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL); 6242 poll_threads(); 6243 6244 CU_ASSERT(g_bserrno == 0); 6245 6246 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0); 6247 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0); 6248 } 6249 6250 6251 static void 6252 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 
6253 { 6254 uint8_t payload_ff[64 * 512]; 6255 uint8_t payload_aa[64 * 512]; 6256 uint8_t payload_00[64 * 512]; 6257 uint8_t *cluster0, *cluster1; 6258 struct iovec iov[4]; 6259 6260 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6261 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6262 memset(payload_00, 0x00, sizeof(payload_00)); 6263 6264 /* Try to perform I/O with io unit = 512 */ 6265 iov[0].iov_base = payload_ff; 6266 iov[0].iov_len = 1 * 512; 6267 spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 6268 poll_threads(); 6269 CU_ASSERT(g_bserrno == 0); 6270 6271 /* If thin provisioned is set cluster should be allocated now */ 6272 SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0); 6273 cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen]; 6274 6275 /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character. 6276 * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */ 6277 /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6278 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6279 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0); 6280 6281 /* Verify write with offset on first page */ 6282 iov[0].iov_base = payload_ff; 6283 iov[0].iov_len = 1 * 512; 6284 spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL); 6285 poll_threads(); 6286 CU_ASSERT(g_bserrno == 0); 6287 6288 /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6289 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6290 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6291 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6292 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6293 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0); 6294 6295 /* Verify write with offset on first page */ 6296 iov[0].iov_base = payload_ff; 6297 iov[0].iov_len = 4 * 512; 6298 spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL); 6299 poll_threads(); 6300 6301 /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6302 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6303 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6304 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6305 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6306 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0); 6307 CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0); 6308 6309 /* Verify write with offset on second page */ 6310 iov[0].iov_base = payload_ff; 6311 iov[0].iov_len = 4 * 512; 6312 spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL); 6313 poll_threads(); 6314 6315 /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */ 6316 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6317 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6318 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6319 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6320 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0); 6321 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 6322 6323 /* Verify write across multiple pages */ 6324 iov[0].iov_base = payload_aa; 6325 iov[0].iov_len = 8 * 512; 6326 spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL); 6327 poll_threads(); 6328 6329 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 
0000 | 0000 0000 ] */ 6330 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6331 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6332 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6333 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6334 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6335 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0); 6336 6337 /* Verify write across multiple clusters */ 6338 6339 iov[0].iov_base = payload_ff; 6340 iov[0].iov_len = 8 * 512; 6341 spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL); 6342 poll_threads(); 6343 6344 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 6345 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6346 6347 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6348 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */ 6349 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6350 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6351 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6352 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6353 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6354 CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0); 6355 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 6356 6357 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6358 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0); 6359 6360 /* Verify write to second cluster */ 6361 6362 iov[0].iov_base = payload_ff; 6363 iov[0].iov_len = 2 * 512; 6364 spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL); 6365 poll_threads(); 6366 6367 SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0); 6368 cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen]; 6369 6370 /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6371 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */ 6372 CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0); 6373 CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0); 6374 CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0); 6375 CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0); 6376 CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0); 6377 CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0); 6378 6379 CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0); 6380 CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0); 6381 CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0); 6382 CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0); 6383 } 6384 6385 static void 6386 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel) 6387 { 6388 uint8_t payload_read[64 * 512]; 6389 uint8_t payload_ff[64 * 512]; 6390 uint8_t payload_aa[64 * 512]; 6391 uint8_t payload_00[64 * 512]; 6392 struct iovec iov[4]; 6393 6394 memset(payload_ff, 0xFF, sizeof(payload_ff)); 6395 memset(payload_aa, 0xAA, sizeof(payload_aa)); 6396 memset(payload_00, 0x00, sizeof(payload_00)); 6397 6398 /* Read only first io unit */ 6399 /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6400 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6401 * payload_read: F000 0000 | 0000 0000 ... 
*/ 6402 memset(payload_read, 0x00, sizeof(payload_read)); 6403 iov[0].iov_base = payload_read; 6404 iov[0].iov_len = 1 * 512; 6405 spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL); 6406 poll_threads(); 6407 6408 CU_ASSERT(g_bserrno == 0); 6409 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6410 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0); 6411 6412 /* Read four io_units starting from offset = 2 6413 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6414 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6415 * payload_read: F0AA 0000 | 0000 0000 ... */ 6416 6417 memset(payload_read, 0x00, sizeof(payload_read)); 6418 iov[0].iov_base = payload_read; 6419 iov[0].iov_len = 4 * 512; 6420 spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL); 6421 poll_threads(); 6422 CU_ASSERT(g_bserrno == 0); 6423 6424 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6425 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6426 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0); 6427 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0); 6428 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6429 6430 /* Read eight io_units across multiple pages 6431 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6432 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6433 * payload_read: AAAA AAAA | 0000 0000 ... */ 6434 memset(payload_read, 0x00, sizeof(payload_read)); 6435 iov[0].iov_base = payload_read; 6436 iov[0].iov_len = 4 * 512; 6437 iov[1].iov_base = payload_read + 4 * 512; 6438 iov[1].iov_len = 4 * 512; 6439 spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL); 6440 poll_threads(); 6441 CU_ASSERT(g_bserrno == 0); 6442 6443 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0); 6444 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6445 6446 /* Read eight io_units across multiple clusters 6447 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ] 6448 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] 6449 * payload_read: FFFF FFFF | 0000 0000 ... */ 6450 memset(payload_read, 0x00, sizeof(payload_read)); 6451 iov[0].iov_base = payload_read; 6452 iov[0].iov_len = 2 * 512; 6453 iov[1].iov_base = payload_read + 2 * 512; 6454 iov[1].iov_len = 2 * 512; 6455 iov[2].iov_base = payload_read + 4 * 512; 6456 iov[2].iov_len = 2 * 512; 6457 iov[3].iov_base = payload_read + 6 * 512; 6458 iov[3].iov_len = 2 * 512; 6459 spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL); 6460 poll_threads(); 6461 CU_ASSERT(g_bserrno == 0); 6462 6463 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0); 6464 CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0); 6465 6466 /* Read four io_units from second cluster 6467 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6468 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ] 6469 * payload_read: 00FF 0000 | 0000 0000 ... 
*/ 6470 memset(payload_read, 0x00, sizeof(payload_read)); 6471 iov[0].iov_base = payload_read; 6472 iov[0].iov_len = 1 * 512; 6473 iov[1].iov_base = payload_read + 1 * 512; 6474 iov[1].iov_len = 3 * 512; 6475 spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL); 6476 poll_threads(); 6477 CU_ASSERT(g_bserrno == 0); 6478 6479 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0); 6480 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0); 6481 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0); 6482 6483 /* Read second cluster 6484 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ] 6485 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] 6486 * payload_read: FFFF 0000 | 0000 FF00 ... */ 6487 memset(payload_read, 0x00, sizeof(payload_read)); 6488 iov[0].iov_base = payload_read; 6489 iov[0].iov_len = 1 * 512; 6490 iov[1].iov_base = payload_read + 1 * 512; 6491 iov[1].iov_len = 2 * 512; 6492 iov[2].iov_base = payload_read + 3 * 512; 6493 iov[2].iov_len = 4 * 512; 6494 iov[3].iov_base = payload_read + 7 * 512; 6495 iov[3].iov_len = 25 * 512; 6496 spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL); 6497 poll_threads(); 6498 CU_ASSERT(g_bserrno == 0); 6499 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0); 6500 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0); 6501 CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0); 6502 CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0); 6503 6504 /* Read whole two clusters 6505 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ] 6506 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */ 6507 memset(payload_read, 0x00, sizeof(payload_read)); 6508 iov[0].iov_base = payload_read; 6509 iov[0].iov_len = 1 * 512; 6510 iov[1].iov_base = payload_read + 1 * 512; 6511 iov[1].iov_len = 8 * 512; 6512 iov[2].iov_base = payload_read + 9 * 512; 6513 iov[2].iov_len = 16 * 512; 6514 iov[3].iov_base = payload_read + 25 * 512; 6515 iov[3].iov_len = 39 * 512; 6516 spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL); 6517 poll_threads(); 6518 CU_ASSERT(g_bserrno == 0); 6519 6520 CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0); 6521 CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0); 6522 CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0); 6523 CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0); 6524 CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0); 6525 CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0); 6526 6527 CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0); 6528 CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0); 6529 CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0); 6530 CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0); 6531 } 6532 6533 static void 6534 blob_io_unit(void) 6535 { 6536 struct spdk_bs_opts bsopts; 6537 struct spdk_blob_opts opts; 6538 struct spdk_blob_store *bs; 6539 struct spdk_bs_dev *dev; 6540 struct spdk_blob *blob, *snapshot, *clone; 6541 spdk_blob_id blobid; 6542 struct spdk_io_channel *channel; 6543 6544 /* Create dev with 512 bytes io unit size */ 6545 6546 spdk_bs_opts_init(&bsopts, sizeof(bsopts)); 6547 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6548 snprintf(bsopts.bstype.bstype, 
sizeof(bsopts.bstype.bstype), "TESTTYPE"); 6549 6550 /* Try to initialize a new blob store with unsupported io_unit */ 6551 dev = init_dev(); 6552 dev->blocklen = 512; 6553 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6554 6555 /* Initialize a new blob store */ 6556 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6557 poll_threads(); 6558 CU_ASSERT(g_bserrno == 0); 6559 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6560 bs = g_bs; 6561 6562 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6563 channel = spdk_bs_alloc_io_channel(bs); 6564 6565 /* Create thick provisioned blob */ 6566 ut_spdk_blob_opts_init(&opts); 6567 opts.thin_provision = false; 6568 opts.num_clusters = 32; 6569 6570 blob = ut_blob_create_and_open(bs, &opts); 6571 blobid = spdk_blob_get_id(blob); 6572 6573 test_io_write(dev, blob, channel); 6574 test_io_read(dev, blob, channel); 6575 test_io_zeroes(dev, blob, channel); 6576 6577 test_iov_write(dev, blob, channel); 6578 test_iov_read(dev, blob, channel); 6579 6580 test_io_unmap(dev, blob, channel); 6581 6582 spdk_blob_close(blob, blob_op_complete, NULL); 6583 poll_threads(); 6584 CU_ASSERT(g_bserrno == 0); 6585 blob = NULL; 6586 g_blob = NULL; 6587 6588 /* Create thin provisioned blob */ 6589 6590 ut_spdk_blob_opts_init(&opts); 6591 opts.thin_provision = true; 6592 opts.num_clusters = 32; 6593 6594 blob = ut_blob_create_and_open(bs, &opts); 6595 blobid = spdk_blob_get_id(blob); 6596 6597 test_io_write(dev, blob, channel); 6598 test_io_read(dev, blob, channel); 6599 6600 test_io_zeroes(dev, blob, channel); 6601 6602 test_iov_write(dev, blob, channel); 6603 test_iov_read(dev, blob, channel); 6604 6605 /* Create snapshot */ 6606 6607 spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6608 poll_threads(); 6609 CU_ASSERT(g_bserrno == 0); 6610 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6611 blobid = g_blobid; 6612 6613 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6614 poll_threads(); 6615 CU_ASSERT(g_bserrno == 0); 6616 CU_ASSERT(g_blob != NULL); 6617 snapshot = g_blob; 6618 6619 spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL); 6620 poll_threads(); 6621 CU_ASSERT(g_bserrno == 0); 6622 CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID); 6623 blobid = g_blobid; 6624 6625 spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL); 6626 poll_threads(); 6627 CU_ASSERT(g_bserrno == 0); 6628 CU_ASSERT(g_blob != NULL); 6629 clone = g_blob; 6630 6631 test_io_read(dev, blob, channel); 6632 test_io_read(dev, snapshot, channel); 6633 test_io_read(dev, clone, channel); 6634 6635 test_iov_read(dev, blob, channel); 6636 test_iov_read(dev, snapshot, channel); 6637 test_iov_read(dev, clone, channel); 6638 6639 /* Inflate clone */ 6640 6641 spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL); 6642 poll_threads(); 6643 6644 CU_ASSERT(g_bserrno == 0); 6645 6646 test_io_read(dev, clone, channel); 6647 6648 test_io_unmap(dev, clone, channel); 6649 6650 test_iov_write(dev, clone, channel); 6651 test_iov_read(dev, clone, channel); 6652 6653 spdk_blob_close(blob, blob_op_complete, NULL); 6654 spdk_blob_close(snapshot, blob_op_complete, NULL); 6655 spdk_blob_close(clone, blob_op_complete, NULL); 6656 poll_threads(); 6657 CU_ASSERT(g_bserrno == 0); 6658 blob = NULL; 6659 g_blob = NULL; 6660 6661 spdk_bs_free_io_channel(channel); 6662 poll_threads(); 6663 6664 /* Unload the blob store */ 6665 spdk_bs_unload(bs, bs_op_complete, NULL); 6666 poll_threads(); 6667 CU_ASSERT(g_bserrno == 0); 6668 g_bs = NULL; 6669 g_blob = 
NULL; 6670 g_blobid = 0; 6671 } 6672 6673 static void 6674 blob_io_unit_compatibility(void) 6675 { 6676 struct spdk_bs_opts bsopts; 6677 struct spdk_blob_store *bs; 6678 struct spdk_bs_dev *dev; 6679 struct spdk_bs_super_block *super; 6680 6681 /* Create dev with 512 bytes io unit size */ 6682 6683 spdk_bs_opts_init(&bsopts, sizeof(bsopts)); 6684 bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; /* 8 * 4 = 32 io_unit */ 6685 snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE"); 6686 6687 /* Try to initialize a new blob store with unsupported io_unit */ 6688 dev = init_dev(); 6689 dev->blocklen = 512; 6690 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6691 6692 /* Initialize a new blob store */ 6693 spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL); 6694 poll_threads(); 6695 CU_ASSERT(g_bserrno == 0); 6696 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6697 bs = g_bs; 6698 6699 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512); 6700 6701 /* Unload the blob store */ 6702 spdk_bs_unload(bs, bs_op_complete, NULL); 6703 poll_threads(); 6704 CU_ASSERT(g_bserrno == 0); 6705 6706 /* Modify super block to behave like older version. 6707 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */ 6708 super = (struct spdk_bs_super_block *)&g_dev_buffer[0]; 6709 super->io_unit_size = 0; 6710 super->crc = blob_md_page_calc_crc(super); 6711 6712 dev = init_dev(); 6713 dev->blocklen = 512; 6714 dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen; 6715 6716 spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL); 6717 poll_threads(); 6718 CU_ASSERT(g_bserrno == 0); 6719 SPDK_CU_ASSERT_FATAL(g_bs != NULL); 6720 bs = g_bs; 6721 6722 CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE); 6723 6724 /* Unload the blob store */ 6725 spdk_bs_unload(bs, bs_op_complete, NULL); 6726 poll_threads(); 6727 CU_ASSERT(g_bserrno == 0); 6728 6729 g_bs = NULL; 6730 g_blob = NULL; 6731 g_blobid = 0; 6732 } 6733 6734 static void 6735 first_sync_complete(void *cb_arg, int bserrno) 6736 { 6737 struct spdk_blob *blob = cb_arg; 6738 int rc; 6739 6740 CU_ASSERT(bserrno == 0); 6741 rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1); 6742 CU_ASSERT(rc == 0); 6743 CU_ASSERT(g_bserrno == -1); 6744 6745 /* Keep g_bserrno at -1, only the 6746 * second sync completion should set it at 0. 
static void
first_sync_complete(void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	int rc;

	CU_ASSERT(bserrno == 0);
	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bserrno == -1);

	/* Keep g_bserrno at -1; only the
	 * second sync completion should set it to 0. */
}

static void
second_sync_complete(void *cb_arg, int bserrno)
{
	struct spdk_blob *blob = cb_arg;
	const void *value;
	size_t value_len;
	int rc;

	CU_ASSERT(bserrno == 0);

	/* Verify that the first sync completion had a chance to execute */
	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen("second") + 1);
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);

	CU_ASSERT(g_bserrno == -1);
	g_bserrno = bserrno;
}

static void
blob_simultaneous_operations(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	int rc;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create a snapshot and try to delete the blob at the same time:
	 * - the snapshot should be created successfully
	 * - the delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Snapshot creation success */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Inflate the blob and try to delete it at the same time:
	 * - the blob should be inflated successfully
	 * - the delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Inflation success */
	CU_ASSERT(g_bserrno == 0);

	/* Clone the snapshot and try to delete the snapshot at the same time:
	 * - the snapshot should be cloned successfully
	 * - the delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Clone created */
	CU_ASSERT(g_bserrno == 0);

	/* Resize the blob and try to delete it at the same time:
	 * - the blob should be resized successfully
	 * - the delete operation should fail with -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Blob resized successfully */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Issue two consecutive blob syncs; neither should fail.
	 * Force the sync to actually occur by marking the blob dirty each time.
	 * Starting the sync is not enough to complete the operation,
	 * since disk I/O is required to complete it. */
	g_bserrno = -1;

	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
	CU_ASSERT(rc == 0);
	spdk_blob_sync_md(blob, first_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	spdk_blob_sync_md(blob, second_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, snapshot);
	ut_blob_close_and_delete(bs, blob);
}
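/*
 * Verify that blob metadata pages are persisted and released correctly: an
 * xattr large enough to spill onto extra md pages is added, then removed while
 * a sync of the "added" state may still be in flight, and after a blob store
 * reload the xattr must be gone and the md page count back to its original
 * value.
 */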
static void
blob_persist_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;
	char *xattr;
	size_t xattr_length;
	int rc;
	uint32_t page_count_clear, page_count_xattr;
	uint64_t poller_iterations;
	bool run_poller;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Save the number of md pages used right after creation of the blob.
	 * This should be consistent after removing the xattr. */
	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);

	/* Add an xattr with the maximum descriptor length to exceed a single metadata page. */
	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
		       strlen("large_xattr");
	xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);

	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);

	/* Save the number of md pages used after adding the large xattr */
	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);

	/* Add the xattr to the blob and sync it. While the sync is occurring, remove the
	 * xattr and sync again. Interrupt the first sync after an increasing number of
	 * poller iterations, until it succeeds.
	 * The expectation is that after the second sync completes no xattr is saved in
	 * the metadata. */
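	/* The first sync is given only poller_iterations polls on thread 0; until that
	 * budget is enough for the md write to complete, the xattr removal and second
	 * sync below are issued while the first persist is still in flight. */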
	poller_iterations = 1;
	run_poller = true;
	while (run_poller) {
		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == 0);
		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_thread_times(0, poller_iterations);
		if (g_bserrno == 0) {
			/* The poller iteration count was high enough for the first sync to complete.
			 * Verify that the blob takes up enough md pages to store the xattr. */
			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
			run_poller = false;
		}
		rc = spdk_blob_remove_xattr(blob, "large_xattr");
		SPDK_CU_ASSERT_FATAL(rc == 0);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);

		/* Reload the bs and re-open the blob to verify that the xattr was not persisted. */
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		ut_bs_reload(&bs, NULL);

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);

		poller_iterations++;
		/* Stop at a high iteration count to prevent an infinite loop.
		 * This value should be enough for the first md sync to complete in any case. */
		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
	}

	free(xattr);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
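/*
 * Create a blob with two chained snapshots, then decouple the newer snapshot
 * from its parent and verify that its parent reference is cleared and that
 * every cluster was copied rather than shared with the older snapshot.
 */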
static void
blob_decouple_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot1, *snapshot2;
	struct spdk_io_channel *channel;
	spdk_blob_id blobid, snapshotid;
	uint64_t cluster;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create first snapshot */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot1 = g_blob;

	/* Create the second one */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);

	/* Now decouple the second snapshot forcing it to copy the written clusters */
	spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that the snapshot has been decoupled and that the clusters have been copied */
	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
	for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
				    snapshot1->active.clusters[cluster]);
	}

	spdk_bs_free_io_channel(channel);

	ut_blob_close_and_delete(bs, snapshot2);
	ut_blob_close_and_delete(bs, snapshot1);
	ut_blob_close_and_delete(bs, blob);
	poll_threads();
}

static void
suite_bs_setup(void)
{
	struct spdk_bs_dev *dev;

	dev = init_dev();
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bs != NULL);
}

static void
suite_bs_cleanup(void)
{
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
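/*
 * Helpers used by the suite fixtures and by individual tests: create a blob
 * (with default ut options when none are given), open it, and return the
 * handle; and the reverse, close an open blob and delete it from the blob
 * store, asserting success at every step.
 */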
static struct spdk_blob *
ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
{
	struct spdk_blob *blob;
	struct spdk_blob_opts create_blob_opts;
	spdk_blob_id blobid;

	if (blob_opts == NULL) {
		ut_spdk_blob_opts_init(&create_blob_opts);
		blob_opts = &create_blob_opts;
	}

	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;
	g_blobid = -1;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	g_blob = NULL;
	g_bserrno = -1;

	return blob;
}

static void
ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
{
	spdk_blob_id blobid = spdk_blob_get_id(blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
}

static void
suite_blob_setup(void)
{
	suite_bs_setup();
	CU_ASSERT(g_bs != NULL);

	g_blob = ut_blob_create_and_open(g_bs, NULL);
	CU_ASSERT(g_blob != NULL);
}

static void
suite_blob_cleanup(void)
{
	ut_blob_close_and_delete(g_bs, g_blob);
	CU_ASSERT(g_blob == NULL);

	suite_bs_cleanup();
	CU_ASSERT(g_bs == NULL);
}
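/*
 * Test runner: registers three suites ("blob" with no fixture, "blob_bs" with
 * a fresh blob store per test, "blob_blob" with a blob store plus an open
 * blob) and runs the full set twice, first with the extent table feature
 * disabled and then enabled, accumulating failures from both passes.
 */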
int main(int argc, char **argv)
{
	CU_pSuite suite, suite_bs, suite_blob;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("blob", NULL, NULL);
	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
			suite_bs_setup, suite_bs_cleanup);
	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
			suite_blob_setup, suite_blob_cleanup);

	CU_ADD_TEST(suite, blob_init);
	CU_ADD_TEST(suite_bs, blob_open);
	CU_ADD_TEST(suite_bs, blob_create);
	CU_ADD_TEST(suite_bs, blob_create_loop);
	CU_ADD_TEST(suite_bs, blob_create_fail);
	CU_ADD_TEST(suite_bs, blob_create_internal);
	CU_ADD_TEST(suite_bs, blob_create_zero_extent);
	CU_ADD_TEST(suite, blob_thin_provision);
	CU_ADD_TEST(suite_bs, blob_snapshot);
	CU_ADD_TEST(suite_bs, blob_clone);
	CU_ADD_TEST(suite_bs, blob_inflate);
	CU_ADD_TEST(suite_bs, blob_delete);
	CU_ADD_TEST(suite_bs, blob_resize_test);
	CU_ADD_TEST(suite, blob_read_only);
	CU_ADD_TEST(suite_bs, channel_ops);
	CU_ADD_TEST(suite_bs, blob_super);
	CU_ADD_TEST(suite_blob, blob_write);
	CU_ADD_TEST(suite_blob, blob_read);
	CU_ADD_TEST(suite_blob, blob_rw_verify);
	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
	CU_ADD_TEST(suite_bs, blob_unmap);
	CU_ADD_TEST(suite_bs, blob_iter);
	CU_ADD_TEST(suite_blob, blob_xattr);
	CU_ADD_TEST(suite_bs, blob_parse_md);
	CU_ADD_TEST(suite, bs_load);
	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
	CU_ADD_TEST(suite_bs, bs_unload);
	CU_ADD_TEST(suite, bs_cluster_sz);
	CU_ADD_TEST(suite_bs, bs_usable_clusters);
	CU_ADD_TEST(suite, bs_resize_md);
	CU_ADD_TEST(suite, bs_destroy);
	CU_ADD_TEST(suite, bs_type);
	CU_ADD_TEST(suite, bs_super_block);
	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
	CU_ADD_TEST(suite, blob_serialize_test);
	CU_ADD_TEST(suite_bs, blob_crc);
	CU_ADD_TEST(suite, super_block_crc);
	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
	CU_ADD_TEST(suite_bs, blob_flags);
	CU_ADD_TEST(suite_bs, bs_version);
	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
	CU_ADD_TEST(suite, bs_load_iter_test);
	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
	CU_ADD_TEST(suite, blob_relations);
	CU_ADD_TEST(suite, blob_relations2);
	CU_ADD_TEST(suite, blob_relations3);
	CU_ADD_TEST(suite, blobstore_clean_power_failure);
	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
	CU_ADD_TEST(suite_bs, blob_inflate_rw);
	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
	CU_ADD_TEST(suite, blob_io_unit);
	CU_ADD_TEST(suite, blob_io_unit_compatibility);
	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
	CU_ADD_TEST(suite_bs, blob_persist_test);
	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);

	allocate_threads(2);
	set_thread(0);

	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	g_use_extent_table = false;
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	g_use_extent_table = true;
	CU_basic_run_tests();
	num_failures += CU_get_number_of_failures();
	CU_cleanup_registry();

	free(g_dev_buffer);

	free_threads();

	return num_failures;
}