/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "reduce/reduce.c"
#include "spdk_internal/mock.h"
#include "common/lib/test_env.c"

static struct spdk_reduce_vol *g_vol;
static int g_reduce_errno;
static char *g_volatile_pm_buf;
static size_t g_volatile_pm_buf_len;
static char *g_persistent_pm_buf;
static size_t g_persistent_pm_buf_len;
static char *g_backing_dev_buf;
static char g_path[REDUCE_PATH_MAX];
static char *g_decomp_buf;

#define TEST_MD_PATH "/tmp"

enum ut_reduce_bdev_io_type {
	UT_REDUCE_IO_READV = 1,
	UT_REDUCE_IO_WRITEV = 2,
	UT_REDUCE_IO_UNMAP = 3,
};

struct ut_reduce_bdev_io {
	enum ut_reduce_bdev_io_type type;
	struct spdk_reduce_backing_dev *backing_dev;
	struct iovec *iov;
	int iovcnt;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_reduce_vol_cb_args *args;
	TAILQ_ENTRY(ut_reduce_bdev_io) link;
};

static bool g_defer_bdev_io = false;
static TAILQ_HEAD(, ut_reduce_bdev_io) g_pending_bdev_io =
	TAILQ_HEAD_INITIALIZER(g_pending_bdev_io);
static uint32_t g_pending_bdev_io_count = 0;
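
/*
 * The pmem_* functions below stand in for libpmem.  The volatile mapping
 * returned by pmem_map_file() is backed by a separate "persistent" buffer,
 * and pmem_persist()/pmem_msync() copy data into it, so the tests can
 * inspect exactly what would have survived an unload/load cycle.
 */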
static void
sync_pm_buf(const void *addr, size_t length)
{
	uint64_t offset = (char *)addr - g_volatile_pm_buf;

	memcpy(&g_persistent_pm_buf[offset], addr, length);
}

int
pmem_msync(const void *addr, size_t length)
{
	sync_pm_buf(addr, length);
	return 0;
}

void
pmem_persist(const void *addr, size_t len)
{
	sync_pm_buf(addr, len);
}

static void
get_pm_file_size(void)
{
	struct spdk_reduce_vol_params params;
	uint64_t pm_size, expected_pm_size;

	params.backing_io_unit_size = 4096;
	params.chunk_size = 4096 * 4;
	params.vol_size = 4096 * 4 * 100;

	pm_size = _get_pm_file_size(&params);
	expected_pm_size = sizeof(struct spdk_reduce_vol_superblock);
	/* 100 chunks in logical map * 8 bytes per chunk */
	expected_pm_size += 100 * sizeof(uint64_t);
	/* 100 chunks * (chunk struct size + 4 backing io units per chunk * 8 bytes per backing io unit) */
	expected_pm_size += 100 * (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
	/* reduce allocates some extra chunks too, for in-flight writes when the logical map
	 * is full.  REDUCE_NUM_EXTRA_CHUNKS is a private #define in reduce.c.  Here we need
	 * that number of chunks times (chunk struct size + 4 backing io units per chunk *
	 * 8 bytes per backing io unit).
	 */
	expected_pm_size += REDUCE_NUM_EXTRA_CHUNKS *
			    (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
	/* reduce will add some padding so numbers may not match exactly.  Make sure
	 * they are close though.
	 */
	CU_ASSERT((pm_size - expected_pm_size) <= REDUCE_PM_SIZE_ALIGNMENT);
}

static void
get_vol_size(void)
{
	uint64_t chunk_size, backing_dev_size;

	chunk_size = 16 * 1024;
	backing_dev_size = 16 * 1024 * 1000;
	CU_ASSERT(_get_vol_size(chunk_size, backing_dev_size) < backing_dev_size);
}

void *
pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
	      size_t *mapped_lenp, int *is_pmemp)
{
	CU_ASSERT(g_volatile_pm_buf == NULL);
	snprintf(g_path, sizeof(g_path), "%s", path);
	*is_pmemp = 1;

	if (g_persistent_pm_buf == NULL) {
		g_persistent_pm_buf = calloc(1, len);
		g_persistent_pm_buf_len = len;
		SPDK_CU_ASSERT_FATAL(g_persistent_pm_buf != NULL);
	}

	*mapped_lenp = g_persistent_pm_buf_len;
	g_volatile_pm_buf = calloc(1, g_persistent_pm_buf_len);
	SPDK_CU_ASSERT_FATAL(g_volatile_pm_buf != NULL);
	memcpy(g_volatile_pm_buf, g_persistent_pm_buf, g_persistent_pm_buf_len);
	g_volatile_pm_buf_len = g_persistent_pm_buf_len;

	return g_volatile_pm_buf;
}

int
pmem_unmap(void *addr, size_t len)
{
	CU_ASSERT(addr == g_volatile_pm_buf);
	CU_ASSERT(len == g_volatile_pm_buf_len);
	free(g_volatile_pm_buf);
	g_volatile_pm_buf = NULL;
	g_volatile_pm_buf_len = 0;

	return 0;
}

static void
persistent_pm_buf_destroy(void)
{
	CU_ASSERT(g_persistent_pm_buf != NULL);
	free(g_persistent_pm_buf);
	g_persistent_pm_buf = NULL;
	g_persistent_pm_buf_len = 0;
}

static void
unlink_cb(void)
{
	persistent_pm_buf_destroy();
}

static void
init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}

static void
load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}

static void
unload_cb(void *cb_arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
init_failure(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	backing_dev.blocklen = 512;
	/* This blockcnt is too small for a reduce vol - there needs to be
	 * enough space for at least REDUCE_NUM_EXTRA_CHUNKS + 1 chunks.
	 */
	backing_dev.blockcnt = 20;

	params.vol_size = 0;
	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = backing_dev.blocklen;
	params.logical_block_size = 512;

	/* backing_dev has an invalid size.  This should fail. */
	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);

	/* backing_dev now has a valid size, but it still has null function
	 * pointers.  This should fail.
	 */
	backing_dev.blockcnt = 20000;

	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);
}
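
/*
 * Backing device stubs.  When g_defer_bdev_io is false, the readv/writev/unmap
 * callbacks complete synchronously against g_backing_dev_buf.  When it is true,
 * they are queued on g_pending_bdev_io instead, and a test drains the queue
 * later with backing_dev_io_execute() to exercise asynchronous completion.
 */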
static void
backing_dev_readv_execute(struct spdk_reduce_backing_dev *backing_dev,
			  struct iovec *iov, int iovcnt,
			  uint64_t lba, uint32_t lba_count,
			  struct spdk_reduce_vol_cb_args *args)
{
	char *offset;
	int i;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	for (i = 0; i < iovcnt; i++) {
		memcpy(iov[i].iov_base, offset, iov[i].iov_len);
		offset += iov[i].iov_len;
	}
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_insert_io(enum ut_reduce_bdev_io_type type, struct spdk_reduce_backing_dev *backing_dev,
		      struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
		      struct spdk_reduce_vol_cb_args *args)
{
	struct ut_reduce_bdev_io *ut_bdev_io;

	ut_bdev_io = calloc(1, sizeof(*ut_bdev_io));
	SPDK_CU_ASSERT_FATAL(ut_bdev_io != NULL);

	ut_bdev_io->type = type;
	ut_bdev_io->backing_dev = backing_dev;
	ut_bdev_io->iov = iov;
	ut_bdev_io->iovcnt = iovcnt;
	ut_bdev_io->lba = lba;
	ut_bdev_io->lba_count = lba_count;
	ut_bdev_io->args = args;
	TAILQ_INSERT_TAIL(&g_pending_bdev_io, ut_bdev_io, link);
	g_pending_bdev_io_count++;
}

static void
backing_dev_readv(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_readv_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_READV, backing_dev, iov, iovcnt, lba, lba_count, args);
}

static void
backing_dev_writev_execute(struct spdk_reduce_backing_dev *backing_dev,
			   struct iovec *iov, int iovcnt,
			   uint64_t lba, uint32_t lba_count,
			   struct spdk_reduce_vol_cb_args *args)
{
	char *offset;
	int i;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	for (i = 0; i < iovcnt; i++) {
		memcpy(offset, iov[i].iov_base, iov[i].iov_len);
		offset += iov[i].iov_len;
	}
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_writev(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
		   uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_writev_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_WRITEV, backing_dev, iov, iovcnt, lba, lba_count, args);
}

static void
backing_dev_unmap_execute(struct spdk_reduce_backing_dev *backing_dev,
			  uint64_t lba, uint32_t lba_count,
			  struct spdk_reduce_vol_cb_args *args)
{
	char *offset;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	memset(offset, 0, lba_count * backing_dev->blocklen);
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_unmap(struct spdk_reduce_backing_dev *backing_dev,
		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_unmap_execute(backing_dev, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_UNMAP, backing_dev, NULL, 0, lba, lba_count, args);
}

static void
backing_dev_io_execute(uint32_t count)
{
	struct ut_reduce_bdev_io *ut_bdev_io;
	uint32_t done = 0;

	CU_ASSERT(g_defer_bdev_io == true);
	while (!TAILQ_EMPTY(&g_pending_bdev_io) && (count == 0 || done < count)) {
		ut_bdev_io = TAILQ_FIRST(&g_pending_bdev_io);
		TAILQ_REMOVE(&g_pending_bdev_io, ut_bdev_io, link);
		g_pending_bdev_io_count--;
		switch (ut_bdev_io->type) {
		case UT_REDUCE_IO_READV:
			backing_dev_readv_execute(ut_bdev_io->backing_dev,
						  ut_bdev_io->iov, ut_bdev_io->iovcnt,
						  ut_bdev_io->lba, ut_bdev_io->lba_count,
						  ut_bdev_io->args);
			break;
		case UT_REDUCE_IO_WRITEV:
			backing_dev_writev_execute(ut_bdev_io->backing_dev,
						   ut_bdev_io->iov, ut_bdev_io->iovcnt,
						   ut_bdev_io->lba, ut_bdev_io->lba_count,
						   ut_bdev_io->args);
			break;
		case UT_REDUCE_IO_UNMAP:
			backing_dev_unmap_execute(ut_bdev_io->backing_dev,
						  ut_bdev_io->lba, ut_bdev_io->lba_count,
						  ut_bdev_io->args);
			break;
		default:
			CU_ASSERT(false);
			break;
		}
		free(ut_bdev_io);
		done++;
	}
}
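
/*
 * The unit test "compression" algorithm is a trivial run-length encoding:
 * the compressed stream is a sequence of (count, value) byte pairs, where
 * count never exceeds UINT8_MAX.  For both helpers, *compressed_len is
 * in/out - the output buffer capacity on entry and the number of bytes
 * produced on success; -ENOSPC is returned if the output would not fit.
 */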
static int
ut_compress(char *outbuf, uint32_t *compressed_len, char *inbuf, uint32_t inbuflen)
{
	uint32_t len = 0;
	uint8_t count;
	char last;

	while (true) {
		if (inbuflen == 0) {
			*compressed_len = len;
			return 0;
		}

		if (*compressed_len < (len + 2)) {
			return -ENOSPC;
		}

		last = *inbuf;
		count = 1;
		inbuflen--;
		inbuf++;

		while (inbuflen > 0 && *inbuf == last && count < UINT8_MAX) {
			count++;
			inbuflen--;
			inbuf++;
		}

		outbuf[len] = count;
		outbuf[len + 1] = last;
		len += 2;
	}
}

static int
ut_decompress(uint8_t *outbuf, uint32_t *compressed_len, uint8_t *inbuf, uint32_t inbuflen)
{
	uint32_t len = 0;

	SPDK_CU_ASSERT_FATAL(inbuflen % 2 == 0);

	while (true) {
		if (inbuflen == 0) {
			*compressed_len = len;
			return 0;
		}

		if ((len + inbuf[0]) > *compressed_len) {
			return -ENOSPC;
		}

		memset(outbuf, inbuf[1], inbuf[0]);
		outbuf += inbuf[0];
		len += inbuf[0];
		inbuflen -= 2;
		inbuf += 2;
	}
}
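
/*
 * Fill a buffer with a known pattern: each byte value is repeated 'repeat'
 * times before being incremented (wrapping at 255), which gives the tests
 * precise control over how compressible the data is under the RLE scheme.
 */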
static void
ut_build_data_buffer(uint8_t *data, uint32_t data_len, uint8_t init_val, uint32_t repeat)
{
	uint32_t _repeat = repeat;

	SPDK_CU_ASSERT_FATAL(repeat > 0);

	while (data_len > 0) {
		*data = init_val;
		data++;
		data_len--;
		_repeat--;
		if (_repeat == 0) {
			init_val++;
			_repeat = repeat;
		}
	}
}
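
/*
 * Compress/decompress backing callbacks.  Both gather or scatter through
 * g_decomp_buf (sized to one chunk in backing_dev_init()) and delegate to
 * the RLE helpers above, reporting the resulting length via the callback.
 */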
static void
backing_dev_compress(struct spdk_reduce_backing_dev *backing_dev,
		     struct iovec *src_iov, int src_iovcnt,
		     struct iovec *dst_iov, int dst_iovcnt,
		     struct spdk_reduce_vol_cb_args *args)
{
	uint32_t compressed_len;
	uint64_t total_length = 0;
	char *buf = g_decomp_buf;
	int rc, i;

	CU_ASSERT(dst_iovcnt == 1);

	for (i = 0; i < src_iovcnt; i++) {
		memcpy(buf, src_iov[i].iov_base, src_iov[i].iov_len);
		buf += src_iov[i].iov_len;
		total_length += src_iov[i].iov_len;
	}

	compressed_len = dst_iov[0].iov_len;
	rc = ut_compress(dst_iov[0].iov_base, &compressed_len,
			 g_decomp_buf, total_length);

	args->cb_fn(args->cb_arg, rc ? rc : (int)compressed_len);
}

static void
backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
		       struct iovec *src_iov, int src_iovcnt,
		       struct iovec *dst_iov, int dst_iovcnt,
		       struct spdk_reduce_vol_cb_args *args)
{
	uint32_t decompressed_len = 0;
	char *buf = g_decomp_buf;
	int rc, i;

	CU_ASSERT(src_iovcnt == 1);

	for (i = 0; i < dst_iovcnt; i++) {
		decompressed_len += dst_iov[i].iov_len;
	}

	rc = ut_decompress(g_decomp_buf, &decompressed_len,
			   src_iov[0].iov_base, src_iov[0].iov_len);

	for (i = 0; i < dst_iovcnt; i++) {
		memcpy(dst_iov[i].iov_base, buf, dst_iov[i].iov_len);
		buf += dst_iov[i].iov_len;
	}

	args->cb_fn(args->cb_arg, rc ? rc : (int)decompressed_len);
}

static void
backing_dev_destroy(struct spdk_reduce_backing_dev *backing_dev)
{
	/* We don't free these buffers when the volume is unloaded, so that we can
	 * test init/unload/load scenarios.  They are only freed here, when a test
	 * is completely done with the backing device.
	 */
	free(g_backing_dev_buf);
	free(g_decomp_buf);
	g_backing_dev_buf = NULL;
}

static void
backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce_vol_params *params,
		 uint32_t backing_blocklen)
{
	int64_t size;

	size = 4 * 1024 * 1024;
	backing_dev->blocklen = backing_blocklen;
	backing_dev->blockcnt = size / backing_dev->blocklen;
	backing_dev->readv = backing_dev_readv;
	backing_dev->writev = backing_dev_writev;
	backing_dev->unmap = backing_dev_unmap;
	backing_dev->compress = backing_dev_compress;
	backing_dev->decompress = backing_dev_decompress;

	g_decomp_buf = calloc(1, params->chunk_size);
	SPDK_CU_ASSERT_FATAL(g_decomp_buf != NULL);

	g_backing_dev_buf = calloc(1, size);
	SPDK_CU_ASSERT_FATAL(g_backing_dev_buf != NULL);
}
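
/*
 * Initialize a volume and inspect the persistent metadata directly: the
 * superblock signature and params, a logical map initialized to
 * REDUCE_EMPTY_MAP_ENTRY, and a pm file path of the form
 * TEST_MD_PATH + "/" + <uuid string>.
 */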
static void
init_md(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};
	struct spdk_uuid uuid;
	uint64_t *entry;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	/* Confirm that reduce persisted the params to metadata. */
	CU_ASSERT(memcmp(g_persistent_pm_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_persistent_pm_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Now confirm that contents of pm_file after the superblock have been initialized
	 * to REDUCE_EMPTY_MAP_ENTRY.
	 */
	entry = (uint64_t *)(g_persistent_pm_buf + sizeof(struct spdk_reduce_vol_superblock));
	while (entry != (uint64_t *)(g_persistent_pm_buf + g_vol->pm_file.size)) {
		CU_ASSERT(*entry == REDUCE_EMPTY_MAP_ENTRY);
		entry++;
	}

	/* Check that the pm file path was constructed correctly.  It should be in
	 * the form:
	 * TEST_MD_PATH + "/" + <uuid string>
	 */
	CU_ASSERT(strncmp(&g_path[0], TEST_MD_PATH, strlen(TEST_MD_PATH)) == 0);
	CU_ASSERT(g_path[strlen(TEST_MD_PATH)] == '/');
	CU_ASSERT(spdk_uuid_parse(&uuid, &g_path[strlen(TEST_MD_PATH) + 1]) == 0);
	CU_ASSERT(spdk_uuid_compare(&uuid, spdk_reduce_vol_get_uuid(g_vol)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(g_volatile_pm_buf == NULL);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
_init_backing_dev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	/* Confirm that libreduce persisted the params to the backing device. */
	CU_ASSERT(memcmp(g_backing_dev_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_backing_dev_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Confirm that the path to the persistent memory metadata file was persisted to
	 * the backing device.
	 */
	CU_ASSERT(strncmp(g_path,
			  g_backing_dev_buf + REDUCE_BACKING_DEV_PATH_OFFSET,
			  REDUCE_PATH_MAX) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
init_backing_dev(void)
{
	_init_backing_dev(512);
	_init_backing_dev(4096);
}

static void
_load(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	char pmem_file_path[REDUCE_PATH_MAX];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	memcpy(pmem_file_path, g_path, sizeof(pmem_file_path));

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(g_path, pmem_file_path, sizeof(pmem_file_path)) == 0);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
load(void)
{
	_load(512);
	_load(4096);
}
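
/* Return the chunk map index recorded in the volume's logical map for the
 * chunk containing the given logical block offset.
 */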
static uint64_t
_vol_get_chunk_map_index(struct spdk_reduce_vol *vol, uint64_t offset)
{
	uint64_t logical_map_index = offset / vol->logical_blocks_per_chunk;

	return vol->pm_logical_map[logical_map_index];
}

static void
write_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
read_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}
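
/*
 * Write a full chunk twice and verify the chunk map bookkeeping: the second
 * write must allocate a new chunk map and free the old chunk map and its
 * backing io units.  Then unload/reload the volume to make sure the metadata
 * still loads cleanly.
 */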
static void
_write_maps(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	const int bufsize = 16 * 1024; /* chunk size */
	char buf[bufsize];
	uint32_t num_lbas, i;
	uint64_t old_chunk0_map_index, new_chunk0_map_index;
	struct spdk_reduce_chunk_map *old_chunk0_map, *new_chunk0_map;

	params.chunk_size = bufsize;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	num_lbas = bufsize / params.logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	for (i = 0; i < g_vol->params.vol_size / g_vol->params.chunk_size; i++) {
		CU_ASSERT(_vol_get_chunk_map_index(g_vol, i) == REDUCE_EMPTY_MAP_ENTRY);
	}

	ut_build_data_buffer(buf, bufsize, 0x00, 1);
	iov.iov_base = buf;
	iov.iov_len = bufsize;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	old_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(old_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == true);

	old_chunk0_map = _reduce_vol_get_chunk_map(g_vol, old_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(old_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	new_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(new_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(new_chunk0_map_index != old_chunk0_map_index);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, new_chunk0_map_index) == true);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == false);

	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == false);
	}

	new_chunk0_map = _reduce_vol_get_chunk_map(g_vol, new_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(new_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     new_chunk0_map->io_unit_index[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
write_maps(void)
{
	_write_maps(512);
	_write_maps(4096);
}
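
/*
 * Basic data path test: write small regions, read back every logical block in
 * the chunk to check both the written data and the zeroed remainder, and
 * repeat across unload/load cycles to confirm that the data and the allocation
 * bit arrays survive a reload.
 */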
static void
_read_write(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	char buf[16 * 1024]; /* chunk size */
	char compare_buf[16 * 1024];
	uint32_t i;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 2 512-byte logical blocks, starting at LBA 2. */
	memset(buf, 0xAA, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xAA, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Reload the volume and overwrite what we just wrote with 0xCC. */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	memset(buf, 0xCC, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xCC, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;

	/* Write 0xBB to 2 512-byte logical blocks, starting at LBA 37.
	 * This is writing into the second chunk of the volume.  This also
	 * enables implicitly checking that we reloaded the bit arrays
	 * correctly - making sure we don't use the first chunk map again
	 * for this new write - the first chunk map was already used by the
	 * write from before we unloaded and reloaded.
	 */
	memset(buf, 0xBB, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 37, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	for (i = 0; i < 2 * params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			memset(compare_buf, 0xCC, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		case 37:
		case 38:
			memset(compare_buf, 0xBB, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
read_write(void)
{
	_read_write(512);
	_read_write(4096);
}

static void
_readv_writev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov[REDUCE_MAX_IOVECS + 1];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* A writev with more than REDUCE_MAX_IOVECS iovecs should be rejected. */
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, iov, REDUCE_MAX_IOVECS + 1, 2, REDUCE_MAX_IOVECS + 1, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
readv_writev(void)
{
	_readv_writev(512);
	_readv_writev(4096);
}

static void
destroy_cb(void *ctx, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}
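
/*
 * Init, reload, then destroy the volume, and verify that a subsequent load
 * fails with -EILSEQ because the backing device no longer contains a valid
 * volume.
 */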
static void
destroy(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = -1;
	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	spdk_reduce_vol_destroy(&backing_dev, destroy_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = 0;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EILSEQ);

	backing_dev_destroy(&backing_dev);
}

/* This test primarily checks that the reduce unit test infrastructure for asynchronous
 * backing device I/O operations is working correctly.
 */
static void
defer_bdev_io(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[logical_block_size];
	char compare_buf[logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
	 * very compressible.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	backing_dev_io_execute(0);
	CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
	CU_ASSERT(g_reduce_errno == 0);

	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 1, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
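
/*
 * Issue a second write to a chunk that already has a write in flight and
 * verify that it is held back until the first write completes, then read
 * back both logical blocks to check that both writes landed.
 */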
static void
overlapped(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[2 * logical_block_size];
	char compare_buf[2 * logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = logical_block_size;
	g_reduce_errno = -100;
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
	 * very compressible.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	/* Now do an overlapped I/O to the same chunk. */
	spdk_reduce_vol_writev(g_vol, &iov, 1, 1, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* The second I/O overlaps with the first one.  So we should only see pending bdev_io
	 * related to the first I/O here - the second one won't start until the first one is
	 * completed.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	backing_dev_io_execute(0);
	CU_ASSERT(g_reduce_errno == 0);

	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = 2 * logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 2, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, 2 * logical_block_size) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

#define BUFSIZE 4096
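
/*
 * Exercise the RLE helpers themselves: runs are split at UINT8_MAX bytes,
 * and -ENOSPC is returned when the compressed output would not fit.
 */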
static void
compress_algorithm(void)
{
	uint8_t original_data[BUFSIZE];
	uint8_t compressed_data[BUFSIZE];
	uint8_t decompressed_data[BUFSIZE];
	uint32_t compressed_len, decompressed_len;
	int rc;

	ut_build_data_buffer(original_data, BUFSIZE, 0xAA, BUFSIZE);
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 2);
	CU_ASSERT(compressed_data[0] == UINT8_MAX);
	CU_ASSERT(compressed_data[1] == 0xAA);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == UINT8_MAX);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX + 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 4);
	CU_ASSERT(compressed_data[0] == UINT8_MAX);
	CU_ASSERT(compressed_data[1] == 0xAA);
	CU_ASSERT(compressed_data[2] == 1);
	CU_ASSERT(compressed_data[3] == 0xAA);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == UINT8_MAX + 1);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	ut_build_data_buffer(original_data, BUFSIZE, 0x00, 1);
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, 2048);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 4096);
	CU_ASSERT(compressed_data[0] == 1);
	CU_ASSERT(compressed_data[1] == 0);
	CU_ASSERT(compressed_data[4094] == 1);
	CU_ASSERT(compressed_data[4095] == 0xFF);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == 2048);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, 2049);
	CU_ASSERT(rc == -ENOSPC);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("reduce", NULL, NULL);

	CU_ADD_TEST(suite, get_pm_file_size);
	CU_ADD_TEST(suite, get_vol_size);
	CU_ADD_TEST(suite, init_failure);
	CU_ADD_TEST(suite, init_md);
	CU_ADD_TEST(suite, init_backing_dev);
	CU_ADD_TEST(suite, load);
	CU_ADD_TEST(suite, write_maps);
	CU_ADD_TEST(suite, read_write);
	CU_ADD_TEST(suite, readv_writev);
	CU_ADD_TEST(suite, destroy);
	CU_ADD_TEST(suite, defer_bdev_io);
	CU_ADD_TEST(suite, overlapped);
	CU_ADD_TEST(suite, compress_algorithm);

	g_unlink_path = g_path;
	g_unlink_callback = unlink_cb;

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}