1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk_cunit.h" 37 38 #include "reduce/reduce.c" 39 #include "spdk_internal/mock.h" 40 #include "common/lib/test_env.c" 41 42 static struct spdk_reduce_vol *g_vol; 43 static int g_reduce_errno; 44 static char *g_volatile_pm_buf; 45 static size_t g_volatile_pm_buf_len; 46 static char *g_persistent_pm_buf; 47 static size_t g_persistent_pm_buf_len; 48 static char *g_backing_dev_buf; 49 static char g_path[REDUCE_PATH_MAX]; 50 51 #define TEST_MD_PATH "/tmp" 52 53 enum ut_reduce_bdev_io_type { 54 UT_REDUCE_IO_READV = 1, 55 UT_REDUCE_IO_WRITEV = 2, 56 UT_REDUCE_IO_UNMAP = 3, 57 }; 58 59 struct ut_reduce_bdev_io { 60 enum ut_reduce_bdev_io_type type; 61 struct spdk_reduce_backing_dev *backing_dev; 62 struct iovec *iov; 63 int iovcnt; 64 uint64_t lba; 65 uint32_t lba_count; 66 struct spdk_reduce_vol_cb_args *args; 67 TAILQ_ENTRY(ut_reduce_bdev_io) link; 68 }; 69 70 static bool g_defer_bdev_io = false; 71 static TAILQ_HEAD(, ut_reduce_bdev_io) g_pending_bdev_io = 72 TAILQ_HEAD_INITIALIZER(g_pending_bdev_io); 73 static uint32_t g_pending_bdev_io_count = 0; 74 75 static void 76 sync_pm_buf(const void *addr, size_t length) 77 { 78 uint64_t offset = (char *)addr - g_volatile_pm_buf; 79 80 memcpy(&g_persistent_pm_buf[offset], addr, length); 81 } 82 83 int 84 pmem_msync(const void *addr, size_t length) 85 { 86 sync_pm_buf(addr, length); 87 return 0; 88 } 89 90 void 91 pmem_persist(const void *addr, size_t len) 92 { 93 sync_pm_buf(addr, len); 94 } 95 96 static void 97 get_pm_file_size(void) 98 { 99 struct spdk_reduce_vol_params params; 100 uint64_t pm_size, expected_pm_size; 101 102 params.backing_io_unit_size = 4096; 103 params.chunk_size = 4096 * 4; 104 params.vol_size = 4096 * 4 * 100; 105 106 pm_size = _get_pm_file_size(¶ms); 107 expected_pm_size = sizeof(struct spdk_reduce_vol_superblock); 108 /* 100 chunks in logical map * 8 bytes per chunk */ 109 expected_pm_size += 100 * sizeof(uint64_t); 110 /* 100 chunks * 4 backing io 
units per chunk * 8 bytes per backing io unit */ 111 expected_pm_size += 100 * 4 * sizeof(uint64_t); 112 /* reduce allocates some extra chunks too for in-flight writes when logical map 113 * is full. REDUCE_EXTRA_CHUNKS is a private #ifdef in reduce.c. 114 */ 115 expected_pm_size += REDUCE_NUM_EXTRA_CHUNKS * 4 * sizeof(uint64_t); 116 /* reduce will add some padding so numbers may not match exactly. Make sure 117 * they are close though. 118 */ 119 CU_ASSERT((pm_size - expected_pm_size) < REDUCE_PM_SIZE_ALIGNMENT); 120 } 121 122 static void 123 get_vol_size(void) 124 { 125 uint64_t chunk_size, backing_dev_size; 126 127 chunk_size = 16 * 1024; 128 backing_dev_size = 16 * 1024 * 1000; 129 CU_ASSERT(_get_vol_size(chunk_size, backing_dev_size) < backing_dev_size); 130 } 131 132 void * 133 pmem_map_file(const char *path, size_t len, int flags, mode_t mode, 134 size_t *mapped_lenp, int *is_pmemp) 135 { 136 CU_ASSERT(g_volatile_pm_buf == NULL); 137 snprintf(g_path, sizeof(g_path), "%s", path); 138 *is_pmemp = 1; 139 140 if (g_persistent_pm_buf == NULL) { 141 g_persistent_pm_buf = calloc(1, len); 142 g_persistent_pm_buf_len = len; 143 SPDK_CU_ASSERT_FATAL(g_persistent_pm_buf != NULL); 144 } 145 146 *mapped_lenp = g_persistent_pm_buf_len; 147 g_volatile_pm_buf = calloc(1, g_persistent_pm_buf_len); 148 SPDK_CU_ASSERT_FATAL(g_volatile_pm_buf != NULL); 149 memcpy(g_volatile_pm_buf, g_persistent_pm_buf, g_persistent_pm_buf_len); 150 g_volatile_pm_buf_len = g_persistent_pm_buf_len; 151 152 return g_volatile_pm_buf; 153 } 154 155 int 156 pmem_unmap(void *addr, size_t len) 157 { 158 CU_ASSERT(addr == g_volatile_pm_buf); 159 CU_ASSERT(len == g_volatile_pm_buf_len); 160 free(g_volatile_pm_buf); 161 g_volatile_pm_buf = NULL; 162 g_volatile_pm_buf_len = 0; 163 164 return 0; 165 } 166 167 static void 168 persistent_pm_buf_destroy(void) 169 { 170 CU_ASSERT(g_persistent_pm_buf != NULL); 171 free(g_persistent_pm_buf); 172 g_persistent_pm_buf = NULL; 173 g_persistent_pm_buf_len = 0; 174 } 
175 176 int __wrap_unlink(const char *path); 177 178 int 179 __wrap_unlink(const char *path) 180 { 181 if (strcmp(g_path, path) != 0) { 182 return ENOENT; 183 } 184 185 persistent_pm_buf_destroy(); 186 return 0; 187 } 188 189 static void 190 init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno) 191 { 192 g_vol = vol; 193 g_reduce_errno = reduce_errno; 194 } 195 196 static void 197 load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno) 198 { 199 g_vol = vol; 200 g_reduce_errno = reduce_errno; 201 } 202 203 static void 204 unload_cb(void *cb_arg, int reduce_errno) 205 { 206 g_reduce_errno = reduce_errno; 207 } 208 209 static void 210 init_failure(void) 211 { 212 struct spdk_reduce_vol_params params = {}; 213 struct spdk_reduce_backing_dev backing_dev = {}; 214 215 backing_dev.blocklen = 512; 216 /* This blockcnt is too small for a reduce vol - there needs to be 217 * enough space for at least REDUCE_NUM_EXTRA_CHUNKS + 1 chunks. 218 */ 219 backing_dev.blockcnt = 20; 220 221 params.vol_size = 0; 222 params.chunk_size = 16 * 1024; 223 params.backing_io_unit_size = backing_dev.blocklen; 224 params.logical_block_size = 512; 225 226 /* backing_dev has an invalid size. This should fail. */ 227 g_vol = NULL; 228 g_reduce_errno = 0; 229 spdk_reduce_vol_init(¶ms, &backing_dev, TEST_MD_PATH, init_cb, NULL); 230 CU_ASSERT(g_reduce_errno == -EINVAL); 231 SPDK_CU_ASSERT_FATAL(g_vol == NULL); 232 233 /* backing_dev now has valid size, but backing_dev still has null 234 * function pointers. This should fail. 
235 */ 236 backing_dev.blockcnt = 20000; 237 238 g_vol = NULL; 239 g_reduce_errno = 0; 240 spdk_reduce_vol_init(¶ms, &backing_dev, TEST_MD_PATH, init_cb, NULL); 241 CU_ASSERT(g_reduce_errno == -EINVAL); 242 SPDK_CU_ASSERT_FATAL(g_vol == NULL); 243 } 244 245 static void 246 backing_dev_readv_execute(struct spdk_reduce_backing_dev *backing_dev, 247 struct iovec *iov, int iovcnt, 248 uint64_t lba, uint32_t lba_count, 249 struct spdk_reduce_vol_cb_args *args) 250 { 251 char *offset; 252 int i; 253 254 offset = g_backing_dev_buf + lba * backing_dev->blocklen; 255 for (i = 0; i < iovcnt; i++) { 256 memcpy(iov[i].iov_base, offset, iov[i].iov_len); 257 offset += iov[i].iov_len; 258 } 259 args->cb_fn(args->cb_arg, 0); 260 } 261 262 static void 263 backing_dev_insert_io(enum ut_reduce_bdev_io_type type, struct spdk_reduce_backing_dev *backing_dev, 264 struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count, 265 struct spdk_reduce_vol_cb_args *args) 266 { 267 struct ut_reduce_bdev_io *ut_bdev_io; 268 269 ut_bdev_io = calloc(1, sizeof(*ut_bdev_io)); 270 SPDK_CU_ASSERT_FATAL(ut_bdev_io != NULL); 271 272 ut_bdev_io->type = type; 273 ut_bdev_io->backing_dev = backing_dev; 274 ut_bdev_io->iov = iov; 275 ut_bdev_io->iovcnt = iovcnt; 276 ut_bdev_io->lba = lba; 277 ut_bdev_io->lba_count = lba_count; 278 ut_bdev_io->args = args; 279 TAILQ_INSERT_TAIL(&g_pending_bdev_io, ut_bdev_io, link); 280 g_pending_bdev_io_count++; 281 } 282 283 static void 284 backing_dev_readv(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt, 285 uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args) 286 { 287 if (g_defer_bdev_io == false) { 288 CU_ASSERT(g_pending_bdev_io_count == 0); 289 CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io)); 290 backing_dev_readv_execute(backing_dev, iov, iovcnt, lba, lba_count, args); 291 return; 292 } 293 294 backing_dev_insert_io(UT_REDUCE_IO_READV, backing_dev, iov, iovcnt, lba, lba_count, args); 295 } 296 297 static void 298 
backing_dev_writev_execute(struct spdk_reduce_backing_dev *backing_dev, 299 struct iovec *iov, int iovcnt, 300 uint64_t lba, uint32_t lba_count, 301 struct spdk_reduce_vol_cb_args *args) 302 { 303 char *offset; 304 int i; 305 306 offset = g_backing_dev_buf + lba * backing_dev->blocklen; 307 for (i = 0; i < iovcnt; i++) { 308 memcpy(offset, iov[i].iov_base, iov[i].iov_len); 309 offset += iov[i].iov_len; 310 } 311 args->cb_fn(args->cb_arg, 0); 312 } 313 314 static void 315 backing_dev_writev(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt, 316 uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args) 317 { 318 if (g_defer_bdev_io == false) { 319 CU_ASSERT(g_pending_bdev_io_count == 0); 320 CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io)); 321 backing_dev_writev_execute(backing_dev, iov, iovcnt, lba, lba_count, args); 322 return; 323 } 324 325 backing_dev_insert_io(UT_REDUCE_IO_WRITEV, backing_dev, iov, iovcnt, lba, lba_count, args); 326 } 327 328 static void 329 backing_dev_unmap_execute(struct spdk_reduce_backing_dev *backing_dev, 330 uint64_t lba, uint32_t lba_count, 331 struct spdk_reduce_vol_cb_args *args) 332 { 333 char *offset; 334 335 offset = g_backing_dev_buf + lba * backing_dev->blocklen; 336 memset(offset, 0, lba_count * backing_dev->blocklen); 337 args->cb_fn(args->cb_arg, 0); 338 } 339 340 static void 341 backing_dev_unmap(struct spdk_reduce_backing_dev *backing_dev, 342 uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args) 343 { 344 if (g_defer_bdev_io == false) { 345 CU_ASSERT(g_pending_bdev_io_count == 0); 346 CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io)); 347 backing_dev_unmap_execute(backing_dev, lba, lba_count, args); 348 return; 349 } 350 351 backing_dev_insert_io(UT_REDUCE_IO_UNMAP, backing_dev, NULL, 0, lba, lba_count, args); 352 } 353 354 static void 355 backing_dev_io_execute(uint32_t count) 356 { 357 struct ut_reduce_bdev_io *ut_bdev_io; 358 uint32_t done = 0; 359 360 CU_ASSERT(g_defer_bdev_io 
== true); 361 while (!TAILQ_EMPTY(&g_pending_bdev_io) && (count == 0 || done < count)) { 362 ut_bdev_io = TAILQ_FIRST(&g_pending_bdev_io); 363 TAILQ_REMOVE(&g_pending_bdev_io, ut_bdev_io, link); 364 g_pending_bdev_io_count--; 365 switch (ut_bdev_io->type) { 366 case UT_REDUCE_IO_READV: 367 backing_dev_readv_execute(ut_bdev_io->backing_dev, 368 ut_bdev_io->iov, ut_bdev_io->iovcnt, 369 ut_bdev_io->lba, ut_bdev_io->lba_count, 370 ut_bdev_io->args); 371 break; 372 case UT_REDUCE_IO_WRITEV: 373 backing_dev_writev_execute(ut_bdev_io->backing_dev, 374 ut_bdev_io->iov, ut_bdev_io->iovcnt, 375 ut_bdev_io->lba, ut_bdev_io->lba_count, 376 ut_bdev_io->args); 377 break; 378 case UT_REDUCE_IO_UNMAP: 379 backing_dev_unmap_execute(ut_bdev_io->backing_dev, 380 ut_bdev_io->lba, ut_bdev_io->lba_count, 381 ut_bdev_io->args); 382 break; 383 default: 384 CU_ASSERT(false); 385 break; 386 } 387 free(ut_bdev_io); 388 done++; 389 } 390 } 391 392 static void 393 backing_dev_compress(struct spdk_reduce_backing_dev *backing_dev, 394 struct iovec *src_iov, int src_iovcnt, 395 struct iovec *dst_iov, int dst_iovcnt, 396 struct spdk_reduce_vol_cb_args *args) 397 { 398 CU_ASSERT(src_iovcnt == 1); 399 CU_ASSERT(dst_iovcnt == 1); 400 CU_ASSERT(src_iov[0].iov_len == dst_iov[0].iov_len); 401 memcpy(dst_iov[0].iov_base, src_iov[0].iov_base, src_iov[0].iov_len); 402 args->cb_fn(args->cb_arg, src_iov[0].iov_len); 403 } 404 405 static void 406 backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev, 407 struct iovec *src_iov, int src_iovcnt, 408 struct iovec *dst_iov, int dst_iovcnt, 409 struct spdk_reduce_vol_cb_args *args) 410 { 411 CU_ASSERT(src_iovcnt == 1); 412 CU_ASSERT(dst_iovcnt == 1); 413 CU_ASSERT(src_iov[0].iov_len == dst_iov[0].iov_len); 414 memcpy(dst_iov[0].iov_base, src_iov[0].iov_base, src_iov[0].iov_len); 415 args->cb_fn(args->cb_arg, src_iov[0].iov_len); 416 } 417 418 static void 419 backing_dev_destroy(struct spdk_reduce_backing_dev *backing_dev) 420 { 421 /* We don't free 
this during backing_dev_close so that we can test init/unload/load 422 * scenarios. 423 */ 424 free(g_backing_dev_buf); 425 g_backing_dev_buf = NULL; 426 } 427 428 static void 429 backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce_vol_params *params, 430 uint32_t backing_blocklen) 431 { 432 int64_t size; 433 434 size = 4 * 1024 * 1024; 435 backing_dev->blocklen = backing_blocklen; 436 backing_dev->blockcnt = size / backing_dev->blocklen; 437 backing_dev->readv = backing_dev_readv; 438 backing_dev->writev = backing_dev_writev; 439 backing_dev->unmap = backing_dev_unmap; 440 backing_dev->compress = backing_dev_compress; 441 backing_dev->decompress = backing_dev_decompress; 442 443 g_backing_dev_buf = calloc(1, size); 444 SPDK_CU_ASSERT_FATAL(g_backing_dev_buf != NULL); 445 } 446 447 static void 448 init_md(void) 449 { 450 struct spdk_reduce_vol_params params = {}; 451 struct spdk_reduce_vol_params *persistent_params; 452 struct spdk_reduce_backing_dev backing_dev = {}; 453 struct spdk_uuid uuid; 454 uint64_t *entry; 455 456 params.chunk_size = 16 * 1024; 457 params.backing_io_unit_size = 512; 458 params.logical_block_size = 512; 459 460 backing_dev_init(&backing_dev, ¶ms, 512); 461 462 g_vol = NULL; 463 g_reduce_errno = -1; 464 spdk_reduce_vol_init(¶ms, &backing_dev, TEST_MD_PATH, init_cb, NULL); 465 CU_ASSERT(g_reduce_errno == 0); 466 SPDK_CU_ASSERT_FATAL(g_vol != NULL); 467 /* Confirm that reduce persisted the params to metadata. */ 468 CU_ASSERT(memcmp(g_persistent_pm_buf, SPDK_REDUCE_SIGNATURE, 8) == 0); 469 persistent_params = (struct spdk_reduce_vol_params *)(g_persistent_pm_buf + 8); 470 CU_ASSERT(memcmp(persistent_params, ¶ms, sizeof(params)) == 0); 471 /* Now confirm that contents of pm_file after the superblock have been initialized 472 * to REDUCE_EMPTY_MAP_ENTRY. 
473 */ 474 entry = (uint64_t *)(g_persistent_pm_buf + sizeof(struct spdk_reduce_vol_superblock)); 475 while (entry != (uint64_t *)(g_persistent_pm_buf + g_vol->pm_file.size)) { 476 CU_ASSERT(*entry == REDUCE_EMPTY_MAP_ENTRY); 477 entry++; 478 } 479 480 /* Check that the pm file path was constructed correctly. It should be in 481 * the form: 482 * TEST_MD_PATH + "/" + <uuid string> 483 */ 484 CU_ASSERT(strncmp(&g_path[0], TEST_MD_PATH, strlen(TEST_MD_PATH)) == 0); 485 CU_ASSERT(g_path[strlen(TEST_MD_PATH)] == '/'); 486 CU_ASSERT(spdk_uuid_parse(&uuid, &g_path[strlen(TEST_MD_PATH) + 1]) == 0); 487 CU_ASSERT(spdk_uuid_compare(&uuid, spdk_reduce_vol_get_uuid(g_vol)) == 0); 488 489 g_reduce_errno = -1; 490 spdk_reduce_vol_unload(g_vol, unload_cb, NULL); 491 CU_ASSERT(g_reduce_errno == 0); 492 CU_ASSERT(g_volatile_pm_buf == NULL); 493 494 persistent_pm_buf_destroy(); 495 backing_dev_destroy(&backing_dev); 496 } 497 498 static void 499 _init_backing_dev(uint32_t backing_blocklen) 500 { 501 struct spdk_reduce_vol_params params = {}; 502 struct spdk_reduce_vol_params *persistent_params; 503 struct spdk_reduce_backing_dev backing_dev = {}; 504 505 params.chunk_size = 16 * 1024; 506 params.backing_io_unit_size = 512; 507 params.logical_block_size = 512; 508 spdk_uuid_generate(¶ms.uuid); 509 510 backing_dev_init(&backing_dev, ¶ms, backing_blocklen); 511 512 g_vol = NULL; 513 memset(g_path, 0, sizeof(g_path)); 514 g_reduce_errno = -1; 515 spdk_reduce_vol_init(¶ms, &backing_dev, TEST_MD_PATH, init_cb, NULL); 516 CU_ASSERT(g_reduce_errno == 0); 517 SPDK_CU_ASSERT_FATAL(g_vol != NULL); 518 CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0); 519 /* Confirm that libreduce persisted the params to the backing device. 
*/ 520 CU_ASSERT(memcmp(g_backing_dev_buf, SPDK_REDUCE_SIGNATURE, 8) == 0); 521 persistent_params = (struct spdk_reduce_vol_params *)(g_backing_dev_buf + 8); 522 CU_ASSERT(memcmp(persistent_params, ¶ms, sizeof(params)) == 0); 523 /* Confirm that the path to the persistent memory metadata file was persisted to 524 * the backing device. 525 */ 526 CU_ASSERT(strncmp(g_path, 527 g_backing_dev_buf + REDUCE_BACKING_DEV_PATH_OFFSET, 528 REDUCE_PATH_MAX) == 0); 529 530 g_reduce_errno = -1; 531 spdk_reduce_vol_unload(g_vol, unload_cb, NULL); 532 CU_ASSERT(g_reduce_errno == 0); 533 534 persistent_pm_buf_destroy(); 535 backing_dev_destroy(&backing_dev); 536 } 537 538 static void 539 init_backing_dev(void) 540 { 541 _init_backing_dev(512); 542 _init_backing_dev(4096); 543 } 544 545 static void 546 _load(uint32_t backing_blocklen) 547 { 548 struct spdk_reduce_vol_params params = {}; 549 struct spdk_reduce_backing_dev backing_dev = {}; 550 char pmem_file_path[REDUCE_PATH_MAX]; 551 552 params.chunk_size = 16 * 1024; 553 params.backing_io_unit_size = 512; 554 params.logical_block_size = 512; 555 spdk_uuid_generate(¶ms.uuid); 556 557 backing_dev_init(&backing_dev, ¶ms, backing_blocklen); 558 559 g_vol = NULL; 560 g_reduce_errno = -1; 561 spdk_reduce_vol_init(¶ms, &backing_dev, TEST_MD_PATH, init_cb, NULL); 562 CU_ASSERT(g_reduce_errno == 0); 563 SPDK_CU_ASSERT_FATAL(g_vol != NULL); 564 CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0); 565 memcpy(pmem_file_path, g_path, sizeof(pmem_file_path)); 566 567 g_reduce_errno = -1; 568 spdk_reduce_vol_unload(g_vol, unload_cb, NULL); 569 CU_ASSERT(g_reduce_errno == 0); 570 571 g_vol = NULL; 572 memset(g_path, 0, sizeof(g_path)); 573 g_reduce_errno = -1; 574 spdk_reduce_vol_load(&backing_dev, load_cb, NULL); 575 CU_ASSERT(g_reduce_errno == 0); 576 SPDK_CU_ASSERT_FATAL(g_vol != NULL); 577 CU_ASSERT(strncmp(g_path, pmem_file_path, sizeof(pmem_file_path)) == 0); 578 CU_ASSERT(g_vol->params.vol_size == params.vol_size); 579 
CU_ASSERT(g_vol->params.chunk_size == params.chunk_size); 580 CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size); 581 582 g_reduce_errno = -1; 583 spdk_reduce_vol_unload(g_vol, unload_cb, NULL); 584 CU_ASSERT(g_reduce_errno == 0); 585 586 persistent_pm_buf_destroy(); 587 backing_dev_destroy(&backing_dev); 588 } 589 590 static void 591 load(void) 592 { 593 _load(512); 594 _load(4096); 595 } 596 597 static uint64_t 598 _vol_get_chunk_map_index(struct spdk_reduce_vol *vol, uint64_t offset) 599 { 600 uint64_t logical_map_index = offset / vol->logical_blocks_per_chunk; 601 602 return vol->pm_logical_map[logical_map_index]; 603 } 604 605 static void 606 write_cb(void *arg, int reduce_errno) 607 { 608 g_reduce_errno = reduce_errno; 609 } 610 611 static void 612 read_cb(void *arg, int reduce_errno) 613 { 614 g_reduce_errno = reduce_errno; 615 } 616 617 static void 618 _write_maps(uint32_t backing_blocklen) 619 { 620 struct spdk_reduce_vol_params params = {}; 621 struct spdk_reduce_backing_dev backing_dev = {}; 622 struct iovec iov; 623 char buf[16 * 1024]; /* chunk size */ 624 uint32_t i; 625 uint64_t old_chunk0_map_index, new_chunk0_map_index; 626 struct spdk_reduce_chunk_map *old_chunk0_map, *new_chunk0_map; 627 628 params.chunk_size = 16 * 1024; 629 params.backing_io_unit_size = 4096; 630 params.logical_block_size = 512; 631 spdk_uuid_generate(¶ms.uuid); 632 633 backing_dev_init(&backing_dev, ¶ms, backing_blocklen); 634 635 g_vol = NULL; 636 g_reduce_errno = -1; 637 spdk_reduce_vol_init(¶ms, &backing_dev, TEST_MD_PATH, init_cb, NULL); 638 CU_ASSERT(g_reduce_errno == 0); 639 SPDK_CU_ASSERT_FATAL(g_vol != NULL); 640 641 for (i = 0; i < g_vol->params.vol_size / g_vol->params.chunk_size; i++) { 642 CU_ASSERT(_vol_get_chunk_map_index(g_vol, i) == REDUCE_EMPTY_MAP_ENTRY); 643 } 644 645 iov.iov_base = buf; 646 iov.iov_len = params.logical_block_size; 647 g_reduce_errno = -1; 648 spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL); 649 
CU_ASSERT(g_reduce_errno == 0); 650 651 old_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0); 652 CU_ASSERT(old_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY); 653 CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == true); 654 655 old_chunk0_map = _reduce_vol_get_chunk_map(g_vol, old_chunk0_map_index); 656 for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) { 657 CU_ASSERT(old_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY); 658 CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units, 659 old_chunk0_map->io_unit_index[i]) == true); 660 } 661 662 g_reduce_errno = -1; 663 spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL); 664 CU_ASSERT(g_reduce_errno == 0); 665 666 new_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0); 667 CU_ASSERT(new_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY); 668 CU_ASSERT(new_chunk0_map_index != old_chunk0_map_index); 669 CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, new_chunk0_map_index) == true); 670 CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == false); 671 672 for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) { 673 CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units, 674 old_chunk0_map->io_unit_index[i]) == false); 675 } 676 677 new_chunk0_map = _reduce_vol_get_chunk_map(g_vol, new_chunk0_map_index); 678 for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) { 679 CU_ASSERT(new_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY); 680 CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units, 681 new_chunk0_map->io_unit_index[i]) == true); 682 } 683 684 g_reduce_errno = -1; 685 spdk_reduce_vol_unload(g_vol, unload_cb, NULL); 686 CU_ASSERT(g_reduce_errno == 0); 687 688 g_vol = NULL; 689 g_reduce_errno = -1; 690 spdk_reduce_vol_load(&backing_dev, load_cb, NULL); 691 CU_ASSERT(g_reduce_errno == 0); 692 SPDK_CU_ASSERT_FATAL(g_vol != NULL); 693 CU_ASSERT(g_vol->params.vol_size == params.vol_size); 694 
CU_ASSERT(g_vol->params.chunk_size == params.chunk_size); 695 CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size); 696 697 g_reduce_errno = -1; 698 spdk_reduce_vol_unload(g_vol, unload_cb, NULL); 699 CU_ASSERT(g_reduce_errno == 0); 700 701 persistent_pm_buf_destroy(); 702 backing_dev_destroy(&backing_dev); 703 } 704 705 static void 706 write_maps(void) 707 { 708 _write_maps(512); 709 _write_maps(4096); 710 } 711 712 static void 713 _read_write(uint32_t backing_blocklen) 714 { 715 struct spdk_reduce_vol_params params = {}; 716 struct spdk_reduce_backing_dev backing_dev = {}; 717 struct iovec iov; 718 char buf[16 * 1024]; /* chunk size */ 719 char compare_buf[16 * 1024]; 720 uint32_t i; 721 722 params.chunk_size = 16 * 1024; 723 params.backing_io_unit_size = 4096; 724 params.logical_block_size = 512; 725 spdk_uuid_generate(¶ms.uuid); 726 727 backing_dev_init(&backing_dev, ¶ms, backing_blocklen); 728 729 g_vol = NULL; 730 g_reduce_errno = -1; 731 spdk_reduce_vol_init(¶ms, &backing_dev, TEST_MD_PATH, init_cb, NULL); 732 CU_ASSERT(g_reduce_errno == 0); 733 SPDK_CU_ASSERT_FATAL(g_vol != NULL); 734 735 /* Write 0xAA to 2 512-byte logical blocks, starting at LBA 2. 
*/ 736 memset(buf, 0xAA, 2 * params.logical_block_size); 737 iov.iov_base = buf; 738 iov.iov_len = 2 * params.logical_block_size; 739 g_reduce_errno = -1; 740 spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL); 741 CU_ASSERT(g_reduce_errno == 0); 742 743 memset(compare_buf, 0xAA, sizeof(compare_buf)); 744 for (i = 0; i < params.chunk_size / params.logical_block_size; i++) { 745 memset(buf, 0xFF, params.logical_block_size); 746 iov.iov_base = buf; 747 iov.iov_len = params.logical_block_size; 748 g_reduce_errno = -1; 749 spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL); 750 CU_ASSERT(g_reduce_errno == 0); 751 752 switch (i) { 753 case 2: 754 case 3: 755 CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0); 756 break; 757 default: 758 CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size)); 759 break; 760 } 761 } 762 763 g_reduce_errno = -1; 764 spdk_reduce_vol_unload(g_vol, unload_cb, NULL); 765 CU_ASSERT(g_reduce_errno == 0); 766 767 /* Overwrite what we just wrote with 0xCC */ 768 g_vol = NULL; 769 g_reduce_errno = -1; 770 spdk_reduce_vol_load(&backing_dev, load_cb, NULL); 771 CU_ASSERT(g_reduce_errno == 0); 772 SPDK_CU_ASSERT_FATAL(g_vol != NULL); 773 CU_ASSERT(g_vol->params.vol_size == params.vol_size); 774 CU_ASSERT(g_vol->params.chunk_size == params.chunk_size); 775 CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size); 776 777 memset(buf, 0xCC, 2 * params.logical_block_size); 778 iov.iov_base = buf; 779 iov.iov_len = 2 * params.logical_block_size; 780 g_reduce_errno = -1; 781 spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL); 782 CU_ASSERT(g_reduce_errno == 0); 783 784 memset(compare_buf, 0xCC, sizeof(compare_buf)); 785 for (i = 0; i < params.chunk_size / params.logical_block_size; i++) { 786 memset(buf, 0xFF, params.logical_block_size); 787 iov.iov_base = buf; 788 iov.iov_len = params.logical_block_size; 789 g_reduce_errno = -1; 790 spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, 
NULL); 791 CU_ASSERT(g_reduce_errno == 0); 792 793 switch (i) { 794 case 2: 795 case 3: 796 CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0); 797 break; 798 default: 799 CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size)); 800 break; 801 } 802 } 803 804 g_reduce_errno = -1; 805 spdk_reduce_vol_unload(g_vol, unload_cb, NULL); 806 CU_ASSERT(g_reduce_errno == 0); 807 808 g_vol = NULL; 809 g_reduce_errno = -1; 810 spdk_reduce_vol_load(&backing_dev, load_cb, NULL); 811 CU_ASSERT(g_reduce_errno == 0); 812 SPDK_CU_ASSERT_FATAL(g_vol != NULL); 813 CU_ASSERT(g_vol->params.vol_size == params.vol_size); 814 CU_ASSERT(g_vol->params.chunk_size == params.chunk_size); 815 CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size); 816 817 g_reduce_errno = -1; 818 819 /* Write 0xBB to 2 512-byte logical blocks, starting at LBA 37. 820 * This is writing into the second chunk of the volume. This also 821 * enables implicitly checking that we reloaded the bit arrays 822 * correctly - making sure we don't use the first chunk map again 823 * for this new write - the first chunk map was already used by the 824 * write from before we unloaded and reloaded. 
825 */ 826 memset(buf, 0xBB, 2 * params.logical_block_size); 827 iov.iov_base = buf; 828 iov.iov_len = 2 * params.logical_block_size; 829 g_reduce_errno = -1; 830 spdk_reduce_vol_writev(g_vol, &iov, 1, 37, 2, write_cb, NULL); 831 CU_ASSERT(g_reduce_errno == 0); 832 833 for (i = 0; i < 2 * params.chunk_size / params.logical_block_size; i++) { 834 memset(buf, 0xFF, params.logical_block_size); 835 iov.iov_base = buf; 836 iov.iov_len = params.logical_block_size; 837 g_reduce_errno = -1; 838 spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL); 839 CU_ASSERT(g_reduce_errno == 0); 840 841 switch (i) { 842 case 2: 843 case 3: 844 memset(compare_buf, 0xCC, sizeof(compare_buf)); 845 CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0); 846 break; 847 case 37: 848 case 38: 849 memset(compare_buf, 0xBB, sizeof(compare_buf)); 850 CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0); 851 break; 852 default: 853 CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size)); 854 break; 855 } 856 } 857 858 g_reduce_errno = -1; 859 spdk_reduce_vol_unload(g_vol, unload_cb, NULL); 860 CU_ASSERT(g_reduce_errno == 0); 861 862 persistent_pm_buf_destroy(); 863 backing_dev_destroy(&backing_dev); 864 } 865 866 static void 867 read_write(void) 868 { 869 _read_write(512); 870 _read_write(4096); 871 } 872 873 static void 874 destroy_cb(void *ctx, int reduce_errno) 875 { 876 g_reduce_errno = reduce_errno; 877 } 878 879 static void 880 destroy(void) 881 { 882 struct spdk_reduce_vol_params params = {}; 883 struct spdk_reduce_backing_dev backing_dev = {}; 884 885 params.chunk_size = 16 * 1024; 886 params.backing_io_unit_size = 512; 887 params.logical_block_size = 512; 888 spdk_uuid_generate(¶ms.uuid); 889 890 backing_dev_init(&backing_dev, ¶ms, 512); 891 892 g_vol = NULL; 893 g_reduce_errno = -1; 894 spdk_reduce_vol_init(¶ms, &backing_dev, TEST_MD_PATH, init_cb, NULL); 895 CU_ASSERT(g_reduce_errno == 0); 896 SPDK_CU_ASSERT_FATAL(g_vol != NULL); 897 898 
/* NOTE(review): this chunk begins inside the tail of the destroy test; its
 * opening lines are above this SOURCE window.
 */
	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Reload from the same backing device to prove the volume persisted
	 * across the unload.
	 */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Clear the allocation mocks before destroying the volume so the
	 * destroy path uses real allocations.  (NOTE(review): presumably the
	 * mocks were set earlier in this test - the beginning of the function
	 * is outside this window; confirm against the full file.)
	 */
	g_reduce_errno = -1;
	MOCK_CLEAR(spdk_dma_zmalloc);
	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	spdk_reduce_vol_destroy(&backing_dev, destroy_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* After a destroy there is no valid superblock left, so a load attempt
	 * must fail with -EILSEQ.
	 */
	g_reduce_errno = 0;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EILSEQ);

	backing_dev_destroy(&backing_dev);
}

/* This test primarily checks that the reduce unit test infrastructure for asynchronous
 * backing device I/O operations is working correctly.
 */
static void
defer_bdev_io(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[logical_block_size];
	char compare_buf[logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	/* Queue backing I/O instead of completing it inline, so we can inspect
	 * the pending queue before letting the write finish.
	 */
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to part of one chunk which was previously unallocated.  This should result in
	 * 4 pending I/O - one for each backing io unit in the chunk.
	 */
	CU_ASSERT(g_pending_bdev_io_count == params.chunk_size / params.backing_io_unit_size);

	/* Drain the deferred queue (count 0 == execute everything); the write
	 * callback should now have fired with success.
	 */
	backing_dev_io_execute(0);
	CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
	CU_ASSERT(g_reduce_errno == 0);

	/* Read the block back synchronously and verify the 0xAA payload. */
	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 1, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

/* Verify that an I/O overlapping a chunk that already has an I/O in flight is
 * held back until the first I/O to that chunk completes.
 */
static void
overlapped(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[2 * logical_block_size];
	char compare_buf[2 * logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = logical_block_size;
	g_reduce_errno = -100;
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	CU_ASSERT(g_pending_bdev_io_count == params.chunk_size / params.backing_io_unit_size);

	/* Now do an overlapped I/O to the same chunk. */
	spdk_reduce_vol_writev(g_vol, &iov, 1, 1, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* The second I/O overlaps with the first one.  So we should only see pending bdev_io
	 * related to the first I/O here - the second one won't start until the first one is completed.
	 */
	CU_ASSERT(g_pending_bdev_io_count == params.chunk_size / params.backing_io_unit_size);

	/* Draining the queue completes the first write, which releases the
	 * second; both callbacks should end up reporting success.
	 */
	backing_dev_io_execute(0);
	CU_ASSERT(g_reduce_errno == 0);

	/* Read both logical blocks back and verify each carries the 0xAA data. */
	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = 2 * logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 2, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, 2 * logical_block_size) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

/* Trivial run-length-encoding compressor used by the compress_algorithm test.
 * Each run of up to UINT8_MAX equal input bytes is emitted as a
 * (count, value) byte pair.  On entry *compressed_len is the capacity of
 * outbuf; on success it is updated to the number of bytes written.
 * Returns 0 on success, -ENOSPC if outbuf cannot hold the next pair.
 */
static int
ut_compress(char *outbuf, uint32_t *compressed_len, char *inbuf, uint32_t inbuflen)
{
	uint32_t len = 0;
	uint8_t count;
	char last;

	while (true) {
		if (inbuflen == 0) {
			*compressed_len = len;
			return 0;
		}

		/* Need room for one more (count, value) pair before consuming input. */
		if (*compressed_len < (len + 2)) {
			return -ENOSPC;
		}

		last = *inbuf;
		count = 1;
		inbuflen--;
		inbuf++;

		/* Extend the run while the byte repeats, capped at UINT8_MAX. */
		while (inbuflen > 0 && *inbuf == last && count < UINT8_MAX) {
			count++;
			inbuflen--;
			inbuf++;
		}

		outbuf[len] = count;
		outbuf[len + 1] = last;
		len += 2;
	}
}

/* Inverse of ut_compress: expand (count, value) pairs into outbuf.
 * NOTE(review): *compressed_len here is really the DECOMPRESSED capacity on
 * entry / length on success - the name is kept for symmetry with ut_compress.
 * Returns 0 on success, -ENOSPC if outbuf would overflow.  inbuflen must be
 * even (whole pairs); enforced by the fatal assert.
 */
static int
ut_decompress(uint8_t *outbuf, uint32_t *compressed_len, uint8_t *inbuf, uint32_t inbuflen)
{
	uint32_t len = 0;

	SPDK_CU_ASSERT_FATAL(inbuflen % 2 == 0);

	while (true) {
		if (inbuflen == 0) {
			*compressed_len = len;
			return 0;
		}

		/* inbuf[0] is the run length; make sure it fits in the output. */
		if ((len + inbuf[0]) > *compressed_len) {
			return -ENOSPC;
		}

		/* inbuf[1] is the byte value to replicate inbuf[0] times. */
		memset(outbuf, inbuf[1], inbuf[0]);
		outbuf += inbuf[0];
		len += inbuf[0];
		inbuflen -= 2;
		inbuf += 2;
	}
}

/* Size of the buffers used by the compress_algorithm test below. */
#define BUFSIZE 4096

static void
compress_algorithm(void) 1118 { 1119 uint8_t original_data[BUFSIZE]; 1120 uint8_t compressed_data[BUFSIZE]; 1121 uint8_t decompressed_data[BUFSIZE]; 1122 uint32_t compressed_len, decompressed_len, i; 1123 int rc; 1124 1125 memset(original_data, 0xAA, BUFSIZE); 1126 compressed_len = sizeof(compressed_data); 1127 rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX); 1128 CU_ASSERT(rc == 0); 1129 CU_ASSERT(compressed_len == 2); 1130 CU_ASSERT(compressed_data[0] == UINT8_MAX); 1131 CU_ASSERT(compressed_data[1] == 0xAA); 1132 1133 decompressed_len = sizeof(decompressed_data); 1134 rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len); 1135 CU_ASSERT(rc == 0); 1136 CU_ASSERT(decompressed_len == UINT8_MAX); 1137 CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0); 1138 1139 compressed_len = sizeof(compressed_data); 1140 rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX + 1); 1141 CU_ASSERT(rc == 0); 1142 CU_ASSERT(compressed_len == 4); 1143 CU_ASSERT(compressed_data[0] == UINT8_MAX); 1144 CU_ASSERT(compressed_data[1] == 0xAA); 1145 CU_ASSERT(compressed_data[2] == 1); 1146 CU_ASSERT(compressed_data[3] == 0xAA); 1147 1148 decompressed_len = sizeof(decompressed_data); 1149 rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len); 1150 CU_ASSERT(rc == 0); 1151 CU_ASSERT(decompressed_len == UINT8_MAX + 1); 1152 CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0); 1153 1154 for (i = 0; i < sizeof(original_data); i++) { 1155 original_data[i] = i & 0xFF; 1156 } 1157 compressed_len = sizeof(compressed_data); 1158 rc = ut_compress(compressed_data, &compressed_len, original_data, 2048); 1159 CU_ASSERT(rc == 0); 1160 CU_ASSERT(compressed_len == 4096); 1161 CU_ASSERT(compressed_data[0] == 1); 1162 CU_ASSERT(compressed_data[1] == 0); 1163 CU_ASSERT(compressed_data[4094] == 1); 1164 CU_ASSERT(compressed_data[4095] == 0xFF); 
1165 1166 decompressed_len = sizeof(decompressed_data); 1167 rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len); 1168 CU_ASSERT(rc == 0); 1169 CU_ASSERT(decompressed_len == 2048); 1170 CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0); 1171 1172 compressed_len = sizeof(compressed_data); 1173 rc = ut_compress(compressed_data, &compressed_len, original_data, 2049); 1174 CU_ASSERT(rc == -ENOSPC); 1175 } 1176 1177 int 1178 main(int argc, char **argv) 1179 { 1180 CU_pSuite suite = NULL; 1181 unsigned int num_failures; 1182 1183 if (CU_initialize_registry() != CUE_SUCCESS) { 1184 return CU_get_error(); 1185 } 1186 1187 suite = CU_add_suite("reduce", NULL, NULL); 1188 if (suite == NULL) { 1189 CU_cleanup_registry(); 1190 return CU_get_error(); 1191 } 1192 1193 if ( 1194 CU_add_test(suite, "get_pm_file_size", get_pm_file_size) == NULL || 1195 CU_add_test(suite, "get_vol_size", get_vol_size) == NULL || 1196 CU_add_test(suite, "init_failure", init_failure) == NULL || 1197 CU_add_test(suite, "init_md", init_md) == NULL || 1198 CU_add_test(suite, "init_backing_dev", init_backing_dev) == NULL || 1199 CU_add_test(suite, "load", load) == NULL || 1200 CU_add_test(suite, "write_maps", write_maps) == NULL || 1201 CU_add_test(suite, "read_write", read_write) == NULL || 1202 CU_add_test(suite, "destroy", destroy) == NULL || 1203 CU_add_test(suite, "defer_bdev_io", defer_bdev_io) == NULL || 1204 CU_add_test(suite, "overlapped", overlapped) == NULL || 1205 CU_add_test(suite, "compress_algorithm", compress_algorithm) == NULL 1206 ) { 1207 CU_cleanup_registry(); 1208 return CU_get_error(); 1209 } 1210 1211 CU_basic_set_mode(CU_BRM_VERBOSE); 1212 CU_basic_run_tests(); 1213 num_failures = CU_get_number_of_failures(); 1214 CU_cleanup_registry(); 1215 return num_failures; 1216 } 1217