xref: /spdk/test/unit/lib/reduce/reduce.c/reduce_ut.c (revision bb488d2829a9b7863daab45917dd2174905cc0ae)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 
38 #include "reduce/reduce.c"
39 #include "spdk_internal/mock.h"
40 #include "common/lib/test_env.c"
41 
/* Test-global state shared between the pmem/backing-dev stubs below and
 * the individual test cases.
 */
static struct spdk_reduce_vol *g_vol;		/* vol handed back by the last init/load callback */
static int g_reduce_errno;			/* errno reported by the last reduce callback */
static char *g_volatile_pm_buf;			/* simulated volatile pmem mapping (pmem_map_file) */
static size_t g_volatile_pm_buf_len;
static char *g_persistent_pm_buf;		/* simulated persistent pmem file contents */
static size_t g_persistent_pm_buf_len;
static char *g_backing_dev_buf;			/* flat buffer simulating the backing block device */
static char g_path[REDUCE_PATH_MAX];		/* last path passed to pmem_map_file() */

/* Directory the tests tell reduce to create its pmem metadata file in. */
#define TEST_MD_PATH "/tmp"
52 
/* Kinds of backing device I/O that can be queued when g_defer_bdev_io is set. */
enum ut_reduce_bdev_io_type {
	UT_REDUCE_IO_READV = 1,
	UT_REDUCE_IO_WRITEV = 2,
	UT_REDUCE_IO_UNMAP = 3,
};
58 
/* A deferred backing-dev I/O, parked on g_pending_bdev_io until a test
 * drains it with backing_dev_io_execute().
 */
struct ut_reduce_bdev_io {
	enum ut_reduce_bdev_io_type type;
	struct spdk_reduce_backing_dev *backing_dev;
	struct iovec *iov;	/* NULL for unmap */
	int iovcnt;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_reduce_vol_cb_args *args;	/* completion to fire on execution */
	TAILQ_ENTRY(ut_reduce_bdev_io)	link;
};
69 
/* When true, backing device I/O is queued on g_pending_bdev_io instead of
 * executing inline; tests drain the queue via backing_dev_io_execute().
 */
static bool g_defer_bdev_io = false;
static TAILQ_HEAD(, ut_reduce_bdev_io) g_pending_bdev_io =
	TAILQ_HEAD_INITIALIZER(g_pending_bdev_io);
static uint32_t g_pending_bdev_io_count = 0;
74 
75 static void
76 sync_pm_buf(const void *addr, size_t length)
77 {
78 	uint64_t offset = (char *)addr - g_volatile_pm_buf;
79 
80 	memcpy(&g_persistent_pm_buf[offset], addr, length);
81 }
82 
/* Stub for libpmem pmem_msync(): flush the range to the simulated
 * persistent buffer and report success.
 */
int
pmem_msync(const void *addr, size_t length)
{
	sync_pm_buf(addr, length);
	return 0;
}
89 
/* Stub for libpmem pmem_persist(): same effect as pmem_msync() here. */
void
pmem_persist(const void *addr, size_t len)
{
	sync_pm_buf(addr, len);
}
95 
96 static void
97 get_pm_file_size(void)
98 {
99 	struct spdk_reduce_vol_params params;
100 	uint64_t pm_size, expected_pm_size;
101 
102 	params.backing_io_unit_size = 4096;
103 	params.chunk_size = 4096 * 4;
104 	params.vol_size = 4096 * 4 * 100;
105 
106 	pm_size = _get_pm_file_size(&params);
107 	expected_pm_size = sizeof(struct spdk_reduce_vol_superblock);
108 	/* 100 chunks in logical map * 8 bytes per chunk */
109 	expected_pm_size += 100 * sizeof(uint64_t);
110 	/* 100 chunks * 4 backing io units per chunk * 8 bytes per backing io unit */
111 	expected_pm_size += 100 * 4 * sizeof(uint64_t);
112 	/* reduce allocates some extra chunks too for in-flight writes when logical map
113 	 * is full.  REDUCE_EXTRA_CHUNKS is a private #ifdef in reduce.c.
114 	 */
115 	expected_pm_size += REDUCE_NUM_EXTRA_CHUNKS * 4 * sizeof(uint64_t);
116 	/* reduce will add some padding so numbers may not match exactly.  Make sure
117 	 * they are close though.
118 	 */
119 	CU_ASSERT((pm_size - expected_pm_size) < REDUCE_PM_SIZE_ALIGNMENT);
120 }
121 
/* The usable volume size must be strictly smaller than the backing device,
 * since metadata and reserved extra chunks consume part of it.
 */
static void
get_vol_size(void)
{
	const uint64_t chunk_size = 16 * 1024;
	const uint64_t backing_dev_size = chunk_size * 1000;

	CU_ASSERT(_get_vol_size(chunk_size, backing_dev_size) < backing_dev_size);
}
131 
132 void *
133 pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
134 	      size_t *mapped_lenp, int *is_pmemp)
135 {
136 	CU_ASSERT(g_volatile_pm_buf == NULL);
137 	snprintf(g_path, sizeof(g_path), "%s", path);
138 	*is_pmemp = 1;
139 
140 	if (g_persistent_pm_buf == NULL) {
141 		g_persistent_pm_buf = calloc(1, len);
142 		g_persistent_pm_buf_len = len;
143 		SPDK_CU_ASSERT_FATAL(g_persistent_pm_buf != NULL);
144 	}
145 
146 	*mapped_lenp = g_persistent_pm_buf_len;
147 	g_volatile_pm_buf = calloc(1, g_persistent_pm_buf_len);
148 	SPDK_CU_ASSERT_FATAL(g_volatile_pm_buf != NULL);
149 	memcpy(g_volatile_pm_buf, g_persistent_pm_buf, g_persistent_pm_buf_len);
150 	g_volatile_pm_buf_len = g_persistent_pm_buf_len;
151 
152 	return g_volatile_pm_buf;
153 }
154 
155 int
156 pmem_unmap(void *addr, size_t len)
157 {
158 	CU_ASSERT(addr == g_volatile_pm_buf);
159 	CU_ASSERT(len == g_volatile_pm_buf_len);
160 	free(g_volatile_pm_buf);
161 	g_volatile_pm_buf = NULL;
162 	g_volatile_pm_buf_len = 0;
163 
164 	return 0;
165 }
166 
167 static void
168 persistent_pm_buf_destroy(void)
169 {
170 	CU_ASSERT(g_persistent_pm_buf != NULL);
171 	free(g_persistent_pm_buf);
172 	g_persistent_pm_buf = NULL;
173 	g_persistent_pm_buf_len = 0;
174 }
175 
176 int __wrap_unlink(const char *path);
177 
178 int
179 __wrap_unlink(const char *path)
180 {
181 	if (strcmp(g_path, path) != 0) {
182 		return ENOENT;
183 	}
184 
185 	persistent_pm_buf_destroy();
186 	return 0;
187 }
188 
/* Completion for spdk_reduce_vol_init(): record the vol and status. */
static void
init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}
195 
/* Completion for spdk_reduce_vol_load(): record the vol and status. */
static void
load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}
202 
/* Completion for spdk_reduce_vol_unload(): record the status. */
static void
unload_cb(void *cb_arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}
208 
209 static void
210 init_failure(void)
211 {
212 	struct spdk_reduce_vol_params params = {};
213 	struct spdk_reduce_backing_dev backing_dev = {};
214 
215 	backing_dev.blocklen = 512;
216 	/* This blockcnt is too small for a reduce vol - there needs to be
217 	 *  enough space for at least REDUCE_NUM_EXTRA_CHUNKS + 1 chunks.
218 	 */
219 	backing_dev.blockcnt = 20;
220 
221 	params.vol_size = 0;
222 	params.chunk_size = 16 * 1024;
223 	params.backing_io_unit_size = backing_dev.blocklen;
224 	params.logical_block_size = 512;
225 
226 	/* backing_dev has an invalid size.  This should fail. */
227 	g_vol = NULL;
228 	g_reduce_errno = 0;
229 	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
230 	CU_ASSERT(g_reduce_errno == -EINVAL);
231 	SPDK_CU_ASSERT_FATAL(g_vol == NULL);
232 
233 	/* backing_dev now has valid size, but backing_dev still has null
234 	 *  function pointers.  This should fail.
235 	 */
236 	backing_dev.blockcnt = 20000;
237 
238 	g_vol = NULL;
239 	g_reduce_errno = 0;
240 	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
241 	CU_ASSERT(g_reduce_errno == -EINVAL);
242 	SPDK_CU_ASSERT_FATAL(g_vol == NULL);
243 }
244 
245 static void
246 backing_dev_readv_execute(struct spdk_reduce_backing_dev *backing_dev,
247 			  struct iovec *iov, int iovcnt,
248 			  uint64_t lba, uint32_t lba_count,
249 			  struct spdk_reduce_vol_cb_args *args)
250 {
251 	char *offset;
252 	int i;
253 
254 	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
255 	for (i = 0; i < iovcnt; i++) {
256 		memcpy(iov[i].iov_base, offset, iov[i].iov_len);
257 		offset += iov[i].iov_len;
258 	}
259 	args->cb_fn(args->cb_arg, 0);
260 }
261 
262 static void
263 backing_dev_insert_io(enum ut_reduce_bdev_io_type type, struct spdk_reduce_backing_dev *backing_dev,
264 		      struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
265 		      struct spdk_reduce_vol_cb_args *args)
266 {
267 	struct ut_reduce_bdev_io *ut_bdev_io;
268 
269 	ut_bdev_io = calloc(1, sizeof(*ut_bdev_io));
270 	SPDK_CU_ASSERT_FATAL(ut_bdev_io != NULL);
271 
272 	ut_bdev_io->type = type;
273 	ut_bdev_io->backing_dev = backing_dev;
274 	ut_bdev_io->iov = iov;
275 	ut_bdev_io->iovcnt = iovcnt;
276 	ut_bdev_io->lba = lba;
277 	ut_bdev_io->lba_count = lba_count;
278 	ut_bdev_io->args = args;
279 	TAILQ_INSERT_TAIL(&g_pending_bdev_io, ut_bdev_io, link);
280 	g_pending_bdev_io_count++;
281 }
282 
283 static void
284 backing_dev_readv(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
285 		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
286 {
287 	if (g_defer_bdev_io == false) {
288 		CU_ASSERT(g_pending_bdev_io_count == 0);
289 		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
290 		backing_dev_readv_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
291 		return;
292 	}
293 
294 	backing_dev_insert_io(UT_REDUCE_IO_READV, backing_dev, iov, iovcnt, lba, lba_count, args);
295 }
296 
297 static void
298 backing_dev_writev_execute(struct spdk_reduce_backing_dev *backing_dev,
299 			   struct iovec *iov, int iovcnt,
300 			   uint64_t lba, uint32_t lba_count,
301 			   struct spdk_reduce_vol_cb_args *args)
302 {
303 	char *offset;
304 	int i;
305 
306 	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
307 	for (i = 0; i < iovcnt; i++) {
308 		memcpy(offset, iov[i].iov_base, iov[i].iov_len);
309 		offset += iov[i].iov_len;
310 	}
311 	args->cb_fn(args->cb_arg, 0);
312 }
313 
314 static void
315 backing_dev_writev(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
316 		   uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
317 {
318 	if (g_defer_bdev_io == false) {
319 		CU_ASSERT(g_pending_bdev_io_count == 0);
320 		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
321 		backing_dev_writev_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
322 		return;
323 	}
324 
325 	backing_dev_insert_io(UT_REDUCE_IO_WRITEV, backing_dev, iov, iovcnt, lba, lba_count, args);
326 }
327 
328 static void
329 backing_dev_unmap_execute(struct spdk_reduce_backing_dev *backing_dev,
330 			  uint64_t lba, uint32_t lba_count,
331 			  struct spdk_reduce_vol_cb_args *args)
332 {
333 	char *offset;
334 
335 	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
336 	memset(offset, 0, lba_count * backing_dev->blocklen);
337 	args->cb_fn(args->cb_arg, 0);
338 }
339 
340 static void
341 backing_dev_unmap(struct spdk_reduce_backing_dev *backing_dev,
342 		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
343 {
344 	if (g_defer_bdev_io == false) {
345 		CU_ASSERT(g_pending_bdev_io_count == 0);
346 		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
347 		backing_dev_unmap_execute(backing_dev, lba, lba_count, args);
348 		return;
349 	}
350 
351 	backing_dev_insert_io(UT_REDUCE_IO_UNMAP, backing_dev, NULL, 0, lba, lba_count, args);
352 }
353 
354 static void
355 backing_dev_io_execute(uint32_t count)
356 {
357 	struct ut_reduce_bdev_io *ut_bdev_io;
358 	uint32_t done = 0;
359 
360 	CU_ASSERT(g_defer_bdev_io == true);
361 	while (!TAILQ_EMPTY(&g_pending_bdev_io) && (count == 0 || done < count)) {
362 		ut_bdev_io = TAILQ_FIRST(&g_pending_bdev_io);
363 		TAILQ_REMOVE(&g_pending_bdev_io, ut_bdev_io, link);
364 		g_pending_bdev_io_count--;
365 		switch (ut_bdev_io->type) {
366 		case UT_REDUCE_IO_READV:
367 			backing_dev_readv_execute(ut_bdev_io->backing_dev,
368 						  ut_bdev_io->iov, ut_bdev_io->iovcnt,
369 						  ut_bdev_io->lba, ut_bdev_io->lba_count,
370 						  ut_bdev_io->args);
371 			break;
372 		case UT_REDUCE_IO_WRITEV:
373 			backing_dev_writev_execute(ut_bdev_io->backing_dev,
374 						   ut_bdev_io->iov, ut_bdev_io->iovcnt,
375 						   ut_bdev_io->lba, ut_bdev_io->lba_count,
376 						   ut_bdev_io->args);
377 			break;
378 		case UT_REDUCE_IO_UNMAP:
379 			backing_dev_unmap_execute(ut_bdev_io->backing_dev,
380 						  ut_bdev_io->lba, ut_bdev_io->lba_count,
381 						  ut_bdev_io->args);
382 			break;
383 		default:
384 			CU_ASSERT(false);
385 			break;
386 		}
387 		free(ut_bdev_io);
388 		done++;
389 	}
390 }
391 
/* Toy RLE compressor used as the backing dev's "compression" algorithm.
 * Output is a sequence of (count, value) byte pairs; runs are capped at
 * UINT8_MAX.  On entry *compressed_len is the capacity of outbuf; on
 * success it is updated to the number of bytes produced.  Returns 0 on
 * success or -ENOSPC if outbuf is too small.
 */
static int
ut_compress(char *outbuf, uint32_t *compressed_len, char *inbuf, uint32_t inbuflen)
{
	uint32_t out_len = 0;

	while (inbuflen > 0) {
		char value;
		uint8_t run;

		/* Each iteration emits exactly one (count, value) pair. */
		if (*compressed_len < out_len + 2) {
			return -ENOSPC;
		}

		value = *inbuf++;
		inbuflen--;
		run = 1;

		while (inbuflen > 0 && *inbuf == value && run < UINT8_MAX) {
			inbuf++;
			inbuflen--;
			run++;
		}

		outbuf[out_len++] = run;
		outbuf[out_len++] = value;
	}

	*compressed_len = out_len;
	return 0;
}
425 
/* Inverse of ut_compress(): expand (count, value) byte pairs into outbuf.
 * On entry *compressed_len is the capacity of outbuf; on success it is
 * updated to the number of bytes produced.  Returns 0 on success or
 * -ENOSPC if outbuf is too small.
 */
static int
ut_decompress(uint8_t *outbuf, uint32_t *compressed_len, uint8_t *inbuf, uint32_t inbuflen)
{
	uint32_t out_len = 0;

	/* Input must be whole (count, value) pairs. */
	SPDK_CU_ASSERT_FATAL(inbuflen % 2 == 0);

	while (inbuflen != 0) {
		uint8_t run = inbuf[0];
		uint8_t value = inbuf[1];

		if (out_len + run > *compressed_len) {
			return -ENOSPC;
		}

		memset(outbuf, value, run);
		outbuf += run;
		out_len += run;
		inbuf += 2;
		inbuflen -= 2;
	}

	*compressed_len = out_len;
	return 0;
}
450 
/* Fill 'data' with a pattern: each byte value is repeated 'repeat' times,
 * then incremented (wrapping at 256), starting from init_val.  repeat == 1
 * yields maximally incompressible data for the RLE compressor above.
 */
static void
ut_build_data_buffer(uint8_t *data, uint32_t data_len, uint8_t init_val, uint32_t repeat)
{
	uint32_t remaining = repeat;

	SPDK_CU_ASSERT_FATAL(repeat > 0);

	while (data_len-- > 0) {
		*data++ = init_val;
		if (--remaining == 0) {
			init_val++;
			remaining = repeat;
		}
	}
}
469 
470 static void
471 backing_dev_compress(struct spdk_reduce_backing_dev *backing_dev,
472 		     struct iovec *src_iov, int src_iovcnt,
473 		     struct iovec *dst_iov, int dst_iovcnt,
474 		     struct spdk_reduce_vol_cb_args *args)
475 {
476 	uint32_t compressed_len;
477 	int rc;
478 
479 	CU_ASSERT(src_iovcnt == 1);
480 	CU_ASSERT(dst_iovcnt == 1);
481 	CU_ASSERT(src_iov[0].iov_len == dst_iov[0].iov_len);
482 
483 	compressed_len = dst_iov[0].iov_len;
484 	rc = ut_compress(dst_iov[0].iov_base, &compressed_len,
485 			 src_iov[0].iov_base, src_iov[0].iov_len);
486 	args->cb_fn(args->cb_arg, rc ? rc : (int)compressed_len);
487 }
488 
489 static void
490 backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
491 		       struct iovec *src_iov, int src_iovcnt,
492 		       struct iovec *dst_iov, int dst_iovcnt,
493 		       struct spdk_reduce_vol_cb_args *args)
494 {
495 	uint32_t decompressed_len;
496 	int rc;
497 
498 	CU_ASSERT(src_iovcnt == 1);
499 	CU_ASSERT(dst_iovcnt == 1);
500 
501 	decompressed_len = dst_iov[0].iov_len;
502 	rc = ut_decompress(dst_iov[0].iov_base, &decompressed_len,
503 			   src_iov[0].iov_base, src_iov[0].iov_len);
504 	args->cb_fn(args->cb_arg, rc ? rc : (int)decompressed_len);
505 }
506 
507 static void
508 backing_dev_destroy(struct spdk_reduce_backing_dev *backing_dev)
509 {
510 	/* We don't free this during backing_dev_close so that we can test init/unload/load
511 	 *  scenarios.
512 	 */
513 	free(g_backing_dev_buf);
514 	g_backing_dev_buf = NULL;
515 }
516 
517 static void
518 backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce_vol_params *params,
519 		 uint32_t backing_blocklen)
520 {
521 	int64_t size;
522 
523 	size = 4 * 1024 * 1024;
524 	backing_dev->blocklen = backing_blocklen;
525 	backing_dev->blockcnt = size / backing_dev->blocklen;
526 	backing_dev->readv = backing_dev_readv;
527 	backing_dev->writev = backing_dev_writev;
528 	backing_dev->unmap = backing_dev_unmap;
529 	backing_dev->compress = backing_dev_compress;
530 	backing_dev->decompress = backing_dev_decompress;
531 
532 	g_backing_dev_buf = calloc(1, size);
533 	SPDK_CU_ASSERT_FATAL(g_backing_dev_buf != NULL);
534 }
535 
/* Initialize a vol and verify the persistent metadata file contents:
 * signature, persisted params, empty-map initialization of everything
 * after the superblock, and the constructed pm file path.
 */
static void
init_md(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};
	struct spdk_uuid uuid;
	uint64_t *entry;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	/* Confirm that reduce persisted the params to metadata. */
	CU_ASSERT(memcmp(g_persistent_pm_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_persistent_pm_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Now confirm that contents of pm_file after the superblock have been initialized
	 *  to REDUCE_EMPTY_MAP_ENTRY.
	 */
	entry = (uint64_t *)(g_persistent_pm_buf + sizeof(struct spdk_reduce_vol_superblock));
	while (entry != (uint64_t *)(g_persistent_pm_buf + g_vol->pm_file.size)) {
		CU_ASSERT(*entry == REDUCE_EMPTY_MAP_ENTRY);
		entry++;
	}

	/* Check that the pm file path was constructed correctly.  It should be in
	 * the form:
	 * TEST_MD_PATH + "/" + <uuid string>
	 */
	CU_ASSERT(strncmp(&g_path[0], TEST_MD_PATH, strlen(TEST_MD_PATH)) == 0);
	CU_ASSERT(g_path[strlen(TEST_MD_PATH)] == '/');
	CU_ASSERT(spdk_uuid_parse(&uuid, &g_path[strlen(TEST_MD_PATH) + 1]) == 0);
	CU_ASSERT(spdk_uuid_compare(&uuid, spdk_reduce_vol_get_uuid(g_vol)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	/* Unload must have unmapped (and freed) the volatile pmem buffer. */
	CU_ASSERT(g_volatile_pm_buf == NULL);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
586 
/* Initialize a vol with a caller-provided uuid and verify that the params
 * and pm file path were persisted to the backing device superblock.
 */
static void
_init_backing_dev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	/* The pmem_map_file stub recorded the path it was handed in g_path. */
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	/* Confirm that libreduce persisted the params to the backing device. */
	CU_ASSERT(memcmp(g_backing_dev_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_backing_dev_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Confirm that the path to the persistent memory metadata file was persisted to
	 *  the backing device.
	 */
	CU_ASSERT(strncmp(g_path,
			  g_backing_dev_buf + REDUCE_BACKING_DEV_PATH_OFFSET,
			  REDUCE_PATH_MAX) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
626 
/* Run the init test for both 512B and 4KiB backing block sizes. */
static void
init_backing_dev(void)
{
	static const uint32_t blocklens[] = {512, 4096};
	size_t i;

	for (i = 0; i < sizeof(blocklens) / sizeof(blocklens[0]); i++) {
		_init_backing_dev(blocklens[i]);
	}
}
633 
/* Init a vol, unload it, then load it again from the backing device and
 * verify the loaded vol reconstructs the same pm file path and params.
 */
static void
_load(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	char pmem_file_path[REDUCE_PATH_MAX];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	/* Remember the pm file path so we can check that load rebuilds it. */
	memcpy(pmem_file_path, g_path, sizeof(pmem_file_path));

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(g_path, pmem_file_path, sizeof(pmem_file_path)) == 0);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
678 
/* Run the load test for both 512B and 4KiB backing block sizes. */
static void
load(void)
{
	static const uint32_t blocklens[] = {512, 4096};
	size_t i;

	for (i = 0; i < sizeof(blocklens) / sizeof(blocklens[0]); i++) {
		_load(blocklens[i]);
	}
}
685 
686 static uint64_t
687 _vol_get_chunk_map_index(struct spdk_reduce_vol *vol, uint64_t offset)
688 {
689 	uint64_t logical_map_index = offset / vol->logical_blocks_per_chunk;
690 
691 	return vol->pm_logical_map[logical_map_index];
692 }
693 
/* Completion for spdk_reduce_vol_writev(): record the status. */
static void
write_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}
699 
/* Completion for spdk_reduce_vol_readv(): record the status. */
static void
read_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}
705 
/* Verify chunk map and backing io unit bookkeeping across two full-chunk
 * writes to the same logical chunk: the second write must land in a new
 * chunk map and release the old map's chunk and io units.  Finally reload
 * the vol to confirm the maps survive unload/load.
 */
static void
_write_maps(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	const int bufsize = 16 * 1024; /* chunk size */
	char buf[bufsize];
	uint32_t num_lbas, i;
	uint64_t old_chunk0_map_index, new_chunk0_map_index;
	struct spdk_reduce_chunk_map *old_chunk0_map, *new_chunk0_map;

	params.chunk_size = bufsize;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	num_lbas = bufsize / params.logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* A freshly initialized vol has no chunks mapped.
	 * NOTE(review): 'i' is passed as a logical block offset, so this scans
	 * the first num_chunks logical blocks rather than one block per chunk;
	 * consider i * g_vol->logical_blocks_per_chunk to cover every chunk.
	 */
	for (i = 0; i < g_vol->params.vol_size / g_vol->params.chunk_size; i++) {
		CU_ASSERT(_vol_get_chunk_map_index(g_vol, i) == REDUCE_EMPTY_MAP_ENTRY);
	}

	/* Write one full, incompressible chunk at logical offset 0. */
	ut_build_data_buffer(buf, bufsize, 0x00, 1);
	iov.iov_base = buf;
	iov.iov_len = bufsize;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	old_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(old_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == true);

	old_chunk0_map = _reduce_vol_get_chunk_map(g_vol, old_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(old_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == true);
	}

	/* Overwrite chunk 0: reduce must allocate a brand-new chunk map... */
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	new_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(new_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(new_chunk0_map_index != old_chunk0_map_index);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, new_chunk0_map_index) == true);
	/* ...and release the old chunk map... */
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == false);

	/* ...along with all of the old map's backing io units. */
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == false);
	}

	new_chunk0_map = _reduce_vol_get_chunk_map(g_vol, new_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(new_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     new_chunk0_map->io_unit_index[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Reload and verify the persisted params survived. */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
796 
/* Run the chunk map test for both 512B and 4KiB backing block sizes. */
static void
write_maps(void)
{
	static const uint32_t blocklens[] = {512, 4096};
	size_t i;

	for (i = 0; i < sizeof(blocklens) / sizeof(blocklens[0]); i++) {
		_write_maps(blocklens[i]);
	}
}
803 
/* End-to-end data path test: write a couple of logical blocks, read the
 * whole chunk back block-by-block verifying written vs. zeroed blocks,
 * then unload/load and repeat with an overwrite and a write into a second
 * chunk (which also implicitly checks that the allocation bit arrays were
 * reloaded correctly).
 */
static void
_read_write(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	char buf[16 * 1024]; /* chunk size */
	char compare_buf[16 * 1024];
	uint32_t i;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 2 512-byte logical blocks, starting at LBA 2. */
	memset(buf, 0xAA, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Read every block of the first chunk: only LBAs 2-3 hold 0xAA,
	 * all other blocks must read back as zeroes.
	 */
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Overwrite what we just wrote with 0xCC */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	memset(buf, 0xCC, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xCC, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;

	/* Write 0xBB to 2 512-byte logical blocks, starting at LBA 37.
	 * This is writing into the second chunk of the volume.  This also
	 * enables implicitly checking that we reloaded the bit arrays
	 * correctly - making sure we don't use the first chunk map again
	 * for this new write - the first chunk map was already used by the
	 * write from before we unloaded and reloaded.
	 */
	memset(buf, 0xBB, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 37, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Scan the first two chunks: 0xCC at LBAs 2-3, 0xBB at LBAs 37-38,
	 * zeroes everywhere else.
	 */
	for (i = 0; i < 2 * params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			memset(compare_buf, 0xCC, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		case 37:
		case 38:
			memset(compare_buf, 0xBB, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
957 
/* Run the read/write test for both 512B and 4KiB backing block sizes. */
static void
read_write(void)
{
	static const uint32_t blocklens[] = {512, 4096};
	size_t i;

	for (i = 0; i < sizeof(blocklens) / sizeof(blocklens[0]); i++) {
		_read_write(blocklens[i]);
	}
}
964 
/* Completion for spdk_reduce_vol_destroy(): record the status. */
static void
destroy_cb(void *ctx, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}
970 
/* Init, unload, reload, unload, then destroy the vol and confirm that a
 * subsequent load fails with -EILSEQ (the superblock has been wiped).
 */
static void
destroy(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = -1;
	/* Clear the allocation mocks so destroy's internal load path uses
	 * real allocations.
	 */
	MOCK_CLEAR(spdk_dma_zmalloc);
	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	spdk_reduce_vol_destroy(&backing_dev, destroy_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* After destroy, the superblock signature is gone, so load must
	 * fail with -EILSEQ.
	 */
	g_reduce_errno = 0;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EILSEQ);

	backing_dev_destroy(&backing_dev);
}
1017 
1018 /* This test primarily checks that the reduce unit test infrastructure for asynchronous
1019  * backing device I/O operations is working correctly.
1020  */
1021 static void
1022 defer_bdev_io(void)
1023 {
1024 	struct spdk_reduce_vol_params params = {};
1025 	struct spdk_reduce_backing_dev backing_dev = {};
1026 	const uint32_t logical_block_size = 512;
1027 	struct iovec iov;
1028 	char buf[logical_block_size];
1029 	char compare_buf[logical_block_size];
1030 
1031 	params.chunk_size = 16 * 1024;
1032 	params.backing_io_unit_size = 4096;
1033 	params.logical_block_size = logical_block_size;
1034 	spdk_uuid_generate(&params.uuid);
1035 
1036 	backing_dev_init(&backing_dev, &params, 512);
1037 
1038 	g_vol = NULL;
1039 	g_reduce_errno = -1;
1040 	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
1041 	CU_ASSERT(g_reduce_errno == 0);
1042 	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
1043 
1044 	/* Write 0xAA to 1 512-byte logical block. */
1045 	memset(buf, 0xAA, params.logical_block_size);
1046 	iov.iov_base = buf;
1047 	iov.iov_len = params.logical_block_size;
1048 	g_reduce_errno = -100;
1049 	g_defer_bdev_io = true;
1050 	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
1051 	/* Callback should not have executed, so this should still equal -100. */
1052 	CU_ASSERT(g_reduce_errno == -100);
1053 	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
1054 	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
1055 	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
1056 	 * very compressible.
1057 	 */
1058 	CU_ASSERT(g_pending_bdev_io_count == 1);
1059 
1060 	backing_dev_io_execute(0);
1061 	CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
1062 	CU_ASSERT(g_reduce_errno == 0);
1063 
1064 	g_defer_bdev_io = false;
1065 	memset(compare_buf, 0xAA, sizeof(compare_buf));
1066 	memset(buf, 0xFF, sizeof(buf));
1067 	iov.iov_base = buf;
1068 	iov.iov_len = params.logical_block_size;
1069 	g_reduce_errno = -100;
1070 	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 1, read_cb, NULL);
1071 	CU_ASSERT(g_reduce_errno == 0);
1072 	CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);
1073 
1074 	g_reduce_errno = -1;
1075 	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
1076 	CU_ASSERT(g_reduce_errno == 0);
1077 
1078 	persistent_pm_buf_destroy();
1079 	backing_dev_destroy(&backing_dev);
1080 }
1081 
1082 static void
1083 overlapped(void)
1084 {
1085 	struct spdk_reduce_vol_params params = {};
1086 	struct spdk_reduce_backing_dev backing_dev = {};
1087 	const uint32_t logical_block_size = 512;
1088 	struct iovec iov;
1089 	char buf[2 * logical_block_size];
1090 	char compare_buf[2 * logical_block_size];
1091 
1092 	params.chunk_size = 16 * 1024;
1093 	params.backing_io_unit_size = 4096;
1094 	params.logical_block_size = logical_block_size;
1095 	spdk_uuid_generate(&params.uuid);
1096 
1097 	backing_dev_init(&backing_dev, &params, 512);
1098 
1099 	g_vol = NULL;
1100 	g_reduce_errno = -1;
1101 	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
1102 	CU_ASSERT(g_reduce_errno == 0);
1103 	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
1104 
1105 	/* Write 0xAA to 1 512-byte logical block. */
1106 	memset(buf, 0xAA, logical_block_size);
1107 	iov.iov_base = buf;
1108 	iov.iov_len = logical_block_size;
1109 	g_reduce_errno = -100;
1110 	g_defer_bdev_io = true;
1111 	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
1112 	/* Callback should not have executed, so this should still equal -100. */
1113 	CU_ASSERT(g_reduce_errno == -100);
1114 	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
1115 	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
1116 	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
1117 	 * very compressible.
1118 	 */
1119 	CU_ASSERT(g_pending_bdev_io_count == 1);
1120 
1121 	/* Now do an overlapped I/O to the same chunk. */
1122 	spdk_reduce_vol_writev(g_vol, &iov, 1, 1, 1, write_cb, NULL);
1123 	/* Callback should not have executed, so this should still equal -100. */
1124 	CU_ASSERT(g_reduce_errno == -100);
1125 	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
1126 	/* The second I/O overlaps with the first one.  So we should only see pending bdev_io
1127 	 * related to the first I/O here - the second one won't start until the first one is completed.
1128 	 */
1129 	CU_ASSERT(g_pending_bdev_io_count == 1);
1130 
1131 	backing_dev_io_execute(0);
1132 	CU_ASSERT(g_reduce_errno == 0);
1133 
1134 	g_defer_bdev_io = false;
1135 	memset(compare_buf, 0xAA, sizeof(compare_buf));
1136 	memset(buf, 0xFF, sizeof(buf));
1137 	iov.iov_base = buf;
1138 	iov.iov_len = 2 * logical_block_size;
1139 	g_reduce_errno = -100;
1140 	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 2, read_cb, NULL);
1141 	CU_ASSERT(g_reduce_errno == 0);
1142 	CU_ASSERT(memcmp(buf, compare_buf, 2 * logical_block_size) == 0);
1143 
1144 	g_reduce_errno = -1;
1145 	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
1146 	CU_ASSERT(g_reduce_errno == 0);
1147 
1148 	persistent_pm_buf_destroy();
1149 	backing_dev_destroy(&backing_dev);
1150 }
1151 
1152 #define BUFSIZE 4096
1153 
1154 static void
1155 compress_algorithm(void)
1156 {
1157 	uint8_t original_data[BUFSIZE];
1158 	uint8_t compressed_data[BUFSIZE];
1159 	uint8_t decompressed_data[BUFSIZE];
1160 	uint32_t compressed_len, decompressed_len;
1161 	int rc;
1162 
1163 	ut_build_data_buffer(original_data, BUFSIZE, 0xAA, BUFSIZE);
1164 	compressed_len = sizeof(compressed_data);
1165 	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX);
1166 	CU_ASSERT(rc == 0);
1167 	CU_ASSERT(compressed_len == 2);
1168 	CU_ASSERT(compressed_data[0] == UINT8_MAX);
1169 	CU_ASSERT(compressed_data[1] == 0xAA);
1170 
1171 	decompressed_len = sizeof(decompressed_data);
1172 	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
1173 	CU_ASSERT(rc == 0);
1174 	CU_ASSERT(decompressed_len == UINT8_MAX);
1175 	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);
1176 
1177 	compressed_len = sizeof(compressed_data);
1178 	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX + 1);
1179 	CU_ASSERT(rc == 0);
1180 	CU_ASSERT(compressed_len == 4);
1181 	CU_ASSERT(compressed_data[0] == UINT8_MAX);
1182 	CU_ASSERT(compressed_data[1] == 0xAA);
1183 	CU_ASSERT(compressed_data[2] == 1);
1184 	CU_ASSERT(compressed_data[3] == 0xAA);
1185 
1186 	decompressed_len = sizeof(decompressed_data);
1187 	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
1188 	CU_ASSERT(rc == 0);
1189 	CU_ASSERT(decompressed_len == UINT8_MAX + 1);
1190 	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);
1191 
1192 	ut_build_data_buffer(original_data, BUFSIZE, 0x00, 1);
1193 	compressed_len = sizeof(compressed_data);
1194 	rc = ut_compress(compressed_data, &compressed_len, original_data, 2048);
1195 	CU_ASSERT(rc == 0);
1196 	CU_ASSERT(compressed_len == 4096);
1197 	CU_ASSERT(compressed_data[0] == 1);
1198 	CU_ASSERT(compressed_data[1] == 0);
1199 	CU_ASSERT(compressed_data[4094] == 1);
1200 	CU_ASSERT(compressed_data[4095] == 0xFF);
1201 
1202 	decompressed_len = sizeof(decompressed_data);
1203 	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
1204 	CU_ASSERT(rc == 0);
1205 	CU_ASSERT(decompressed_len == 2048);
1206 	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);
1207 
1208 	compressed_len = sizeof(compressed_data);
1209 	rc = ut_compress(compressed_data, &compressed_len, original_data, 2049);
1210 	CU_ASSERT(rc == -ENOSPC);
1211 }
1212 
1213 int
1214 main(int argc, char **argv)
1215 {
1216 	CU_pSuite	suite = NULL;
1217 	unsigned int	num_failures;
1218 
1219 	if (CU_initialize_registry() != CUE_SUCCESS) {
1220 		return CU_get_error();
1221 	}
1222 
1223 	suite = CU_add_suite("reduce", NULL, NULL);
1224 	if (suite == NULL) {
1225 		CU_cleanup_registry();
1226 		return CU_get_error();
1227 	}
1228 
1229 	if (
1230 		CU_add_test(suite, "get_pm_file_size", get_pm_file_size) == NULL ||
1231 		CU_add_test(suite, "get_vol_size", get_vol_size) == NULL ||
1232 		CU_add_test(suite, "init_failure", init_failure) == NULL ||
1233 		CU_add_test(suite, "init_md", init_md) == NULL ||
1234 		CU_add_test(suite, "init_backing_dev", init_backing_dev) == NULL ||
1235 		CU_add_test(suite, "load", load) == NULL ||
1236 		CU_add_test(suite, "write_maps", write_maps) == NULL ||
1237 		CU_add_test(suite, "read_write", read_write) == NULL ||
1238 		CU_add_test(suite, "destroy", destroy) == NULL ||
1239 		CU_add_test(suite, "defer_bdev_io", defer_bdev_io) == NULL ||
1240 		CU_add_test(suite, "overlapped", overlapped) == NULL ||
1241 		CU_add_test(suite, "compress_algorithm", compress_algorithm) == NULL
1242 	) {
1243 		CU_cleanup_registry();
1244 		return CU_get_error();
1245 	}
1246 
1247 	CU_basic_set_mode(CU_BRM_VERBOSE);
1248 	CU_basic_run_tests();
1249 	num_failures = CU_get_number_of_failures();
1250 	CU_cleanup_registry();
1251 	return num_failures;
1252 }
1253