xref: /spdk/test/unit/lib/reduce/reduce.c/reduce_ut.c (revision 9889ab2dc80e40dae92dcef361d53dcba722043d)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 
38 #include "reduce/reduce.c"
39 #include "spdk_internal/mock.h"
40 #include "common/lib/test_env.c"
41 
42 static struct spdk_reduce_vol *g_vol;
43 static int g_reduce_errno;
44 static char *g_volatile_pm_buf;
45 static size_t g_volatile_pm_buf_len;
46 static char *g_persistent_pm_buf;
47 static size_t g_persistent_pm_buf_len;
48 static char *g_backing_dev_buf;
49 static char g_path[REDUCE_PATH_MAX];
50 static char *g_decomp_buf;
51 
52 #define TEST_MD_PATH "/tmp"
53 
54 enum ut_reduce_bdev_io_type {
55 	UT_REDUCE_IO_READV = 1,
56 	UT_REDUCE_IO_WRITEV = 2,
57 	UT_REDUCE_IO_UNMAP = 3,
58 };
59 
60 struct ut_reduce_bdev_io {
61 	enum ut_reduce_bdev_io_type type;
62 	struct spdk_reduce_backing_dev *backing_dev;
63 	struct iovec *iov;
64 	int iovcnt;
65 	uint64_t lba;
66 	uint32_t lba_count;
67 	struct spdk_reduce_vol_cb_args *args;
68 	TAILQ_ENTRY(ut_reduce_bdev_io)	link;
69 };
70 
71 static bool g_defer_bdev_io = false;
72 static TAILQ_HEAD(, ut_reduce_bdev_io) g_pending_bdev_io =
73 	TAILQ_HEAD_INITIALIZER(g_pending_bdev_io);
74 static uint32_t g_pending_bdev_io_count = 0;
75 
76 static void
77 sync_pm_buf(const void *addr, size_t length)
78 {
79 	uint64_t offset = (char *)addr - g_volatile_pm_buf;
80 
81 	memcpy(&g_persistent_pm_buf[offset], addr, length);
82 }
83 
/*
 * Stub for libpmem's pmem_msync().  Propagates the flushed range to the
 * persistent buffer via sync_pm_buf() and always reports success.
 */
int
pmem_msync(const void *addr, size_t length)
{
	sync_pm_buf(addr, length);
	return 0;
}
90 
/*
 * Stub for libpmem's pmem_persist().  Same effect as pmem_msync() here:
 * copy the range into the persistent buffer.
 */
void
pmem_persist(const void *addr, size_t len)
{
	sync_pm_buf(addr, len);
}
96 
/*
 * Verify that _get_pm_file_size() accounts for the superblock, the logical
 * map, and the chunk maps (including the extra in-flight chunks), within
 * alignment padding.
 */
static void
get_pm_file_size(void)
{
	struct spdk_reduce_vol_params params;
	uint64_t pm_size, expected_pm_size;

	params.backing_io_unit_size = 4096;
	params.chunk_size = 4096 * 4;
	params.vol_size = 4096 * 4 * 100;

	pm_size = _get_pm_file_size(&params);
	expected_pm_size = sizeof(struct spdk_reduce_vol_superblock);
	/* 100 chunks in logical map * 8 bytes per chunk */
	expected_pm_size += 100 * sizeof(uint64_t);
	/* 100 chunks * (chunk struct size + 4 backing io units per chunk * 8 bytes per backing io unit) */
	expected_pm_size += 100 * (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
	/* reduce allocates some extra chunks too for in-flight writes when logical map
	 * is full.  REDUCE_NUM_EXTRA_CHUNKS is a private #define in reduce.c.  Here we need the num chunks
	 * times (chunk struct size + 4 backing io units per chunk * 8 bytes per backing io unit).
	 */
	expected_pm_size += REDUCE_NUM_EXTRA_CHUNKS *
			    (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
	/* reduce will add some padding so numbers may not match exactly.  Make sure
	 * they are close though.
	 */
	CU_ASSERT((pm_size - expected_pm_size) <= REDUCE_PM_SIZE_ALIGNMENT);
}
124 
/*
 * The usable volume size must come out smaller than the backing device,
 * since reduce reserves backing space for metadata and extra chunks.
 */
static void
get_vol_size(void)
{
	uint64_t chunk = 16 * 1024;
	uint64_t backing_size = 1000 * chunk;

	CU_ASSERT(_get_vol_size(chunk, backing_size) < backing_size);
}
134 
135 void *
136 pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
137 	      size_t *mapped_lenp, int *is_pmemp)
138 {
139 	CU_ASSERT(g_volatile_pm_buf == NULL);
140 	snprintf(g_path, sizeof(g_path), "%s", path);
141 	*is_pmemp = 1;
142 
143 	if (g_persistent_pm_buf == NULL) {
144 		g_persistent_pm_buf = calloc(1, len);
145 		g_persistent_pm_buf_len = len;
146 		SPDK_CU_ASSERT_FATAL(g_persistent_pm_buf != NULL);
147 	}
148 
149 	*mapped_lenp = g_persistent_pm_buf_len;
150 	g_volatile_pm_buf = calloc(1, g_persistent_pm_buf_len);
151 	SPDK_CU_ASSERT_FATAL(g_volatile_pm_buf != NULL);
152 	memcpy(g_volatile_pm_buf, g_persistent_pm_buf, g_persistent_pm_buf_len);
153 	g_volatile_pm_buf_len = g_persistent_pm_buf_len;
154 
155 	return g_volatile_pm_buf;
156 }
157 
/*
 * Stub for libpmem's pmem_unmap().  Releases only the volatile copy;
 * g_persistent_pm_buf is kept so a later pmem_map_file() of the same path
 * sees the "persisted" contents.
 */
int
pmem_unmap(void *addr, size_t len)
{
	CU_ASSERT(addr == g_volatile_pm_buf);
	CU_ASSERT(len == g_volatile_pm_buf_len);
	free(g_volatile_pm_buf);
	g_volatile_pm_buf = NULL;
	g_volatile_pm_buf_len = 0;

	return 0;
}
169 
/*
 * Free the simulated on-disk pmem file.  Called either directly by tests
 * when they are done with a volume, or via __wrap_unlink().
 */
static void
persistent_pm_buf_destroy(void)
{
	CU_ASSERT(g_persistent_pm_buf != NULL);
	free(g_persistent_pm_buf);
	g_persistent_pm_buf = NULL;
	g_persistent_pm_buf_len = 0;
}
178 
179 int __wrap_unlink(const char *path);
180 
181 int
182 __wrap_unlink(const char *path)
183 {
184 	if (strcmp(g_path, path) != 0) {
185 		return ENOENT;
186 	}
187 
188 	persistent_pm_buf_destroy();
189 	return 0;
190 }
191 
/*
 * Completion callback for spdk_reduce_vol_init().  Records the created
 * volume and the result code in globals for the test to inspect.
 */
static void
init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}
198 
/*
 * Completion callback for spdk_reduce_vol_load().  Records the loaded
 * volume and the result code in globals for the test to inspect.
 */
static void
load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}
205 
/* Completion callback for spdk_reduce_vol_unload(); records the result. */
static void
unload_cb(void *cb_arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}
211 
/*
 * Verify that spdk_reduce_vol_init() rejects invalid configurations:
 * first a backing device that is too small, then one that is large enough
 * but has no I/O callbacks set.  Both must fail with -EINVAL.
 */
static void
init_failure(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	backing_dev.blocklen = 512;
	/* This blockcnt is too small for a reduce vol - there needs to be
	 *  enough space for at least REDUCE_NUM_EXTRA_CHUNKS + 1 chunks.
	 */
	backing_dev.blockcnt = 20;

	params.vol_size = 0;
	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = backing_dev.blocklen;
	params.logical_block_size = 512;

	/* backing_dev has an invalid size.  This should fail. */
	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);

	/* backing_dev now has valid size, but backing_dev still has null
	 *  function pointers.  This should fail.
	 */
	backing_dev.blockcnt = 20000;

	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);
}
247 
248 static void
249 backing_dev_readv_execute(struct spdk_reduce_backing_dev *backing_dev,
250 			  struct iovec *iov, int iovcnt,
251 			  uint64_t lba, uint32_t lba_count,
252 			  struct spdk_reduce_vol_cb_args *args)
253 {
254 	char *offset;
255 	int i;
256 
257 	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
258 	for (i = 0; i < iovcnt; i++) {
259 		memcpy(iov[i].iov_base, offset, iov[i].iov_len);
260 		offset += iov[i].iov_len;
261 	}
262 	args->cb_fn(args->cb_arg, 0);
263 }
264 
265 static void
266 backing_dev_insert_io(enum ut_reduce_bdev_io_type type, struct spdk_reduce_backing_dev *backing_dev,
267 		      struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
268 		      struct spdk_reduce_vol_cb_args *args)
269 {
270 	struct ut_reduce_bdev_io *ut_bdev_io;
271 
272 	ut_bdev_io = calloc(1, sizeof(*ut_bdev_io));
273 	SPDK_CU_ASSERT_FATAL(ut_bdev_io != NULL);
274 
275 	ut_bdev_io->type = type;
276 	ut_bdev_io->backing_dev = backing_dev;
277 	ut_bdev_io->iov = iov;
278 	ut_bdev_io->iovcnt = iovcnt;
279 	ut_bdev_io->lba = lba;
280 	ut_bdev_io->lba_count = lba_count;
281 	ut_bdev_io->args = args;
282 	TAILQ_INSERT_TAIL(&g_pending_bdev_io, ut_bdev_io, link);
283 	g_pending_bdev_io_count++;
284 }
285 
286 static void
287 backing_dev_readv(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
288 		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
289 {
290 	if (g_defer_bdev_io == false) {
291 		CU_ASSERT(g_pending_bdev_io_count == 0);
292 		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
293 		backing_dev_readv_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
294 		return;
295 	}
296 
297 	backing_dev_insert_io(UT_REDUCE_IO_READV, backing_dev, iov, iovcnt, lba, lba_count, args);
298 }
299 
300 static void
301 backing_dev_writev_execute(struct spdk_reduce_backing_dev *backing_dev,
302 			   struct iovec *iov, int iovcnt,
303 			   uint64_t lba, uint32_t lba_count,
304 			   struct spdk_reduce_vol_cb_args *args)
305 {
306 	char *offset;
307 	int i;
308 
309 	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
310 	for (i = 0; i < iovcnt; i++) {
311 		memcpy(offset, iov[i].iov_base, iov[i].iov_len);
312 		offset += iov[i].iov_len;
313 	}
314 	args->cb_fn(args->cb_arg, 0);
315 }
316 
317 static void
318 backing_dev_writev(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
319 		   uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
320 {
321 	if (g_defer_bdev_io == false) {
322 		CU_ASSERT(g_pending_bdev_io_count == 0);
323 		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
324 		backing_dev_writev_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
325 		return;
326 	}
327 
328 	backing_dev_insert_io(UT_REDUCE_IO_WRITEV, backing_dev, iov, iovcnt, lba, lba_count, args);
329 }
330 
331 static void
332 backing_dev_unmap_execute(struct spdk_reduce_backing_dev *backing_dev,
333 			  uint64_t lba, uint32_t lba_count,
334 			  struct spdk_reduce_vol_cb_args *args)
335 {
336 	char *offset;
337 
338 	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
339 	memset(offset, 0, lba_count * backing_dev->blocklen);
340 	args->cb_fn(args->cb_arg, 0);
341 }
342 
343 static void
344 backing_dev_unmap(struct spdk_reduce_backing_dev *backing_dev,
345 		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
346 {
347 	if (g_defer_bdev_io == false) {
348 		CU_ASSERT(g_pending_bdev_io_count == 0);
349 		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
350 		backing_dev_unmap_execute(backing_dev, lba, lba_count, args);
351 		return;
352 	}
353 
354 	backing_dev_insert_io(UT_REDUCE_IO_UNMAP, backing_dev, NULL, 0, lba, lba_count, args);
355 }
356 
/*
 * Drain queued backing-device I/O in FIFO order.  Executes up to 'count'
 * pending operations; count == 0 means drain everything.  Only valid while
 * deferred mode (g_defer_bdev_io) is enabled.
 */
static void
backing_dev_io_execute(uint32_t count)
{
	struct ut_reduce_bdev_io *ut_bdev_io;
	uint32_t done = 0;

	CU_ASSERT(g_defer_bdev_io == true);
	while (!TAILQ_EMPTY(&g_pending_bdev_io) && (count == 0 || done < count)) {
		ut_bdev_io = TAILQ_FIRST(&g_pending_bdev_io);
		TAILQ_REMOVE(&g_pending_bdev_io, ut_bdev_io, link);
		g_pending_bdev_io_count--;
		/* Dispatch to the matching *_execute helper, which also fires
		 * the completion callback for the operation.
		 */
		switch (ut_bdev_io->type) {
		case UT_REDUCE_IO_READV:
			backing_dev_readv_execute(ut_bdev_io->backing_dev,
						  ut_bdev_io->iov, ut_bdev_io->iovcnt,
						  ut_bdev_io->lba, ut_bdev_io->lba_count,
						  ut_bdev_io->args);
			break;
		case UT_REDUCE_IO_WRITEV:
			backing_dev_writev_execute(ut_bdev_io->backing_dev,
						   ut_bdev_io->iov, ut_bdev_io->iovcnt,
						   ut_bdev_io->lba, ut_bdev_io->lba_count,
						   ut_bdev_io->args);
			break;
		case UT_REDUCE_IO_UNMAP:
			backing_dev_unmap_execute(ut_bdev_io->backing_dev,
						  ut_bdev_io->lba, ut_bdev_io->lba_count,
						  ut_bdev_io->args);
			break;
		default:
			CU_ASSERT(false);
			break;
		}
		free(ut_bdev_io);
		done++;
	}
}
394 
/*
 * Trivial run-length-encoding "compressor" used by the compression mock.
 * Output is a sequence of (count, value) byte pairs, where count is capped
 * at UINT8_MAX per run.
 *
 * On entry *compressed_len holds the capacity of outbuf; on success it is
 * updated to the number of bytes produced and 0 is returned.  Returns
 * -ENOSPC if the output buffer is too small.
 */
static int
ut_compress(char *outbuf, uint32_t *compressed_len, char *inbuf, uint32_t inbuflen)
{
	uint32_t out_off = 0;
	char run_char;
	uint8_t run_len;

	while (inbuflen > 0) {
		/* Each run emits exactly two bytes. */
		if (out_off + 2 > *compressed_len) {
			return -ENOSPC;
		}

		run_char = inbuf[0];
		run_len = 1;
		inbuf++;
		inbuflen--;

		while (inbuflen > 0 && inbuf[0] == run_char && run_len < UINT8_MAX) {
			run_len++;
			inbuf++;
			inbuflen--;
		}

		outbuf[out_off++] = run_len;
		outbuf[out_off++] = run_char;
	}

	*compressed_len = out_off;
	return 0;
}
428 
/*
 * Inverse of ut_compress(): expand a stream of (count, value) byte pairs.
 *
 * On entry *compressed_len holds the capacity of outbuf; on success it is
 * updated to the number of decompressed bytes and 0 is returned.  Returns
 * -ENOSPC if the output buffer would overflow.
 */
static int
ut_decompress(uint8_t *outbuf, uint32_t *compressed_len, uint8_t *inbuf, uint32_t inbuflen)
{
	uint32_t out_len = 0;
	uint8_t run;

	/* The RLE stream must be whole (count, value) pairs. */
	SPDK_CU_ASSERT_FATAL(inbuflen % 2 == 0);

	for (; inbuflen > 0; inbuflen -= 2, inbuf += 2) {
		run = inbuf[0];

		if (out_len + run > *compressed_len) {
			return -ENOSPC;
		}

		memset(outbuf, inbuf[1], run);
		outbuf += run;
		out_len += run;
	}

	*compressed_len = out_len;
	return 0;
}
453 
/*
 * Fill 'data' with a deterministic, compressible pattern: 'repeat' copies
 * of init_val, then 'repeat' copies of init_val + 1, and so on (the value
 * wraps naturally at 256).
 */
static void
ut_build_data_buffer(uint8_t *data, uint32_t data_len, uint8_t init_val, uint32_t repeat)
{
	uint32_t i, run = 0;
	uint8_t val = init_val;

	SPDK_CU_ASSERT_FATAL(repeat > 0);

	for (i = 0; i < data_len; i++) {
		data[i] = val;
		if (++run == repeat) {
			val++;
			run = 0;
		}
	}
}
472 
473 static void
474 backing_dev_compress(struct spdk_reduce_backing_dev *backing_dev,
475 		     struct iovec *src_iov, int src_iovcnt,
476 		     struct iovec *dst_iov, int dst_iovcnt,
477 		     struct spdk_reduce_vol_cb_args *args)
478 {
479 	uint32_t compressed_len;
480 	uint64_t total_length = 0;
481 	char *buf = g_decomp_buf;
482 	int rc, i;
483 
484 	CU_ASSERT(dst_iovcnt == 1);
485 
486 	for (i = 0; i < src_iovcnt; i++) {
487 		memcpy(buf, src_iov[i].iov_base, src_iov[i].iov_len);
488 		buf += src_iov[i].iov_len;
489 		total_length += src_iov[i].iov_len;
490 	}
491 
492 	compressed_len = dst_iov[0].iov_len;
493 	rc = ut_compress(dst_iov[0].iov_base, &compressed_len,
494 			 g_decomp_buf, total_length);
495 
496 	args->cb_fn(args->cb_arg, rc ? rc : (int)compressed_len);
497 }
498 
499 static void
500 backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
501 		       struct iovec *src_iov, int src_iovcnt,
502 		       struct iovec *dst_iov, int dst_iovcnt,
503 		       struct spdk_reduce_vol_cb_args *args)
504 {
505 	uint32_t decompressed_len = 0;
506 	char *buf = g_decomp_buf;
507 	int rc, i;
508 
509 	CU_ASSERT(src_iovcnt == 1);
510 
511 	for (i = 0; i < dst_iovcnt; i++) {
512 		decompressed_len += dst_iov[i].iov_len;
513 	}
514 
515 	rc = ut_decompress(g_decomp_buf, &decompressed_len,
516 			   src_iov[0].iov_base, src_iov[0].iov_len);
517 
518 	for (i = 0; i < dst_iovcnt; i++) {
519 		memcpy(dst_iov[i].iov_base, buf, dst_iov[i].iov_len);
520 		buf += dst_iov[i].iov_len;
521 	}
522 
523 	args->cb_fn(args->cb_arg, rc ? rc : (int)decompressed_len);
524 }
525 
526 static void
527 backing_dev_destroy(struct spdk_reduce_backing_dev *backing_dev)
528 {
529 	/* We don't free this during backing_dev_close so that we can test init/unload/load
530 	 *  scenarios.
531 	 */
532 	free(g_backing_dev_buf);
533 	free(g_decomp_buf);
534 	g_backing_dev_buf = NULL;
535 }
536 
537 static void
538 backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce_vol_params *params,
539 		 uint32_t backing_blocklen)
540 {
541 	int64_t size;
542 
543 	size = 4 * 1024 * 1024;
544 	backing_dev->blocklen = backing_blocklen;
545 	backing_dev->blockcnt = size / backing_dev->blocklen;
546 	backing_dev->readv = backing_dev_readv;
547 	backing_dev->writev = backing_dev_writev;
548 	backing_dev->unmap = backing_dev_unmap;
549 	backing_dev->compress = backing_dev_compress;
550 	backing_dev->decompress = backing_dev_decompress;
551 
552 	g_decomp_buf = calloc(1, params->chunk_size);
553 	SPDK_CU_ASSERT_FATAL(g_decomp_buf != NULL);
554 
555 	g_backing_dev_buf = calloc(1, size);
556 	SPDK_CU_ASSERT_FATAL(g_backing_dev_buf != NULL);
557 }
558 
/*
 * Verify that spdk_reduce_vol_init() writes the expected metadata to the
 * pmem file: the signature, the vol params, an empty logical map and chunk
 * maps, and a pm file path of the form TEST_MD_PATH "/" <uuid>.
 */
static void
init_md(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};
	struct spdk_uuid uuid;
	uint64_t *entry;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	/* Confirm that reduce persisted the params to metadata.  The 8-byte
	 * signature occupies the start of the superblock, followed by params.
	 */
	CU_ASSERT(memcmp(g_persistent_pm_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_persistent_pm_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Now confirm that contents of pm_file after the superblock have been initialized
	 *  to REDUCE_EMPTY_MAP_ENTRY.
	 */
	entry = (uint64_t *)(g_persistent_pm_buf + sizeof(struct spdk_reduce_vol_superblock));
	while (entry != (uint64_t *)(g_persistent_pm_buf + g_vol->pm_file.size)) {
		CU_ASSERT(*entry == REDUCE_EMPTY_MAP_ENTRY);
		entry++;
	}

	/* Check that the pm file path was constructed correctly.  It should be in
	 * the form:
	 * TEST_MD_PATH + "/" + <uuid string>
	 */
	CU_ASSERT(strncmp(&g_path[0], TEST_MD_PATH, strlen(TEST_MD_PATH)) == 0);
	CU_ASSERT(g_path[strlen(TEST_MD_PATH)] == '/');
	CU_ASSERT(spdk_uuid_parse(&uuid, &g_path[strlen(TEST_MD_PATH) + 1]) == 0);
	CU_ASSERT(spdk_uuid_compare(&uuid, spdk_reduce_vol_get_uuid(g_vol)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(g_volatile_pm_buf == NULL);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
609 
/*
 * Verify that spdk_reduce_vol_init() persists the signature, the vol
 * params, and the pmem file path to the backing device, for the given
 * backing device block size.
 */
static void
_init_backing_dev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	/* Confirm that libreduce persisted the params to the backing device. */
	CU_ASSERT(memcmp(g_backing_dev_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_backing_dev_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Confirm that the path to the persistent memory metadata file was persisted to
	 *  the backing device.
	 */
	CU_ASSERT(strncmp(g_path,
			  g_backing_dev_buf + REDUCE_BACKING_DEV_PATH_OFFSET,
			  REDUCE_PATH_MAX) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
649 
/* Run the backing-device init checks with both supported block sizes. */
static void
init_backing_dev(void)
{
	static const uint32_t blocklens[] = {512, 4096};
	size_t i;

	for (i = 0; i < sizeof(blocklens) / sizeof(blocklens[0]); i++) {
		_init_backing_dev(blocklens[i]);
	}
}
656 
/*
 * Verify the init/unload/load round trip: a freshly loaded volume must map
 * the same pmem file path and report the same vol params as the volume
 * that was initialized, for the given backing device block size.
 */
static void
_load(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	char pmem_file_path[REDUCE_PATH_MAX];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	/* Remember the path so we can check load re-opens the same file. */
	memcpy(pmem_file_path, g_path, sizeof(pmem_file_path));

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(g_path, pmem_file_path, sizeof(pmem_file_path)) == 0);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
701 
/* Run the load round-trip checks with both supported block sizes. */
static void
load(void)
{
	static const uint32_t blocklens[] = {512, 4096};
	size_t i;

	for (i = 0; i < sizeof(blocklens) / sizeof(blocklens[0]); i++) {
		_load(blocklens[i]);
	}
}
708 
709 static uint64_t
710 _vol_get_chunk_map_index(struct spdk_reduce_vol *vol, uint64_t offset)
711 {
712 	uint64_t logical_map_index = offset / vol->logical_blocks_per_chunk;
713 
714 	return vol->pm_logical_map[logical_map_index];
715 }
716 
/* Completion callback for spdk_reduce_vol_writev(); records the result. */
static void
write_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}
722 
/* Completion callback for spdk_reduce_vol_readv(); records the result. */
static void
read_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}
728 
/*
 * Verify chunk map bookkeeping across writes: the first full-chunk write
 * allocates a chunk map and backing io units; rewriting the same chunk
 * allocates a new map and frees the old one (and its io units); and the
 * allocations survive an unload/load cycle.
 */
static void
_write_maps(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	const int bufsize = 16 * 1024; /* chunk size */
	char buf[bufsize];
	uint32_t num_lbas, i;
	uint64_t old_chunk0_map_index, new_chunk0_map_index;
	struct spdk_reduce_chunk_map *old_chunk0_map, *new_chunk0_map;

	params.chunk_size = bufsize;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	num_lbas = bufsize / params.logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* A fresh volume has no chunk maps assigned in the logical map. */
	for (i = 0; i < g_vol->params.vol_size / g_vol->params.chunk_size; i++) {
		CU_ASSERT(_vol_get_chunk_map_index(g_vol, i) == REDUCE_EMPTY_MAP_ENTRY);
	}

	/* Write one full chunk (incompressible 0,1,2,... pattern). */
	ut_build_data_buffer(buf, bufsize, 0x00, 1);
	iov.iov_base = buf;
	iov.iov_len = bufsize;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	old_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(old_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == true);

	old_chunk0_map = _reduce_vol_get_chunk_map(g_vol, old_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(old_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == true);
	}

	/* Rewrite the same chunk: reduce must allocate a new chunk map and
	 * release the old one along with its backing io units.
	 */
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	new_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(new_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(new_chunk0_map_index != old_chunk0_map_index);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, new_chunk0_map_index) == true);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == false);

	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == false);
	}

	new_chunk0_map = _reduce_vol_get_chunk_map(g_vol, new_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(new_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     new_chunk0_map->io_unit_index[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* The maps must be reloadable from the persistent metadata. */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
819 
/* Run the chunk-map bookkeeping checks with both supported block sizes. */
static void
write_maps(void)
{
	static const uint32_t blocklens[] = {512, 4096};
	size_t i;

	for (i = 0; i < sizeof(blocklens) / sizeof(blocklens[0]); i++) {
		_write_maps(blocklens[i]);
	}
}
826 
/*
 * End-to-end read/write checks: write a couple of logical blocks, read the
 * whole chunk back verifying written vs. zeroed blocks, then repeat after
 * unload/load cycles to confirm data and bit arrays persist.  Runs with
 * the given backing device block size.
 */
static void
_read_write(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	char buf[16 * 1024]; /* chunk size */
	char compare_buf[16 * 1024];
	uint32_t i;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 2 512-byte logical blocks, starting at LBA 2. */
	memset(buf, 0xAA, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Read back every block in the chunk: LBAs 2 and 3 hold 0xAA, the
	 * rest of the chunk must read back as zeroes.
	 */
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Overwrite what we just wrote with 0xCC */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	memset(buf, 0xCC, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xCC, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;

	/* Write 0xBB to 2 512-byte logical blocks, starting at LBA 37.
	 * This is writing into the second chunk of the volume.  This also
	 * enables implicitly checking that we reloaded the bit arrays
	 * correctly - making sure we don't use the first chunk map again
	 * for this new write - the first chunk map was already used by the
	 * write from before we unloaded and reloaded.
	 */
	memset(buf, 0xBB, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 37, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	for (i = 0; i < 2 * params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			memset(compare_buf, 0xCC, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		case 37:
		case 38:
			memset(compare_buf, 0xBB, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
980 
/*
 * Run the full read/write scenario once for each supported logical
 * block size (512-byte and 4096-byte blocks).
 */
static void
read_write(void)
{
	static const uint32_t block_sizes[] = { 512, 4096 };
	size_t i;

	for (i = 0; i < sizeof(block_sizes) / sizeof(block_sizes[0]); i++) {
		_read_write(block_sizes[i]);
	}
}
987 
988 static void
989 destroy_cb(void *ctx, int reduce_errno)
990 {
991 	g_reduce_errno = reduce_errno;
992 }
993 
/*
 * Verify spdk_reduce_vol_destroy(): a destroyed volume's backing device
 * must no longer load as a valid reduce volume.
 */
static void
destroy(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	/* Create a fresh volume on the backing device. */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Confirm the volume can be loaded back before it is destroyed. */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = -1;
	/* NOTE(review): clearing the allocator mocks here presumably lets the
	 * destroy path perform real allocations — confirm against the mock
	 * setup in earlier tests.
	 */
	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	spdk_reduce_vol_destroy(&backing_dev, destroy_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Loading a destroyed volume must fail with -EILSEQ (metadata is
	 * no longer a valid reduce superblock).
	 */
	g_reduce_errno = 0;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EILSEQ);

	backing_dev_destroy(&backing_dev);
}
1039 
1040 /* This test primarily checks that the reduce unit test infrastructure for asynchronous
1041  * backing device I/O operations is working correctly.
1042  */
static void
defer_bdev_io(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[logical_block_size];
	char compare_buf[logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	/* Queue backing-device I/O on g_pending_bdev_io instead of
	 * completing it inline, so we can observe the in-flight state.
	 */
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
	 * very compressible.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	/* Drain every queued backing I/O; the write callback fires now. */
	backing_dev_io_execute(0);
	CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
	CU_ASSERT(g_reduce_errno == 0);

	/* Read the block back synchronously and verify the 0xAA payload. */
	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 1, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
1103 
/*
 * Verify that an I/O overlapping an in-flight I/O on the same chunk is
 * queued behind it rather than submitted concurrently.
 */
static void
overlapped(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[2 * logical_block_size];
	char compare_buf[2 * logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = logical_block_size;
	g_reduce_errno = -100;
	/* Defer backing I/O so the first write stays in flight while we
	 * issue the overlapping second write.
	 */
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
	 * very compressible.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	/* Now do an overlapped I/O to the same chunk. */
	spdk_reduce_vol_writev(g_vol, &iov, 1, 1, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* The second I/O overlaps with the first one.  So we should only see pending bdev_io
	 * related to the first I/O here - the second one won't start until the first one is completed.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	/* Drain the backing I/O.  Completing the first write releases the
	 * queued overlapped write; the read below confirms both blocks
	 * were ultimately written.
	 */
	backing_dev_io_execute(0);
	CU_ASSERT(g_reduce_errno == 0);

	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = 2 * logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 2, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, 2 * logical_block_size) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}
1173 
#define BUFSIZE 4096

/*
 * Exercise the unit-test RLE compressor (ut_compress/ut_decompress).
 * The encoded stream is (run_length, byte_value) pairs, so output bytes
 * are checked positionally below.
 */
static void
compress_algorithm(void)
{
	uint8_t original_data[BUFSIZE];
	uint8_t compressed_data[BUFSIZE];
	uint8_t decompressed_data[BUFSIZE];
	uint32_t compressed_len, decompressed_len;
	int rc;

	/* A run of 255 (UINT8_MAX) 0xAA bytes encodes as a single pair. */
	ut_build_data_buffer(original_data, BUFSIZE, 0xAA, BUFSIZE);
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 2);
	CU_ASSERT(compressed_data[0] == UINT8_MAX);
	CU_ASSERT(compressed_data[1] == 0xAA);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == UINT8_MAX);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	/* 256 bytes exceeds the max run length of 255, so the encoder must
	 * emit two pairs: (255, 0xAA) then (1, 0xAA).
	 */
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX + 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 4);
	CU_ASSERT(compressed_data[0] == UINT8_MAX);
	CU_ASSERT(compressed_data[1] == 0xAA);
	CU_ASSERT(compressed_data[2] == 1);
	CU_ASSERT(compressed_data[3] == 0xAA);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == UINT8_MAX + 1);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	/* Incompressible input: a period-1 pattern (0x00, 0x01, 0x02, ...)
	 * means every run has length 1, so 2048 input bytes encode to
	 * exactly 4096 output bytes - no space is saved.
	 */
	ut_build_data_buffer(original_data, BUFSIZE, 0x00, 1);
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, 2048);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 4096);
	CU_ASSERT(compressed_data[0] == 1);
	CU_ASSERT(compressed_data[1] == 0);
	CU_ASSERT(compressed_data[4094] == 1);
	CU_ASSERT(compressed_data[4095] == 0xFF);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == 2048);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	/* 2049 incompressible bytes would need 4098 output bytes, which
	 * overflows the 4096-byte destination - expect -ENOSPC.
	 */
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, 2049);
	CU_ASSERT(rc == -ENOSPC);
}
1234 
1235 int
1236 main(int argc, char **argv)
1237 {
1238 	CU_pSuite	suite = NULL;
1239 	unsigned int	num_failures;
1240 
1241 	if (CU_initialize_registry() != CUE_SUCCESS) {
1242 		return CU_get_error();
1243 	}
1244 
1245 	suite = CU_add_suite("reduce", NULL, NULL);
1246 	if (suite == NULL) {
1247 		CU_cleanup_registry();
1248 		return CU_get_error();
1249 	}
1250 
1251 	if (
1252 		CU_add_test(suite, "get_pm_file_size", get_pm_file_size) == NULL ||
1253 		CU_add_test(suite, "get_vol_size", get_vol_size) == NULL ||
1254 		CU_add_test(suite, "init_failure", init_failure) == NULL ||
1255 		CU_add_test(suite, "init_md", init_md) == NULL ||
1256 		CU_add_test(suite, "init_backing_dev", init_backing_dev) == NULL ||
1257 		CU_add_test(suite, "load", load) == NULL ||
1258 		CU_add_test(suite, "write_maps", write_maps) == NULL ||
1259 		CU_add_test(suite, "read_write", read_write) == NULL ||
1260 		CU_add_test(suite, "destroy", destroy) == NULL ||
1261 		CU_add_test(suite, "defer_bdev_io", defer_bdev_io) == NULL ||
1262 		CU_add_test(suite, "overlapped", overlapped) == NULL ||
1263 		CU_add_test(suite, "compress_algorithm", compress_algorithm) == NULL
1264 	) {
1265 		CU_cleanup_registry();
1266 		return CU_get_error();
1267 	}
1268 
1269 	CU_basic_set_mode(CU_BRM_VERBOSE);
1270 	CU_basic_run_tests();
1271 	num_failures = CU_get_number_of_failures();
1272 	CU_cleanup_registry();
1273 	return num_failures;
1274 }
1275