/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "reduce/reduce.c"
#include "spdk_internal/mock.h"
#define UNIT_TEST_NO_VTOPHYS
#include "common/lib/test_env.c"
#undef UNIT_TEST_NO_VTOPHYS

static struct spdk_reduce_vol *g_vol;
static int g_reduce_errno;
static char *g_volatile_pm_buf;
static size_t g_volatile_pm_buf_len;
static char *g_persistent_pm_buf;
static size_t g_persistent_pm_buf_len;
static char *g_backing_dev_buf;
static char g_path[REDUCE_PATH_MAX];
static char *g_decomp_buf;
static int g_decompressed_len;

#define TEST_MD_PATH "/tmp"

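/*
 * Stub for spdk_vtophys(): the unit test has no DPDK memory map, so the
 * virtual address is simply returned as the "physical" address.  The stub
 * still clamps *size at the next 2MB page boundary, mimicking the real
 * function's contract that a single translation never spans a huge page.
 */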
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	/* Add 1 to the buf address before rounding up, so that when buf sits
	 * exactly at the start of a 2MB page we still get the end of that page
	 * rather than buf itself.
	 */
	const uint8_t *page_2mb_end = (const uint8_t *)SPDK_ALIGN_CEIL((uintptr_t)buf + 1, VALUE_2MB);
	uint64_t bytes_to_page_end = page_2mb_end - (const uint8_t *)buf;
	uint64_t _size;

	if (*size) {
		_size = *size;
		_size = spdk_min(_size, bytes_to_page_end);
		*size = _size;
	}

	return (uintptr_t)buf;
}

enum ut_reduce_bdev_io_type {
	UT_REDUCE_IO_READV = 1,
	UT_REDUCE_IO_WRITEV = 2,
	UT_REDUCE_IO_UNMAP = 3,
};

struct ut_reduce_bdev_io {
	enum ut_reduce_bdev_io_type type;
	struct spdk_reduce_backing_dev *backing_dev;
	struct iovec *iov;
	int iovcnt;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_reduce_vol_cb_args *args;
	TAILQ_ENTRY(ut_reduce_bdev_io)	link;
};

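/*
 * When g_defer_bdev_io is true, backing device I/O is queued on
 * g_pending_bdev_io instead of completing inline; tests then call
 * backing_dev_io_execute() to complete it.  This simulates an asynchronous
 * backing bdev.
 */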
static bool g_defer_bdev_io = false;
static TAILQ_HEAD(, ut_reduce_bdev_io) g_pending_bdev_io =
	TAILQ_HEAD_INITIALIZER(g_pending_bdev_io);
static uint32_t g_pending_bdev_io_count = 0;

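/*
 * The pmem mocks below keep two buffers: g_volatile_pm_buf stands in for the
 * mapped (volatile) view, and g_persistent_pm_buf for the media.  Data only
 * becomes "persistent" when pmem_persist()/pmem_msync() copies it across,
 * which lets the tests verify that libreduce flushes its metadata.
 */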
static void
sync_pm_buf(const void *addr, size_t length)
{
	uint64_t offset = (char *)addr - g_volatile_pm_buf;

	memcpy(&g_persistent_pm_buf[offset], addr, length);
}

int
pmem_msync(const void *addr, size_t length)
{
	sync_pm_buf(addr, length);
	return 0;
}

void
pmem_persist(const void *addr, size_t len)
{
	sync_pm_buf(addr, len);
}

static void
get_pm_file_size(void)
{
	struct spdk_reduce_vol_params params;
	uint64_t pm_size, expected_pm_size;

	params.backing_io_unit_size = 4096;
	params.chunk_size = 4096 * 4;
	params.vol_size = 4096 * 4 * 100;

	pm_size = _get_pm_file_size(&params);
	expected_pm_size = sizeof(struct spdk_reduce_vol_superblock);
	/* 100 chunks in logical map * 8 bytes per chunk */
	expected_pm_size += 100 * sizeof(uint64_t);
	/* 100 chunks * (chunk struct size + 4 backing io units per chunk * 8 bytes per backing io unit) */
	expected_pm_size += 100 * (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
	/* reduce also allocates some extra chunks for in-flight writes when the logical
	 * map is full.  REDUCE_NUM_EXTRA_CHUNKS is a private #define in reduce.c.  Here
	 * we need that number of chunks times (chunk struct size + 4 backing io units
	 * per chunk * 8 bytes per backing io unit).
	 */
	expected_pm_size += REDUCE_NUM_EXTRA_CHUNKS *
			    (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
	/* reduce will add some padding, so the numbers may not match exactly.  Make sure
	 * they are close though.
	 */
	CU_ASSERT((pm_size - expected_pm_size) <= REDUCE_PM_SIZE_ALIGNMENT);
}

static void
get_vol_size(void)
{
	uint64_t chunk_size, backing_dev_size;

	chunk_size = 16 * 1024;
	backing_dev_size = 16 * 1024 * 1000;
	CU_ASSERT(_get_vol_size(chunk_size, backing_dev_size) < backing_dev_size);
}

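/*
 * pmem_map_file() mock: allocates (or re-attaches to) the fake persistent
 * buffer and hands back a fresh volatile copy of it.  Reusing
 * g_persistent_pm_buf across map/unmap cycles is what makes the
 * unload/reload tests possible.
 */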
void *
pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
	      size_t *mapped_lenp, int *is_pmemp)
{
	CU_ASSERT(g_volatile_pm_buf == NULL);
	snprintf(g_path, sizeof(g_path), "%s", path);
	*is_pmemp = 1;

	if (g_persistent_pm_buf == NULL) {
		g_persistent_pm_buf = calloc(1, len);
		g_persistent_pm_buf_len = len;
		SPDK_CU_ASSERT_FATAL(g_persistent_pm_buf != NULL);
	}

	*mapped_lenp = g_persistent_pm_buf_len;
	g_volatile_pm_buf = calloc(1, g_persistent_pm_buf_len);
	SPDK_CU_ASSERT_FATAL(g_volatile_pm_buf != NULL);
	memcpy(g_volatile_pm_buf, g_persistent_pm_buf, g_persistent_pm_buf_len);
	g_volatile_pm_buf_len = g_persistent_pm_buf_len;

	return g_volatile_pm_buf;
}

int
pmem_unmap(void *addr, size_t len)
{
	CU_ASSERT(addr == g_volatile_pm_buf);
	CU_ASSERT(len == g_volatile_pm_buf_len);
	free(g_volatile_pm_buf);
	g_volatile_pm_buf = NULL;
	g_volatile_pm_buf_len = 0;

	return 0;
}

static void
persistent_pm_buf_destroy(void)
{
	CU_ASSERT(g_persistent_pm_buf != NULL);
	free(g_persistent_pm_buf);
	g_persistent_pm_buf = NULL;
	g_persistent_pm_buf_len = 0;
}

static void
unlink_cb(void)
{
	persistent_pm_buf_destroy();
}

static void
init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}

static void
load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}

static void
unload_cb(void *cb_arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
init_failure(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	backing_dev.blocklen = 512;
	/* This blockcnt is too small for a reduce vol - there needs to be
	 *  enough space for at least REDUCE_NUM_EXTRA_CHUNKS + 1 chunks.
	 */
	backing_dev.blockcnt = 20;

	params.vol_size = 0;
	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = backing_dev.blocklen;
	params.logical_block_size = 512;

	/* backing_dev has an invalid size.  This should fail. */
	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);

	/* backing_dev now has a valid size, but its I/O function pointers
	 *  are still NULL.  This should also fail.
	 */
	backing_dev.blockcnt = 20000;

	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);
}

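/*
 * The backing device is modeled as one flat RAM buffer, g_backing_dev_buf.
 * An LBA maps to byte offset lba * blocklen, so the readv/writev/unmap
 * callbacks below are just memcpy/memset over that buffer.
 */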
static void
backing_dev_readv_execute(struct spdk_reduce_backing_dev *backing_dev,
			  struct iovec *iov, int iovcnt,
			  uint64_t lba, uint32_t lba_count,
			  struct spdk_reduce_vol_cb_args *args)
{
	char *offset;
	int i;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	for (i = 0; i < iovcnt; i++) {
		memcpy(iov[i].iov_base, offset, iov[i].iov_len);
		offset += iov[i].iov_len;
	}
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_insert_io(enum ut_reduce_bdev_io_type type, struct spdk_reduce_backing_dev *backing_dev,
		      struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
		      struct spdk_reduce_vol_cb_args *args)
{
	struct ut_reduce_bdev_io *ut_bdev_io;

	ut_bdev_io = calloc(1, sizeof(*ut_bdev_io));
	SPDK_CU_ASSERT_FATAL(ut_bdev_io != NULL);

	ut_bdev_io->type = type;
	ut_bdev_io->backing_dev = backing_dev;
	ut_bdev_io->iov = iov;
	ut_bdev_io->iovcnt = iovcnt;
	ut_bdev_io->lba = lba;
	ut_bdev_io->lba_count = lba_count;
	ut_bdev_io->args = args;
	TAILQ_INSERT_TAIL(&g_pending_bdev_io, ut_bdev_io, link);
	g_pending_bdev_io_count++;
}

static void
backing_dev_readv(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_readv_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_READV, backing_dev, iov, iovcnt, lba, lba_count, args);
}

static void
backing_dev_writev_execute(struct spdk_reduce_backing_dev *backing_dev,
			   struct iovec *iov, int iovcnt,
			   uint64_t lba, uint32_t lba_count,
			   struct spdk_reduce_vol_cb_args *args)
{
	char *offset;
	int i;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	for (i = 0; i < iovcnt; i++) {
		memcpy(offset, iov[i].iov_base, iov[i].iov_len);
		offset += iov[i].iov_len;
	}
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_writev(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
		   uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_writev_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_WRITEV, backing_dev, iov, iovcnt, lba, lba_count, args);
}

static void
backing_dev_unmap_execute(struct spdk_reduce_backing_dev *backing_dev,
			  uint64_t lba, uint32_t lba_count,
			  struct spdk_reduce_vol_cb_args *args)
{
	char *offset;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	memset(offset, 0, lba_count * backing_dev->blocklen);
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_unmap(struct spdk_reduce_backing_dev *backing_dev,
		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_unmap_execute(backing_dev, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_UNMAP, backing_dev, NULL, 0, lba, lba_count, args);
}

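/*
 * Complete deferred backing device I/O in FIFO order.  count == 0 means
 * drain everything that is pending; a non-zero count completes at most that
 * many operations, which lets a test stop partway through a chain of I/O.
 */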
static void
backing_dev_io_execute(uint32_t count)
{
	struct ut_reduce_bdev_io *ut_bdev_io;
	uint32_t done = 0;

	CU_ASSERT(g_defer_bdev_io == true);
	while (!TAILQ_EMPTY(&g_pending_bdev_io) && (count == 0 || done < count)) {
		ut_bdev_io = TAILQ_FIRST(&g_pending_bdev_io);
		TAILQ_REMOVE(&g_pending_bdev_io, ut_bdev_io, link);
		g_pending_bdev_io_count--;
		switch (ut_bdev_io->type) {
		case UT_REDUCE_IO_READV:
			backing_dev_readv_execute(ut_bdev_io->backing_dev,
						  ut_bdev_io->iov, ut_bdev_io->iovcnt,
						  ut_bdev_io->lba, ut_bdev_io->lba_count,
						  ut_bdev_io->args);
			break;
		case UT_REDUCE_IO_WRITEV:
			backing_dev_writev_execute(ut_bdev_io->backing_dev,
						   ut_bdev_io->iov, ut_bdev_io->iovcnt,
						   ut_bdev_io->lba, ut_bdev_io->lba_count,
						   ut_bdev_io->args);
			break;
		case UT_REDUCE_IO_UNMAP:
			backing_dev_unmap_execute(ut_bdev_io->backing_dev,
						  ut_bdev_io->lba, ut_bdev_io->lba_count,
						  ut_bdev_io->args);
			break;
		default:
			CU_ASSERT(false);
			break;
		}
		free(ut_bdev_io);
		done++;
	}
}

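/*
 * ut_compress/ut_decompress implement a toy run-length encoding used as the
 * test's "compression algorithm": each run of up to UINT8_MAX identical
 * bytes becomes a (count, value) pair, so the best case is 2 bytes per run
 * and the worst case doubles the input.  For example (illustrative only):
 *
 *   char out[8];
 *   uint32_t out_len = sizeof(out);
 *   int rc = ut_compress(out, &out_len, "AAAB", 4);
 *   // rc == 0, out_len == 4, out == { 0x03, 'A', 0x01, 'B' }
 *
 * On input, *compressed_len holds the output buffer capacity; on success it
 * is overwritten with the encoded length.
 */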
static int
ut_compress(char *outbuf, uint32_t *compressed_len, char *inbuf, uint32_t inbuflen)
{
	uint32_t len = 0;
	uint8_t count;
	char last;

	while (true) {
		if (inbuflen == 0) {
			*compressed_len = len;
			return 0;
		}

		if (*compressed_len < (len + 2)) {
			return -ENOSPC;
		}

		last = *inbuf;
		count = 1;
		inbuflen--;
		inbuf++;

		while (inbuflen > 0 && *inbuf == last && count < UINT8_MAX) {
			count++;
			inbuflen--;
			inbuf++;
		}

		outbuf[len] = count;
		outbuf[len + 1] = last;
		len += 2;
	}
}

static int
ut_decompress(uint8_t *outbuf, uint32_t *decompressed_len, uint8_t *inbuf, uint32_t inbuflen)
{
	uint32_t len = 0;

	SPDK_CU_ASSERT_FATAL(inbuflen % 2 == 0);

	/* decompressed_len is in/out: the output buffer capacity on entry,
	 * the actual decompressed length on success.
	 */
	while (true) {
		if (inbuflen == 0) {
			*decompressed_len = len;
			return 0;
		}

		if ((len + inbuf[0]) > *decompressed_len) {
			return -ENOSPC;
		}

		memset(outbuf, inbuf[1], inbuf[0]);
		outbuf += inbuf[0];
		len += inbuf[0];
		inbuflen -= 2;
		inbuf += 2;
	}
}

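/*
 * Fill a buffer with a pattern: each value init_val, init_val + 1, ... is
 * repeated 'repeat' times.  repeat == 1 yields an incompressible byte ramp,
 * while a large repeat yields long runs that the RLE codec above compresses
 * well.
 */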
static void
ut_build_data_buffer(uint8_t *data, uint32_t data_len, uint8_t init_val, uint32_t repeat)
{
	uint32_t _repeat = repeat;

	SPDK_CU_ASSERT_FATAL(repeat > 0);

	while (data_len > 0) {
		*data = init_val;
		data++;
		data_len--;
		_repeat--;
		if (_repeat == 0) {
			init_val++;
			_repeat = repeat;
		}
	}
}

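/*
 * Backing device compress/decompress callbacks.  Both stage data through
 * g_decomp_buf (one chunk in size) so that scattered src/dst iovecs can be
 * fed to the single-buffer RLE codec; the completion callback receives the
 * resulting length, or a negative errno on failure.
 */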
static void
backing_dev_compress(struct spdk_reduce_backing_dev *backing_dev,
		     struct iovec *src_iov, int src_iovcnt,
		     struct iovec *dst_iov, int dst_iovcnt,
		     struct spdk_reduce_vol_cb_args *args)
{
	uint32_t compressed_len;
	uint64_t total_length = 0;
	char *buf = g_decomp_buf;
	int rc, i;

	CU_ASSERT(dst_iovcnt == 1);

	for (i = 0; i < src_iovcnt; i++) {
		memcpy(buf, src_iov[i].iov_base, src_iov[i].iov_len);
		buf += src_iov[i].iov_len;
		total_length += src_iov[i].iov_len;
	}

	compressed_len = dst_iov[0].iov_len;
	rc = ut_compress(dst_iov[0].iov_base, &compressed_len,
			 g_decomp_buf, total_length);

	args->cb_fn(args->cb_arg, rc ? rc : (int)compressed_len);
}

static void
backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
		       struct iovec *src_iov, int src_iovcnt,
		       struct iovec *dst_iov, int dst_iovcnt,
		       struct spdk_reduce_vol_cb_args *args)
{
	uint32_t decompressed_len = 0;
	char *buf = g_decomp_buf;
	int rc, i;

	CU_ASSERT(src_iovcnt == 1);

	for (i = 0; i < dst_iovcnt; i++) {
		decompressed_len += dst_iov[i].iov_len;
	}

	rc = ut_decompress(g_decomp_buf, &decompressed_len,
			   src_iov[0].iov_base, src_iov[0].iov_len);

	for (i = 0; i < dst_iovcnt; i++) {
		memcpy(dst_iov[i].iov_base, buf, dst_iov[i].iov_len);
		buf += dst_iov[i].iov_len;
	}

	args->cb_fn(args->cb_arg, rc ? rc : (int)decompressed_len);
}

static void
backing_dev_destroy(struct spdk_reduce_backing_dev *backing_dev)
{
	/* We don't free g_backing_dev_buf when a volume is unloaded, so that
	 *  init/unload/load scenarios can operate on the same backing data.
	 *  It is only freed here, once a test is finished with the device.
	 */
	free(g_backing_dev_buf);
	free(g_decomp_buf);
	g_backing_dev_buf = NULL;
}

static void
backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce_vol_params *params,
		 uint32_t backing_blocklen)
{
	int64_t size;

	size = 4 * 1024 * 1024;
	backing_dev->blocklen = backing_blocklen;
	backing_dev->blockcnt = size / backing_dev->blocklen;
	backing_dev->readv = backing_dev_readv;
	backing_dev->writev = backing_dev_writev;
	backing_dev->unmap = backing_dev_unmap;
	backing_dev->compress = backing_dev_compress;
	backing_dev->decompress = backing_dev_decompress;
	backing_dev->sgl_in = true;
	backing_dev->sgl_out = true;

	g_decomp_buf = calloc(1, params->chunk_size);
	SPDK_CU_ASSERT_FATAL(g_decomp_buf != NULL);

	g_backing_dev_buf = calloc(1, size);
	SPDK_CU_ASSERT_FATAL(g_backing_dev_buf != NULL);
}

static void
init_md(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};
	struct spdk_uuid uuid;
	uint64_t *entry;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	/* Confirm that reduce persisted the params to metadata. */
	CU_ASSERT(memcmp(g_persistent_pm_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_persistent_pm_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Now confirm that the contents of the pm file after the superblock have been
	 *  initialized to REDUCE_EMPTY_MAP_ENTRY.
	 */
	entry = (uint64_t *)(g_persistent_pm_buf + sizeof(struct spdk_reduce_vol_superblock));
	while (entry != (uint64_t *)(g_persistent_pm_buf + g_vol->pm_file.size)) {
		CU_ASSERT(*entry == REDUCE_EMPTY_MAP_ENTRY);
		entry++;
	}

	/* Check that the pm file path was constructed correctly.  It should be in
	 * the form:
	 * TEST_MD_PATH + "/" + <uuid string>
	 */
	CU_ASSERT(strncmp(&g_path[0], TEST_MD_PATH, strlen(TEST_MD_PATH)) == 0);
	CU_ASSERT(g_path[strlen(TEST_MD_PATH)] == '/');
	CU_ASSERT(spdk_uuid_parse(&uuid, &g_path[strlen(TEST_MD_PATH) + 1]) == 0);
	CU_ASSERT(spdk_uuid_compare(&uuid, spdk_reduce_vol_get_uuid(g_vol)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(g_volatile_pm_buf == NULL);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
_init_backing_dev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	/* Confirm that libreduce persisted the params to the backing device. */
	CU_ASSERT(memcmp(g_backing_dev_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_backing_dev_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Confirm that the path to the persistent memory metadata file was persisted to
	 *  the backing device.
	 */
	CU_ASSERT(strncmp(g_path,
			  g_backing_dev_buf + REDUCE_BACKING_DEV_PATH_OFFSET,
			  REDUCE_PATH_MAX) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
init_backing_dev(void)
{
	_init_backing_dev(512);
	_init_backing_dev(4096);
}

static void
_load(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	char pmem_file_path[REDUCE_PATH_MAX];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	memcpy(pmem_file_path, g_path, sizeof(pmem_file_path));

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(g_path, pmem_file_path, sizeof(pmem_file_path)) == 0);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
load(void)
{
	_load(512);
	_load(4096);
}

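/*
 * Look up the chunk map index for a logical offset (in logical blocks) by
 * indexing the volume's persistent logical map.
 */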
static uint64_t
_vol_get_chunk_map_index(struct spdk_reduce_vol *vol, uint64_t offset)
{
	uint64_t logical_map_index = offset / vol->logical_blocks_per_chunk;

	return vol->pm_logical_map[logical_map_index];
}

static void
write_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
read_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

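/*
 * _write_maps verifies chunk map accounting: a full-chunk write allocates a
 * chunk map and backing io units, and overwriting the same chunk allocates a
 * new map while releasing the old one back to the allocation bit arrays.
 */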
static void
_write_maps(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	const int bufsize = 16 * 1024; /* chunk size */
	char buf[bufsize];
	uint32_t num_lbas, i;
	uint64_t old_chunk0_map_index, new_chunk0_map_index;
	struct spdk_reduce_chunk_map *old_chunk0_map, *new_chunk0_map;

	params.chunk_size = bufsize;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	num_lbas = bufsize / params.logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	for (i = 0; i < g_vol->params.vol_size / g_vol->params.chunk_size; i++) {
		CU_ASSERT(_vol_get_chunk_map_index(g_vol, i) == REDUCE_EMPTY_MAP_ENTRY);
	}

	ut_build_data_buffer(buf, bufsize, 0x00, 1);
	iov.iov_base = buf;
	iov.iov_len = bufsize;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	old_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(old_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == true);

	old_chunk0_map = _reduce_vol_get_chunk_map(g_vol, old_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(old_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	new_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(new_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(new_chunk0_map_index != old_chunk0_map_index);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, new_chunk0_map_index) == true);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == false);

	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == false);
	}

	new_chunk0_map = _reduce_vol_get_chunk_map(g_vol, new_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(new_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     new_chunk0_map->io_unit_index[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
write_maps(void)
{
	_write_maps(512);
	_write_maps(4096);
}

static void
_read_write(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	char buf[16 * 1024]; /* chunk size */
	char compare_buf[16 * 1024];
	uint32_t i;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 2 512-byte logical blocks, starting at LBA 2. */
	memset(buf, 0xAA, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xAA, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Overwrite what we just wrote with 0xCC */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	memset(buf, 0xCC, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xCC, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;

	/* Write 0xBB to 2 512-byte logical blocks, starting at LBA 37.  This
	 * lands in the second chunk of the volume.  It also implicitly checks
	 * that the allocation bit arrays were reloaded correctly - this new
	 * write must not reuse the first chunk map, which was already claimed
	 * by the write issued before we unloaded and reloaded.
	 */
	memset(buf, 0xBB, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 37, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	for (i = 0; i < 2 * params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			memset(compare_buf, 0xCC, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		case 37:
		case 38:
			memset(compare_buf, 0xBB, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
read_write(void)
{
	_read_write(512);
	_read_write(4096);
}

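/*
 * _readv_writev only checks the iovec count limit: submitting more than
 * REDUCE_MAX_IOVECS iovecs must fail with -EINVAL before any I/O is issued.
 */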
static void
_readv_writev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov[REDUCE_MAX_IOVECS + 1];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, iov, REDUCE_MAX_IOVECS + 1, 2, REDUCE_MAX_IOVECS + 1, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
readv_writev(void)
{
	_readv_writev(512);
	_readv_writev(4096);
}

static void
destroy_cb(void *ctx, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
destroy(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = -1;
	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	spdk_reduce_vol_destroy(&backing_dev, destroy_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Destroying the volume invalidates the superblock, so a subsequent
	 * load must fail with -EILSEQ.
	 */
	g_reduce_errno = 0;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EILSEQ);

	backing_dev_destroy(&backing_dev);
}

/* This test primarily checks that the reduce unit test infrastructure for asynchronous
 * backing device I/O operations is working correctly.
 */
static void
defer_bdev_io(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[logical_block_size];
	char compare_buf[logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
	 * very compressible.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	backing_dev_io_execute(0);
	CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
	CU_ASSERT(g_reduce_errno == 0);

	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 1, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

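/*
 * Verify that two writes targeting the same chunk are serialized: the second
 * request must not issue any backing I/O until the first one completes.
 */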
static void
overlapped(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[2 * logical_block_size];
	char compare_buf[2 * logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = logical_block_size;
	g_reduce_errno = -100;
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
	 * very compressible.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	/* Now do an overlapped I/O to the same chunk. */
	spdk_reduce_vol_writev(g_vol, &iov, 1, 1, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* The second I/O overlaps with the first one.  So we should only see pending bdev_io
	 * related to the first I/O here - the second one won't start until the first one is completed.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	backing_dev_io_execute(0);
	CU_ASSERT(g_reduce_errno == 0);

	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = 2 * logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 2, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, 2 * logical_block_size) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

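/*
 * compress_algorithm exercises the RLE codec directly: a maximal run
 * (UINT8_MAX bytes) encodes to one pair, a run one byte longer needs two
 * pairs, an incompressible ramp doubles in size, and an output buffer that
 * is too small must yield -ENOSPC.
 */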
#define BUFSIZE 4096

static void
compress_algorithm(void)
{
	uint8_t original_data[BUFSIZE];
	uint8_t compressed_data[BUFSIZE];
	uint8_t decompressed_data[BUFSIZE];
	uint32_t compressed_len, decompressed_len;
	int rc;

	ut_build_data_buffer(original_data, BUFSIZE, 0xAA, BUFSIZE);
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 2);
	CU_ASSERT(compressed_data[0] == UINT8_MAX);
	CU_ASSERT(compressed_data[1] == 0xAA);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == UINT8_MAX);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX + 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 4);
	CU_ASSERT(compressed_data[0] == UINT8_MAX);
	CU_ASSERT(compressed_data[1] == 0xAA);
	CU_ASSERT(compressed_data[2] == 1);
	CU_ASSERT(compressed_data[3] == 0xAA);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == UINT8_MAX + 1);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	ut_build_data_buffer(original_data, BUFSIZE, 0x00, 1);
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, 2048);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 4096);
	CU_ASSERT(compressed_data[0] == 1);
	CU_ASSERT(compressed_data[1] == 0);
	CU_ASSERT(compressed_data[4094] == 1);
	CU_ASSERT(compressed_data[4095] == 0xFF);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == 2048);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, 2049);
	CU_ASSERT(rc == -ENOSPC);
}

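/*
 * Rough summary of the behavior verified below: _prepare_compress_chunk()
 * builds req->decomp_iov to describe one full chunk - the user's iovecs plus
 * padding iovecs for any leading offset and trailing remainder.  With
 * zero_paddings == true the padding points at g_zero_buf, otherwise at
 * req->decomp_buf (read-modify-write).  When the backing device cannot take
 * scattered input (sgl_in == false) and the user data is not a single
 * aligned chunk-sized iovec, the data is copied into decomp_buf instead.
 */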
static void
test_prepare_compress_chunk(void)
{
	struct spdk_reduce_vol vol = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct spdk_reduce_vol_request req = {};
	void *buf;
	char *buffer_end, *aligned_user_buffer, *unaligned_user_buffer;
	char decomp_buffer[16 * 1024] = {};
	char comp_buffer[16 * 1024] = {};
	struct iovec user_iov[2] = {};
	size_t user_buffer_iov_len = 8192;
	size_t remainder_bytes;
	size_t offset_bytes;
	size_t memcmp_offset;
	uint32_t i;

	vol.params.chunk_size = 16 * 1024;
	vol.params.backing_io_unit_size = 4096;
	vol.params.logical_block_size = 512;
	backing_dev_init(&backing_dev, &vol.params, 512);
	vol.backing_dev = &backing_dev;
	vol.logical_blocks_per_chunk = vol.params.chunk_size / vol.params.logical_block_size;

	/* Allocate 1 extra byte to test the case where the buffer crosses a huge page boundary. */
	SPDK_CU_ASSERT_FATAL(posix_memalign(&buf, VALUE_2MB, VALUE_2MB + 1) == 0);
	buffer_end = (char *)buf + VALUE_2MB + 1;
	aligned_user_buffer = (char *)buf;
	memset(aligned_user_buffer, 0xc, vol.params.chunk_size);
	unaligned_user_buffer = buffer_end - vol.params.chunk_size;
	memset(unaligned_user_buffer, 0xc, vol.params.chunk_size);

	req.vol = &vol;
	req.decomp_buf = decomp_buffer;
	req.comp_buf = comp_buffer;
	req.iov = user_iov;
	req.iovcnt = 2;
	req.offset = 0;

1330 	/* Test 1 - user's buffers length equals to chunk_size */
1331 	for (i = 0; i < 2; i++) {
1332 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1333 		req.iov[i].iov_len = user_buffer_iov_len;
1334 	}
1335 
1336 	_prepare_compress_chunk(&req, false);
1337 	CU_ASSERT(req.decomp_iovcnt == 2);
1338 	for (i = 0; i < 2; i++) {
1339 		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
1340 		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
1341 	}
1342 
1343 	_prepare_compress_chunk(&req, true);
1344 	CU_ASSERT(req.decomp_iovcnt == 2);
1345 	for (i = 0; i < 2; i++) {
1346 		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
1347 		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
1348 	}
1349 
1350 	/* Test 2 - user's buffer less than chunk_size, without offset */
1351 	user_buffer_iov_len = 4096;
1352 	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
1353 	for (i = 0; i < 2; i++) {
1354 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1355 		req.iov[i].iov_len = user_buffer_iov_len;
1356 	}
1357 
1358 	_prepare_compress_chunk(&req, false);
1359 	CU_ASSERT(req.decomp_iovcnt == 3);
1360 	for (i = 0; i < 2; i++) {
1361 		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
1362 		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
1363 	}
1364 	CU_ASSERT(req.decomp_iov[i].iov_base == req.decomp_buf + user_buffer_iov_len * 2);
1365 	CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);
1366 
1367 	_prepare_compress_chunk(&req, true);
1368 	CU_ASSERT(req.decomp_iovcnt == 3);
1369 	for (i = 0; i < 2; i++) {
1370 		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
1371 		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
1372 	}
1373 	CU_ASSERT(req.decomp_iov[i].iov_base == g_zero_buf + user_buffer_iov_len * 2);
1374 	CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);
1375 
1376 	/* Test 3 - user's buffer less than chunk_size, non zero offset */
1377 	user_buffer_iov_len = 4096;
1378 	req.offset = 3;
1379 	offset_bytes = req.offset * vol.params.logical_block_size;
1380 	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
1381 
1382 	_prepare_compress_chunk(&req, false);
1383 	CU_ASSERT(req.decomp_iovcnt == 4);
1384 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1385 	CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
1386 	for (i = 0; i < 2; i++) {
1387 		CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
1388 		CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
1389 	}
1390 	CU_ASSERT(req.decomp_iov[3].iov_base == req.decomp_buf + offset_bytes + user_buffer_iov_len * 2);
1391 	CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);
1392 
1393 	_prepare_compress_chunk(&req, true);
1394 	CU_ASSERT(req.decomp_iovcnt == 4);
1395 	CU_ASSERT(req.decomp_iov[0].iov_base == g_zero_buf);
1396 	CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
1397 	for (i = 0; i < 2; i++) {
1398 		CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
1399 		CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
1400 	}
1401 	CU_ASSERT(req.decomp_iov[3].iov_base == g_zero_buf + offset_bytes + user_buffer_iov_len * 2);
1402 	CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);
1403 
	/* Part 2 - backing dev doesn't support sgl_in */
	/* Test 1 - user buffer length equals chunk_size;
	 * user buffers are copied */
	vol.backing_dev->sgl_in = false;
	req.offset = 0;
	user_buffer_iov_len = 8192;
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
	}

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base, req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + req.iov[0].iov_len, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base, req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + req.iov[0].iov_len, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);

	/* Test 2 - single user buffer whose length equals chunk_size, but is not aligned;
	 * user buffer is copied */
	req.iov[0].iov_base = unaligned_user_buffer;
	req.iov[0].iov_len = vol.params.chunk_size;
	req.iovcnt = 1;
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);

	/* Test 3 - single aligned user buffer whose length equals chunk_size;
	 * user buffer is used directly, not copied */
	req.iov[0].iov_base = aligned_user_buffer;
	req.iov[0].iov_len = vol.params.chunk_size;
	req.iovcnt = 1;
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);

	/* Test 4 - user buffer smaller than chunk_size, zero offset;
	 * user buffers are copied */
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	user_buffer_iov_len = 4096;
	req.iovcnt = 2;
	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
	}

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	memcmp_offset = 0;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	memcmp_offset += req.iov[0].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	memcmp_offset += req.iov[1].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf + memcmp_offset,
			 remainder_bytes) == 0);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	memcmp_offset = 0;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	memcmp_offset += req.iov[0].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	memcmp_offset += req.iov[1].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf + memcmp_offset,
			 remainder_bytes) == 0);

	/* Test 5 - user buffer smaller than chunk_size, non-zero offset;
	 * user buffers are copied */
	req.offset = 3;
	offset_bytes = req.offset * vol.params.logical_block_size;
	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	memcmp_offset = 0;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf, offset_bytes) == 0);
	memcmp_offset += offset_bytes;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	memcmp_offset += req.iov[0].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	memcmp_offset += req.iov[1].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf + memcmp_offset,
			 remainder_bytes) == 0);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	memcmp_offset = 0;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf, offset_bytes) == 0);
	memcmp_offset += offset_bytes;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	memcmp_offset += req.iov[0].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	memcmp_offset += req.iov[1].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf + memcmp_offset,
			 remainder_bytes) == 0);

	free(buf);
}

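/*
 * test_reduce_decompress_chunk drives _reduce_vol_decompress_chunk() with a
 * stub decompress callback that just reports g_decompressed_len, so only the
 * decomp_iov construction and request completion paths are exercised.
 */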
static void
_reduce_vol_op_complete(void *ctx, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
dummy_backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
			     struct iovec *src_iov, int src_iovcnt,
			     struct iovec *dst_iov, int dst_iovcnt,
			     struct spdk_reduce_vol_cb_args *args)
{
	args->cb_fn(args->cb_arg, g_decompressed_len);
}

static void
test_reduce_decompress_chunk(void)
1580 {
1581 	struct spdk_reduce_vol vol = {};
1582 	struct spdk_reduce_backing_dev backing_dev = {};
1583 	struct spdk_reduce_vol_request req = {};
1584 	void *buf;
1585 	char *buffer_end, *aligned_user_buffer, *unaligned_user_buffer;
1586 	char decomp_buffer[16 * 1024] = {};
1587 	char comp_buffer[16 * 1024] = {};
1588 	struct iovec user_iov[2] = {};
1589 	struct iovec comp_buf_iov = {};
1590 	struct spdk_reduce_chunk_map chunk = {};
1591 	size_t user_buffer_iov_len = 8192;
1592 	size_t remainder_bytes;
1593 	size_t offset_bytes;
1594 	uint32_t i;
1595 
1596 	vol.params.chunk_size = 16 * 1024;
1597 	vol.params.backing_io_unit_size = 4096;
1598 	vol.params.logical_block_size = 512;
1599 	backing_dev_init(&backing_dev, &vol.params, 512);
1600 	backing_dev.decompress = dummy_backing_dev_decompress;
1601 	vol.backing_dev = &backing_dev;
1602 	vol.logical_blocks_per_chunk = vol.params.chunk_size / vol.params.logical_block_size;
1603 	TAILQ_INIT(&vol.executing_requests);
1604 	TAILQ_INIT(&vol.queued_requests);
1605 	TAILQ_INIT(&vol.free_requests);
1606 
1607 	/* Allocate 1 extra byte to test the case where the buffer crosses a huge page boundary */
1608 	SPDK_CU_ASSERT_FATAL(posix_memalign(&buf, VALUE_2MB, VALUE_2MB + 1) == 0);
1609 	buffer_end = (char *)buf + VALUE_2MB + 1;
1610 	aligned_user_buffer = (char *)buf;
1611 	unaligned_user_buffer = buffer_end - vol.params.chunk_size;
1612 
1613 	chunk.compressed_size = user_buffer_iov_len / 2;
1614 	req.chunk = &chunk;
1615 	req.vol = &vol;
1616 	req.decomp_buf = decomp_buffer;
1617 	req.comp_buf = comp_buffer;
1618 	req.comp_buf_iov = &comp_buf_iov;
1619 	req.iov = user_iov;
1620 	req.iovcnt = 2;
1621 	req.offset = 0;
1622 	req.cb_fn = _reduce_vol_op_complete;
1623 
1624 	/* Part 1 - backing dev supports sgl_out */
1625 	/* Test 1 - total length of user's buffers equals chunk_size */
1626 	for (i = 0; i < 2; i++) {
1627 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1628 		req.iov[i].iov_len = user_buffer_iov_len;
1629 		memset(req.iov[i].iov_base, 0, req.iov[i].iov_len);
1630 	}
1631 	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
1632 	g_reduce_errno = -1;
1633 	g_decompressed_len = vol.params.chunk_size;
1634 
1635 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1636 	CU_ASSERT(g_reduce_errno == 0);
1637 	CU_ASSERT(req.copy_after_decompress == false);
1638 	CU_ASSERT(req.decomp_iovcnt == 2);
1639 	for (i = 0; i < 2; i++) {
1640 		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
1641 		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
1642 	}
1643 	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
1644 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1645 
1646 	/* Test 2 - user's buffers are smaller than chunk_size, zero offset */
1647 	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
1648 	g_reduce_errno = -1;
1649 	user_buffer_iov_len = 4096;
1650 	for (i = 0; i < 2; i++) {
1651 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1652 		req.iov[i].iov_len = user_buffer_iov_len;
1653 		memset(req.iov[i].iov_base, 0, req.iov[i].iov_len);
1654 	}
1655 	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
1656 
1657 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1658 	CU_ASSERT(g_reduce_errno == 0);
1659 	CU_ASSERT(req.copy_after_decompress == false);
1660 	CU_ASSERT(req.decomp_iovcnt == 3);
1661 	for (i = 0; i < 2; i++) {
1662 		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
1663 		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
1664 	}
1665 	CU_ASSERT(req.decomp_iov[2].iov_base == req.decomp_buf + user_buffer_iov_len * 2);
1666 	CU_ASSERT(req.decomp_iov[2].iov_len == remainder_bytes);
1667 	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
1668 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1669 
1670 	/* Test 3 - user's buffers are smaller than chunk_size, non-zero offset */
1671 	req.offset = 3;
1672 	offset_bytes = req.offset * vol.params.logical_block_size;
1673 	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
1674 	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
1675 	g_reduce_errno = -1;
1676 
1677 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1678 	CU_ASSERT(g_reduce_errno == 0);
1679 	CU_ASSERT(req.copy_after_decompress == false);
1680 	CU_ASSERT(req.decomp_iovcnt == 4);
1681 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1682 	CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
1683 	for (i = 0; i < 2; i++) {
1684 		CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
1685 		CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
1686 	}
1687 	CU_ASSERT(req.decomp_iov[3].iov_base == req.decomp_buf + offset_bytes + user_buffer_iov_len * 2);
1688 	CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);
1689 	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
1690 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1691 
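	/*
	 * Summary of the sgl_out == true cases above: the user's iovs are
	 * handed to the backing device untouched, and decomp_iov only grows
	 * head/tail entries pointing into decomp_buf when offset or remainder
	 * padding is needed. For Test 3 the asserted layout is:
	 *
	 *	decomp_iov[0] = { decomp_buf, offset_bytes }
	 *	decomp_iov[1] = { iov[0].iov_base, iov[0].iov_len }
	 *	decomp_iov[2] = { iov[1].iov_base, iov[1].iov_len }
	 *	decomp_iov[3] = { decomp_buf + offset_bytes + 2 * user_buffer_iov_len,
	 *			  remainder_bytes }
	 */
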
1692 	/* Part 2 - backing dev doesn't support sgl_out */
1693 	/* Test 1 - total length of user's buffers equals chunk_size;
1694 	 * user's buffers are copied */
1695 	vol.backing_dev->sgl_out = false;
1696 	req.offset = 0;
1697 	user_buffer_iov_len = 8192;
1698 
1699 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1700 	for (i = 0; i < 2; i++) {
1701 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1702 		req.iov[i].iov_len = user_buffer_iov_len;
1703 		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
1704 	}
1705 	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
1706 	g_reduce_errno = -1;
1707 
1708 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1709 	CU_ASSERT(g_reduce_errno == 0);
1710 	CU_ASSERT(req.copy_after_decompress == true);
1711 	CU_ASSERT(req.decomp_iovcnt == 1);
1712 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1713 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1714 	CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base, req.iov[0].iov_len) == 0);
1715 	CU_ASSERT(memcmp(req.iov[1].iov_base, req.decomp_iov[0].iov_base + req.iov[0].iov_len,
1716 			 req.iov[1].iov_len) == 0);
1717 	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
1718 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1719 
1720 	/* Test 2 - a single user buffer whose length equals chunk_size but which
1721 	 * crosses a huge page boundary; user's buffer is copied */
1722 	memset(unaligned_user_buffer, 0xc, vol.params.chunk_size);
1723 	req.iov[0].iov_base = unaligned_user_buffer;
1724 	req.iov[0].iov_len = vol.params.chunk_size;
1725 	req.iovcnt = 1;
1726 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1727 	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
1728 	g_reduce_errno = -1;
1729 
1730 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1731 	CU_ASSERT(g_reduce_errno == 0);
1732 	CU_ASSERT(req.copy_after_decompress == true);
1733 	CU_ASSERT(req.decomp_iovcnt == 1);
1734 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1735 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1736 	CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base,
1737 			 req.iov[0].iov_len) == 0);
1738 
1739 	/* Test 3 - a single aligned user buffer whose length equals chunk_size;
1740 	 * user's buffer is not copied */
1741 	req.iov[0].iov_base = aligned_user_buffer;
1742 	req.iov[0].iov_len = vol.params.chunk_size;
1743 	req.iovcnt = 1;
1744 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1745 	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
1746 	g_reduce_errno = -1;
1747 
1748 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1749 	CU_ASSERT(g_reduce_errno == 0);
1750 	CU_ASSERT(req.copy_after_decompress == false);
1751 	CU_ASSERT(req.decomp_iovcnt == 1);
1752 	CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
1753 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1754 
1755 	/* Test 4 - user's buffers are smaller than chunk_size, zero offset;
1756 	 * user's buffers are copied */
1757 	user_buffer_iov_len = 4096;
1758 	req.iovcnt = 2;
1759 	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
1760 	for (i = 0; i < 2; i++) {
1761 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1762 		req.iov[i].iov_len = user_buffer_iov_len;
1763 		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
1764 	}
1765 
1766 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1767 	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
1768 	g_reduce_errno = -1;
1769 
1770 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1771 	CU_ASSERT(g_reduce_errno == 0);
1772 	CU_ASSERT(req.copy_after_decompress == true);
1773 	CU_ASSERT(req.decomp_iovcnt == 1);
1774 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1775 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1776 	CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base,
1777 			 req.iov[0].iov_len) == 0);
1778 	CU_ASSERT(memcmp(req.iov[1].iov_base, req.decomp_iov[0].iov_base + req.iov[0].iov_len,
1779 			 req.iov[1].iov_len) == 0);
1780 	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
1781 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1782 
1783 	/* Test 5 - user's buffers are smaller than chunk_size, non-zero offset;
1784 	 * user's buffers are copied */
1785 	req.offset = 3;
1786 	offset_bytes = req.offset * vol.params.logical_block_size;
1787 	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
1788 
1789 	for (i = 0; i < 2; i++) {
1790 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1791 		req.iov[i].iov_len = user_buffer_iov_len;
1792 		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
1793 	}
1794 
1795 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1796 	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
1797 	g_reduce_errno = -1;
1798 
1799 	_prepare_compress_chunk(&req, false);
1800 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1801 	CU_ASSERT(g_reduce_errno == 0);
1802 	CU_ASSERT(req.copy_after_decompress == true);
1803 	CU_ASSERT(req.decomp_iovcnt == 1);
1804 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1805 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1806 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + offset_bytes, req.iov[0].iov_base,
1807 			 req.iov[0].iov_len) == 0);
1808 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + offset_bytes + req.iov[0].iov_len,
1809 			 req.iov[1].iov_base,
1810 			 req.iov[1].iov_len) == 0);
1811 	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
1812 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1813 
1814 	free(buf);
1815 }
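
/*
 * A compact model of the copy decision pinned down by the assertions above
 * (illustrative sketch only; the authoritative logic lives in reduce.c).
 * Without sgl_out, decompression must target one linear buffer, so the
 * user's memory is used directly only when a single full-chunk buffer is
 * supplied that does not cross a 2MB page boundary; every other shape
 * decompresses into decomp_buf and sets copy_after_decompress:
 *
 *	static bool
 *	ut_needs_copy_after_decompress(bool sgl_out, int iovcnt,
 *				       size_t total_len, size_t chunk_size,
 *				       bool crosses_2mb)
 *	{
 *		if (sgl_out) {
 *			return false;
 *		}
 *		return !(iovcnt == 1 && total_len == chunk_size && !crosses_2mb);
 *	}
 */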
1816 
1817 static void
test_allocate_vol_requests(void)
1818 {
1819 	struct spdk_reduce_vol *vol;
1820 	/* include chunk_sizes which are not power of 2 */
1821 	/* include chunk_sizes that are not powers of 2 */
1822 	uint32_t io_unit_sizes[] = {512, 520, 4096, 4104, 4096};
1823 	uint32_t i;
1824 
1825 	for (i = 0; i < SPDK_COUNTOF(chunk_sizes); i++) {
1826 		vol = calloc(1, sizeof(*vol));
1827 		SPDK_CU_ASSERT_FATAL(vol);
1828 
1829 		vol->params.chunk_size = chunk_sizes[i];
1830 		vol->params.logical_block_size = io_unit_sizes[i];
1831 		vol->params.backing_io_unit_size = io_unit_sizes[i];
1832 		vol->backing_io_units_per_chunk = vol->params.chunk_size / vol->params.backing_io_unit_size;
1833 		vol->logical_blocks_per_chunk = vol->params.chunk_size / vol->params.logical_block_size;
1834 
1835 		CU_ASSERT(_validate_vol_params(&vol->params) == 0);
1836 		CU_ASSERT(_allocate_vol_requests(vol) == 0);
1837 		_init_load_cleanup(vol, NULL);
1838 	}
1839 }
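
/*
 * New size combinations can be covered by appending matching entries to
 * both tables; SPDK_COUNTOF() keeps the loop bound in sync. A hypothetical
 * extra pair, for illustration only:
 *
 *	uint32_t chunk_sizes[] = {8192, 8320, 16384, 16416, 32768, 65536};
 *	uint32_t io_unit_sizes[] = {512, 520, 4096, 4104, 4096, 4096};
 */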
1840 
1841 int
1842 main(int argc, char **argv)
1843 {
1844 	CU_pSuite	suite = NULL;
1845 	unsigned int	num_failures;
1846 
1847 	CU_set_error_action(CUEA_ABORT);
1848 	CU_initialize_registry();
1849 
1850 	suite = CU_add_suite("reduce", NULL, NULL);
1851 
1852 	CU_ADD_TEST(suite, get_pm_file_size);
1853 	CU_ADD_TEST(suite, get_vol_size);
1854 	CU_ADD_TEST(suite, init_failure);
1855 	CU_ADD_TEST(suite, init_md);
1856 	CU_ADD_TEST(suite, init_backing_dev);
1857 	CU_ADD_TEST(suite, load);
1858 	CU_ADD_TEST(suite, write_maps);
1859 	CU_ADD_TEST(suite, read_write);
1860 	CU_ADD_TEST(suite, readv_writev);
1861 	CU_ADD_TEST(suite, destroy);
1862 	CU_ADD_TEST(suite, defer_bdev_io);
1863 	CU_ADD_TEST(suite, overlapped);
1864 	CU_ADD_TEST(suite, compress_algorithm);
1865 	CU_ADD_TEST(suite, test_prepare_compress_chunk);
1866 	CU_ADD_TEST(suite, test_reduce_decompress_chunk);
1867 	CU_ADD_TEST(suite, test_allocate_vol_requests);
1868 
1869 	g_unlink_path = g_path;
1870 	g_unlink_callback = unlink_cb;
1871 
1872 	CU_basic_set_mode(CU_BRM_VERBOSE);
1873 	CU_basic_run_tests();
1874 	num_failures = CU_get_number_of_failures();
1875 	CU_cleanup_registry();
1876 	return num_failures;
1877 }
1878