/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "reduce/reduce.c"
#include "spdk_internal/mock.h"
#define UNIT_TEST_NO_VTOPHYS
#include "common/lib/test_env.c"
#include "thread/thread_internal.h"
#undef UNIT_TEST_NO_VTOPHYS

static struct spdk_reduce_vol *g_vol;
static int g_reduce_errno;
static char *g_volatile_pm_buf;
static size_t g_volatile_pm_buf_len;
static char *g_persistent_pm_buf;
static size_t g_persistent_pm_buf_len;
static char *g_backing_dev_buf;
static char g_path[REDUCE_PATH_MAX];
static char *g_decomp_buf;
static int g_decompressed_len;

#define TEST_MD_PATH "/tmp"

uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	/* Add 1 to the buf address so that when buf sits exactly at the start of a 2MB page,
	 * SPDK_ALIGN_CEIL still yields the end of that page rather than buf itself.
	 */
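	/* For example (illustrative addresses): if buf == 0x200000, exactly on a 2MB
	 * boundary, ALIGN_CEIL(0x200000 + 1) == 0x400000 and bytes_to_page_end is a
	 * full 2MB; if buf == 0x3ffe00, only 0x200 bytes remain to the page end.
	 */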
	const uint8_t *page_2mb_end = (const uint8_t *)SPDK_ALIGN_CEIL((uintptr_t)buf + 1, VALUE_2MB);
	uint64_t bytes_to_page_end = page_2mb_end - (const uint8_t *)buf;
	uint64_t _size;

	if (*size) {
		_size = *size;
		_size = spdk_min(_size, bytes_to_page_end);
		*size = _size;
	}

	return (uintptr_t)buf;
}

enum ut_reduce_bdev_io_type {
	UT_REDUCE_IO_READV = 1,
	UT_REDUCE_IO_WRITEV = 2,
	UT_REDUCE_IO_UNMAP = 3,
};

struct ut_reduce_bdev_io {
	enum ut_reduce_bdev_io_type type;
	struct spdk_reduce_backing_dev *backing_dev;
	struct iovec *iov;
	int iovcnt;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_reduce_vol_cb_args *args;
	TAILQ_ENTRY(ut_reduce_bdev_io)	link;
};

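/* Deferred-I/O test harness: when g_defer_bdev_io is true, the backing device
 * callbacks below queue each request on g_pending_bdev_io instead of completing
 * it inline.  Tests then call backing_dev_io_execute() to complete queued I/O on
 * demand, which lets them exercise asynchronous completion ordering.
 */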
static bool g_defer_bdev_io = false;
static TAILQ_HEAD(, ut_reduce_bdev_io) g_pending_bdev_io =
	TAILQ_HEAD_INITIALIZER(g_pending_bdev_io);
static uint32_t g_pending_bdev_io_count = 0;
static struct spdk_thread *g_thread = NULL;

static void
sync_pm_buf(const void *addr, size_t length)
{
	uint64_t offset = (char *)addr - g_volatile_pm_buf;

	memcpy(&g_persistent_pm_buf[offset], addr, length);
}

int
pmem_msync(const void *addr, size_t length)
{
	sync_pm_buf(addr, length);
	return 0;
}

void
pmem_persist(const void *addr, size_t len)
{
	sync_pm_buf(addr, len);
}

static void
get_pm_file_size(void)
{
	struct spdk_reduce_vol_params params;
	uint64_t pm_size, expected_pm_size;

	params.backing_io_unit_size = 4096;
	params.chunk_size = 4096 * 4;
	params.vol_size = 4096 * 4 * 100;

	pm_size = _get_pm_file_size(&params);
	expected_pm_size = sizeof(struct spdk_reduce_vol_superblock);
	/* 100 chunks in logical map * 8 bytes per chunk */
	expected_pm_size += 100 * sizeof(uint64_t);
	/* 100 chunks * (chunk struct size + 4 backing io units per chunk * 8 bytes per backing io unit) */
	expected_pm_size += 100 * (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
	/* reduce also allocates some extra chunks for in-flight writes issued when the
	 * logical map is full.  REDUCE_NUM_EXTRA_CHUNKS is a private #define in reduce.c.
	 * Here we need that number of chunks times (chunk struct size + 4 backing io units
	 * per chunk * 8 bytes per backing io unit).
	 */
	expected_pm_size += REDUCE_NUM_EXTRA_CHUNKS *
			    (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
	/* reduce will add some padding so numbers may not match exactly.  Make sure
	 * they are close though.
	 */
	CU_ASSERT((pm_size - expected_pm_size) <= REDUCE_PM_SIZE_ALIGNMENT);
}

static void
get_vol_size(void)
{
	uint64_t chunk_size, backing_dev_size;

	chunk_size = 16 * 1024;
	backing_dev_size = 16 * 1024 * 1000;
	CU_ASSERT(_get_vol_size(chunk_size, backing_dev_size) < backing_dev_size);
}

void *
pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
	      size_t *mapped_lenp, int *is_pmemp)
{
	CU_ASSERT(g_volatile_pm_buf == NULL);
	snprintf(g_path, sizeof(g_path), "%s", path);
	*is_pmemp = 1;

	if (g_persistent_pm_buf == NULL) {
		g_persistent_pm_buf = calloc(1, len);
		g_persistent_pm_buf_len = len;
		SPDK_CU_ASSERT_FATAL(g_persistent_pm_buf != NULL);
	}

	*mapped_lenp = g_persistent_pm_buf_len;
	g_volatile_pm_buf = calloc(1, g_persistent_pm_buf_len);
	SPDK_CU_ASSERT_FATAL(g_volatile_pm_buf != NULL);
	memcpy(g_volatile_pm_buf, g_persistent_pm_buf, g_persistent_pm_buf_len);
	g_volatile_pm_buf_len = g_persistent_pm_buf_len;

	return g_volatile_pm_buf;
}

int
pmem_unmap(void *addr, size_t len)
{
	CU_ASSERT(addr == g_volatile_pm_buf);
	CU_ASSERT(len == g_volatile_pm_buf_len);
	free(g_volatile_pm_buf);
	g_volatile_pm_buf = NULL;
	g_volatile_pm_buf_len = 0;

	return 0;
}

static void
persistent_pm_buf_destroy(void)
{
	CU_ASSERT(g_persistent_pm_buf != NULL);
	free(g_persistent_pm_buf);
	g_persistent_pm_buf = NULL;
	g_persistent_pm_buf_len = 0;
}

static void
unlink_cb(void)
{
	persistent_pm_buf_destroy();
}

static void
init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}

static void
load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}

static void
unload_cb(void *cb_arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
init_failure(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	backing_dev.blocklen = 512;
	/* This blockcnt is too small for a reduce vol - there needs to be
	 *  enough space for at least REDUCE_NUM_EXTRA_CHUNKS + 1 chunks.
	 */
	backing_dev.blockcnt = 20;

	params.vol_size = 0;
	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = backing_dev.blocklen;
	params.logical_block_size = 512;

	/* backing_dev has an invalid size.  This should fail. */
	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);

	/* backing_dev now has a valid size, but its function pointers are still
	 *  NULL.  This should also fail.
	 */
	backing_dev.blockcnt = 20000;

	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);
}

static void
backing_dev_readv_execute(struct spdk_reduce_backing_dev *backing_dev,
			  struct iovec *iov, int iovcnt,
			  uint64_t lba, uint32_t lba_count,
			  struct spdk_reduce_vol_cb_args *args)
{
	char *offset;
	int i;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	for (i = 0; i < iovcnt; i++) {
		memcpy(iov[i].iov_base, offset, iov[i].iov_len);
		offset += iov[i].iov_len;
	}
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_insert_io(enum ut_reduce_bdev_io_type type, struct spdk_reduce_backing_dev *backing_dev,
		      struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
		      struct spdk_reduce_vol_cb_args *args)
{
	struct ut_reduce_bdev_io *ut_bdev_io;

	ut_bdev_io = calloc(1, sizeof(*ut_bdev_io));
	SPDK_CU_ASSERT_FATAL(ut_bdev_io != NULL);

	ut_bdev_io->type = type;
	ut_bdev_io->backing_dev = backing_dev;
	ut_bdev_io->iov = iov;
	ut_bdev_io->iovcnt = iovcnt;
	ut_bdev_io->lba = lba;
	ut_bdev_io->lba_count = lba_count;
	ut_bdev_io->args = args;
	TAILQ_INSERT_TAIL(&g_pending_bdev_io, ut_bdev_io, link);
	g_pending_bdev_io_count++;
}

static void
backing_dev_readv(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_readv_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_READV, backing_dev, iov, iovcnt, lba, lba_count, args);
}

static void
backing_dev_writev_execute(struct spdk_reduce_backing_dev *backing_dev,
			   struct iovec *iov, int iovcnt,
			   uint64_t lba, uint32_t lba_count,
			   struct spdk_reduce_vol_cb_args *args)
{
	char *offset;
	int i;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	for (i = 0; i < iovcnt; i++) {
		memcpy(offset, iov[i].iov_base, iov[i].iov_len);
		offset += iov[i].iov_len;
	}
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_writev(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
		   uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_writev_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_WRITEV, backing_dev, iov, iovcnt, lba, lba_count, args);
}

static void
backing_dev_unmap_execute(struct spdk_reduce_backing_dev *backing_dev,
			  uint64_t lba, uint32_t lba_count,
			  struct spdk_reduce_vol_cb_args *args)
{
	char *offset;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	memset(offset, 0, lba_count * backing_dev->blocklen);
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_unmap(struct spdk_reduce_backing_dev *backing_dev,
		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_unmap_execute(backing_dev, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_UNMAP, backing_dev, NULL, 0, lba, lba_count, args);
}

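/* Complete up to 'count' queued backing-device I/Os in FIFO order; count == 0
 * drains the entire queue.
 */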
static void
backing_dev_io_execute(uint32_t count)
{
	struct ut_reduce_bdev_io *ut_bdev_io;
	uint32_t done = 0;

	CU_ASSERT(g_defer_bdev_io == true);
	while (!TAILQ_EMPTY(&g_pending_bdev_io) && (count == 0 || done < count)) {
		ut_bdev_io = TAILQ_FIRST(&g_pending_bdev_io);
		TAILQ_REMOVE(&g_pending_bdev_io, ut_bdev_io, link);
		g_pending_bdev_io_count--;
		switch (ut_bdev_io->type) {
		case UT_REDUCE_IO_READV:
			backing_dev_readv_execute(ut_bdev_io->backing_dev,
						  ut_bdev_io->iov, ut_bdev_io->iovcnt,
						  ut_bdev_io->lba, ut_bdev_io->lba_count,
						  ut_bdev_io->args);
			break;
		case UT_REDUCE_IO_WRITEV:
			backing_dev_writev_execute(ut_bdev_io->backing_dev,
						   ut_bdev_io->iov, ut_bdev_io->iovcnt,
						   ut_bdev_io->lba, ut_bdev_io->lba_count,
						   ut_bdev_io->args);
			break;
		case UT_REDUCE_IO_UNMAP:
			backing_dev_unmap_execute(ut_bdev_io->backing_dev,
						  ut_bdev_io->lba, ut_bdev_io->lba_count,
						  ut_bdev_io->args);
			break;
		default:
			CU_ASSERT(false);
			break;
		}
		free(ut_bdev_io);
		done++;
	}
}

static void
backing_dev_submit_io(struct spdk_reduce_backing_io *backing_io)
{
	switch (backing_io->backing_io_type) {
	case SPDK_REDUCE_BACKING_IO_WRITE:
		backing_dev_writev(backing_io->dev, backing_io->iov, backing_io->iovcnt,
				   backing_io->lba, backing_io->lba_count, backing_io->backing_cb_args);
		break;
	case SPDK_REDUCE_BACKING_IO_READ:
		backing_dev_readv(backing_io->dev, backing_io->iov, backing_io->iovcnt,
				  backing_io->lba, backing_io->lba_count, backing_io->backing_cb_args);
		break;
	case SPDK_REDUCE_BACKING_IO_UNMAP:
		backing_dev_unmap(backing_io->dev, backing_io->lba, backing_io->lba_count,
				  backing_io->backing_cb_args);
		break;
	default:
		CU_ASSERT(false);
		break;
	}
}

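/* ut_compress()/ut_decompress() below implement a toy run-length encoding that
 * stands in for a real compression algorithm in these tests: each run of up to
 * UINT8_MAX identical bytes becomes a (count, value) pair.  For example
 * (illustrative): compressing "AAAB" yields {0x03, 'A', 0x01, 'B'}, and
 * decompressing that sequence reproduces the original 4 bytes.
 */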
static int
ut_compress(char *outbuf, uint32_t *compressed_len, char *inbuf, uint32_t inbuflen)
{
	uint32_t len = 0;
	uint8_t count;
	char last;

	while (true) {
		if (inbuflen == 0) {
			*compressed_len = len;
			return 0;
		}

		if (*compressed_len < (len + 2)) {
			return -ENOSPC;
		}

		last = *inbuf;
		count = 1;
		inbuflen--;
		inbuf++;

		while (inbuflen > 0 && *inbuf == last && count < UINT8_MAX) {
			count++;
			inbuflen--;
			inbuf++;
		}

		outbuf[len] = count;
		outbuf[len + 1] = last;
		len += 2;
	}
}

static int
ut_decompress(uint8_t *outbuf, uint32_t *decompressed_len, uint8_t *inbuf, uint32_t inbuflen)
{
	uint32_t len = 0;

	SPDK_CU_ASSERT_FATAL(inbuflen % 2 == 0);

	while (true) {
		if (inbuflen == 0) {
			*decompressed_len = len;
			return 0;
		}

		if ((len + inbuf[0]) > *decompressed_len) {
			return -ENOSPC;
		}

		memset(outbuf, inbuf[1], inbuf[0]);
		outbuf += inbuf[0];
		len += inbuf[0];
		inbuflen -= 2;
		inbuf += 2;
	}
}

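/* Fill 'data' with a deterministic pattern: each byte value is repeated 'repeat'
 * times before being incremented (wrapping at 256).  For example (illustrative),
 * init_val == 0 and repeat == 2 produces 0x00 0x00 0x01 0x01 0x02 0x02 ...
 */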
static void
ut_build_data_buffer(uint8_t *data, uint32_t data_len, uint8_t init_val, uint32_t repeat)
{
	uint32_t _repeat = repeat;

	SPDK_CU_ASSERT_FATAL(repeat > 0);

	while (data_len > 0) {
		*data = init_val;
		data++;
		data_len--;
		_repeat--;
		if (_repeat == 0) {
			init_val++;
			_repeat = repeat;
		}
	}
}

static void
backing_dev_compress(struct spdk_reduce_backing_dev *backing_dev,
		     struct iovec *src_iov, int src_iovcnt,
		     struct iovec *dst_iov, int dst_iovcnt,
		     struct spdk_reduce_vol_cb_args *args)
{
	uint32_t compressed_len;
	uint64_t total_length = 0;
	char *buf = g_decomp_buf;
	int rc, i;

	CU_ASSERT(dst_iovcnt == 1);

	for (i = 0; i < src_iovcnt; i++) {
		memcpy(buf, src_iov[i].iov_base, src_iov[i].iov_len);
		buf += src_iov[i].iov_len;
		total_length += src_iov[i].iov_len;
	}

	compressed_len = dst_iov[0].iov_len;
	rc = ut_compress(dst_iov[0].iov_base, &compressed_len,
			 g_decomp_buf, total_length);

	args->output_size = compressed_len;

	args->cb_fn(args->cb_arg, rc);
}

static void
backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
		       struct iovec *src_iov, int src_iovcnt,
		       struct iovec *dst_iov, int dst_iovcnt,
		       struct spdk_reduce_vol_cb_args *args)
{
	uint32_t decompressed_len = 0;
	char *buf = g_decomp_buf;
	int rc, i;

	CU_ASSERT(src_iovcnt == 1);

	for (i = 0; i < dst_iovcnt; i++) {
		decompressed_len += dst_iov[i].iov_len;
	}

	rc = ut_decompress(g_decomp_buf, &decompressed_len,
			   src_iov[0].iov_base, src_iov[0].iov_len);

	for (i = 0; i < dst_iovcnt; i++) {
		memcpy(dst_iov[i].iov_base, buf, dst_iov[i].iov_len);
		buf += dst_iov[i].iov_len;
	}

	args->output_size = decompressed_len;

	args->cb_fn(args->cb_arg, rc);
}

static void
backing_dev_destroy(struct spdk_reduce_backing_dev *backing_dev)
{
	/* We don't free this buffer when the vol is unloaded, so that we can test
	 *  init/unload/load scenarios.
	 */
	free(g_backing_dev_buf);
	free(g_decomp_buf);
	g_backing_dev_buf = NULL;
}

static void
backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce_vol_params *params,
		 uint32_t backing_blocklen)
{
	int64_t size;

	size = 4 * 1024 * 1024;
	backing_dev->blocklen = backing_blocklen;
	backing_dev->blockcnt = size / backing_dev->blocklen;
	backing_dev->submit_backing_io = backing_dev_submit_io;
	backing_dev->compress = backing_dev_compress;
	backing_dev->decompress = backing_dev_decompress;
	backing_dev->sgl_in = true;
	backing_dev->sgl_out = true;

	g_decomp_buf = calloc(1, params->chunk_size);
	SPDK_CU_ASSERT_FATAL(g_decomp_buf != NULL);

	g_backing_dev_buf = calloc(1, size);
	SPDK_CU_ASSERT_FATAL(g_backing_dev_buf != NULL);
}

static void
init_md(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};
	struct spdk_uuid uuid;
	uint64_t *entry;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	/* Confirm that reduce persisted the params to metadata. */
	CU_ASSERT(memcmp(g_persistent_pm_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_persistent_pm_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Now confirm that contents of pm_file after the superblock have been initialized
	 *  to REDUCE_EMPTY_MAP_ENTRY.
	 */
	entry = (uint64_t *)(g_persistent_pm_buf + sizeof(struct spdk_reduce_vol_superblock));
	while (entry != (uint64_t *)(g_persistent_pm_buf + g_vol->pm_file.size)) {
		CU_ASSERT(*entry == REDUCE_EMPTY_MAP_ENTRY);
		entry++;
	}

	/* Check that the pm file path was constructed correctly.  It should be in
	 * the form:
	 * TEST_MD_PATH + "/" + <uuid string>
	 */
	CU_ASSERT(strncmp(&g_path[0], TEST_MD_PATH, strlen(TEST_MD_PATH)) == 0);
	CU_ASSERT(g_path[strlen(TEST_MD_PATH)] == '/');
	CU_ASSERT(spdk_uuid_parse(&uuid, &g_path[strlen(TEST_MD_PATH) + 1]) == 0);
	CU_ASSERT(spdk_uuid_compare(&uuid, spdk_reduce_vol_get_uuid(g_vol)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(g_volatile_pm_buf == NULL);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
_init_backing_dev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	/* Confirm that libreduce persisted the params to the backing device. */
	CU_ASSERT(memcmp(g_backing_dev_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_backing_dev_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Confirm that the path to the persistent memory metadata file was persisted to
	 *  the backing device.
	 */
	CU_ASSERT(strncmp(g_path,
			  g_backing_dev_buf + REDUCE_BACKING_DEV_PATH_OFFSET,
			  REDUCE_PATH_MAX) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
init_backing_dev(void)
{
	_init_backing_dev(512);
	_init_backing_dev(4096);
}

static void
_load(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	char pmem_file_path[REDUCE_PATH_MAX];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	memcpy(pmem_file_path, g_path, sizeof(pmem_file_path));

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(g_path, pmem_file_path, sizeof(pmem_file_path)) == 0);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
load(void)
{
	_load(512);
	_load(4096);
}

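/* Look up the chunk map index for a logical block offset by reading the volume's
 * persistent logical map directly; REDUCE_EMPTY_MAP_ENTRY indicates the chunk
 * has never been written.
 */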
static uint64_t
_vol_get_chunk_map_index(struct spdk_reduce_vol *vol, uint64_t offset)
{
	uint64_t logical_map_index = offset / vol->logical_blocks_per_chunk;

	return vol->pm_logical_map[logical_map_index];
}

static void
write_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
read_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
unmap_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
_write_maps(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	const int bufsize = 16 * 1024; /* chunk size */
	char buf[bufsize];
	uint32_t num_lbas, i;
	uint64_t old_chunk0_map_index, new_chunk0_map_index;
	struct spdk_reduce_chunk_map *old_chunk0_map, *new_chunk0_map;

	params.chunk_size = bufsize;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	num_lbas = bufsize / params.logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	for (i = 0; i < g_vol->params.vol_size / g_vol->params.chunk_size; i++) {
		CU_ASSERT(_vol_get_chunk_map_index(g_vol, i) == REDUCE_EMPTY_MAP_ENTRY);
	}

	ut_build_data_buffer(buf, bufsize, 0x00, 1);
	iov.iov_base = buf;
	iov.iov_len = bufsize;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	old_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(old_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == true);

	old_chunk0_map = _reduce_vol_get_chunk_map(g_vol, old_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(old_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	new_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(new_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(new_chunk0_map_index != old_chunk0_map_index);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, new_chunk0_map_index) == true);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == false);

	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == false);
	}

	new_chunk0_map = _reduce_vol_get_chunk_map(g_vol, new_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(new_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     new_chunk0_map->io_unit_index[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
write_maps(void)
{
	_write_maps(512);
	_write_maps(4096);
}

static void
_read_write(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	char buf[16 * 1024]; /* chunk size */
	char compare_buf[16 * 1024];
	uint32_t i;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 2 512-byte logical blocks, starting at LBA 2. */
	memset(buf, 0xAA, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xAA, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Overwrite what we just wrote with 0xCC */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	memset(buf, 0xCC, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xCC, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;

	/* Write 0xBB to 2 512-byte logical blocks, starting at LBA 37.
	 * This is writing into the second chunk of the volume.  This also
	 * enables implicitly checking that we reloaded the bit arrays
	 * correctly - making sure we don't use the first chunk map again
	 * for this new write - the first chunk map was already used by the
	 * write from before we unloaded and reloaded.
	 */
	memset(buf, 0xBB, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 37, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	for (i = 0; i < 2 * params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			memset(compare_buf, 0xCC, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		case 37:
		case 38:
			memset(compare_buf, 0xBB, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
read_write(void)
{
	_read_write(512);
	_read_write(4096);
}

static void
_readv_writev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov[REDUCE_MAX_IOVECS + 1];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, iov, REDUCE_MAX_IOVECS + 1, 2, REDUCE_MAX_IOVECS + 1, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
readv_writev(void)
{
	_readv_writev(512);
	_readv_writev(4096);
}

/* 1. write offset  0KB, length 32KB, with 0xAA
 * 2. unmap offset  8KB, length 24KB - fails with -EINVAL; verify data is intact
 * 3. unmap offset  8KB, length  8KB
 * 4. unmap offset 16KB, length 16KB
 * 5. read both full chunks and verify
 */
static void
write_unmap_verify(void)
{
	uint32_t backing_blocklen = 512;
	uint64_t blocks_per_chunk;

	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	char buf[16 * 1024]; /* chunk size */
	char compare_buf[32 * 1024];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	blocks_per_chunk = params.chunk_size / params.logical_block_size;
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* 1. write offset 0KB, length 32KB, with 0xAA */
	iov.iov_base = buf;
	iov.iov_len = sizeof(buf);
	memset(buf, 0xAA, sizeof(buf));
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, blocks_per_chunk, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, blocks_per_chunk, blocks_per_chunk, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xAA, sizeof(compare_buf));

	memset(buf, 0xFF, sizeof(buf));
	g_reduce_errno = -1;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, blocks_per_chunk, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);

	memset(buf, 0xFF, sizeof(buf));
	g_reduce_errno = -1;
	spdk_reduce_vol_readv(g_vol, &iov, 1, blocks_per_chunk, blocks_per_chunk, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf + sizeof(buf), sizeof(buf)) == 0);

	/* 2. unmap offset 8KB, length 24KB - expect -EINVAL */
	g_reduce_errno = 0;
	spdk_reduce_vol_unmap(g_vol, 8 * 1024 / params.logical_block_size,
			      24 * 1024 / params.logical_block_size, unmap_cb, NULL);
	spdk_thread_poll(g_thread, 0, 0);
	CU_ASSERT(g_reduce_errno == -EINVAL);

	memset(buf, 0xFF, sizeof(buf));
	g_reduce_errno = -1;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, blocks_per_chunk, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);

	memset(buf, 0xFF, sizeof(buf));
	g_reduce_errno = -1;
	spdk_reduce_vol_readv(g_vol, &iov, 1, blocks_per_chunk, blocks_per_chunk, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf + sizeof(buf), sizeof(buf)) == 0);

	/* 3. unmap offset 8KB, length 8KB */
	g_reduce_errno = -1;
	spdk_reduce_vol_unmap(g_vol, 8 * 1024 / params.logical_block_size,
			      8 * 1024 / params.logical_block_size, unmap_cb, NULL);
	spdk_thread_poll(g_thread, 0, 0);
	CU_ASSERT(g_reduce_errno == 0);
	memset(compare_buf + 8 * 1024, 0x00, 8 * 1024);

	/* 4. unmap offset 16KB, length 16KB */
	g_reduce_errno = -1;
	spdk_reduce_vol_unmap(g_vol, 16 * 1024 / params.logical_block_size,
			      16 * 1024 / params.logical_block_size, unmap_cb, NULL);
	spdk_thread_poll(g_thread, 0, 0);
	CU_ASSERT(g_reduce_errno == 0);
	memset(compare_buf + 16 * 1024, 0x00, 16 * 1024);

	/* 5. read both full chunks and verify */
	memset(buf, 0xFF, sizeof(buf));
	g_reduce_errno = -1;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, blocks_per_chunk, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);

	memset(buf, 0xFF, sizeof(buf));
	g_reduce_errno = -1;
	spdk_reduce_vol_readv(g_vol, &iov, 1, blocks_per_chunk, blocks_per_chunk, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf + 16 * 1024, sizeof(buf)) == 0);
	/* done */

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
destroy_cb(void *ctx, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
destroy(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = -1;
	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	spdk_reduce_vol_destroy(&backing_dev, destroy_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = 0;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EILSEQ);

	backing_dev_destroy(&backing_dev);
}

/* This test primarily checks that the reduce unit test infrastructure for asynchronous
 * backing device I/O operations is working correctly.
 */
static void
defer_bdev_io(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[logical_block_size];
	char compare_buf[logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
	 * very compressible.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	backing_dev_io_execute(0);
	CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
	CU_ASSERT(g_reduce_errno == 0);

	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 1, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

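/* Verify that overlapped I/O to the same chunk is serialized: with backing I/O
 * deferred, a second write to a chunk with an in-flight write must not submit
 * any backing I/O of its own until the first write completes.
 */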
static void
overlapped(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[2 * logical_block_size];
	char compare_buf[2 * logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = logical_block_size;
	g_reduce_errno = -100;
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
	 * very compressible.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	/* Now do an overlapped I/O to the same chunk. */
	spdk_reduce_vol_writev(g_vol, &iov, 1, 1, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* The second I/O overlaps with the first one.  So we should only see pending bdev_io
	 * related to the first I/O here - the second one won't start until the first one is completed.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	backing_dev_io_execute(0);
	CU_ASSERT(g_reduce_errno == 0);

	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = 2 * logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 2, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, 2 * logical_block_size) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

#define BUFSIZE 4096

static void
compress_algorithm(void)
{
	uint8_t original_data[BUFSIZE];
	uint8_t compressed_data[BUFSIZE];
	uint8_t decompressed_data[BUFSIZE];
	uint32_t compressed_len, decompressed_len;
	int rc;

	ut_build_data_buffer(original_data, BUFSIZE, 0xAA, BUFSIZE);
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 2);
	CU_ASSERT(compressed_data[0] == UINT8_MAX);
	CU_ASSERT(compressed_data[1] == 0xAA);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == UINT8_MAX);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX + 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 4);
	CU_ASSERT(compressed_data[0] == UINT8_MAX);
	CU_ASSERT(compressed_data[1] == 0xAA);
	CU_ASSERT(compressed_data[2] == 1);
	CU_ASSERT(compressed_data[3] == 0xAA);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == UINT8_MAX + 1);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	ut_build_data_buffer(original_data, BUFSIZE, 0x00, 1);
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, 2048);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 4096);
	CU_ASSERT(compressed_data[0] == 1);
	CU_ASSERT(compressed_data[1] == 0);
	CU_ASSERT(compressed_data[4094] == 1);
	CU_ASSERT(compressed_data[4095] == 0xFF);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == 2048);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, 2049);
	CU_ASSERT(rc == -ENOSPC);
}

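/* Exercise _prepare_compress_chunk(), which builds req->decomp_iov[] describing a
 * full chunk of data to compress.  Based on the assertions below: the user's
 * iovecs are referenced directly when possible and copied into req->decomp_buf
 * otherwise (e.g. no sgl_in support, or an unaligned buffer); any gap before or
 * after the user data is filled from req->decomp_buf when the second argument is
 * false, or from g_zero_buf when it is true.
 */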
static void
test_prepare_compress_chunk(void)
{
	struct spdk_reduce_vol vol = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct spdk_reduce_vol_request req = {};
	void *buf;
	char *buffer_end, *aligned_user_buffer, *unaligned_user_buffer;
	char decomp_buffer[16 * 1024] = {};
	char comp_buffer[16 * 1024] = {};
	struct iovec user_iov[2] = {};
	size_t user_buffer_iov_len = 8192;
	size_t remainder_bytes;
	size_t offset_bytes;
	size_t memcmp_offset;
	uint32_t i;

	vol.params.chunk_size = 16 * 1024;
	vol.params.backing_io_unit_size = 4096;
	vol.params.logical_block_size = 512;
	backing_dev_init(&backing_dev, &vol.params, 512);
	vol.backing_dev = &backing_dev;
	vol.logical_blocks_per_chunk = vol.params.chunk_size / vol.params.logical_block_size;

	/* Allocate 1 extra byte to test the case where the buffer crosses a huge page boundary */
	SPDK_CU_ASSERT_FATAL(posix_memalign(&buf, VALUE_2MB, VALUE_2MB + 1) == 0);
	buffer_end = (char *)buf + VALUE_2MB + 1;
	aligned_user_buffer = (char *)buf;
	memset(aligned_user_buffer, 0xc, vol.params.chunk_size);
	unaligned_user_buffer = buffer_end - vol.params.chunk_size;
	memset(unaligned_user_buffer, 0xc, vol.params.chunk_size);

	req.vol = &vol;
	req.decomp_buf = decomp_buffer;
	req.comp_buf = comp_buffer;
	req.iov = user_iov;
	req.iovcnt = 2;
	req.offset = 0;

	/* Part 1 - backing dev supports sgl_in */
	/* Test 1 - total length of the user's buffers equals chunk_size */
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
	}

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 2);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 2);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}

	/* Test 2 - user's buffers cover less than chunk_size, no offset */
	user_buffer_iov_len = 4096;
	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
	}

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 3);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(req.decomp_iov[i].iov_base == req.decomp_buf + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 3);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(req.decomp_iov[i].iov_base == g_zero_buf + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);

	/* Test 3 - user's buffers cover less than chunk_size, non-zero offset */
	user_buffer_iov_len = 4096;
	req.offset = 3;
	offset_bytes = req.offset * vol.params.logical_block_size;
	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 4);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(req.decomp_iov[3].iov_base == req.decomp_buf + offset_bytes + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 4);
	CU_ASSERT(req.decomp_iov[0].iov_base == g_zero_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(req.decomp_iov[3].iov_base == g_zero_buf + offset_bytes + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);

	/* Part 2 - backing dev doesn't support sgl_in */
	/* Test 1 - total length of the user's buffers equals chunk_size;
	 * the user's buffers are copied */
	vol.backing_dev->sgl_in = false;
	req.offset = 0;
	user_buffer_iov_len = 8192;
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
	}

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base, req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + req.iov[0].iov_len, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base, req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + req.iov[0].iov_len, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);

	/* Test 2 - a single user buffer whose length equals chunk_size but is not aligned;
	 * the user's buffer is copied */
	req.iov[0].iov_base = unaligned_user_buffer;
	req.iov[0].iov_len = vol.params.chunk_size;
	req.iovcnt = 1;
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);

	/* Test 3 - a single aligned user buffer whose length equals chunk_size;
	 * the user's buffer is not copied */
	req.iov[0].iov_base = aligned_user_buffer;
	req.iov[0].iov_len = vol.params.chunk_size;
	req.iovcnt = 1;
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);

	/* Test 4 - user's buffers cover less than chunk_size, no offset;
	 * the user's buffers are copied */
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	user_buffer_iov_len = 4096;
	req.iovcnt = 2;
	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
	}

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	memcmp_offset = 0;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	memcmp_offset += req.iov[0].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	memcmp_offset += req.iov[1].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf + memcmp_offset,
			 remainder_bytes) == 0);
1622 
1623 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1624 
1625 	_prepare_compress_chunk(&req, true);
1626 	CU_ASSERT(req.decomp_iovcnt == 1);
1627 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1628 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1629 	memcmp_offset = 0;
1630 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
1631 			 req.iov[0].iov_len) == 0);
1632 	memcmp_offset += req.iov[0].iov_len;
1633 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
1634 			 req.iov[1].iov_len) == 0);
	memcmp_offset += req.iov[1].iov_len;
1636 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf + memcmp_offset,
1637 			 remainder_bytes) == 0);
1638 
	/* Test 5 - user buffers smaller than chunk_size, non-zero offset.
	 * The user's buffers are copied. */
1641 	req.offset = 3;
1642 	offset_bytes = req.offset * vol.params.logical_block_size;
1643 	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
1644 
1645 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1646 
1647 	_prepare_compress_chunk(&req, false);
1648 	CU_ASSERT(req.decomp_iovcnt == 1);
1649 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1650 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1651 	memcmp_offset = 0;
1652 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf, offset_bytes) == 0);
1653 	memcmp_offset += offset_bytes;
1654 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
1655 			 req.iov[0].iov_len) == 0);
1656 	memcmp_offset += req.iov[0].iov_len;
1657 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
1658 			 req.iov[1].iov_len) == 0);
1659 	memcmp_offset += req.iov[1].iov_len;
1660 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf + memcmp_offset,
1661 			 remainder_bytes) == 0);
1662 
1663 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1664 
1665 	_prepare_compress_chunk(&req, true);
1666 	CU_ASSERT(req.decomp_iovcnt == 1);
1667 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1668 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1669 	memcmp_offset = 0;
1670 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf, offset_bytes) == 0);
1671 	memcmp_offset += offset_bytes;
1672 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
1673 			 req.iov[0].iov_len) == 0);
1674 	memcmp_offset += req.iov[0].iov_len;
1675 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
1676 			 req.iov[1].iov_len) == 0);
1677 	memcmp_offset += req.iov[1].iov_len;
1678 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf + memcmp_offset,
1679 			 remainder_bytes) == 0);
1680 	backing_dev_destroy(&backing_dev);
1681 	free(buf);
1682 }
1683 
1684 static void
1685 _reduce_vol_op_complete(void *ctx, int reduce_errno)
1686 {
1687 	g_reduce_errno = reduce_errno;
1688 }
1689 
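/* Decompress stub for the backing dev: it ignores the source and destination iovecs,
 * reports g_decompressed_len as the decompressed size and completes synchronously.
 * This lets the tests below exercise only the iovec-preparation logic. */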
1690 static void
1691 dummy_backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
1692 			     struct iovec *src_iov, int src_iovcnt,
1693 			     struct iovec *dst_iov, int dst_iovcnt,
1694 			     struct spdk_reduce_vol_cb_args *args)
1695 {
1696 	args->output_size = g_decompressed_len;
1697 	args->cb_fn(args->cb_arg, 0);
}

static void
1700 test_reduce_decompress_chunk(void)
1701 {
1702 	struct spdk_reduce_vol vol = {};
1703 	struct spdk_reduce_backing_dev backing_dev = {};
1704 	struct spdk_reduce_vol_request req = {};
1705 	void *buf;
1706 	char *buffer_end, *aligned_user_buffer, *unaligned_user_buffer;
1707 	char decomp_buffer[16 * 1024] = {};
1708 	char comp_buffer[16 * 1024] = {};
1709 	struct iovec user_iov[2] = {};
1710 	struct iovec comp_buf_iov = {};
1711 	struct spdk_reduce_chunk_map chunk = {};
1712 	size_t user_buffer_iov_len = 8192;
1713 	size_t remainder_bytes;
1714 	size_t offset_bytes;
1715 	uint32_t i;
1716 
1717 	vol.params.chunk_size = 16 * 1024;
1718 	vol.params.backing_io_unit_size = 4096;
1719 	vol.params.logical_block_size = 512;
1720 	backing_dev_init(&backing_dev, &vol.params, 512);
1721 	backing_dev.decompress = dummy_backing_dev_decompress;
1722 	vol.backing_dev = &backing_dev;
1723 	vol.logical_blocks_per_chunk = vol.params.chunk_size / vol.params.logical_block_size;
1724 	RB_INIT(&vol.executing_requests);
1725 	TAILQ_INIT(&vol.queued_requests);
1726 	TAILQ_INIT(&vol.free_requests);
1727 
1728 	/* Allocate 1 extra byte to test a case when buffer crosses huge page boundary */
1729 	SPDK_CU_ASSERT_FATAL(posix_memalign(&buf, VALUE_2MB, VALUE_2MB + 1) == 0);
1730 	buffer_end = (char *)buf + VALUE_2MB + 1;
1731 	aligned_user_buffer = (char *)buf;
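	/* The unaligned buffer occupies the last chunk_size bytes of the allocation and
	 * therefore straddles the 2MB page boundary. */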
1732 	unaligned_user_buffer = buffer_end - vol.params.chunk_size;
1733 
1734 	chunk.compressed_size = user_buffer_iov_len / 2;
1735 	req.chunk = &chunk;
1736 	req.vol = &vol;
1737 	req.decomp_buf = decomp_buffer;
1738 	req.comp_buf = comp_buffer;
1739 	req.comp_buf_iov = &comp_buf_iov;
1740 	req.iov = user_iov;
1741 	req.iovcnt = 2;
1742 	req.offset = 0;
1743 	req.cb_fn = _reduce_vol_op_complete;
1744 
1745 	/* Part 1 - backing dev supports sgl_out */
	/* Test 1 - combined user buffer length equals chunk_size */
1747 	for (i = 0; i < 2; i++) {
1748 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1749 		req.iov[i].iov_len = user_buffer_iov_len;
1750 		memset(req.iov[i].iov_base, 0, req.iov[i].iov_len);
1751 	}
1752 	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
1753 	g_reduce_errno = -1;
1754 	g_decompressed_len = vol.params.chunk_size;
1755 
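	/* sgl_out is supported and the buffers span the whole chunk, so the decomp iovec
	 * should alias the user buffers directly - no copy after decompression. */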
1756 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1757 	CU_ASSERT(g_reduce_errno == 0);
1758 	CU_ASSERT(req.copy_after_decompress == false);
1759 	CU_ASSERT(req.decomp_iovcnt == 2);
1760 	for (i = 0; i < 2; i++) {
1761 		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
1762 		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
1763 	}
1764 	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
1765 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1766 
	/* Test 2 - user buffers smaller than chunk_size, zero offset */
1768 	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
1769 	g_reduce_errno = -1;
1770 	user_buffer_iov_len = 4096;
1771 	for (i = 0; i < 2; i++) {
1772 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1773 		req.iov[i].iov_len = user_buffer_iov_len;
1774 		memset(req.iov[i].iov_base, 0, req.iov[i].iov_len);
1775 	}
1776 	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
1777 
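	/* A third iovec entry should cover the tail of the chunk out of decomp_buf. */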
1778 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1779 	CU_ASSERT(g_reduce_errno == 0);
1780 	CU_ASSERT(req.copy_after_decompress == false);
1781 	CU_ASSERT(req.decomp_iovcnt == 3);
1782 	for (i = 0; i < 2; i++) {
1783 		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
1784 		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
1785 	}
1786 	CU_ASSERT(req.decomp_iov[i].iov_base == req.decomp_buf + user_buffer_iov_len * 2);
1787 	CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);
1788 	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
1789 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1790 
	/* Test 3 - user buffers smaller than chunk_size, non-zero offset */
1792 	req.offset = 3;
1793 	offset_bytes = req.offset * vol.params.logical_block_size;
1794 	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
1795 	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
1796 	g_reduce_errno = -1;
1797 
1798 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1799 	CU_ASSERT(g_reduce_errno == 0);
1800 	CU_ASSERT(req.copy_after_decompress == false);
1801 	CU_ASSERT(req.decomp_iovcnt == 4);
1802 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1803 	CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
1804 	for (i = 0; i < 2; i++) {
1805 		CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
1806 		CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
1807 	}
1808 	CU_ASSERT(req.decomp_iov[3].iov_base == req.decomp_buf + offset_bytes + user_buffer_iov_len * 2);
1809 	CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);
1810 	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
1811 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1812 
1813 	/* Part 2 - backing dev doesn't support sgl_out */
	/* Test 1 - combined user buffer length equals chunk_size.
	 * The user's buffers are copied. */
1816 	vol.backing_dev->sgl_out = false;
1817 	req.offset = 0;
1818 	user_buffer_iov_len = 8192;
1819 
1820 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1821 	for (i = 0; i < 2; i++) {
1822 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1823 		req.iov[i].iov_len = user_buffer_iov_len;
1824 		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
1825 	}
1826 	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
1827 	g_reduce_errno = -1;
1828 
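	/* Without sgl_out the chunk is decompressed into decomp_buf as a single iovec
	 * and then copied back into the user's buffers (copy_after_decompress). */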
1829 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1830 	CU_ASSERT(g_reduce_errno == 0);
1831 	CU_ASSERT(req.copy_after_decompress == true);
1832 	CU_ASSERT(req.decomp_iovcnt == 1);
1833 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1834 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1835 	CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base, req.iov[0].iov_len) == 0);
1836 	CU_ASSERT(memcmp(req.iov[1].iov_base, req.decomp_iov[0].iov_base + req.iov[0].iov_len,
1837 			 req.iov[1].iov_len) == 0);
1838 	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
1839 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1840 
	/* Test 2 - a single user buffer whose length equals chunk_size but which is not aligned.
	 * The user's buffer is copied. */
1843 	memset(unaligned_user_buffer, 0xc, vol.params.chunk_size);
1844 	req.iov[0].iov_base = unaligned_user_buffer;
1845 	req.iov[0].iov_len = vol.params.chunk_size;
1846 	req.iovcnt = 1;
1847 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1848 	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
1849 	g_reduce_errno = -1;
1850 
1851 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1852 	CU_ASSERT(g_reduce_errno == 0);
1853 	CU_ASSERT(req.copy_after_decompress == true);
1854 	CU_ASSERT(req.decomp_iovcnt == 1);
1855 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1856 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1857 	CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base,
1858 			 req.iov[0].iov_len) == 0);
1859 
	/* Test 3 - a single aligned user buffer whose length equals chunk_size.
	 * The user's buffer is used in place, not copied. */
1862 	req.iov[0].iov_base = aligned_user_buffer;
1863 	req.iov[0].iov_len = vol.params.chunk_size;
1864 	req.iovcnt = 1;
1865 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1866 	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
1867 	g_reduce_errno = -1;
1868 
1869 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1870 	CU_ASSERT(g_reduce_errno == 0);
1871 	CU_ASSERT(req.copy_after_decompress == false);
1872 	CU_ASSERT(req.decomp_iovcnt == 1);
1873 	CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
1874 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1875 
	/* Test 4 - user buffers smaller than chunk_size, zero offset.
	 * The user's buffers are copied. */
1878 	user_buffer_iov_len = 4096;
1879 	req.iovcnt = 2;
1880 	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
1881 	for (i = 0; i < 2; i++) {
1882 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1883 		req.iov[i].iov_len = user_buffer_iov_len;
1884 		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
1885 	}
1886 
1887 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1888 	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
1889 	g_reduce_errno = -1;
1890 
1891 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1892 	CU_ASSERT(g_reduce_errno == 0);
1893 	CU_ASSERT(req.copy_after_decompress == true);
1894 	CU_ASSERT(req.decomp_iovcnt == 1);
1895 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1896 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1897 	CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base,
1898 			 req.iov[0].iov_len) == 0);
1899 	CU_ASSERT(memcmp(req.iov[1].iov_base, req.decomp_iov[0].iov_base + req.iov[0].iov_len,
1900 			 req.iov[1].iov_len) == 0);
1901 	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
1902 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1903 
	/* Test 5 - user buffers smaller than chunk_size, non-zero offset.
	 * The user's buffers are copied. */
1906 	req.offset = 3;
1907 	offset_bytes = req.offset * vol.params.logical_block_size;
1908 	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
1909 
1910 	for (i = 0; i < 2; i++) {
1911 		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
1912 		req.iov[i].iov_len = user_buffer_iov_len;
1913 		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
1914 	}
1915 
1916 	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
1917 	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
1918 	g_reduce_errno = -1;
1919 
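	/* Seed decomp_buf with the user data first; the decompress stub leaves the buffers
	 * untouched, so this is presumably what makes the copy-back below verifiable. */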
1920 	_prepare_compress_chunk(&req, false);
1921 	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
1922 	CU_ASSERT(g_reduce_errno == 0);
1923 	CU_ASSERT(req.copy_after_decompress == true);
1924 	CU_ASSERT(req.decomp_iovcnt == 1);
1925 	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
1926 	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
1927 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + offset_bytes, req.iov[0].iov_base,
1928 			 req.iov[0].iov_len) == 0);
1929 	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + offset_bytes + req.iov[0].iov_len,
1930 			 req.iov[1].iov_base,
1931 			 req.iov[1].iov_len) == 0);
1932 	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
1933 	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
1934 
	backing_dev_destroy(&backing_dev);
	free(buf);
1936 }
1937 
1938 static void
1939 test_allocate_vol_requests(void)
1940 {
1941 	struct spdk_reduce_vol *vol;
1942 	struct spdk_reduce_backing_dev backing_dev = {};
	/* include chunk sizes which are not powers of 2 */
1944 	uint32_t chunk_sizes[] = {8192, 8320, 16384, 16416, 32768};
1945 	uint32_t io_unit_sizes[] = {512, 520, 4096, 4104, 4096};
1946 	uint32_t i;
1947 
	/* the bdev compress module can specify how large user_ctx_size needs to be */
1949 	backing_dev.user_ctx_size = 64;
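	/* For each geometry: validate the params, allocate the request pool, then release
	 * everything again via _init_load_cleanup(). */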
	for (i = 0; i < SPDK_COUNTOF(chunk_sizes); i++) {
1951 		vol = calloc(1, sizeof(*vol));
1952 		SPDK_CU_ASSERT_FATAL(vol);
1953 
1954 		vol->params.chunk_size = chunk_sizes[i];
1955 		vol->params.logical_block_size = io_unit_sizes[i];
1956 		vol->params.backing_io_unit_size = io_unit_sizes[i];
1957 		vol->backing_io_units_per_chunk = vol->params.chunk_size / vol->params.backing_io_unit_size;
1958 		vol->logical_blocks_per_chunk = vol->params.chunk_size / vol->params.logical_block_size;
1959 		vol->backing_dev = &backing_dev;
1960 
1961 		CU_ASSERT(_validate_vol_params(&vol->params) == 0);
1962 		CU_ASSERT(_allocate_vol_requests(vol) == 0);
1963 		_init_load_cleanup(vol, NULL);
1964 	}
1965 }
1966 
1967 int
1968 main(int argc, char **argv)
1969 {
1970 	CU_pSuite	suite = NULL;
1971 	unsigned int	num_failures;
1972 
1973 	CU_initialize_registry();
1974 
1975 	suite = CU_add_suite("reduce", NULL, NULL);
1976 
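	/* Run the whole suite on a dedicated SPDK thread and drain it on exit;
	 * spdk_set_thread() makes it the current thread for the library calls below. */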
1977 	spdk_thread_lib_init(NULL, 0);
1978 	g_thread = spdk_thread_create(NULL, NULL);
1979 	spdk_set_thread(g_thread);
1980 
1981 	CU_ADD_TEST(suite, get_pm_file_size);
1982 	CU_ADD_TEST(suite, get_vol_size);
1983 	CU_ADD_TEST(suite, init_failure);
1984 	CU_ADD_TEST(suite, init_md);
1985 	CU_ADD_TEST(suite, init_backing_dev);
1986 	CU_ADD_TEST(suite, load);
1987 	CU_ADD_TEST(suite, write_maps);
1988 	CU_ADD_TEST(suite, read_write);
1989 	CU_ADD_TEST(suite, readv_writev);
1990 	CU_ADD_TEST(suite, write_unmap_verify);
1991 	CU_ADD_TEST(suite, destroy);
1992 	CU_ADD_TEST(suite, defer_bdev_io);
1993 	CU_ADD_TEST(suite, overlapped);
1994 	CU_ADD_TEST(suite, compress_algorithm);
1995 	CU_ADD_TEST(suite, test_prepare_compress_chunk);
1996 	CU_ADD_TEST(suite, test_reduce_decompress_chunk);
1997 	CU_ADD_TEST(suite, test_allocate_vol_requests);
1998 
1999 	g_unlink_path = g_path;
2000 	g_unlink_callback = unlink_cb;
2001 
2002 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2003 
2004 	spdk_thread_exit(g_thread);
2005 	while (!spdk_thread_is_exited(g_thread)) {
2006 		spdk_thread_poll(g_thread, 0, 0);
2007 	}
2008 	spdk_thread_destroy(g_thread);
2009 	spdk_thread_lib_fini();
2010 
2011 	CU_cleanup_registry();
2012 	return num_failures;
2013 }
2014