/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "reduce/reduce.c"
#include "spdk_internal/mock.h"
#define UNIT_TEST_NO_VTOPHYS
#include "common/lib/test_env.c"
#undef UNIT_TEST_NO_VTOPHYS

static struct spdk_reduce_vol *g_vol;
static int g_reduce_errno;
static char *g_volatile_pm_buf;
static size_t g_volatile_pm_buf_len;
static char *g_persistent_pm_buf;
static size_t g_persistent_pm_buf_len;
static char *g_backing_dev_buf;
static char g_path[REDUCE_PATH_MAX];
static char *g_decomp_buf;
static int g_decompressed_len;

#define TEST_MD_PATH "/tmp"

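/* Stub for spdk_vtophys() that performs an identity translation but still
 * honors 2MB page boundaries: *size is clamped so that a single translation
 * never crosses a 2MB page.  For example (assuming VALUE_2MB == 0x200000),
 * with buf == 0x1ff000 and *size == 0x2000, *size is clamped to 0x1000, the
 * number of bytes remaining before the page boundary.
 */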
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	/* Add 1 to the buf address so that when buf is exactly page-aligned we
	 * still compute the end of its own page rather than buf itself.
	 */
	const uint8_t *page_2mb_end = (const uint8_t *)SPDK_ALIGN_CEIL((uintptr_t)buf + 1, VALUE_2MB);
	uint64_t bytes_to_page_end = page_2mb_end - (const uint8_t *)buf;
	uint64_t _size;

	if (*size) {
		_size = *size;
		_size = spdk_min(_size, bytes_to_page_end);
		*size = _size;
	}

	return (uintptr_t)buf;
}

enum ut_reduce_bdev_io_type {
	UT_REDUCE_IO_READV = 1,
	UT_REDUCE_IO_WRITEV = 2,
	UT_REDUCE_IO_UNMAP = 3,
};

struct ut_reduce_bdev_io {
	enum ut_reduce_bdev_io_type type;
	struct spdk_reduce_backing_dev *backing_dev;
	struct iovec *iov;
	int iovcnt;
	uint64_t lba;
	uint32_t lba_count;
	struct spdk_reduce_vol_cb_args *args;
	TAILQ_ENTRY(ut_reduce_bdev_io)	link;
};

static bool g_defer_bdev_io = false;
static TAILQ_HEAD(, ut_reduce_bdev_io) g_pending_bdev_io =
	TAILQ_HEAD_INITIALIZER(g_pending_bdev_io);
static uint32_t g_pending_bdev_io_count = 0;

static void
sync_pm_buf(const void *addr, size_t length)
{
	uint64_t offset = (char *)addr - g_volatile_pm_buf;

	memcpy(&g_persistent_pm_buf[offset], addr, length);
}

int
pmem_msync(const void *addr, size_t length)
{
	sync_pm_buf(addr, length);
	return 0;
}

void
pmem_persist(const void *addr, size_t len)
{
	sync_pm_buf(addr, len);
}

static void
get_pm_file_size(void)
{
	struct spdk_reduce_vol_params params;
	uint64_t pm_size, expected_pm_size;

	params.backing_io_unit_size = 4096;
	params.chunk_size = 4096 * 4;
	params.vol_size = 4096 * 4 * 100;

	pm_size = _get_pm_file_size(&params);
	expected_pm_size = sizeof(struct spdk_reduce_vol_superblock);
	/* 100 chunks in logical map * 8 bytes per chunk */
	expected_pm_size += 100 * sizeof(uint64_t);
	/* 100 chunks * (chunk struct size + 4 backing io units per chunk * 8 bytes per backing io unit) */
	expected_pm_size += 100 * (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
	/* reduce also allocates some extra chunks for in-flight writes when the logical
	 * map is full.  REDUCE_NUM_EXTRA_CHUNKS is a private #define in reduce.c.  Here we
	 * need that number of chunks times (chunk struct size + 4 backing io units per
	 * chunk * 8 bytes per backing io unit).
	 */
	expected_pm_size += REDUCE_NUM_EXTRA_CHUNKS *
			    (sizeof(struct spdk_reduce_chunk_map) + 4 * sizeof(uint64_t));
	/* reduce will add some padding so numbers may not match exactly.  Make sure
	 * they are close though.
	 */
	CU_ASSERT((pm_size - expected_pm_size) <= REDUCE_PM_SIZE_ALIGNMENT);
}
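
/* For reference: chunk_size = 16KB with 4KB backing io units gives 4 io units
 * per chunk, and vol_size = 100 chunks, so the logical map above holds 100
 * 8-byte entries and 100 + REDUCE_NUM_EXTRA_CHUNKS chunk maps are laid out
 * after it.  _get_pm_file_size() pads the total up to REDUCE_PM_SIZE_ALIGNMENT,
 * which is why the assertion only bounds the difference.
 */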

static void
get_vol_size(void)
{
	uint64_t chunk_size, backing_dev_size;

	chunk_size = 16 * 1024;
	backing_dev_size = 16 * 1024 * 1000;
	CU_ASSERT(_get_vol_size(chunk_size, backing_dev_size) < backing_dev_size);
}

void *
pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
	      size_t *mapped_lenp, int *is_pmemp)
{
	CU_ASSERT(g_volatile_pm_buf == NULL);
	snprintf(g_path, sizeof(g_path), "%s", path);
	*is_pmemp = 1;

	if (g_persistent_pm_buf == NULL) {
		g_persistent_pm_buf = calloc(1, len);
		g_persistent_pm_buf_len = len;
		SPDK_CU_ASSERT_FATAL(g_persistent_pm_buf != NULL);
	}

	*mapped_lenp = g_persistent_pm_buf_len;
	g_volatile_pm_buf = calloc(1, g_persistent_pm_buf_len);
	SPDK_CU_ASSERT_FATAL(g_volatile_pm_buf != NULL);
	memcpy(g_volatile_pm_buf, g_persistent_pm_buf, g_persistent_pm_buf_len);
	g_volatile_pm_buf_len = g_persistent_pm_buf_len;

	return g_volatile_pm_buf;
}

int
pmem_unmap(void *addr, size_t len)
{
	CU_ASSERT(addr == g_volatile_pm_buf);
	CU_ASSERT(len == g_volatile_pm_buf_len);
	free(g_volatile_pm_buf);
	g_volatile_pm_buf = NULL;
	g_volatile_pm_buf_len = 0;

	return 0;
}

static void
persistent_pm_buf_destroy(void)
{
	CU_ASSERT(g_persistent_pm_buf != NULL);
	free(g_persistent_pm_buf);
	g_persistent_pm_buf = NULL;
	g_persistent_pm_buf_len = 0;
}

static void
unlink_cb(void)
{
	persistent_pm_buf_destroy();
}

static void
init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}

static void
load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}

static void
unload_cb(void *cb_arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
init_failure(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	backing_dev.blocklen = 512;
	/* This blockcnt is too small for a reduce vol - there needs to be
	 *  enough space for at least REDUCE_NUM_EXTRA_CHUNKS + 1 chunks.
	 */
	backing_dev.blockcnt = 20;

	params.vol_size = 0;
	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = backing_dev.blocklen;
	params.logical_block_size = 512;

	/* backing_dev has an invalid size.  This should fail. */
	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);

	/* backing_dev now has a valid size, but it still has null
	 *  function pointers.  This should fail.
	 */
	backing_dev.blockcnt = 20000;

	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);
}

static void
backing_dev_readv_execute(struct spdk_reduce_backing_dev *backing_dev,
			  struct iovec *iov, int iovcnt,
			  uint64_t lba, uint32_t lba_count,
			  struct spdk_reduce_vol_cb_args *args)
{
	char *offset;
	int i;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	for (i = 0; i < iovcnt; i++) {
		memcpy(iov[i].iov_base, offset, iov[i].iov_len);
		offset += iov[i].iov_len;
	}
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_insert_io(enum ut_reduce_bdev_io_type type, struct spdk_reduce_backing_dev *backing_dev,
		      struct iovec *iov, int iovcnt, uint64_t lba, uint32_t lba_count,
		      struct spdk_reduce_vol_cb_args *args)
{
	struct ut_reduce_bdev_io *ut_bdev_io;

	ut_bdev_io = calloc(1, sizeof(*ut_bdev_io));
	SPDK_CU_ASSERT_FATAL(ut_bdev_io != NULL);

	ut_bdev_io->type = type;
	ut_bdev_io->backing_dev = backing_dev;
	ut_bdev_io->iov = iov;
	ut_bdev_io->iovcnt = iovcnt;
	ut_bdev_io->lba = lba;
	ut_bdev_io->lba_count = lba_count;
	ut_bdev_io->args = args;
	TAILQ_INSERT_TAIL(&g_pending_bdev_io, ut_bdev_io, link);
	g_pending_bdev_io_count++;
}

static void
backing_dev_readv(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_readv_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_READV, backing_dev, iov, iovcnt, lba, lba_count, args);
}

static void
backing_dev_writev_execute(struct spdk_reduce_backing_dev *backing_dev,
			   struct iovec *iov, int iovcnt,
			   uint64_t lba, uint32_t lba_count,
			   struct spdk_reduce_vol_cb_args *args)
{
	char *offset;
	int i;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	for (i = 0; i < iovcnt; i++) {
		memcpy(offset, iov[i].iov_base, iov[i].iov_len);
		offset += iov[i].iov_len;
	}
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_writev(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
		   uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_writev_execute(backing_dev, iov, iovcnt, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_WRITEV, backing_dev, iov, iovcnt, lba, lba_count, args);
}

static void
backing_dev_unmap_execute(struct spdk_reduce_backing_dev *backing_dev,
			  uint64_t lba, uint32_t lba_count,
			  struct spdk_reduce_vol_cb_args *args)
{
	char *offset;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	memset(offset, 0, lba_count * backing_dev->blocklen);
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_unmap(struct spdk_reduce_backing_dev *backing_dev,
		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	if (g_defer_bdev_io == false) {
		CU_ASSERT(g_pending_bdev_io_count == 0);
		CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
		backing_dev_unmap_execute(backing_dev, lba, lba_count, args);
		return;
	}

	backing_dev_insert_io(UT_REDUCE_IO_UNMAP, backing_dev, NULL, 0, lba, lba_count, args);
}

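/* Execute up to 'count' deferred backing-dev I/Os in FIFO order; count == 0
 * drains the entire pending queue.  Typical usage in a test (illustrative
 * sketch):
 *
 *     g_defer_bdev_io = true;
 *     spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
 *     CU_ASSERT(g_pending_bdev_io_count == 1);
 *     backing_dev_io_execute(0);
 *     CU_ASSERT(g_reduce_errno == 0);
 */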
static void
backing_dev_io_execute(uint32_t count)
{
	struct ut_reduce_bdev_io *ut_bdev_io;
	uint32_t done = 0;

	CU_ASSERT(g_defer_bdev_io == true);
	while (!TAILQ_EMPTY(&g_pending_bdev_io) && (count == 0 || done < count)) {
		ut_bdev_io = TAILQ_FIRST(&g_pending_bdev_io);
		TAILQ_REMOVE(&g_pending_bdev_io, ut_bdev_io, link);
		g_pending_bdev_io_count--;
		switch (ut_bdev_io->type) {
		case UT_REDUCE_IO_READV:
			backing_dev_readv_execute(ut_bdev_io->backing_dev,
						  ut_bdev_io->iov, ut_bdev_io->iovcnt,
						  ut_bdev_io->lba, ut_bdev_io->lba_count,
						  ut_bdev_io->args);
			break;
		case UT_REDUCE_IO_WRITEV:
			backing_dev_writev_execute(ut_bdev_io->backing_dev,
						   ut_bdev_io->iov, ut_bdev_io->iovcnt,
						   ut_bdev_io->lba, ut_bdev_io->lba_count,
						   ut_bdev_io->args);
			break;
		case UT_REDUCE_IO_UNMAP:
			backing_dev_unmap_execute(ut_bdev_io->backing_dev,
						  ut_bdev_io->lba, ut_bdev_io->lba_count,
						  ut_bdev_io->args);
			break;
		default:
			CU_ASSERT(false);
			break;
		}
		free(ut_bdev_io);
		done++;
	}
}

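/* Trivial run-length encoding used as the unit test "compression" algorithm:
 * the output is a sequence of (count, value) byte pairs, with runs capped at
 * UINT8_MAX.  For example, the 4-byte input {0xAA, 0xAA, 0xAA, 0xBB}
 * compresses to the 4-byte output {0x03, 0xAA, 0x01, 0xBB}.  Incompressible
 * data therefore doubles in size, which compress_algorithm() below relies on
 * to trigger -ENOSPC.
 */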
static int
ut_compress(char *outbuf, uint32_t *compressed_len, char *inbuf, uint32_t inbuflen)
{
	uint32_t len = 0;
	uint8_t count;
	char last;

	while (true) {
		if (inbuflen == 0) {
			*compressed_len = len;
			return 0;
		}

		if (*compressed_len < (len + 2)) {
			return -ENOSPC;
		}

		last = *inbuf;
		count = 1;
		inbuflen--;
		inbuf++;

		while (inbuflen > 0 && *inbuf == last && count < UINT8_MAX) {
			count++;
			inbuflen--;
			inbuf++;
		}

		outbuf[len] = count;
		outbuf[len + 1] = last;
		len += 2;
	}
}

static int
ut_decompress(uint8_t *outbuf, uint32_t *decompressed_len, uint8_t *inbuf, uint32_t inbuflen)
{
	uint32_t len = 0;

	SPDK_CU_ASSERT_FATAL(inbuflen % 2 == 0);

	while (true) {
		if (inbuflen == 0) {
			*decompressed_len = len;
			return 0;
		}

		if ((len + inbuf[0]) > *decompressed_len) {
			return -ENOSPC;
		}

		memset(outbuf, inbuf[1], inbuf[0]);
		outbuf += inbuf[0];
		len += inbuf[0];
		inbuflen -= 2;
		inbuf += 2;
	}
}

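/* Fill 'data' with runs of increasing byte values, each value repeated
 * 'repeat' times.  For example, ut_build_data_buffer(buf, 8, 0, 2) produces
 * {0, 0, 1, 1, 2, 2, 3, 3}; repeat == 1 yields data that is maximally
 * incompressible for the RLE scheme above.
 */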
static void
ut_build_data_buffer(uint8_t *data, uint32_t data_len, uint8_t init_val, uint32_t repeat)
{
	uint32_t _repeat = repeat;

	SPDK_CU_ASSERT_FATAL(repeat > 0);

	while (data_len > 0) {
		*data = init_val;
		data++;
		data_len--;
		_repeat--;
		if (_repeat == 0) {
			init_val++;
			_repeat = repeat;
		}
	}
}

static void
backing_dev_compress(struct spdk_reduce_backing_dev *backing_dev,
		     struct iovec *src_iov, int src_iovcnt,
		     struct iovec *dst_iov, int dst_iovcnt,
		     struct spdk_reduce_vol_cb_args *args)
{
	uint32_t compressed_len;
	uint64_t total_length = 0;
	char *buf = g_decomp_buf;
	int rc, i;

	CU_ASSERT(dst_iovcnt == 1);

	for (i = 0; i < src_iovcnt; i++) {
		memcpy(buf, src_iov[i].iov_base, src_iov[i].iov_len);
		buf += src_iov[i].iov_len;
		total_length += src_iov[i].iov_len;
	}

	compressed_len = dst_iov[0].iov_len;
	rc = ut_compress(dst_iov[0].iov_base, &compressed_len,
			 g_decomp_buf, total_length);

	args->cb_fn(args->cb_arg, rc ? rc : (int)compressed_len);
}

static void
backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
		       struct iovec *src_iov, int src_iovcnt,
		       struct iovec *dst_iov, int dst_iovcnt,
		       struct spdk_reduce_vol_cb_args *args)
{
	uint32_t decompressed_len = 0;
	char *buf = g_decomp_buf;
	int rc, i;

	CU_ASSERT(src_iovcnt == 1);

	for (i = 0; i < dst_iovcnt; i++) {
		decompressed_len += dst_iov[i].iov_len;
	}

	rc = ut_decompress(g_decomp_buf, &decompressed_len,
			   src_iov[0].iov_base, src_iov[0].iov_len);

	for (i = 0; i < dst_iovcnt; i++) {
		memcpy(dst_iov[i].iov_base, buf, dst_iov[i].iov_len);
		buf += dst_iov[i].iov_len;
	}

	args->cb_fn(args->cb_arg, rc ? rc : (int)decompressed_len);
}

static void
backing_dev_destroy(struct spdk_reduce_backing_dev *backing_dev)
{
	/* We don't free this during backing_dev_close so that we can test init/unload/load
	 *  scenarios.
	 */
	free(g_backing_dev_buf);
	free(g_decomp_buf);
	g_backing_dev_buf = NULL;
}

static void
backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce_vol_params *params,
		 uint32_t backing_blocklen)
{
	int64_t size;

	size = 4 * 1024 * 1024;
	backing_dev->blocklen = backing_blocklen;
	backing_dev->blockcnt = size / backing_dev->blocklen;
	backing_dev->readv = backing_dev_readv;
	backing_dev->writev = backing_dev_writev;
	backing_dev->unmap = backing_dev_unmap;
	backing_dev->compress = backing_dev_compress;
	backing_dev->decompress = backing_dev_decompress;
	backing_dev->sgl_in = true;
	backing_dev->sgl_out = true;

	g_decomp_buf = calloc(1, params->chunk_size);
	SPDK_CU_ASSERT_FATAL(g_decomp_buf != NULL);

	g_backing_dev_buf = calloc(1, size);
	SPDK_CU_ASSERT_FATAL(g_backing_dev_buf != NULL);
}

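/* The assertions in init_md() rely on the metadata layout libreduce persists:
 * an 8-byte SPDK_REDUCE_SIGNATURE at offset 0 followed by the
 * spdk_reduce_vol_params (together forming the superblock), with the logical
 * map and chunk maps starting right after the superblock and initialized to
 * REDUCE_EMPTY_MAP_ENTRY.
 */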
static void
init_md(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};
	struct spdk_uuid uuid;
	uint64_t *entry;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	/* Confirm that reduce persisted the params to metadata. */
	CU_ASSERT(memcmp(g_persistent_pm_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_persistent_pm_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Now confirm that contents of pm_file after the superblock have been initialized
	 *  to REDUCE_EMPTY_MAP_ENTRY.
	 */
	entry = (uint64_t *)(g_persistent_pm_buf + sizeof(struct spdk_reduce_vol_superblock));
	while (entry != (uint64_t *)(g_persistent_pm_buf + g_vol->pm_file.size)) {
		CU_ASSERT(*entry == REDUCE_EMPTY_MAP_ENTRY);
		entry++;
	}

	/* Check that the pm file path was constructed correctly.  It should be in
	 * the form:
	 * TEST_MD_PATH + "/" + <uuid string>
	 */
	CU_ASSERT(strncmp(&g_path[0], TEST_MD_PATH, strlen(TEST_MD_PATH)) == 0);
	CU_ASSERT(g_path[strlen(TEST_MD_PATH)] == '/');
	CU_ASSERT(spdk_uuid_parse(&uuid, &g_path[strlen(TEST_MD_PATH) + 1]) == 0);
	CU_ASSERT(spdk_uuid_compare(&uuid, spdk_reduce_vol_get_uuid(g_vol)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(g_volatile_pm_buf == NULL);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
_init_backing_dev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	/* Confirm that libreduce persisted the params to the backing device. */
	CU_ASSERT(memcmp(g_backing_dev_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_backing_dev_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Confirm that the path to the persistent memory metadata file was persisted to
	 *  the backing device.
	 */
	CU_ASSERT(strncmp(g_path,
			  g_backing_dev_buf + REDUCE_BACKING_DEV_PATH_OFFSET,
			  REDUCE_PATH_MAX) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
init_backing_dev(void)
{
	_init_backing_dev(512);
	_init_backing_dev(4096);
}

static void
_load(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	char pmem_file_path[REDUCE_PATH_MAX];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	memcpy(pmem_file_path, g_path, sizeof(pmem_file_path));

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(g_path, pmem_file_path, sizeof(pmem_file_path)) == 0);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
load(void)
{
	_load(512);
	_load(4096);
}

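/* Map a logical block offset to its chunk map index via the volume's logical
 * map.  For example (with the parameters these tests use), 512-byte logical
 * blocks and 16KB chunks give 32 logical blocks per chunk, so offsets 0-31
 * resolve through logical map entry 0, offsets 32-63 through entry 1, etc.
 */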
static uint64_t
_vol_get_chunk_map_index(struct spdk_reduce_vol *vol, uint64_t offset)
{
	uint64_t logical_map_index = offset / vol->logical_blocks_per_chunk;

	return vol->pm_logical_map[logical_map_index];
}

static void
write_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
read_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

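/* Verify chunk map accounting across overwrites.  libreduce never updates a
 * chunk map in place: a full-chunk overwrite allocates a new chunk map and new
 * backing io units, points the logical map at the new chunk map, and only then
 * releases the old chunk map and its backing io units back to the allocation
 * bit arrays - which is exactly what the assertions below walk through.
 */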
static void
_write_maps(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	const int bufsize = 16 * 1024; /* chunk size */
	char buf[bufsize];
	uint32_t num_lbas, i;
	uint64_t old_chunk0_map_index, new_chunk0_map_index;
	struct spdk_reduce_chunk_map *old_chunk0_map, *new_chunk0_map;

	params.chunk_size = bufsize;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	num_lbas = bufsize / params.logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	for (i = 0; i < g_vol->params.vol_size / g_vol->params.chunk_size; i++) {
		CU_ASSERT(_vol_get_chunk_map_index(g_vol, i) == REDUCE_EMPTY_MAP_ENTRY);
	}

	ut_build_data_buffer(buf, bufsize, 0x00, 1);
	iov.iov_base = buf;
	iov.iov_len = bufsize;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	old_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(old_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == true);

	old_chunk0_map = _reduce_vol_get_chunk_map(g_vol, old_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(old_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, num_lbas, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	new_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(new_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(new_chunk0_map_index != old_chunk0_map_index);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, new_chunk0_map_index) == true);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == false);

	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     old_chunk0_map->io_unit_index[i]) == false);
	}

	new_chunk0_map = _reduce_vol_get_chunk_map(g_vol, new_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(new_chunk0_map->io_unit_index[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units,
					     new_chunk0_map->io_unit_index[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
write_maps(void)
{
	_write_maps(512);
	_write_maps(4096);
}

static void
_read_write(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	char buf[16 * 1024]; /* chunk size */
	char compare_buf[16 * 1024];
	uint32_t i;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 2 512-byte logical blocks, starting at LBA 2. */
	memset(buf, 0xAA, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xAA, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	/* Overwrite what we just wrote with 0xCC */
	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	memset(buf, 0xCC, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xCC, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;

	/* Write 0xBB to 2 512-byte logical blocks, starting at LBA 37.
	 * This is writing into the second chunk of the volume.  It also
	 * implicitly checks that we reloaded the bit arrays correctly -
	 * making sure we don't use the first chunk map again for this new
	 * write; the first chunk map was already used by the write from
	 * before we unloaded and reloaded.
	 */
	memset(buf, 0xBB, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 37, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	for (i = 0; i < 2 * params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			memset(compare_buf, 0xCC, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		case 37:
		case 38:
			memset(compare_buf, 0xBB, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
read_write(void)
{
	_read_write(512);
	_read_write(4096);
}

static void
_readv_writev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov[REDUCE_MAX_IOVECS + 1];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, iov, REDUCE_MAX_IOVECS + 1, 2, REDUCE_MAX_IOVECS + 1, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
readv_writev(void)
{
	_readv_writev(512);
	_readv_writev(4096);
}

static void
destroy_cb(void *ctx, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
destroy(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = -1;
	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	spdk_reduce_vol_destroy(&backing_dev, destroy_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = 0;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EILSEQ);

	backing_dev_destroy(&backing_dev);
}

/* This test primarily checks that the reduce unit test infrastructure for asynchronous
 * backing device I/O operations is working correctly.
 */
static void
defer_bdev_io(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[logical_block_size];
	char compare_buf[logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
	 * very compressible.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	backing_dev_io_execute(0);
	CU_ASSERT(TAILQ_EMPTY(&g_pending_bdev_io));
	CU_ASSERT(g_reduce_errno == 0);

	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 1, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, sizeof(buf)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
overlapped(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	const uint32_t logical_block_size = 512;
	struct iovec iov;
	char buf[2 * logical_block_size];
	char compare_buf[2 * logical_block_size];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = logical_block_size;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 1 512-byte logical block. */
	memset(buf, 0xAA, logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = logical_block_size;
	g_reduce_errno = -100;
	g_defer_bdev_io = true;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* We wrote to just 512 bytes of one chunk which was previously unallocated.  This
	 * should result in 1 pending I/O since the rest of this chunk will be zeroes and
	 * very compressible.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	/* Now do an overlapped I/O to the same chunk. */
	spdk_reduce_vol_writev(g_vol, &iov, 1, 1, 1, write_cb, NULL);
	/* Callback should not have executed, so this should still equal -100. */
	CU_ASSERT(g_reduce_errno == -100);
	CU_ASSERT(!TAILQ_EMPTY(&g_pending_bdev_io));
	/* The second I/O overlaps with the first one.  So we should only see pending bdev_io
	 * related to the first I/O here - the second one won't start until the first one is completed.
	 */
	CU_ASSERT(g_pending_bdev_io_count == 1);

	backing_dev_io_execute(0);
	CU_ASSERT(g_reduce_errno == 0);

	g_defer_bdev_io = false;
	memset(compare_buf, 0xAA, sizeof(compare_buf));
	memset(buf, 0xFF, sizeof(buf));
	iov.iov_base = buf;
	iov.iov_len = 2 * logical_block_size;
	g_reduce_errno = -100;
	spdk_reduce_vol_readv(g_vol, &iov, 1, 0, 2, read_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(memcmp(buf, compare_buf, 2 * logical_block_size) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

#define BUFSIZE 4096

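/* Exercise the RLE helpers directly.  The arithmetic behind the assertions:
 * a 255-byte run compresses to one (count, value) pair; 256 bytes of the same
 * value need two pairs (255 + 1); and 2048 distinct bytes (repeat == 1) become
 * 2048 pairs = 4096 bytes, exactly filling BUFSIZE, so compressing 2049 input
 * bytes must fail with -ENOSPC.
 */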
static void
compress_algorithm(void)
{
	uint8_t original_data[BUFSIZE];
	uint8_t compressed_data[BUFSIZE];
	uint8_t decompressed_data[BUFSIZE];
	uint32_t compressed_len, decompressed_len;
	int rc;

	ut_build_data_buffer(original_data, BUFSIZE, 0xAA, BUFSIZE);
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 2);
	CU_ASSERT(compressed_data[0] == UINT8_MAX);
	CU_ASSERT(compressed_data[1] == 0xAA);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == UINT8_MAX);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, UINT8_MAX + 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 4);
	CU_ASSERT(compressed_data[0] == UINT8_MAX);
	CU_ASSERT(compressed_data[1] == 0xAA);
	CU_ASSERT(compressed_data[2] == 1);
	CU_ASSERT(compressed_data[3] == 0xAA);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == UINT8_MAX + 1);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	ut_build_data_buffer(original_data, BUFSIZE, 0x00, 1);
	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, 2048);
	CU_ASSERT(rc == 0);
	CU_ASSERT(compressed_len == 4096);
	CU_ASSERT(compressed_data[0] == 1);
	CU_ASSERT(compressed_data[1] == 0);
	CU_ASSERT(compressed_data[4094] == 1);
	CU_ASSERT(compressed_data[4095] == 0xFF);

	decompressed_len = sizeof(decompressed_data);
	rc = ut_decompress(decompressed_data, &decompressed_len, compressed_data, compressed_len);
	CU_ASSERT(rc == 0);
	CU_ASSERT(decompressed_len == 2048);
	CU_ASSERT(memcmp(original_data, decompressed_data, decompressed_len) == 0);

	compressed_len = sizeof(compressed_data);
	rc = ut_compress(compressed_data, &compressed_len, original_data, 2049);
	CU_ASSERT(rc == -ENOSPC);
}

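/* _prepare_compress_chunk() builds req->decomp_iov so that it always describes
 * a full chunk: the user's iovs are placed at their byte offset within the
 * chunk and any leading/trailing gap is filled from req->decomp_buf (the
 * read-modify-write data) or, when the rest of the chunk is known to be zero,
 * from g_zero_buf.  If the backing dev cannot accept SGLs (sgl_in == false) or
 * the user buffer is unsuitable (e.g. crosses a 2MB page boundary), the user
 * data is instead copied into the single decomp_buf.  The cases below cover
 * those combinations.
 */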
static void
test_prepare_compress_chunk(void)
{
	struct spdk_reduce_vol vol = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct spdk_reduce_vol_request req = {};
	void *buf;
	char *buffer_end, *aligned_user_buffer, *unaligned_user_buffer;
	char decomp_buffer[16 * 1024] = {};
	char comp_buffer[16 * 1024] = {};
	struct iovec user_iov[2] = {};
	size_t user_buffer_iov_len = 8192;
	size_t remainder_bytes;
	size_t offset_bytes;
	size_t memcmp_offset;
	uint32_t i;

	vol.params.chunk_size = 16 * 1024;
	vol.params.backing_io_unit_size = 4096;
	vol.params.logical_block_size = 512;
	backing_dev_init(&backing_dev, &vol.params, 512);
	vol.backing_dev = &backing_dev;
	vol.logical_blocks_per_chunk = vol.params.chunk_size / vol.params.logical_block_size;

	/* Allocate 1 extra byte to test the case where the buffer crosses a huge page boundary. */
	SPDK_CU_ASSERT_FATAL(posix_memalign(&buf, VALUE_2MB, VALUE_2MB + 1) == 0);
	buffer_end = (char *)buf + VALUE_2MB + 1;
	aligned_user_buffer = (char *)buf;
	memset(aligned_user_buffer, 0xc, vol.params.chunk_size);
	unaligned_user_buffer = buffer_end - vol.params.chunk_size;
	memset(unaligned_user_buffer, 0xc, vol.params.chunk_size);

	req.vol = &vol;
	req.decomp_buf = decomp_buffer;
	req.comp_buf = comp_buffer;
	req.iov = user_iov;
	req.iovcnt = 2;
	req.offset = 0;

	/* Part 1 - backing dev supports sgl_in */
	/* Test 1 - total length of the user's buffers equals chunk_size */
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
	}

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 2);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 2);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}

	/* Test 2 - user's buffers are smaller than chunk_size, no offset */
	user_buffer_iov_len = 4096;
	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
	}

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 3);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(req.decomp_iov[i].iov_base == req.decomp_buf + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 3);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(req.decomp_iov[i].iov_base == g_zero_buf + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);

	/* Test 3 - user's buffers are smaller than chunk_size, non-zero offset */
	user_buffer_iov_len = 4096;
	req.offset = 3;
	offset_bytes = req.offset * vol.params.logical_block_size;
	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 4);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(req.decomp_iov[3].iov_base == req.decomp_buf + offset_bytes + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 4);
	CU_ASSERT(req.decomp_iov[0].iov_base == g_zero_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(req.decomp_iov[3].iov_base == g_zero_buf + offset_bytes + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);

	/* Part 2 - backing dev doesn't support sgl_in */
	/* Test 1 - total length of the user's buffers equals chunk_size;
	 * user's buffers are copied */
	vol.backing_dev->sgl_in = false;
	req.offset = 0;
	user_buffer_iov_len = 8192;
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
	}

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base, req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + req.iov[0].iov_len, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base, req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + req.iov[0].iov_len, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);

	/* Test 2 - single user buffer whose length equals chunk_size but is not aligned;
	 * user's buffer is copied */
	req.iov[0].iov_base = unaligned_user_buffer;
	req.iov[0].iov_len = vol.params.chunk_size;
	req.iovcnt = 1;
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);

	/* Test 3 - single user buffer whose length equals chunk_size;
	 * user's buffer is not copied */
	req.iov[0].iov_base = aligned_user_buffer;
	req.iov[0].iov_len = vol.params.chunk_size;
	req.iovcnt = 1;
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);

	/* Test 4 - user's buffers are smaller than chunk_size, no offset;
	 * user's buffers are copied */
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	user_buffer_iov_len = 4096;
	req.iovcnt = 2;
	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
	}

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	memcmp_offset = 0;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	memcmp_offset += req.iov[0].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	memcmp_offset += req.iov[1].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf + memcmp_offset,
			 remainder_bytes) == 0);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	memcmp_offset = 0;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	memcmp_offset += req.iov[0].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	memcmp_offset += req.iov[1].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf + memcmp_offset,
			 remainder_bytes) == 0);

	/* Test 5 - user's buffers are smaller than chunk_size, non-zero offset;
	 * user's buffers are copied */
	req.offset = 3;
	offset_bytes = req.offset * vol.params.logical_block_size;
	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	memcmp_offset = 0;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf, offset_bytes) == 0);
	memcmp_offset += offset_bytes;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	memcmp_offset += req.iov[0].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	memcmp_offset += req.iov[1].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf + memcmp_offset,
			 remainder_bytes) == 0);

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);

	_prepare_compress_chunk(&req, true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	memcmp_offset = 0;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf, offset_bytes) == 0);
	memcmp_offset += offset_bytes;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	memcmp_offset += req.iov[0].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	memcmp_offset += req.iov[1].iov_len;
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf + memcmp_offset,
			 remainder_bytes) == 0);

	free(buf);
}

static void
_reduce_vol_op_complete(void *ctx, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
dummy_backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
			     struct iovec *src_iov, int src_iovcnt,
			     struct iovec *dst_iov, int dst_iovcnt,
			     struct spdk_reduce_vol_cb_args *args)
{
	args->cb_fn(args->cb_arg, g_decompressed_len);
}
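
/* Counterpart of test_prepare_compress_chunk() for the read path:
 * _reduce_vol_decompress_chunk() builds req->decomp_iov around the user's iovs
 * when the backing dev supports sgl_out, and otherwise decompresses into
 * decomp_buf and copies afterwards (req->copy_after_decompress).
 * dummy_backing_dev_decompress() above completes the operation immediately,
 * reporting g_decompressed_len as the result.
 */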
1551 static void test_reduce_decompress_chunk(void)
1552 {
1553 	struct spdk_reduce_vol vol = {};
1554 	struct spdk_reduce_backing_dev backing_dev = {};
1555 	struct spdk_reduce_vol_request req = {};
1556 	void *buf;
1557 	char *buffer_end, *aligned_user_buffer, *unaligned_user_buffer;
1558 	char decomp_buffer[16 * 1024] = {};
1559 	char comp_buffer[16 * 1024] = {};
1560 	struct iovec user_iov[2] = {};
1561 	struct iovec comp_buf_iov = {};
1562 	struct spdk_reduce_chunk_map chunk = {};
1563 	size_t user_buffer_iov_len = 8192;
1564 	size_t remainder_bytes;
1565 	size_t offset_bytes;
1566 	uint32_t i;
1567 
1568 	vol.params.chunk_size = 16 * 1024;
1569 	vol.params.backing_io_unit_size = 4096;
1570 	vol.params.logical_block_size = 512;
1571 	backing_dev_init(&backing_dev, &vol.params, 512);
1572 	backing_dev.decompress = dummy_backing_dev_decompress;
1573 	vol.backing_dev = &backing_dev;
1574 	vol.logical_blocks_per_chunk = vol.params.chunk_size / vol.params.logical_block_size;
1575 	TAILQ_INIT(&vol.executing_requests);
1576 	TAILQ_INIT(&vol.queued_requests);
1577 	TAILQ_INIT(&vol.free_requests);
1578 
1579 	/* Allocate 1 extra byte to test a case when buffer crosses huge page boundary */
1580 	SPDK_CU_ASSERT_FATAL(posix_memalign(&buf, VALUE_2MB, VALUE_2MB + 1) == 0);
1581 	buffer_end = (char *)buf + VALUE_2MB + 1;
1582 	aligned_user_buffer = (char *)buf;
1583 	unaligned_user_buffer = buffer_end - vol.params.chunk_size;
1584 
1585 	chunk.compressed_size = user_buffer_iov_len / 2;
	req.chunk = &chunk;
	req.vol = &vol;
	req.decomp_buf = decomp_buffer;
	req.comp_buf = comp_buffer;
	req.comp_buf_iov = &comp_buf_iov;
	req.iov = user_iov;
	req.iovcnt = 2;
	req.offset = 0;
	req.cb_fn = _reduce_vol_op_complete;

	/* Part 1 - backing dev supports sgl_out */
	/* Test 1 - the total length of the user's buffers equals chunk_size */
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
		memset(req.iov[i].iov_base, 0, req.iov[i].iov_len);
	}
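	/* The request must sit on executing_requests before completion: the
	 * completion path removes it from that list and returns it to
	 * free_requests, which the TAILQ asserts below verify.
	 */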
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	g_reduce_errno = -1;
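	/* The stub completes with g_decompressed_len as its status, so a full
	 * chunk_size here models a successful whole-chunk decompress.
	 */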
	g_decompressed_len = vol.params.chunk_size;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(req.copy_after_decompress == false);
	CU_ASSERT(req.decomp_iovcnt == 2);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	/* Test 2 - the user's buffers are smaller than chunk_size, no offset */
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	g_reduce_errno = -1;
	user_buffer_iov_len = 4096;
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
		memset(req.iov[i].iov_base, 0, req.iov[i].iov_len);
	}
	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
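	/* 16384 - 2 * 4096 leaves 8192 tail bytes with no user buffer behind
	 * them, so the volume must append a third iov pointing into decomp_buf
	 * to receive that tail.
	 */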

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(req.copy_after_decompress == false);
	CU_ASSERT(req.decomp_iovcnt == 3);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(req.decomp_iov[i].iov_base == req.decomp_buf + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	/* Test 3 - the user's buffers are smaller than chunk_size, non-zero offset */
	req.offset = 3;
	offset_bytes = req.offset * vol.params.logical_block_size;
	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
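	/* offset_bytes = 3 * 512 = 1536 and remainder_bytes = 16384 - 1536 -
	 * 2 * 4096 = 6656, so the expected layout is four iovs:
	 * [decomp_buf head][iov[0]][iov[1]][decomp_buf tail].
	 */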
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	g_reduce_errno = -1;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(req.copy_after_decompress == false);
	CU_ASSERT(req.decomp_iovcnt == 4);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(req.decomp_iov[3].iov_base == req.decomp_buf + offset_bytes + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	/* Part 2 - backing dev doesn't support sgl_out */
	/* Test 1 - the total length of the user's buffers equals chunk_size;
	 * the user's buffers are copied */
	vol.backing_dev->sgl_out = false;
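	/* Without sgl_out the backing device can only write its decompressed
	 * output to a single contiguous buffer, so the volume decompresses
	 * into decomp_buf and copies the data out to the user's iovs on
	 * completion (copy_after_decompress == true below).
	 */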
	req.offset = 0;
	user_buffer_iov_len = 8192;

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
	}
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	g_reduce_errno = -1;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(req.copy_after_decompress == true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base, req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.iov[1].iov_base, req.decomp_iov[0].iov_base + req.iov[0].iov_len,
			 req.iov[1].iov_len) == 0);
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	/* Test 2 - a single user buffer whose length equals chunk_size but
	 * crosses a huge page boundary; the user's buffer is copied */
	memset(unaligned_user_buffer, 0xc, vol.params.chunk_size);
	req.iov[0].iov_base = unaligned_user_buffer;
	req.iov[0].iov_len = vol.params.chunk_size;
	req.iovcnt = 1;
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	g_reduce_errno = -1;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(req.copy_after_decompress == true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base,
			 req.iov[0].iov_len) == 0);

	/* Test 3 - a single aligned user buffer whose length equals chunk_size;
	 * the user's buffer is used directly, without a copy */
	req.iov[0].iov_base = aligned_user_buffer;
	req.iov[0].iov_len = vol.params.chunk_size;
	req.iovcnt = 1;
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	g_reduce_errno = -1;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(req.copy_after_decompress == false);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);

	/* Test 4 - the user's buffers are smaller than chunk_size, no offset;
	 * the user's buffers are copied */
	user_buffer_iov_len = 4096;
	req.iovcnt = 2;
	remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
	}

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	g_reduce_errno = -1;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(req.copy_after_decompress == true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.iov[1].iov_base, req.decomp_iov[0].iov_base + req.iov[0].iov_len,
			 req.iov[1].iov_len) == 0);
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	/* Test 5 - the user's buffers are smaller than chunk_size, non-zero offset;
	 * the user's buffers are copied */
	req.offset = 3;
	offset_bytes = req.offset * vol.params.logical_block_size;
	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;

	for (i = 0; i < 2; i++) {
		req.iov[i].iov_base = aligned_user_buffer + i * user_buffer_iov_len;
		req.iov[i].iov_len = user_buffer_iov_len;
		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
	}

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	g_reduce_errno = -1;

	_prepare_compress_chunk(&req, false);
	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(req.copy_after_decompress == true);
	CU_ASSERT(req.decomp_iovcnt == 1);
	CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
	CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + offset_bytes, req.iov[0].iov_base,
			 req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + offset_bytes + req.iov[0].iov_len,
			 req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	free(buf);
}

static void
test_allocate_vol_requests(void)
{
	struct spdk_reduce_vol *vol;
	/* include chunk_sizes that are not a power of 2 */
	uint32_t chunk_sizes[] = {8192, 8320, 16384, 16416, 32768};
	uint32_t io_unit_sizes[] = {512, 520, 4096, 4104, 4096};
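	/* Each pair divides evenly: 8192/512 and 8320/520 give 16 units per
	 * chunk, 16384/4096 and 16416/4104 give 4, and 32768/4096 gives 8.
	 * The 520 and 4104 sizes model block sizes carrying 8 bytes of
	 * per-block metadata.
	 */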
	uint32_t i;

	/* Exercise every (chunk_size, io_unit_size) pair; the original loop
	 * bound of 4 skipped the last entry.
	 */
	for (i = 0; i < SPDK_COUNTOF(chunk_sizes); i++) {
		vol = calloc(1, sizeof(*vol));
		SPDK_CU_ASSERT_FATAL(vol);

		vol->params.chunk_size = chunk_sizes[i];
		vol->params.logical_block_size = io_unit_sizes[i];
		vol->params.backing_io_unit_size = io_unit_sizes[i];
		vol->backing_io_units_per_chunk = vol->params.chunk_size / vol->params.backing_io_unit_size;
		vol->logical_blocks_per_chunk = vol->params.chunk_size / vol->params.logical_block_size;

		CU_ASSERT(_validate_vol_params(&vol->params) == 0);
		CU_ASSERT(_allocate_vol_requests(vol) == 0);
		_init_load_cleanup(vol, NULL);
	}
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("reduce", NULL, NULL);

	CU_ADD_TEST(suite, get_pm_file_size);
	CU_ADD_TEST(suite, get_vol_size);
	CU_ADD_TEST(suite, init_failure);
	CU_ADD_TEST(suite, init_md);
	CU_ADD_TEST(suite, init_backing_dev);
	CU_ADD_TEST(suite, load);
	CU_ADD_TEST(suite, write_maps);
	CU_ADD_TEST(suite, read_write);
	CU_ADD_TEST(suite, readv_writev);
	CU_ADD_TEST(suite, destroy);
	CU_ADD_TEST(suite, defer_bdev_io);
	CU_ADD_TEST(suite, overlapped);
	CU_ADD_TEST(suite, compress_algorithm);
	CU_ADD_TEST(suite, test_prepare_compress_chunk);
	CU_ADD_TEST(suite, test_reduce_decompress_chunk);
	CU_ADD_TEST(suite, test_allocate_vol_requests);

	g_unlink_path = g_path;
	g_unlink_callback = unlink_cb;

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}