/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "reduce/reduce.c"
#include "spdk_internal/mock.h"
#include "common/lib/test_env.c"

static struct spdk_reduce_vol *g_vol;
static int g_reduce_errno;
static char *g_volatile_pm_buf;
static size_t g_volatile_pm_buf_len;
static char *g_persistent_pm_buf;
static size_t g_persistent_pm_buf_len;
static char *g_backing_dev_buf;
static char g_path[REDUCE_PATH_MAX];

#define TEST_MD_PATH "/tmp"

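/*
 * libpmem is mocked out below: pmem_map_file() hands back g_volatile_pm_buf as the
 * "mapped" region, g_persistent_pm_buf models what has actually been made durable,
 * and pmem_persist()/pmem_msync() copy the flushed range from the volatile buffer
 * into the persistent one.  This lets the tests inspect exactly what libreduce has
 * persisted at any point without touching real persistent memory.
 */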
static void
sync_pm_buf(const void *addr, size_t length)
{
	uint64_t offset = (char *)addr - g_volatile_pm_buf;

	memcpy(&g_persistent_pm_buf[offset], addr, length);
}

int
pmem_msync(const void *addr, size_t length)
{
	sync_pm_buf(addr, length);
	return 0;
}

void
pmem_persist(const void *addr, size_t len)
{
	sync_pm_buf(addr, len);
}

static void
get_pm_file_size(void)
{
	struct spdk_reduce_vol_params params;
	uint64_t pm_size, expected_pm_size;

	params.backing_io_unit_size = 4096;
	params.chunk_size = 4096 * 4;
	params.vol_size = 4096 * 4 * 100;

	pm_size = _get_pm_file_size(&params);
	expected_pm_size = sizeof(struct spdk_reduce_vol_superblock);
	/* 100 chunks in logical map * 8 bytes per chunk */
	expected_pm_size += 100 * sizeof(uint64_t);
	/* 100 chunks * 4 backing io units per chunk * 8 bytes per backing io unit */
	expected_pm_size += 100 * 4 * sizeof(uint64_t);
	/* reduce also allocates some extra chunks for in-flight writes when the logical
	 * map is full.  REDUCE_NUM_EXTRA_CHUNKS is a private #define in reduce.c.
	 */
	expected_pm_size += REDUCE_NUM_EXTRA_CHUNKS * 4 * sizeof(uint64_t);
	/* reduce will add some padding so numbers may not match exactly.  Make sure
	 * they are close though.
	 */
	CU_ASSERT((pm_size - expected_pm_size) < REDUCE_PM_SIZE_ALIGNMENT);
}

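/*
 * The usable volume size must be strictly smaller than the backing device size,
 * since libreduce reserves part of the backing device for its own metadata.
 */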
static void
get_vol_size(void)
{
	uint64_t chunk_size, backing_dev_size;

	chunk_size = 16 * 1024;
	backing_dev_size = 16 * 1024 * 1000;
	CU_ASSERT(_get_vol_size(chunk_size, backing_dev_size) < backing_dev_size);
}

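/*
 * Stub for libpmem's pmem_map_file().  If a simulated persistent buffer already
 * exists (i.e. the pm file is being reopened), its contents are copied into a fresh
 * volatile mapping; otherwise a new zeroed persistent buffer is allocated.
 */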
void *
pmem_map_file(const char *path, size_t len, int flags, mode_t mode,
	      size_t *mapped_lenp, int *is_pmemp)
{
	CU_ASSERT(g_volatile_pm_buf == NULL);
	snprintf(g_path, sizeof(g_path), "%s", path);
	*is_pmemp = 1;

	if (g_persistent_pm_buf == NULL) {
		g_persistent_pm_buf = calloc(1, len);
		g_persistent_pm_buf_len = len;
		SPDK_CU_ASSERT_FATAL(g_persistent_pm_buf != NULL);
	}

	*mapped_lenp = g_persistent_pm_buf_len;
	g_volatile_pm_buf = calloc(1, g_persistent_pm_buf_len);
	SPDK_CU_ASSERT_FATAL(g_volatile_pm_buf != NULL);
	memcpy(g_volatile_pm_buf, g_persistent_pm_buf, g_persistent_pm_buf_len);
	g_volatile_pm_buf_len = g_persistent_pm_buf_len;

	return g_volatile_pm_buf;
}

int
pmem_unmap(void *addr, size_t len)
{
	CU_ASSERT(addr == g_volatile_pm_buf);
	CU_ASSERT(len == g_volatile_pm_buf_len);
	free(g_volatile_pm_buf);
	g_volatile_pm_buf = NULL;
	g_volatile_pm_buf_len = 0;

	return 0;
}

static void
persistent_pm_buf_destroy(void)
{
	CU_ASSERT(g_persistent_pm_buf != NULL);
	free(g_persistent_pm_buf);
	g_persistent_pm_buf = NULL;
	g_persistent_pm_buf_len = 0;
}

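/*
 * unlink() calls made by reduce.c are redirected here (via the linker's
 * --wrap=unlink mechanism in the unit test build), so deleting the pm file
 * simply frees the simulated persistent buffer.
 */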
int __wrap_unlink(const char *path);

int
__wrap_unlink(const char *path)
{
	if (strcmp(g_path, path) != 0) {
		return ENOENT;
	}

	persistent_pm_buf_destroy();
	return 0;
}

static void
init_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}

static void
load_cb(void *cb_arg, struct spdk_reduce_vol *vol, int reduce_errno)
{
	g_vol = vol;
	g_reduce_errno = reduce_errno;
}

static void
unload_cb(void *cb_arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
init_failure(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	backing_dev.blocklen = 512;
	/* This blockcnt is too small for a reduce vol - there needs to be
	 *  enough space for at least REDUCE_NUM_EXTRA_CHUNKS + 1 chunks.
	 */
	backing_dev.blockcnt = 20;

	params.vol_size = 0;
	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = backing_dev.blocklen;
	params.logical_block_size = 512;

	/* backing_dev has an invalid size.  This should fail. */
	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);

	/* backing_dev now has a valid size, but its function pointers are still
	 *  NULL.  This should fail.
	 */
	backing_dev.blockcnt = 20000;

	g_vol = NULL;
	g_reduce_errno = 0;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_vol == NULL);
}

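/*
 * The backing device is mocked as a flat in-memory buffer (g_backing_dev_buf).
 * The readv/writev/unmap callbacks below index it by lba * blocklen and complete
 * synchronously through the supplied callback, so the tests never need an event
 * loop or a real bdev.
 */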
static void
backing_dev_readv(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	char *offset;
	int i;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	for (i = 0; i < iovcnt; i++) {
		memcpy(iov[i].iov_base, offset, iov[i].iov_len);
		offset += iov[i].iov_len;
	}
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_writev(struct spdk_reduce_backing_dev *backing_dev, struct iovec *iov, int iovcnt,
		   uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	char *offset;
	int i;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	for (i = 0; i < iovcnt; i++) {
		memcpy(offset, iov[i].iov_base, iov[i].iov_len);
		offset += iov[i].iov_len;
	}
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_unmap(struct spdk_reduce_backing_dev *backing_dev,
		  uint64_t lba, uint32_t lba_count, struct spdk_reduce_vol_cb_args *args)
{
	char *offset;

	offset = g_backing_dev_buf + lba * backing_dev->blocklen;
	memset(offset, 0, lba_count * backing_dev->blocklen);
	args->cb_fn(args->cb_arg, 0);
}

static void
backing_dev_destroy(struct spdk_reduce_backing_dev *backing_dev)
{
	/* We don't free this during backing_dev_close so that we can test init/unload/load
	 *  scenarios.
	 */
	free(g_backing_dev_buf);
	g_backing_dev_buf = NULL;
}

static void
backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce_vol_params *params,
		 uint32_t backing_blocklen)
{
	int64_t size;

	size = 4 * 1024 * 1024;
	backing_dev->blocklen = backing_blocklen;
	backing_dev->blockcnt = size / backing_dev->blocklen;
	backing_dev->readv = backing_dev_readv;
	backing_dev->writev = backing_dev_writev;
	backing_dev->unmap = backing_dev_unmap;

	g_backing_dev_buf = calloc(1, size);
	SPDK_CU_ASSERT_FATAL(g_backing_dev_buf != NULL);
}

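/*
 * Verify that spdk_reduce_vol_init() lays out the pm file correctly: the signature
 * and volume params land at the start of the file, everything after the superblock
 * is initialized to REDUCE_EMPTY_MAP_ENTRY, and the pm file path has the form
 * TEST_MD_PATH + "/" + <uuid string>.
 */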
static void
init_md(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};
	struct spdk_uuid uuid;
	uint64_t *entry;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	/* Confirm that reduce persisted the params to metadata. */
	CU_ASSERT(memcmp(g_persistent_pm_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_persistent_pm_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Now confirm that contents of pm_file after the superblock have been initialized
	 *  to REDUCE_EMPTY_MAP_ENTRY.
	 */
	entry = (uint64_t *)(g_persistent_pm_buf + sizeof(struct spdk_reduce_vol_superblock));
	while (entry != (uint64_t *)(g_persistent_pm_buf + g_vol->pm_file.size)) {
		CU_ASSERT(*entry == REDUCE_EMPTY_MAP_ENTRY);
		entry++;
	}

	/* Check that the pm file path was constructed correctly.  It should be in
	 * the form:
	 * TEST_MD_PATH + "/" + <uuid string>
	 */
	CU_ASSERT(strncmp(&g_path[0], TEST_MD_PATH, strlen(TEST_MD_PATH)) == 0);
	CU_ASSERT(g_path[strlen(TEST_MD_PATH)] == '/');
	CU_ASSERT(spdk_uuid_parse(&uuid, &g_path[strlen(TEST_MD_PATH) + 1]) == 0);
	CU_ASSERT(spdk_uuid_compare(&uuid, spdk_reduce_vol_get_uuid(g_vol)) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	CU_ASSERT(g_volatile_pm_buf == NULL);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

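/*
 * Verify that spdk_reduce_vol_init() also persists the signature, the volume params,
 * and the pm file path into the superblock at the start of the backing device.
 */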
static void
_init_backing_dev(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_vol_params *persistent_params;
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	/* Confirm that libreduce persisted the params to the backing device. */
	CU_ASSERT(memcmp(g_backing_dev_buf, SPDK_REDUCE_SIGNATURE, 8) == 0);
	persistent_params = (struct spdk_reduce_vol_params *)(g_backing_dev_buf + 8);
	CU_ASSERT(memcmp(persistent_params, &params, sizeof(params)) == 0);
	/* Confirm that the path to the persistent memory metadata file was persisted to
	 *  the backing device.
	 */
	CU_ASSERT(strncmp(g_path,
			  g_backing_dev_buf + REDUCE_BACKING_DEV_PATH_OFFSET,
			  REDUCE_PATH_MAX) == 0);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
init_backing_dev(void)
{
	_init_backing_dev(512);
	_init_backing_dev(4096);
}

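/*
 * Initialize a volume, unload it, then load it again from the same backing device
 * and verify that the volume params and the pm file path are recovered from the
 * superblock.
 */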
static void
_load(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	char pmem_file_path[REDUCE_PATH_MAX];

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(TEST_MD_PATH, g_path, strlen(TEST_MD_PATH)) == 0);
	memcpy(pmem_file_path, g_path, sizeof(pmem_file_path));

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	memset(g_path, 0, sizeof(g_path));
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(strncmp(g_path, pmem_file_path, sizeof(pmem_file_path)) == 0);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
load(void)
{
	_load(512);
	_load(4096);
}

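/*
 * Helpers for peeking into the volume's persistent metadata: the logical map holds
 * one entry per chunk of logical volume space (either a chunk map index or
 * REDUCE_EMPTY_MAP_ENTRY), and each chunk map holds one backing io unit index per
 * backing io unit in the chunk.
 */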
static uint64_t
_vol_get_chunk_map_index(struct spdk_reduce_vol *vol, uint64_t offset)
{
	uint64_t logical_map_index = offset / vol->logical_blocks_per_chunk;

	return vol->pm_logical_map[logical_map_index];
}

static uint64_t *
_vol_get_chunk_map(struct spdk_reduce_vol *vol, uint64_t chunk_map_index)
{
	return &vol->pm_chunk_maps[chunk_map_index * vol->backing_io_units_per_chunk];
}

static void
write_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

static void
read_cb(void *arg, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

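/*
 * Write the same chunk twice and verify that libreduce does not overwrite a chunk in
 * place: the second write must allocate a new chunk map and new backing io units,
 * and the ones used by the first write must be released back to the allocation
 * bit arrays.
 */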
static void
_write_maps(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	char buf[16 * 1024]; /* chunk size */
	uint32_t i;
	uint64_t old_chunk0_map_index, new_chunk0_map_index;
	uint64_t *old_chunk0_map, *new_chunk0_map;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	for (i = 0; i < g_vol->params.vol_size / g_vol->params.chunk_size; i++) {
		CU_ASSERT(_vol_get_chunk_map_index(g_vol, i) == REDUCE_EMPTY_MAP_ENTRY);
	}

	iov.iov_base = buf;
	iov.iov_len = params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	old_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(old_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == true);

	old_chunk0_map = _vol_get_chunk_map(g_vol, old_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(old_chunk0_map[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units, old_chunk0_map[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 0, 1, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	new_chunk0_map_index = _vol_get_chunk_map_index(g_vol, 0);
	CU_ASSERT(new_chunk0_map_index != REDUCE_EMPTY_MAP_ENTRY);
	CU_ASSERT(new_chunk0_map_index != old_chunk0_map_index);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, new_chunk0_map_index) == true);
	CU_ASSERT(spdk_bit_array_get(g_vol->allocated_chunk_maps, old_chunk0_map_index) == false);

	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units, old_chunk0_map[i]) == false);
	}

	new_chunk0_map = _vol_get_chunk_map(g_vol, new_chunk0_map_index);
	for (i = 0; i < g_vol->backing_io_units_per_chunk; i++) {
		CU_ASSERT(new_chunk0_map[i] != REDUCE_EMPTY_MAP_ENTRY);
		CU_ASSERT(spdk_bit_array_get(g_vol->allocated_backing_io_units, new_chunk0_map[i]) == true);
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
write_maps(void)
{
	_write_maps(512);
	_write_maps(4096);
}

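/*
 * Write distinct patterns into two different chunks, with an unload/reload in the
 * middle, and read every logical block back to confirm that written blocks return
 * their pattern and all untouched blocks read back as zeroes.
 */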
static void
_read_write(uint32_t backing_blocklen)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};
	struct iovec iov;
	char buf[16 * 1024]; /* chunk size */
	char compare_buf[16 * 1024];
	uint32_t i;

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 4096;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, backing_blocklen);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	/* Write 0xAA to 2 512-byte logical blocks, starting at LBA 2. */
	memset(buf, 0xAA, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 2, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	memset(compare_buf, 0xAA, sizeof(compare_buf));
	for (i = 0; i < params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);
	CU_ASSERT(g_vol->params.vol_size == params.vol_size);
	CU_ASSERT(g_vol->params.chunk_size == params.chunk_size);
	CU_ASSERT(g_vol->params.backing_io_unit_size == params.backing_io_unit_size);

	g_reduce_errno = -1;

	/* Write 0xBB to 2 512-byte logical blocks, starting at LBA 37.
	 * This is writing into the second chunk of the volume.  This also
	 * enables implicitly checking that we reloaded the bit arrays
	 * correctly - making sure we don't use the first chunk map again
	 * for this new write - the first chunk map was already used by the
	 * write from before we unloaded and reloaded.
	 */
	memset(buf, 0xBB, 2 * params.logical_block_size);
	iov.iov_base = buf;
	iov.iov_len = 2 * params.logical_block_size;
	g_reduce_errno = -1;
	spdk_reduce_vol_writev(g_vol, &iov, 1, 37, 2, write_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	for (i = 0; i < 2 * params.chunk_size / params.logical_block_size; i++) {
		memset(buf, 0xFF, params.logical_block_size);
		iov.iov_base = buf;
		iov.iov_len = params.logical_block_size;
		g_reduce_errno = -1;
		spdk_reduce_vol_readv(g_vol, &iov, 1, i, 1, read_cb, NULL);
		CU_ASSERT(g_reduce_errno == 0);

		switch (i) {
		case 2:
		case 3:
			memset(compare_buf, 0xAA, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		case 37:
		case 38:
			memset(compare_buf, 0xBB, sizeof(compare_buf));
			CU_ASSERT(memcmp(buf, compare_buf, params.logical_block_size) == 0);
			break;
		default:
			CU_ASSERT(spdk_mem_all_zero(buf, params.logical_block_size));
			break;
		}
	}

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	persistent_pm_buf_destroy();
	backing_dev_destroy(&backing_dev);
}

static void
read_write(void)
{
	_read_write(512);
	_read_write(4096);
}

static void
destroy_cb(void *ctx, int reduce_errno)
{
	g_reduce_errno = reduce_errno;
}

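/*
 * Verify that spdk_reduce_vol_destroy() wipes the volume: after a successful
 * destroy, loading the same backing device must fail with -EILSEQ because the
 * superblock signature no longer matches.
 */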
static void
destroy(void)
{
	struct spdk_reduce_vol_params params = {};
	struct spdk_reduce_backing_dev backing_dev = {};

	params.chunk_size = 16 * 1024;
	params.backing_io_unit_size = 512;
	params.logical_block_size = 512;
	spdk_uuid_generate(&params.uuid);

	backing_dev_init(&backing_dev, &params, 512);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_init(&params, &backing_dev, TEST_MD_PATH, init_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_vol = NULL;
	g_reduce_errno = -1;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);
	SPDK_CU_ASSERT_FATAL(g_vol != NULL);

	g_reduce_errno = -1;
	spdk_reduce_vol_unload(g_vol, unload_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = -1;
	MOCK_CLEAR(spdk_dma_zmalloc);
	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	spdk_reduce_vol_destroy(&backing_dev, destroy_cb, NULL);
	CU_ASSERT(g_reduce_errno == 0);

	g_reduce_errno = 0;
	spdk_reduce_vol_load(&backing_dev, load_cb, NULL);
	CU_ASSERT(g_reduce_errno == -EILSEQ);

	backing_dev_destroy(&backing_dev);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("reduce", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "get_pm_file_size", get_pm_file_size) == NULL ||
		CU_add_test(suite, "get_vol_size", get_vol_size) == NULL ||
		CU_add_test(suite, "init_failure", init_failure) == NULL ||
		CU_add_test(suite, "init_md", init_md) == NULL ||
		CU_add_test(suite, "init_backing_dev", init_backing_dev) == NULL ||
		CU_add_test(suite, "load", load) == NULL ||
		CU_add_test(suite, "write_maps", write_maps) == NULL ||
		CU_add_test(suite, "read_write", read_write) == NULL ||
		CU_add_test(suite, "destroy", destroy) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}