xref: /spdk/test/unit/lib/bdev/compress.c/compress_ut.c (revision dc29e75b1c287e6ba6bcf207e0e01d06f489b1ae)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"
/* We have our own mock for this */
#define UNIT_TEST_NO_VTOPHYS
#include "common/lib/test_env.c"
#include "spdk_internal/mock.h"
#include "unit/lib/json_mock.c"
#include "spdk/reduce.h"

#include <rte_compressdev.h>

/* There will be one mbuf if the data perfectly matches the chunk size,
 * or there could be an offset into the data and a remainder after the
 * data, or both, for a max of 3.
 */
#define UT_MBUFS_PER_OP 3
/* For testing the crossing of a huge page boundary during address translation,
 * we allocate one extra mbuf, but we only test on the source side.
 */
#define UT_MBUFS_PER_OP_BOUND_TEST 4

struct spdk_bdev_io *g_bdev_io;
struct spdk_io_channel *g_io_ch;
struct rte_comp_op g_comp_op[2];
struct vbdev_compress g_comp_bdev;
struct comp_device_qp g_device_qp;
struct compress_dev g_device;
struct rte_compressdev_capabilities g_cdev_cap;
static struct rte_mbuf *g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
static struct rte_mbuf *g_dst_mbufs[UT_MBUFS_PER_OP];
static struct rte_mbuf g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
static struct rte_mbuf g_expected_dst_mbufs[UT_MBUFS_PER_OP];
struct comp_bdev_io *g_io_ctx;
struct comp_io_channel *g_comp_ch;
struct rte_config *g_test_config;

/* These functions are defined as static inline in DPDK, so we can't
 * mock them directly. We use defines to redirect them into
 * our custom functions.
 */

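/* Mock attach_extbuf: record the external buffer's address, IOVA, and length directly in the mbuf. */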
static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
		uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo);
#define rte_pktmbuf_attach_extbuf mock_rte_pktmbuf_attach_extbuf
static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
		uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
{
	assert(m != NULL);
	m->buf_addr = buf_addr;
	m->buf_iova = buf_iova;
	m->buf_len = buf_len;
	m->data_len = m->pkt_len = 0;
}

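/* Mock append: just accumulate the requested length into pkt_len. */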
static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len);
#define rte_pktmbuf_append mock_rte_pktmbuf_append
static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	m->pkt_len = m->pkt_len + len;
	return NULL;
}

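/* Mock chain: link the tail mbuf onto the last segment of the head chain. */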
static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail);
#define rte_pktmbuf_chain mock_rte_pktmbuf_chain
static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	return 0;
}

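/* The rte_compressdev_* mocks below report whatever their ut_* control
 * variables hold, so individual tests can force specific paths in the
 * code under test.
 */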
uint16_t ut_max_nb_queue_pairs = 0;
void __rte_experimental mock_rte_compressdev_info_get(uint8_t dev_id,
		struct rte_compressdev_info *dev_info);
#define rte_compressdev_info_get mock_rte_compressdev_info_get
void __rte_experimental
mock_rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
	dev_info->max_nb_queue_pairs = ut_max_nb_queue_pairs;
	dev_info->capabilities = &g_cdev_cap;
	dev_info->driver_name = "compress_isal";
}

int ut_rte_compressdev_configure = 0;
int __rte_experimental mock_rte_compressdev_configure(uint8_t dev_id,
		struct rte_compressdev_config *config);
#define rte_compressdev_configure mock_rte_compressdev_configure
int __rte_experimental
mock_rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
	return ut_rte_compressdev_configure;
}

int ut_rte_compressdev_queue_pair_setup = 0;
int __rte_experimental mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id);
#define rte_compressdev_queue_pair_setup mock_rte_compressdev_queue_pair_setup
int __rte_experimental
mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
				      uint32_t max_inflight_ops, int socket_id)
{
	return ut_rte_compressdev_queue_pair_setup;
}

int ut_rte_compressdev_start = 0;
int __rte_experimental mock_rte_compressdev_start(uint8_t dev_id);
#define rte_compressdev_start mock_rte_compressdev_start
int __rte_experimental
mock_rte_compressdev_start(uint8_t dev_id)
{
	return ut_rte_compressdev_start;
}

int ut_rte_compressdev_private_xform_create = 0;
int __rte_experimental mock_rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform, void **private_xform);
#define rte_compressdev_private_xform_create mock_rte_compressdev_private_xform_create
int __rte_experimental
mock_rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform, void **private_xform)
{
	return ut_rte_compressdev_private_xform_create;
}

uint8_t ut_rte_compressdev_count = 0;
uint8_t __rte_experimental mock_rte_compressdev_count(void);
#define rte_compressdev_count mock_rte_compressdev_count
uint8_t __rte_experimental
mock_rte_compressdev_count(void)
{
	return ut_rte_compressdev_count;
}

struct rte_mempool *ut_rte_comp_op_pool_create = NULL;
struct rte_mempool *__rte_experimental mock_rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size, uint16_t user_size,
		int socket_id);
#define rte_comp_op_pool_create mock_rte_comp_op_pool_create
struct rte_mempool *__rte_experimental
mock_rte_comp_op_pool_create(const char *name, unsigned int nb_elts,
			     unsigned int cache_size, uint16_t user_size, int socket_id)
{
	return ut_rte_comp_op_pool_create;
}

void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
}

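/* The alloc_bulk mock hands out the preallocated global mbufs; the running
 * count distinguishes the src allocation, the dst allocation, and the single
 * extra mbuf used by the boundary-cross test.
 */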
static bool ut_boundary_alloc = false;
static int ut_rte_pktmbuf_alloc_bulk = 0;
int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
				unsigned count);
#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
				unsigned count)
{
	int i;

	/* This mocked function only supports the allocation of up to 3 src and 3 dst mbufs. */
	ut_rte_pktmbuf_alloc_bulk += count;

	if (ut_rte_pktmbuf_alloc_bulk == 1) {
		/* allocation of an extra mbuf for the boundary-cross test */
		ut_boundary_alloc = true;
		g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1]->next = NULL;
		*mbufs = g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1];
		ut_rte_pktmbuf_alloc_bulk = 0;
	} else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP) {
		/* first test allocation, src mbufs */
		for (i = 0; i < UT_MBUFS_PER_OP; i++) {
			g_src_mbufs[i]->next = NULL;
			*mbufs++ = g_src_mbufs[i];
		}
	} else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP * 2) {
		/* second test allocation, dst mbufs */
		for (i = 0; i < UT_MBUFS_PER_OP; i++) {
			g_dst_mbufs[i]->next = NULL;
			*mbufs++ = g_dst_mbufs[i];
		}
		ut_rte_pktmbuf_alloc_bulk = 0;
	} else {
		return -1;
	}
	return 0;
}

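/* Back the DPDK mbuf pool with an SPDK mempool so pool creation in the module under test succeeds. */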
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
			uint16_t priv_size, uint16_t data_room_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create("mbuf_mp", 1024, sizeof(struct rte_mbuf),
				  SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
				  SPDK_ENV_SOCKET_ID_ANY);

	return (struct rte_mempool *)tmp;
}

void
rte_mempool_free(struct rte_mempool *mp)
{
	if (mp) {
		spdk_mempool_free((struct spdk_mempool *)mp);
	}
}

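/* The reduce readv/writev stubs below complete immediately with the configured errno. */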
static int ut_spdk_reduce_vol_op_complete_err = 0;
void
spdk_reduce_vol_writev(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		       uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		       void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

void
spdk_reduce_vol_readv(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		      uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		      void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

#include "bdev/compress/vbdev_compress.c"

/* SPDK stubs */
DEFINE_STUB(spdk_bdev_get_aliases, const struct spdk_bdev_aliases_list *,
	    (const struct spdk_bdev *bdev), NULL);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
				  spdk_bdev_remove_cb_t remove_cb,
				  void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io),
	    0);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_reduce_vol_unload, (struct spdk_reduce_vol *vol,
				       spdk_reduce_vol_op_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_load, (struct spdk_reduce_backing_dev *backing_dev,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB(spdk_reduce_vol_get_params, const struct spdk_reduce_vol_params *,
	    (struct spdk_reduce_vol *vol), NULL);

/* DPDK stubs */
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_eal_get_configuration, struct rte_config *, (void), NULL);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB_V(rte_comp_op_free, (struct rte_comp_op *op));
DEFINE_STUB(rte_comp_op_alloc, struct rte_comp_op *, (struct rte_mempool *mempool), NULL);

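/* g_small_size_counter, g_small_size_modify, and g_small_size let a test force
 * the Nth spdk_vtophys() call to report a truncated size, simulating a
 * translation that stops at a huge page boundary.
 */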
int g_small_size_counter = 0;
int g_small_size_modify = 0;
uint64_t g_small_size = 0;
uint64_t
spdk_vtophys(void *buf, uint64_t *size)
{
	g_small_size_counter++;
	if (g_small_size_counter == g_small_size_modify) {
		*size = g_small_size;
		g_small_size_counter = 0;
		g_small_size_modify = 0;
	}
	return (uint64_t)buf;
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}

/* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}

int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}

int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}

int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}

int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}

bool g_completion_called = false;
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}

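/* The dequeue mock returns up to two completed ops from g_comp_op[], as
 * configured by ut_rte_compressdev_dequeue_burst.
 */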
static uint16_t ut_rte_compressdev_dequeue_burst = 0;
uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
			      uint16_t nb_op)
{
	if (ut_rte_compressdev_dequeue_burst == 0) {
		return 0;
	}

	ops[0] = &g_comp_op[0];
	ops[1] = &g_comp_op[1];

	return ut_rte_compressdev_dequeue_burst;
}

static int ut_compress_done[2];
/* done_count and done_idx together control which expected assertion
 * value to use when dequeuing 2 operations.
 */
static uint16_t done_count = 1;
static uint16_t done_idx = 0;
static void
_compress_done(void *_req, int reduce_errno)
{
	if (done_count == 1) {
		CU_ASSERT(reduce_errno == ut_compress_done[0]);
	} else if (done_count == 2) {
		CU_ASSERT(reduce_errno == ut_compress_done[done_idx++]);
	}
}

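/* Collect up to mbuf_count segment pointers from an mbuf chain into mbuf_array;
 * when null_final is set, the last entry is cleared to NULL.
 */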
static void
_get_mbuf_array(struct rte_mbuf *mbuf_array[UT_MBUFS_PER_OP_BOUND_TEST],
		struct rte_mbuf *mbuf_head, int mbuf_count, bool null_final)
{
	int i;

	for (i = 0; i < mbuf_count; i++) {
		mbuf_array[i] = mbuf_head;
		if (mbuf_head) {
			mbuf_head = mbuf_head->next;
		}
	}
	if (null_final) {
		mbuf_array[i - 1] = NULL;
	}
}

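/* Sentinel values for ut_enqueue_value that make the enqueue mock simulate a
 * successful, failed, or busy (not processed) enqueue.
 */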
#define FAKE_ENQUEUE_SUCCESS 255
#define FAKE_ENQUEUE_ERROR 128
#define FAKE_ENQUEUE_BUSY 64
static uint16_t ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
static struct rte_comp_op ut_expected_op;
uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
			      uint16_t nb_ops)
{
	struct rte_comp_op *op = *ops;
	struct rte_mbuf *op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	struct rte_mbuf *exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	int i, num_src_mbufs = UT_MBUFS_PER_OP;

	switch (ut_enqueue_value) {
	case FAKE_ENQUEUE_BUSY:
		op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
		return 0;
	case FAKE_ENQUEUE_SUCCESS:
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
		return 1;
	case FAKE_ENQUEUE_ERROR:
		op->status = RTE_COMP_OP_STATUS_ERROR;
		return 0;
	default:
		break;
	}

	/* by design the compress module will never send more than 1 op at a time */
	CU_ASSERT(op->private_xform == ut_expected_op.private_xform);

	/* Set up local pointers to the chained mbufs referenced by the
	 * operation struct and to the expected values.
	 */
	_get_mbuf_array(op_mbuf, op->m_src, SPDK_COUNTOF(op_mbuf), true);
	_get_mbuf_array(exp_mbuf, ut_expected_op.m_src, SPDK_COUNTOF(exp_mbuf), true);

	if (ut_boundary_alloc == true) {
		/* if we crossed a boundary, we need to check the 4th src mbuf and
		 * reset the global that is used to identify whether we crossed
		 * or not
		 */
		num_src_mbufs = UT_MBUFS_PER_OP_BOUND_TEST;
		exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = ut_expected_op.m_src->next->next->next;
		op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = op->m_src->next->next->next;
		ut_boundary_alloc = false;
	}

	for (i = 0; i < num_src_mbufs; i++) {
		CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
		CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
		CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
		CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
	}

	/* if only 3 mbufs were used in the test, the 4th should be NULL */
	if (num_src_mbufs == UT_MBUFS_PER_OP) {
		CU_ASSERT(op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
		CU_ASSERT(exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
	}

	CU_ASSERT(op->m_src->userdata == ut_expected_op.m_src->userdata);
	CU_ASSERT(op->src.offset == ut_expected_op.src.offset);
	CU_ASSERT(op->src.length == ut_expected_op.src.length);

	/* check dst mbuf values */
	_get_mbuf_array(op_mbuf, op->m_dst, SPDK_COUNTOF(op_mbuf), true);
	_get_mbuf_array(exp_mbuf, ut_expected_op.m_dst, SPDK_COUNTOF(exp_mbuf), true);

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
		CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
		CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
		CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
	}
	CU_ASSERT(op->dst.offset == ut_expected_op.dst.offset);

	return ut_enqueue_value;
}

/* Global setup shared by all tests. */
static int
test_setup(void)
{
	int i;

	g_comp_bdev.backing_dev.unmap = _comp_reduce_unmap;
	g_comp_bdev.backing_dev.readv = _comp_reduce_readv;
	g_comp_bdev.backing_dev.writev = _comp_reduce_writev;
	g_comp_bdev.backing_dev.compress = _comp_reduce_compress;
	g_comp_bdev.backing_dev.decompress = _comp_reduce_decompress;
	g_comp_bdev.backing_dev.blocklen = 512;
	g_comp_bdev.backing_dev.blockcnt = 1024 * 16;

	g_comp_bdev.device_qp = &g_device_qp;
	g_comp_bdev.device_qp->device = &g_device;

	TAILQ_INIT(&g_comp_bdev.queued_comp_ops);

	g_comp_xform = (struct rte_comp_xform) {
		.type = RTE_COMP_COMPRESS,
		.compress = {
			.algo = RTE_COMP_ALGO_DEFLATE,
			.deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
			.level = RTE_COMP_LEVEL_MAX,
			.window_size = DEFAULT_WINDOW_SIZE,
			.chksum = RTE_COMP_CHECKSUM_NONE,
			.hash_algo = RTE_COMP_HASH_ALGO_NONE
		}
	};

	g_decomp_xform = (struct rte_comp_xform) {
		.type = RTE_COMP_DECOMPRESS,
		.decompress = {
			.algo = RTE_COMP_ALGO_DEFLATE,
			.chksum = RTE_COMP_CHECKSUM_NONE,
			.window_size = DEFAULT_WINDOW_SIZE,
			.hash_algo = RTE_COMP_HASH_ALGO_NONE
		}
	};
	g_device.comp_xform = &g_comp_xform;
	g_device.decomp_xform = &g_decomp_xform;
	g_cdev_cap.comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM;
	g_device.cdev_info.driver_name = "compress_isal";
	g_device.cdev_info.capabilities = &g_cdev_cap;
	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) {
		g_src_mbufs[i] = calloc(1, sizeof(struct rte_mbuf));
	}
	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		g_dst_mbufs[i] = calloc(1, sizeof(struct rte_mbuf));
	}

	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct comp_bdev_io));
	g_bdev_io->u.bdev.iovs = calloc(128, sizeof(struct iovec));
	g_bdev_io->bdev = &g_comp_bdev.comp_bdev;
	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct comp_io_channel));
	g_comp_ch = (struct comp_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
	g_io_ctx = (struct comp_bdev_io *)g_bdev_io->driver_ctx;

	g_io_ctx->comp_ch = g_comp_ch;
	g_io_ctx->comp_bdev = &g_comp_bdev;
	g_comp_bdev.device_qp = &g_device_qp;

	g_test_config = calloc(1, sizeof(struct rte_config));
	g_test_config->lcore_count = 1;

	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
		g_expected_src_mbufs[i].next = &g_expected_src_mbufs[i + 1];
	}
	g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1].next = NULL;

	/* we only test with 4 mbufs on the src side, so the dst list has 3 */
	for (i = 0; i < UT_MBUFS_PER_OP - 1; i++) {
		g_expected_dst_mbufs[i].next = &g_expected_dst_mbufs[i + 1];
	}
	g_expected_dst_mbufs[UT_MBUFS_PER_OP - 1].next = NULL;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	int i;

	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) {
		free(g_src_mbufs[i]);
	}
	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		free(g_dst_mbufs[i]);
	}
	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);
	free(g_test_config);
	return 0;
}

static void
test_compress_operation(void)
{
	struct iovec src_iovs[3] = {};
	int src_iovcnt;
	struct iovec dst_iovs[3] = {};
	int dst_iovcnt;
	struct spdk_reduce_vol_cb_args cb_arg;
	int rc, i;
	struct vbdev_comp_op *op;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP];
	struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP];

	src_iovcnt = dst_iovcnt = 3;
	for (i = 0; i < dst_iovcnt; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}

	/* test rte_comp_op_alloc failure */
	MOCK_SET(rte_comp_op_alloc, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, true, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
	while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
		op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
		TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
		free(op);
	}
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);
	MOCK_SET(rte_comp_op_alloc, &g_comp_op[0]);

	/* test mempool get failure */
	ut_rte_pktmbuf_alloc_bulk = -1;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, true, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
	while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
		op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
		TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
		free(op);
	}
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);
	ut_rte_pktmbuf_alloc_bulk = 0;

	/* test enqueue failure, device busy */
	ut_enqueue_value = FAKE_ENQUEUE_BUSY;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, true, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
	while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
		op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
		TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
		free(op);
	}
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);
	ut_enqueue_value = 1;

	/* test enqueue failure, device error */
	ut_enqueue_value = FAKE_ENQUEUE_ERROR;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, true, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == -EINVAL);
	ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;

	/* test success with a 3 element iovec */
	ut_expected_op.private_xform = &g_decomp_xform;
	ut_expected_op.src.offset = 0;
	ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;

	/* setup the src expected values */
	_get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
	ut_expected_op.m_src = exp_src_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		exp_src_mbuf[i]->userdata = &cb_arg;
		exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
		exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
		exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
		exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
	}

	/* setup the dst expected values */
	_get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
	ut_expected_op.dst.offset = 0;
	ut_expected_op.m_dst = exp_dst_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
		exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
		exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
		exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
	}

	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, false, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);
}

static void
test_compress_operation_cross_boundary(void)
{
	struct iovec src_iovs[3] = {};
	int src_iovcnt;
	struct iovec dst_iovs[3] = {};
	int dst_iovcnt;
	struct spdk_reduce_vol_cb_args cb_arg;
	int rc, i;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];

	/* Setup the same basic 3 IOV test as used in the simple success case
	 * but then we'll start testing a vtophys boundary crossing at each
	 * position.
	 */
	src_iovcnt = dst_iovcnt = 3;
	for (i = 0; i < dst_iovcnt; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}

	ut_expected_op.private_xform = &g_decomp_xform;
	ut_expected_op.src.offset = 0;
	ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;

	/* setup the src expected values */
	_get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
	ut_expected_op.m_src = exp_src_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		exp_src_mbuf[i]->userdata = &cb_arg;
		exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
		exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
		exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
		exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
	}

	/* setup the dst expected values, we don't test needing a 4th dst mbuf */
	_get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
	ut_expected_op.dst.offset = 0;
	ut_expected_op.m_dst = exp_dst_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
		exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
		exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
		exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
	}

	/* force the 1st IOV to get a partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 1;
	g_small_size = 0x800;
	exp_src_mbuf[3]->userdata = &cb_arg;

	/* the first mbuf now has a shorter length */
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x800;

	/* the 2nd was inserted by the boundary crossing condition and finishes off
	 * the length from the first */
	exp_src_mbuf[1]->buf_addr = (void *)0x10000800;
	exp_src_mbuf[1]->buf_iova = 0x10000800;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;

	/* the 3rd now looks like what the 2nd would have */
	exp_src_mbuf[2]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[2]->buf_iova = 0x10001000;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x1000;

	/* a new 4th looks like what the 3rd would have */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[3]->buf_iova = 0x10002000;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;

	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, false, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);

	/* Now force the 2nd IOV to get a partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 2;
	g_small_size = 0x800;

	/* the first is normal */
	exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
	exp_src_mbuf[0]->buf_iova = 0x10000000;
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;

	/* the second now has a shorter length */
	exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[1]->buf_iova = 0x10001000;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;

	/* the 3rd was inserted by the boundary crossing condition and finishes off
	 * the length from the second */
	exp_src_mbuf[2]->buf_addr = (void *)0x10001800;
	exp_src_mbuf[2]->buf_iova = 0x10001800;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;

	/* a new 4th looks like what the 3rd would have */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[3]->buf_iova = 0x10002000;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;

	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, false, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);

	/* Finally force the 3rd IOV to get a partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 3;
	g_small_size = 0x800;

	/* the first is normal */
	exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
	exp_src_mbuf[0]->buf_iova = 0x10000000;
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;

	/* the second is normal */
	exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[1]->buf_iova = 0x10001000;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x1000;

	/* the 3rd has a shorter length */
	exp_src_mbuf[2]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[2]->buf_iova = 0x10002000;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;

	/* a new 4th handles the remainder from the 3rd */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002800;
	exp_src_mbuf[3]->buf_iova = 0x10002800;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x800;

	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, false, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);
}

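/* Exercise comp_dev_poller(): an errored dequeue, a dequeue of two successful
 * ops, and a dequeue followed by resubmission of a previously queued op.
 */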
static void
test_poller(void)
{
	int rc;
	struct spdk_reduce_vol_cb_args *cb_args;
	struct rte_mbuf mbuf[4]; /* one src and one dst mbuf for each of the 2 ops */
	struct vbdev_comp_op *op_to_queue;
	struct iovec src_iovs[3] = {};
	struct iovec dst_iovs[3] = {};
	int i;

	cb_args = calloc(1, sizeof(*cb_args));
	SPDK_CU_ASSERT_FATAL(cb_args != NULL);
	cb_args->cb_fn = _compress_done;
	memset(&g_comp_op[0], 0, sizeof(struct rte_comp_op));
	g_comp_op[0].m_src = &mbuf[0];
	g_comp_op[1].m_src = &mbuf[1];
	g_comp_op[0].m_dst = &mbuf[2];
	g_comp_op[1].m_dst = &mbuf[3];
	for (i = 0; i < 3; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}

	/* Error from dequeue, nothing needing to be resubmitted.
	 */
	ut_rte_compressdev_dequeue_burst = 1;
	/* setup what we want dequeue to return for the op */
	g_comp_op[0].m_src->userdata = (void *)cb_args;
	g_comp_op[0].produced = 1;
	g_comp_op[0].status = 1;
	/* value asserted in the reduce callback */
	ut_compress_done[0] = -EINVAL;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = comp_dev_poller((void *)&g_comp_bdev);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);

	/* Success from dequeue, 2 ops, nothing needing to be resubmitted.
	 */
	ut_rte_compressdev_dequeue_burst = 2;
	/* setup what we want dequeue to return for the ops */
	g_comp_op[0].m_src->userdata = (void *)cb_args;
	g_comp_op[0].produced = 16;
	g_comp_op[0].status = 0;
	g_comp_op[1].m_src->userdata = (void *)cb_args;
	g_comp_op[1].produced = 32;
	g_comp_op[1].status = 0;
	/* values asserted in the reduce callback */
	ut_compress_done[0] = 16;
	ut_compress_done[1] = 32;
	done_count = 2;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = comp_dev_poller((void *)&g_comp_bdev);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);

	/* Success from dequeue, one op to be resubmitted.
	 */
	ut_rte_compressdev_dequeue_burst = 1;
	/* setup what we want dequeue to return for the op */
	g_comp_op[0].m_src->userdata = (void *)cb_args;
	g_comp_op[0].produced = 16;
	g_comp_op[0].status = 0;
	/* value asserted in the reduce callback */
	ut_compress_done[0] = 16;
	done_count = 1;
	op_to_queue = calloc(1, sizeof(struct vbdev_comp_op));
	SPDK_CU_ASSERT_FATAL(op_to_queue != NULL);
	op_to_queue->backing_dev = &g_comp_bdev.backing_dev;
	op_to_queue->src_iovs = &src_iovs[0];
	op_to_queue->src_iovcnt = 3;
	op_to_queue->dst_iovs = &dst_iovs[0];
	op_to_queue->dst_iovcnt = 3;
	op_to_queue->compress = true;
	op_to_queue->cb_arg = cb_args;
	ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
	TAILQ_INSERT_TAIL(&g_comp_bdev.queued_comp_ops,
			  op_to_queue,
			  link);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
	rc = comp_dev_poller((void *)&g_comp_bdev);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);

	/* op_to_queue is freed in code under test */
	free(cb_args);
}

static void
test_vbdev_compress_submit_request(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_io_get_io_channel, g_io_ch);
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);
	CU_ASSERT(g_io_ctx->orig_io == g_bdev_io);
	CU_ASSERT(g_io_ctx->comp_bdev == &g_comp_bdev);
	CU_ASSERT(g_io_ctx->comp_ch == g_comp_ch);

	/* same write but now fail it */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* test a read success */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	ut_spdk_reduce_vol_op_complete_err = 0;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* test a read failure */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}

static void
test_passthru(void)
{

}

static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting without a UT for this function for now;
	 * will follow up with something shortly.
	 */
}

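/* Walk vbdev_init_compress_drivers() through each failure path by toggling the
 * ut_* control variables, then finish with the success path.
 */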
static void
test_initdrivers(void)
{
	int rc;

	/* test return values from rte_vdev_init() */
	MOCK_SET(rte_eal_get_configuration, g_test_config);
	MOCK_SET(rte_vdev_init, -EEXIST);
	rc = vbdev_init_compress_drivers();
	/* This is not an error condition; the vdev already exists. */
	CU_ASSERT(rc == 0);

	/* error */
	MOCK_SET(rte_vdev_init, -2);
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_comp_op_mp == NULL);

	/* compressdev count 0 */
	ut_rte_compressdev_count = 0;
	MOCK_SET(rte_vdev_init, 0);
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == 0);

	/* bogus count */
	ut_rte_compressdev_count = RTE_COMPRESS_MAX_DEVS + 1;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* can't get mbuf pool */
	ut_rte_compressdev_count = 1;
	MOCK_SET(spdk_mempool_create, NULL);
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -ENOMEM);
	MOCK_CLEAR(spdk_mempool_create);

	/* can't get comp op pool */
	ut_rte_comp_op_pool_create = NULL;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -ENOMEM);

	/* error on create_compress_dev() */
	ut_rte_comp_op_pool_create = (struct rte_mempool *)&test_initdrivers;
	ut_rte_compressdev_configure = -1;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* error on create_compress_dev() but coverage for a large number of queue pairs */
	ut_max_nb_queue_pairs = 99;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* qpair setup fails */
	ut_rte_compressdev_configure = 0;
	ut_max_nb_queue_pairs = 0;
	ut_rte_compressdev_queue_pair_setup = -1;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* rte_compressdev_start fails */
	ut_rte_compressdev_queue_pair_setup = 0;
	ut_rte_compressdev_start = -1;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* rte_compressdev_private_xform_create() fails */
	ut_rte_compressdev_start = 0;
	ut_rte_compressdev_private_xform_create = -2;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -2);

	/* success */
	ut_rte_compressdev_private_xform_create = 0;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == 0);
	spdk_mempool_free((struct spdk_mempool *)g_mbuf_mp);
}

static void
test_supported_io(void)
{

}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("compress", test_setup, test_cleanup);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "test_compress_operation",
			test_compress_operation) == NULL ||
	    CU_add_test(suite, "test_compress_operation_cross_boundary",
			test_compress_operation_cross_boundary) == NULL ||
	    CU_add_test(suite, "vbdev_compress_submit_request",
			test_vbdev_compress_submit_request) == NULL ||
	    CU_add_test(suite, "test_passthru",
			test_passthru) == NULL ||
	    CU_add_test(suite, "test_initdrivers",
			test_initdrivers) == NULL ||
	    CU_add_test(suite, "test_supported_io",
			test_supported_io) == NULL ||
	    CU_add_test(suite, "test_poller",
			test_poller) == NULL ||
	    CU_add_test(suite, "test_reset",
			test_reset) == NULL
	   ) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}