/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_internal/cunit.h"
/* We have our own mock for this */
#define UNIT_TEST_NO_VTOPHYS
#include "common/lib/test_env.c"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"

#include <rte_compressdev.h>

/* There will be one mbuf if the data exactly matches the chunk size;
 * an offset into the data and/or a remainder after the data can each
 * add one more, for a max of 3.
 */
#define UT_MBUFS_PER_OP 3
/* For testing the crossing of a huge page boundary on address translation,
 * we'll have an extra one, but we only test this on the source side.
 */
#define UT_MBUFS_PER_OP_BOUND_TEST 4

struct spdk_io_channel *g_io_ch;
struct rte_comp_op g_comp_op[2];
struct comp_device_qp g_device_qp;
struct compress_dev g_device;
struct rte_compressdev_capabilities g_cdev_cap;
static struct rte_mbuf *g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
static struct rte_mbuf *g_dst_mbufs[UT_MBUFS_PER_OP];
static struct rte_mbuf g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
static struct rte_mbuf g_expected_dst_mbufs[UT_MBUFS_PER_OP];
struct compress_io_channel *g_comp_ch;

/* These functions are defined as static inline in DPDK, so we can't
 * mock them directly. We use defines to redirect them into
 * our custom functions.
 */

static int ut_total_rte_pktmbuf_attach_extbuf = 0;
static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
		uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo);
#define rte_pktmbuf_attach_extbuf mock_rte_pktmbuf_attach_extbuf
static void
mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
			       uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
{
	assert(m != NULL);
	m->buf_addr = buf_addr;
	m->buf_iova = buf_iova;
	m->buf_len = buf_len;
	m->data_len = m->pkt_len = 0;
	ut_total_rte_pktmbuf_attach_extbuf++;
}

static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len);
#define rte_pktmbuf_append mock_rte_pktmbuf_append
static char *
mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	m->pkt_len = m->pkt_len + len;
	return NULL;
}

static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail);
#define rte_pktmbuf_chain mock_rte_pktmbuf_chain
static inline int
mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	return 0;
}

uint16_t ut_max_nb_queue_pairs = 0;
void __rte_experimental mock_rte_compressdev_info_get(uint8_t dev_id,
		struct rte_compressdev_info *dev_info);
#define rte_compressdev_info_get mock_rte_compressdev_info_get
void __rte_experimental
mock_rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
	dev_info->max_nb_queue_pairs = ut_max_nb_queue_pairs;
	dev_info->capabilities = &g_cdev_cap;
	dev_info->driver_name = "compressdev";
}

int ut_rte_compressdev_configure = 0;
int __rte_experimental mock_rte_compressdev_configure(uint8_t dev_id,
		struct rte_compressdev_config *config);
#define rte_compressdev_configure mock_rte_compressdev_configure
int __rte_experimental
mock_rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
	return ut_rte_compressdev_configure;
}

int ut_rte_compressdev_queue_pair_setup = 0;
int __rte_experimental mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id);
#define rte_compressdev_queue_pair_setup mock_rte_compressdev_queue_pair_setup
int __rte_experimental
mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
				      uint32_t max_inflight_ops, int socket_id)
{
	return ut_rte_compressdev_queue_pair_setup;
}

int ut_rte_compressdev_start = 0;
int __rte_experimental mock_rte_compressdev_start(uint8_t dev_id);
#define rte_compressdev_start mock_rte_compressdev_start
int __rte_experimental
mock_rte_compressdev_start(uint8_t dev_id)
{
	return ut_rte_compressdev_start;
}

int ut_rte_compressdev_private_xform_create = 0;
int __rte_experimental mock_rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform, void **private_xform);
#define rte_compressdev_private_xform_create mock_rte_compressdev_private_xform_create
int __rte_experimental
mock_rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform, void **private_xform)
{
	return ut_rte_compressdev_private_xform_create;
}

uint8_t ut_rte_compressdev_count = 0;
uint8_t __rte_experimental mock_rte_compressdev_count(void);
#define rte_compressdev_count mock_rte_compressdev_count
uint8_t __rte_experimental
mock_rte_compressdev_count(void)
{
	return ut_rte_compressdev_count;
}

struct rte_mempool *ut_rte_comp_op_pool_create = NULL;
struct rte_mempool *__rte_experimental mock_rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size, uint16_t user_size,
		int socket_id);
#define rte_comp_op_pool_create mock_rte_comp_op_pool_create
struct rte_mempool *__rte_experimental
mock_rte_comp_op_pool_create(const char *name, unsigned int nb_elts,
			     unsigned int cache_size, uint16_t user_size, int socket_id)
{
	return ut_rte_comp_op_pool_create;
}

void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void
mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
}

void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void
mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
}

static bool ut_boundary_alloc = false;
static int ut_rte_pktmbuf_alloc_bulk = 0;
int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
				unsigned count);
#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
int
mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
			    unsigned count)
{
	int i;

	/* This mocked function only supports the alloc of up to 3 src and 3 dst. */
	ut_rte_pktmbuf_alloc_bulk += count;

	if (ut_rte_pktmbuf_alloc_bulk == 1) {
		/* allocation of an extra mbuf for boundary cross test */
		ut_boundary_alloc = true;
		g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1]->next = NULL;
		*mbufs = g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1];
		ut_rte_pktmbuf_alloc_bulk = 0;
	} else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP) {
		/* first test allocation, src mbufs */
		for (i = 0; i < UT_MBUFS_PER_OP; i++) {
			g_src_mbufs[i]->next = NULL;
			*mbufs++ = g_src_mbufs[i];
		}
	} else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP * 2) {
		/* second test allocation, dst mbufs */
		for (i = 0; i < UT_MBUFS_PER_OP; i++) {
			g_dst_mbufs[i]->next = NULL;
			*mbufs++ = g_dst_mbufs[i];
		}
		ut_rte_pktmbuf_alloc_bulk = 0;
	} else {
		return -1;
	}
	return 0;
}

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
			uint16_t priv_size, uint16_t data_room_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create("mbuf_mp", 1024, sizeof(struct rte_mbuf),
				  SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
				  SPDK_ENV_NUMA_ID_ANY);

	return (struct rte_mempool *)tmp;
}

void
rte_mempool_free(struct rte_mempool *mp)
{
	if (mp) {
		spdk_mempool_free((struct spdk_mempool *)mp);
	}
}

#include "accel/dpdk_compressdev/accel_dpdk_compressdev.c"

static void _compress_done(void *arg, int status);
static int ut_expected_task_status = 0;
void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	CU_ASSERT(status == ut_expected_task_status);
	accel_task->cb_fn(accel_task, status);
}

/* SPDK stubs */
DEFINE_STUB_V(spdk_accel_module_finish, (void));
DEFINE_STUB_V(spdk_accel_module_list_add, (struct spdk_accel_module_if *accel_module));

/* DPDK stubs */
DEFINE_STUB(rte_compressdev_capability_get, const struct rte_compressdev_capabilities *,
	    (uint8_t dev_id,
	     enum rte_comp_algorithm algo), NULL);
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);
DEFINE_STUB_V(rte_compressdev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_compressdev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_comp_op_free, (struct rte_comp_op *op));
DEFINE_STUB(rte_comp_op_alloc, struct rte_comp_op *, (struct rte_mempool *mempool), NULL);

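/* Mocked vtophys: the IOVA is simply the buffer address. When the call counter
 * reaches g_small_size_modify, the reported translatable size is set to
 * g_small_size so tests can force a huge page boundary crossing.
 */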
int g_small_size_counter = 0;
int g_small_size_modify = 0;
uint64_t g_small_size = 0;
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	g_small_size_counter++;
	if (g_small_size_counter == g_small_size_modify) {
		*size = g_small_size;
		g_small_size_counter = 0;
		g_small_size_modify = 0;
	}
	return (uint64_t)buf;
}

static uint16_t ut_rte_compressdev_dequeue_burst = 0;
uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
			      uint16_t nb_op)
{
	if (ut_rte_compressdev_dequeue_burst == 0) {
		return 0;
	}

	ops[0] = &g_comp_op[0];
	ops[1] = &g_comp_op[1];

	return ut_rte_compressdev_dequeue_burst;
}

static uint16_t g_done_count = 1;
static void
_compress_done(void *arg, int status)
{
	struct spdk_accel_task *task = arg;

	if (status == 0) {
		CU_ASSERT(*task->output_size == g_comp_op[g_done_count++].produced);
	}
}

static void
_get_mbuf_array(struct rte_mbuf **mbuf_array, struct rte_mbuf *mbuf_head,
		int mbuf_count, bool null_final)
{
	int i;

	for (i = 0; i < mbuf_count; i++) {
		mbuf_array[i] = mbuf_head;
		if (mbuf_head) {
			mbuf_head = mbuf_head->next;
		}
	}
	if (null_final) {
		mbuf_array[i - 1] = NULL;
	}
}

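/* Sentinel values that drive the mocked enqueue: any other value falls through
 * to the full expected-vs-actual comparison of the op and its mbuf chains.
 */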
#define FAKE_ENQUEUE_SUCCESS 255
#define FAKE_ENQUEUE_ERROR 128
#define FAKE_ENQUEUE_BUSY 64
static uint16_t ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
static struct rte_comp_op ut_expected_op;
uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
			      uint16_t nb_ops)
{
	struct rte_comp_op *op = *ops;
	struct rte_mbuf *op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	struct rte_mbuf *exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	int i, num_src_mbufs = UT_MBUFS_PER_OP;

	switch (ut_enqueue_value) {
	case FAKE_ENQUEUE_BUSY:
		op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
		return 0;
	case FAKE_ENQUEUE_SUCCESS:
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
		return 1;
	case FAKE_ENQUEUE_ERROR:
		op->status = RTE_COMP_OP_STATUS_ERROR;
		return 0;
	default:
		break;
	}

	/* by design the compress module will never send more than 1 op at a time */
	CU_ASSERT(op->private_xform == ut_expected_op.private_xform);

	/* set up local pointers to the chained mbufs: those referenced by the
	 * operation struct and the expected values.
	 */
	_get_mbuf_array(op_mbuf, op->m_src, SPDK_COUNTOF(op_mbuf), true);
	_get_mbuf_array(exp_mbuf, ut_expected_op.m_src, SPDK_COUNTOF(exp_mbuf), true);

	if (ut_boundary_alloc == true) {
		/* if we crossed a boundary, we need to check the 4th src mbuf and
		 * reset the global that is used to identify whether we crossed
		 * or not
		 */
		num_src_mbufs = UT_MBUFS_PER_OP_BOUND_TEST;
		exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = ut_expected_op.m_src->next->next->next;
		op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = op->m_src->next->next->next;
		ut_boundary_alloc = false;
	}

	for (i = 0; i < num_src_mbufs; i++) {
		CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
		CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
		CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
		CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
	}

	/* if only 3 mbufs were used in the test, the 4th should be NULL */
	if (num_src_mbufs == UT_MBUFS_PER_OP) {
		CU_ASSERT(op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
		CU_ASSERT(exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
	}
	CU_ASSERT(*RTE_MBUF_DYNFIELD(op->m_src, g_mbuf_offset, uint64_t *) ==
		  *RTE_MBUF_DYNFIELD(ut_expected_op.m_src, g_mbuf_offset, uint64_t *));
	CU_ASSERT(op->src.offset == ut_expected_op.src.offset);
	CU_ASSERT(op->src.length == ut_expected_op.src.length);

	/* check dst mbuf values */
	_get_mbuf_array(op_mbuf, op->m_dst, SPDK_COUNTOF(op_mbuf), true);
	_get_mbuf_array(exp_mbuf, ut_expected_op.m_dst, SPDK_COUNTOF(exp_mbuf), true);

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
		CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
		CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
		CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
	}
	CU_ASSERT(op->dst.offset == ut_expected_op.dst.offset);

	return ut_enqueue_value;
}

/* Global setup shared by all tests. */
static int
test_setup(void)
{
	struct spdk_thread *thread;
	int i;

	spdk_thread_lib_init(NULL, 0);

	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);

	g_comp_xform = (struct rte_comp_xform) {
		.type = RTE_COMP_COMPRESS,
		.compress = {
			.algo = RTE_COMP_ALGO_DEFLATE,
			.deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
			.level = RTE_COMP_LEVEL_MAX,
			.window_size = DEFAULT_WINDOW_SIZE,
			.chksum = RTE_COMP_CHECKSUM_NONE,
			.hash_algo = RTE_COMP_HASH_ALGO_NONE
		}
	};

	g_decomp_xform = (struct rte_comp_xform) {
		.type = RTE_COMP_DECOMPRESS,
		.decompress = {
			.algo = RTE_COMP_ALGO_DEFLATE,
			.chksum = RTE_COMP_CHECKSUM_NONE,
			.window_size = DEFAULT_WINDOW_SIZE,
			.hash_algo = RTE_COMP_HASH_ALGO_NONE
		}
	};
	g_device.comp_xform = &g_comp_xform;
	g_device.decomp_xform = &g_decomp_xform;
	g_cdev_cap.comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM;
	g_device.cdev_info.driver_name = "compressdev";
	g_device.cdev_info.capabilities = &g_cdev_cap;
	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) {
		g_src_mbufs[i] = spdk_zmalloc(sizeof(struct rte_mbuf), 0x40, NULL,
					      SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	}
	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		g_dst_mbufs[i] = spdk_zmalloc(sizeof(struct rte_mbuf), 0x40, NULL,
					      SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	}

	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct compress_io_channel));
	g_io_ch->thread = thread;
	g_comp_ch = (struct compress_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	g_comp_ch->device_qp = &g_device_qp;
	g_comp_ch->device_qp->device = &g_device;
	g_device_qp.device->sgl_in = true;
	g_device_qp.device->sgl_out = true;
	g_comp_ch->src_mbufs = calloc(UT_MBUFS_PER_OP_BOUND_TEST, sizeof(void *));
	g_comp_ch->dst_mbufs = calloc(UT_MBUFS_PER_OP, sizeof(void *));
	STAILQ_INIT(&g_comp_ch->queued_tasks);

	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
		g_expected_src_mbufs[i].next = &g_expected_src_mbufs[i + 1];
	}
	g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1].next = NULL;

	/* we only test with 4 mbufs on the src side, so dst keeps the standard 3 */
	for (i = 0; i < UT_MBUFS_PER_OP - 1; i++) {
		g_expected_dst_mbufs[i].next = &g_expected_dst_mbufs[i + 1];
	}
	g_expected_dst_mbufs[UT_MBUFS_PER_OP - 1].next = NULL;
	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	struct spdk_thread *thread;
	int i;

	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) {
		spdk_free(g_src_mbufs[i]);
	}
	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		spdk_free(g_dst_mbufs[i]);
	}
	free(g_comp_ch->src_mbufs);
	free(g_comp_ch->dst_mbufs);
	free(g_io_ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();

	return 0;
}

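/* Exercises _compress_operation(): op alloc failure, mbuf alloc failure,
 * enqueue busy/error, the 3-iovec success case, and missing SGL support.
 */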
static void
test_compress_operation(void)
{
	struct iovec src_iovs[3] = {};
	int src_iovcnt;
	struct iovec dst_iovs[3] = {};
	int dst_iovcnt;
	struct spdk_accel_task task = {};
	int rc, i;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP];
	struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP];
	uint32_t output_size;

	src_iovcnt = dst_iovcnt = 3;
	for (i = 0; i < dst_iovcnt; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}

	task.cb_fn = _compress_done;
	task.op_code = SPDK_ACCEL_OPC_COMPRESS;
	task.output_size = &output_size;
	task.d.iovs = dst_iovs;
	task.d.iovcnt = dst_iovcnt;
	task.s.iovs = src_iovs;
	task.s.iovcnt = src_iovcnt;

	/* test rte_comp_op_alloc failure */
	MOCK_SET(rte_comp_op_alloc, NULL);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(rc == 0);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == false);
	while (!STAILQ_EMPTY(&g_comp_ch->queued_tasks)) {
		STAILQ_REMOVE_HEAD(&g_comp_ch->queued_tasks, link);
	}
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);

	/* test mempool get failure */
	MOCK_SET(rte_comp_op_alloc, &g_comp_op[0]);
	ut_rte_pktmbuf_alloc_bulk = -1;
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == false);
	while (!STAILQ_EMPTY(&g_comp_ch->queued_tasks)) {
		STAILQ_REMOVE_HEAD(&g_comp_ch->queued_tasks, link);
	}
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == 0);
	ut_rte_pktmbuf_alloc_bulk = 0;

	/* test enqueue failure busy */
	ut_enqueue_value = FAKE_ENQUEUE_BUSY;
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == false);
	while (!STAILQ_EMPTY(&g_comp_ch->queued_tasks)) {
		STAILQ_REMOVE_HEAD(&g_comp_ch->queued_tasks, link);
	}
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == 0);
	ut_enqueue_value = 1;

	/* test enqueue failure error */
	ut_enqueue_value = FAKE_ENQUEUE_ERROR;
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == -EINVAL);
	ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;

	/* test success with a 3-element iovec array */
	ut_expected_op.private_xform = &g_decomp_xform;
	ut_expected_op.src.offset = 0;
	ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;

	/* setup the src expected values */
	_get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
	ut_expected_op.m_src = exp_src_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		*RTE_MBUF_DYNFIELD(exp_src_mbuf[i], g_mbuf_offset, uint64_t *) = (uint64_t)&task;
		exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
		exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
		exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
		exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
	}

	/* setup the dst expected values */
	_get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
	ut_expected_op.dst.offset = 0;
	ut_expected_op.m_dst = exp_dst_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
		exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
		exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
		exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
	}

	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == 0);

	/* test sgl out failure */
	g_device.sgl_out = false;
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	g_device.sgl_out = true;

	/* test sgl in failure */
	g_device.sgl_in = false;
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	g_device.sgl_in = true;
}

static void
test_compress_operation_cross_boundary(void)
{
	struct iovec src_iovs[3] = {};
	int src_iovcnt;
	struct iovec dst_iovs[3] = {};
	int dst_iovcnt;
	int rc, i;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	struct spdk_accel_task task = {};
	uint32_t output_size;

	/* Setup the same basic 3 IOV test as used in the simple success case
	 * but then we'll start testing a vtophys boundary crossing at each
	 * position.
	 */
	src_iovcnt = dst_iovcnt = 3;
	for (i = 0; i < dst_iovcnt; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}

	ut_expected_op.private_xform = &g_decomp_xform;
	ut_expected_op.src.offset = 0;
	ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;

	/* setup the src expected values */
	_get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
	ut_expected_op.m_src = exp_src_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		*RTE_MBUF_DYNFIELD(exp_src_mbuf[i], g_mbuf_offset, uint64_t *) = (uint64_t)&task;
		exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
		exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
		exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
		exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
	}

	/* setup the dst expected values, we don't test needing a 4th dst mbuf */
	_get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
	ut_expected_op.dst.offset = 0;
	ut_expected_op.m_dst = exp_dst_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
		exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
		exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
		exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
	}

	/* force the 1st IOV to get partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 1;
	g_small_size = 0x800;
	*RTE_MBUF_DYNFIELD(exp_src_mbuf[3], g_mbuf_offset, uint64_t *) = (uint64_t)&task;

	/* first only has shorter length */
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x800;

	/* 2nd was inserted by the boundary crossing condition and finishes off
	 * the length from the first */
	exp_src_mbuf[1]->buf_addr = (void *)0x10000800;
	exp_src_mbuf[1]->buf_iova = 0x10000800;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;

	/* 3rd looks like what the 2nd would have */
	exp_src_mbuf[2]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[2]->buf_iova = 0x10001000;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x1000;

	/* a new 4th looks like what the 3rd would have */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[3]->buf_iova = 0x10002000;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;

	task.cb_fn = _compress_done;
	task.op_code = SPDK_ACCEL_OPC_COMPRESS;
	task.output_size = &output_size;
	task.d.iovs = dst_iovs;
	task.d.iovcnt = dst_iovcnt;
	task.s.iovs = src_iovs;
	task.s.iovcnt = src_iovcnt;

	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == 0);

	/* Now force the 2nd IOV to get partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 2;
	g_small_size = 0x800;

	/* first is normal */
	exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
	exp_src_mbuf[0]->buf_iova = 0x10000000;
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;

	/* second only has shorter length */
	exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[1]->buf_iova = 0x10001000;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;

	/* 3rd was inserted by the boundary crossing condition and finishes off
	 * the length from the second */
	exp_src_mbuf[2]->buf_addr = (void *)0x10001800;
	exp_src_mbuf[2]->buf_iova = 0x10001800;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;

	/* a new 4th looks like what the 3rd would have */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[3]->buf_iova = 0x10002000;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;

	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == 0);

	/* Finally force the 3rd IOV to get partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 3;
	g_small_size = 0x800;

	/* first is normal */
	exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
	exp_src_mbuf[0]->buf_iova = 0x10000000;
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;

	/* second is normal */
	exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[1]->buf_iova = 0x10001000;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x1000;

	/* 3rd has shorter length */
	exp_src_mbuf[2]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[2]->buf_iova = 0x10002000;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;

	/* a new 4th handles the remainder from the 3rd */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002800;
	exp_src_mbuf[3]->buf_iova = 0x10002800;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x800;

	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == 0);

	/* Single input iov is split on page boundary, sgl_in is not supported */
	g_device.sgl_in = false;
	g_small_size_counter = 0;
	g_small_size_modify = 1;
	g_small_size = 0x800;
	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(rc == -EINVAL);
	g_device.sgl_in = true;

	/* Single output iov is split on page boundary, sgl_out is not supported */
	g_device.sgl_out = false;
	g_small_size_counter = 0;
	g_small_size_modify = 2;
	g_small_size = 0x800;
	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(rc == -EINVAL);
	g_device.sgl_out = true;
}

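/* Exercises _setup_compress_mbuf() directly: no split, then one and two
 * MBUF_SPLIT splits of a single iovec.
 */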
static void
test_setup_compress_mbuf(void)
{
	struct iovec src_iovs = {};
	int src_iovcnt = 1;
	struct spdk_accel_task task = {};
	int src_mbuf_added = 0;
	uint64_t total_length;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	int rc, i;

	/* setup the src expected values */
	_get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);

	/* no splitting */
	total_length = 0;
	ut_total_rte_pktmbuf_attach_extbuf = 0;
	src_iovs.iov_len = 0x1000;
	src_iovs.iov_base = (void *)0x10000000 + 0x1000;
	rc = _setup_compress_mbuf(exp_src_mbuf, &src_mbuf_added, &total_length,
				  &src_iovs, src_iovcnt, &task);
	CU_ASSERT(rc == 0);
	CU_ASSERT(total_length == src_iovs.iov_len);
	CU_ASSERT(src_mbuf_added == 0);
	CU_ASSERT(ut_total_rte_pktmbuf_attach_extbuf == 1);

	/* one split; for the splitting tests we need the global mbuf array unlinked,
	 * otherwise the functional code will try to link the mbufs and, if they are
	 * already linked, would just create a chain that links to itself */
	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
		g_expected_src_mbufs[i].next = NULL;
	}
	total_length = 0;
	ut_total_rte_pktmbuf_attach_extbuf = 0;
	src_iovs.iov_len = 0x1000 + MBUF_SPLIT;
	exp_src_mbuf[0]->buf_len = src_iovs.iov_len;
	exp_src_mbuf[0]->pkt_len = src_iovs.iov_len;
	rc = _setup_compress_mbuf(exp_src_mbuf, &src_mbuf_added, &total_length,
				  &src_iovs, src_iovcnt, &task);
	CU_ASSERT(rc == 0);
	CU_ASSERT(total_length == src_iovs.iov_len);
	CU_ASSERT(src_mbuf_added == 0);
	CU_ASSERT(ut_total_rte_pktmbuf_attach_extbuf == 2);

	/* two splits */
	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
		g_expected_src_mbufs[i].next = NULL;
	}
	total_length = 0;
	ut_total_rte_pktmbuf_attach_extbuf = 0;
	src_iovs.iov_len = 0x1000 + 2 * MBUF_SPLIT;
	exp_src_mbuf[0]->buf_len = src_iovs.iov_len;
	exp_src_mbuf[0]->pkt_len = src_iovs.iov_len;

	rc = _setup_compress_mbuf(exp_src_mbuf, &src_mbuf_added, &total_length,
				  &src_iovs, src_iovcnt, &task);
	CU_ASSERT(rc == 0);
	CU_ASSERT(total_length == src_iovs.iov_len);
	CU_ASSERT(src_mbuf_added == 0);
	CU_ASSERT(ut_total_rte_pktmbuf_attach_extbuf == 3);

	/* relink the global mbuf array */
	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
		g_expected_src_mbufs[i].next = &g_expected_src_mbufs[i + 1];
	}
}

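/* Drives comp_dev_poller() through three dequeue scenarios: a failed op,
 * two successful ops, and one successful op with a queued task to resubmit.
 */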
static void
test_poller(void)
{
	int rc;
	struct compress_io_channel *args;
	struct rte_mbuf mbuf[4]; /* one src and one dst for each of the 2 ops */
	struct iovec src_iovs[3] = {};
	struct iovec dst_iovs[3] = {};
	uint32_t output_size[2];
	struct spdk_accel_task task[2] = {};
	struct spdk_accel_task *task_to_resubmit;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP];
	struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP];
	int i;

	args = calloc(1, sizeof(*args));
	SPDK_CU_ASSERT_FATAL(args != NULL);
	memset(&g_comp_op[0], 0, sizeof(struct rte_comp_op));
	g_comp_op[0].m_src = &mbuf[0];
	g_comp_op[1].m_src = &mbuf[1];
	g_comp_op[0].m_dst = &mbuf[2];
	g_comp_op[1].m_dst = &mbuf[3];
	for (i = 0; i < 3; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}
	task[0].cb_fn = task[1].cb_fn = _compress_done;
	task[0].output_size = &output_size[0];
	task[1].output_size = &output_size[1];

	/* Error from dequeue, nothing needing to be resubmitted. */
	ut_rte_compressdev_dequeue_burst = 1;
	ut_expected_task_status = -EIO;
	/* setup what we want dequeue to return for the op */
	*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)&task[0];
	g_comp_op[0].produced = 1;
	g_done_count = 0;
	g_comp_op[0].status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	rc = comp_dev_poller((void *)g_comp_ch);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == SPDK_POLLER_BUSY);
	ut_expected_task_status = 0;

	/* Success from dequeue, 2 ops, nothing needing to be resubmitted. */
	ut_rte_compressdev_dequeue_burst = 2;
	/* setup what we want dequeue to return for the ops */
	*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)&task[0];
	g_comp_op[0].produced = 16;
	g_comp_op[0].status = RTE_COMP_OP_STATUS_SUCCESS;
	*RTE_MBUF_DYNFIELD(g_comp_op[1].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)&task[1];
	g_comp_op[1].produced = 32;
	g_comp_op[1].status = RTE_COMP_OP_STATUS_SUCCESS;
	g_done_count = 0;
	ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	rc = comp_dev_poller((void *)g_comp_ch);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == SPDK_POLLER_BUSY);

	/* One to dequeue, one op to be resubmitted. */
	ut_rte_compressdev_dequeue_burst = 1;
	/* setup what we want dequeue to return for the op */
	*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)&task[0];
	g_comp_op[0].produced = 16;
	g_comp_op[0].status = 0;
	g_done_count = 0;
	task_to_resubmit = calloc(1, sizeof(struct spdk_accel_task));
	SPDK_CU_ASSERT_FATAL(task_to_resubmit != NULL);
	task_to_resubmit->s.iovs = &src_iovs[0];
	task_to_resubmit->s.iovcnt = 3;
	task_to_resubmit->d.iovs = &dst_iovs[0];
	task_to_resubmit->d.iovcnt = 3;
	task_to_resubmit->op_code = SPDK_ACCEL_OPC_COMPRESS;
	task_to_resubmit->cb_arg = args;
	ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
	ut_expected_op.private_xform = &g_decomp_xform;
	ut_expected_op.src.offset = 0;
	ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;

	/* setup the src expected values */
	_get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
	ut_expected_op.m_src = exp_src_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		*RTE_MBUF_DYNFIELD(exp_src_mbuf[i], g_mbuf_offset, uint64_t *) = (uint64_t)&task[0];
		exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
		exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
		exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
		exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
	}

	/* setup the dst expected values */
	_get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
	ut_expected_op.dst.offset = 0;
	ut_expected_op.m_dst = exp_dst_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
		exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
		exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
		exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
	}
	MOCK_SET(rte_comp_op_alloc, &g_comp_op[0]);
	STAILQ_INSERT_TAIL(&g_comp_ch->queued_tasks,
			   task_to_resubmit,
			   link);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == false);
	rc = comp_dev_poller((void *)g_comp_ch);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == SPDK_POLLER_BUSY);

	free(task_to_resubmit);
	free(args);
}

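/* Walks accel_init_compress_drivers() through each early-exit error path
 * before finishing with the successful initialization case.
 */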
static void
test_initdrivers(void)
{
	int rc;

	/* compressdev count 0 */
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -ENODEV);

	/* bogus count */
	ut_rte_compressdev_count = RTE_COMPRESS_MAX_DEVS + 1;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* failure with rte_mbuf_dynfield_register */
	ut_rte_compressdev_count = 1;
	MOCK_SET(rte_mbuf_dynfield_register, -1);
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);
	MOCK_SET(rte_mbuf_dynfield_register, DPDK_DYNFIELD_OFFSET);

	/* error on create_compress_dev() */
	ut_rte_comp_op_pool_create = (struct rte_mempool *)0xDEADBEEF;
	ut_rte_compressdev_count = 1;
	ut_rte_compressdev_configure = -1;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* error on create_compress_dev() but coverage for large num queues */
	ut_max_nb_queue_pairs = 99;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* qpair setup fails */
	ut_rte_compressdev_configure = 0;
	ut_max_nb_queue_pairs = 0;
	ut_rte_compressdev_queue_pair_setup = -1;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* rte_compressdev_start fails */
	ut_rte_compressdev_queue_pair_setup = 0;
	ut_rte_compressdev_start = -1;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* rte_compressdev_private_xform_create() fails */
	ut_rte_compressdev_start = 0;
	ut_rte_compressdev_private_xform_create = -2;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -2);

	/* success */
	ut_rte_compressdev_private_xform_create = 0;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("compress", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_compress_operation);
	CU_ADD_TEST(suite, test_compress_operation_cross_boundary);
	CU_ADD_TEST(suite, test_setup_compress_mbuf);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_poller);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}