/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "spdk_cunit.h"

#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"
#include "common/lib/ut_multithread.c"

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_version.h>

#define MAX_TEST_BLOCKS 8192
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
unsigned ut_rte_crypto_op_bulk_alloc;
int ut_rte_crypto_op_attach_sym_session = 0;
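/* Selector values that tell the rte_cryptodev_info_get() mock further below
 * which driver name to report.
 */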
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_MLX5 2
#define MOCK_INFO_GET_1QP_BOGUS_PMD 3
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;

void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void
mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
	spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
}

void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void
mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
	spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
}

void
rte_mempool_free(struct rte_mempool *mp)
{
	spdk_mempool_free((struct spdk_mempool *)mp);
}

int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
				unsigned count);
#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
int
mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
			    unsigned count)
{
	int rc;

	rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
	if (rc) {
		return rc;
	}
	for (unsigned i = 0; i < count; i++) {
		rte_pktmbuf_reset(mbufs[i]);
		mbufs[i]->pool = pool;
	}
	return rc;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
				      uint32_t elt_size, uint32_t cache_size,
				      uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, nb_elts, elt_size + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
			uint16_t priv_size, uint16_t data_room_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
			  unsigned nb_elts, unsigned cache_size,
			  uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);

	tmp = spdk_mempool_create(name, nb_elts,
				  sizeof(struct rte_crypto_op) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

/* Those functions are defined as static inline in DPDK, so we can't
 * mock them straight away. We use defines to redirect them into
 * our custom functions.
 */
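/* A minimal sketch of the redirection pattern used below (the names here are
 * illustrative only, not part of this file):
 *
 *   #define rte_foo mock_rte_foo
 *   static inline int mock_rte_foo(int arg) { return g_foo_mock; }
 *
 * Because accel_dpdk_cryptodev.c is #included at the bottom of this file,
 * every call it makes to rte_foo() compiles against the mock instead of the
 * DPDK inline implementation.
 */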
static bool g_resubmit_test = false;
#define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
static inline uint16_t
mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < nb_ops; i++) {
		/* Use this so-far empty array of pointers to store
		 * enqueued operations for assertion in the dev_full test.
		 */
		g_test_dev_full_ops[i] = *ops++;
		if (g_resubmit_test == true) {
			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
		}
	}

	return g_enqueue_mock;
}

#define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
static inline uint16_t
mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < g_dequeue_mock; i++) {
		*ops++ = g_test_crypto_ops[i];
	}

	return g_dequeue_mock;
}

/* Instead of allocating real memory, assign the allocations to our
 * test array for assertion in tests.
 */
#define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
static inline unsigned
mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
			      enum rte_crypto_op_type type,
			      struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	for (i = 0; i < nb_ops; i++) {
		*ops++ = g_test_crypto_ops[i];
	}
	return ut_rte_crypto_op_bulk_alloc;
}

#define rte_mempool_put_bulk mock_rte_mempool_put_bulk
static __rte_always_inline void
mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
			  unsigned int n)
{
	return;
}
#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
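/* DPDK 22.11 made symmetric crypto sessions opaque (plain void *) instead of
 * struct rte_cryptodev_sym_session *, so the mock needs both prototypes.
 */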
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op, void *sess)
#else
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
#endif
{
	return ut_rte_crypto_op_attach_sym_session;
}

#define rte_lcore_count mock_rte_lcore_count
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}

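/* Pull in the code under test. The #include must come after all of the
 * defines above so the mocked symbols are substituted at compile time.
 */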
#include "accel/dpdk_cryptodev/accel_dpdk_cryptodev.c"

/* accel stubs */
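/* From spdk_internal/mock.h: DEFINE_STUB(fn, ret, args, val) generates fn
 * returning val (overridable via MOCK_SET/MOCK_CLEAR), and DEFINE_STUB_V
 * generates a void function with an empty body.
 */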
DEFINE_STUB_V(spdk_accel_task_complete, (struct spdk_accel_task *task, int status));
DEFINE_STUB_V(spdk_accel_module_finish, (void));
DEFINE_STUB_V(spdk_accel_module_list_add, (struct spdk_accel_module_if *accel_module));

/* DPDK stubs */
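/* An arbitrary but valid in-mbuf offset; the module stores a pointer to the
 * owning task in this dynamic field via RTE_MBUF_DYNFIELD(), which the tests
 * assert on below.
 */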
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
DEFINE_STUB(rte_cryptodev_sym_session_create, void *,
	    (uint8_t dev_id, struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), (void *)1);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (uint8_t dev_id, void *sess), 0);
#else
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (void *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
#endif

struct rte_cryptodev *rte_cryptodevs;

/* global vars and setup/cleanup functions used for all test functions */
struct spdk_io_channel *g_io_ch;
struct accel_dpdk_cryptodev_io_channel *g_crypto_ch;
struct accel_dpdk_cryptodev_device g_aesni_crypto_dev;
struct accel_dpdk_cryptodev_qp g_aesni_qp;
struct accel_dpdk_cryptodev_key_handle g_key_handle;
struct accel_dpdk_cryptodev_key_priv g_key_priv;
struct spdk_accel_crypto_key g_key;

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	dev_info->max_nb_queue_pairs = 1;
	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
		dev_info->driver_name = g_driver_names[0];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
		dev_info->driver_name = g_driver_names[1];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_MLX5) {
		dev_info->driver_name = g_driver_names[2];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
		dev_info->driver_name = "junk";
	}
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	return (unsigned int)dev_id;
}

/* Global setup that prepares the state shared by all tests. */
static int
test_setup(void)
{
	int i, rc;

	/* Prepare essential variables for test routines */
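	/* The channel header and its ctx are laid out contiguously, mirroring
	 * what spdk_get_io_channel() would allocate, so that
	 * spdk_io_channel_get_ctx() finds the ctx right after the header.
	 */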
	g_io_ch = calloc(1, sizeof(*g_io_ch) + sizeof(struct accel_dpdk_cryptodev_io_channel));
	g_crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	TAILQ_INIT(&g_crypto_ch->queued_tasks);

	g_aesni_crypto_dev.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	g_aesni_crypto_dev.qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
	TAILQ_INIT(&g_aesni_crypto_dev.qpairs);

	g_aesni_qp.device = &g_aesni_crypto_dev;
	g_crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = &g_aesni_qp;

	g_key_handle.device = &g_aesni_crypto_dev;
	g_key_priv.driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	g_key_priv.cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC;
	TAILQ_INIT(&g_key_priv.dev_keys);
	TAILQ_INSERT_TAIL(&g_key_priv.dev_keys, &g_key_handle, link);
	g_key.priv = &g_key_priv;
	g_key.module_if = &g_accel_dpdk_cryptodev_module;

	/* Allocate a real mbuf pool so we can test error paths */
	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS,
					    (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
	/* Instead of allocating real rte mempools for these, it's easier and gives
	 * the same coverage to simply allocate them here.
	 */
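	/* Each op is sized so that the per-op IV lives at
	 * ACCEL_DPDK_CRYPTODEV_IV_OFFSET, immediately followed by
	 * ACCEL_DPDK_CRYPTODEV_IV_LENGTH bytes of IV storage.
	 */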
	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		size_t size = ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_IV_LENGTH;
		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size);
		if (rc != 0) {
			assert(false);
		}
		memset(g_test_crypto_ops[i], 0, ACCEL_DPDK_CRYPTODEV_IV_OFFSET);
	}
	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	int i;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		free(g_test_crypto_ops[i]);
	}
	free(g_io_ch);
	return 0;
}

static void
test_error_paths(void)
{
	/* Single element block size encrypt, just to test error paths
	 * in accel_dpdk_cryptodev_submit_tasks() */
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 };
	struct iovec dst_iov = src_iov;
	struct accel_dpdk_cryptodev_task task = {};
	struct accel_dpdk_cryptodev_key_priv key_priv = {};
	struct spdk_accel_crypto_key key = {};
	int rc;

	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* case 1 - no crypto key */
	task.base.crypto_key = NULL;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.crypto_key = &g_key;

	/* case 2 - crypto key with wrong module_if */
	key_priv.driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	key_priv.cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC;
	TAILQ_INIT(&key_priv.dev_keys);
	key.priv = &key_priv;
	key.module_if = (struct spdk_accel_module_if *) 0x1;
	task.base.crypto_key = &key;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	key.module_if = &g_accel_dpdk_cryptodev_module;

	/* case 3 - no key handle in the channel */
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.crypto_key = &g_key;

	/* case 4 - invalid op */
	task.base.op_code = ACCEL_OPC_COMPARE;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.op_code = ACCEL_OPC_ENCRYPT;

	/* case 5 - no entries in g_mbuf_mp */
	MOCK_SET(spdk_mempool_get, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == true);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == false);
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);
	MOCK_CLEAR(spdk_mempool_get);
	TAILQ_INIT(&g_crypto_ch->queued_tasks);

	/* case 6 - vtophys error in accel_dpdk_cryptodev_mbuf_attach_buf */
	MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EFAULT);
	MOCK_CLEAR(spdk_vtophys);
}

static void
test_simple_encrypt(void)
{
	struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }};
	struct iovec dst_iov = src_iov[0];
	struct accel_dpdk_cryptodev_task task = {};
	struct rte_mbuf *mbuf, *next;
	int rc, i;

	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* Inplace encryption */
	g_aesni_qp.num_enqueued_ops = 0;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* out-of-place encryption */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_submitted = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* out-of-place encryption, fragmented payload */
	g_aesni_qp.num_enqueued_ops = 0;
	task.base.s.iovcnt = 4;
	for (i = 0; i < 4; i++) {
		src_iov[i].iov_base = (void *)0xDEADBEEF + i * 128;
		src_iov[i].iov_len = 128;
	}
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == src_iov[0].iov_len);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base);
		CU_ASSERT(mbuf->data_len == src_iov[i].iov_len);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* Big logical block size, inplace encryption */
	src_iov[0].iov_len = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	dst_iov = src_iov[0];
	task.base.block_size = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	task.base.s.iovcnt = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == (char *)src_iov[0].iov_base + i * ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}

static void
test_simple_decrypt(void)
{
	struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }};
	struct iovec dst_iov = src_iov[0];
	struct accel_dpdk_cryptodev_task task = {};
	struct rte_mbuf *mbuf, *next;
	int rc, i;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* Inplace decryption */
	g_aesni_qp.num_enqueued_ops = 0;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* out-of-place decryption */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_submitted = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* out-of-place decryption, fragmented payload */
	g_aesni_qp.num_enqueued_ops = 0;
	task.base.s.iovcnt = 4;
	for (i = 0; i < 4; i++) {
		src_iov[i].iov_base = (void *)0xDEADBEEF + i * 128;
		src_iov[i].iov_len = 128;
	}
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == src_iov[0].iov_len);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base);
		CU_ASSERT(mbuf->data_len == src_iov[i].iov_len);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* Big logical block size, inplace decryption */
	src_iov[0].iov_len = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	dst_iov = src_iov[0];
	task.base.block_size = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	task.base.s.iovcnt = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == (char *)src_iov[0].iov_base + i * ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}

static void
test_large_enc_dec(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	uint32_t block_len = 512;
	uint32_t num_blocks = ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2;
	uint32_t iov_len = num_blocks * block_len / 16;
	uint32_t blocks_in_iov = num_blocks / 16;
	uint32_t iov_idx;
	struct iovec src_iov[16];
	struct iovec dst_iov[16];
	uint32_t i;
	int rc;

	for (i = 0; i < 16; i++) {
		src_iov[i].iov_base = (void *)0xDEADBEEF + i * iov_len;
		src_iov[i].iov_len = iov_len;

		dst_iov[i].iov_base = (void *)0xDEADBEEF + i * iov_len;
		dst_iov[i].iov_len = iov_len;
	}

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 16;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 16;
	task.base.d.iovs = dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Test 1. Multi block size decryption, multi-element, inplace */
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
			ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == true);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Call accel_dpdk_cryptodev_process_task like it was called by completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov + 8;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Test 2. Multi block size decryption, multi-element, out-of-place */
	g_aesni_qp.num_enqueued_ops = 0;
	/* Modify dst to make payload out-of-place */
	dst_iov[0].iov_base -= 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
			ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == false);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}

	/* Call accel_dpdk_cryptodev_process_task like it was called by completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov + 8;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}

	/* Test 3. Multi block size encryption, multi-element, inplace */
	g_aesni_qp.num_enqueued_ops = 0;
	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.cryop_submitted = 0;
	/* Modify dst to make payload inplace */
	dst_iov[0].iov_base += 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
			ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == true);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Call accel_dpdk_cryptodev_process_task like it was called by completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov + 8;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Test 4. Multi block size encryption, multi-element, out-of-place */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_submitted = 0;
	/* Modify dst to make payload out-of-place */
	dst_iov[0].iov_base -= 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
			ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == false);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}

	/* Call accel_dpdk_cryptodev_process_task like it was called by completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov + 8;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}

static void
test_dev_full(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct rte_crypto_sym_op *sym_op;
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
	struct iovec dst_iov = src_iov;
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Two element block size decryption, 2nd op was not submitted */
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == src_iov.iov_base);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task);
	CU_ASSERT(sym_op->m_dst == NULL);
	/* op which was not submitted is already released */
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	task.cryop_submitted = 0;

	/* Two element block size decryption, no ops were submitted, task should be queued */
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = 0;
	ut_rte_crypto_op_bulk_alloc = 2;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == true);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 0);
	CU_ASSERT(!TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);
	TAILQ_INIT(&g_crypto_ch->queued_tasks);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_aesni_qp.num_enqueued_ops = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);

	/* QP is full, task should be queued */
	g_aesni_qp.num_enqueued_ops = g_aesni_crypto_dev.qp_desc_nr;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == true);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);
	g_aesni_qp.num_enqueued_ops = 0;

	TAILQ_INIT(&g_crypto_ch->queued_tasks);
}

static void
test_crazy_rw(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct iovec src_iov[4] = {
		[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 },
		[1] = {.iov_base = (void *)0xDEADBEEF + 512, .iov_len = 1024 },
		[2] = {.iov_base = (void *)0xDEADBEEF + 512 + 1024, .iov_len = 512 }
	};
	struct iovec *dst_iov = src_iov;
	uint32_t block_len = 512, num_blocks = 4, i;
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 3;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 3;
	task.base.d.iovs = dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Multi block size read, single element, strange IOV makeup */
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
	g_aesni_qp.num_enqueued_ops = 0;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[0].iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element, strange IOV makeup */
	num_blocks = 8;
	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.cryop_submitted = 0;
	task.base.s.iovcnt = 4;
	task.base.d.iovcnt = 4;
	task.base.s.iovs[0].iov_len = 2048;
	task.base.s.iovs[0].iov_base = (void *)0xDEADBEEF;
	task.base.s.iovs[1].iov_len = 512;
	task.base.s.iovs[1].iov_base = (void *)0xDEADBEEF + 2048;
	task.base.s.iovs[2].iov_len = 512;
	task.base.s.iovs[2].iov_base = (void *)0xDEADBEEF + 2048 + 512;
	task.base.s.iovs[3].iov_len = 1024;
	task.base.s.iovs[3].iov_base = (void *)0xDEADBEEF + 2048 + 512 + 512;

	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
	g_aesni_qp.num_enqueued_ops = 0;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[0].iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}
}

static void
init_cleanup(void)
{
	struct accel_dpdk_cryptodev_device *dev, *tmp;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	TAILQ_FOREACH_SAFE(dev, &g_crypto_devices, link, tmp) {
		TAILQ_REMOVE(&g_crypto_devices, dev, link);
		accel_dpdk_cryptodev_release(dev);
	}

	spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, NULL);
}

static void
test_initdrivers(void)
{
	int rc;
	static struct rte_mempool *orig_mbuf_mp;
	static struct rte_mempool *orig_session_mp;
	static struct rte_mempool *orig_session_mp_priv;

	/* accel_dpdk_cryptodev_init calls spdk_io_device_register, we need to have a thread */
	allocate_threads(1);
	set_thread(0);

	/* These tests will alloc and free our g_mbuf_mp
	 * so save that off here and restore it after each test is over.
	 */
	orig_mbuf_mp = g_mbuf_mp;
	orig_session_mp = g_session_mp;
	orig_session_mp_priv = g_session_mp_priv;

	g_session_mp_priv = NULL;
	g_session_mp = NULL;
	g_mbuf_mp = NULL;

	/* No drivers available, not an error though */
	MOCK_SET(rte_cryptodev_count, 0);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);

	/* Can't create session pool. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(spdk_mempool_create, NULL);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(spdk_mempool_create);

	/* Can't create op pool. */
	MOCK_SET(rte_crypto_op_pool_create, NULL);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(rte_crypto_op_pool_create);

	/* Check resources are not sufficient */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);

	/* Test crypto dev configure failure. */
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	MOCK_SET(rte_cryptodev_configure, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	MOCK_SET(rte_cryptodev_configure, 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test failure of qp setup. */
	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);

	/* Test failure of dev start. */
	MOCK_SET(rte_cryptodev_start, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_start, 0);

	/* Test bogus PMD */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test happy path QAT. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path AESNI. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path MLX5. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_MLX5);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test failure of DPDK dev init. By now it is no longer an error
	 * situation for the entire crypto framework. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_vdev_init, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
	CU_ASSERT(g_session_mp_priv != NULL);
#endif
	init_cleanup();
	MOCK_SET(rte_vdev_init, 0);
	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);

	/* restore our initial values. */
	g_mbuf_mp = orig_mbuf_mp;
	g_session_mp = orig_session_mp;
	g_session_mp_priv = orig_session_mp_priv;
	free_threads();
}

static void
test_supported_opcodes(void)
{
	bool rc = true;
	enum accel_opcode opc;

	for (opc = 0; opc < ACCEL_OPC_LAST; opc++) {
		rc = accel_dpdk_cryptodev_supports_opcode(opc);
		switch (opc) {
		case ACCEL_OPC_ENCRYPT:
		case ACCEL_OPC_DECRYPT:
			CU_ASSERT(rc == true);
			break;
		default:
			CU_ASSERT(rc == false);
		}
	}
}

static void
test_poller(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
	struct iovec dst_iov = src_iov;
	struct rte_mbuf *src_mbufs[2];
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	task.inplace = true;

	/* test regular 1 op to dequeue and complete */
	g_dequeue_mock = g_enqueue_mock = 1;
	g_aesni_qp.num_enqueued_ops = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	task.cryop_submitted = 1;
	task.cryop_total = 1;
	task.cryop_completed = 0;
	task.base.op_code = ACCEL_OPC_DECRYPT;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);
	CU_ASSERT(task.cryop_completed == task.cryop_submitted);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 0);

	/* 2 to dequeue but 2nd one failed */
	g_dequeue_mock = g_enqueue_mock = 2;
	g_aesni_qp.num_enqueued_ops = 2;
	task.cryop_submitted = 2;
	task.cryop_total = 2;
	task.cryop_completed = 0;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)&task;
	g_test_crypto_ops[1]->sym->m_dst = NULL;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(task.is_failed == true);
	CU_ASSERT(rc == 1);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 0);

	/* Dequeue a task which needs to be submitted again */
	g_dequeue_mock = g_enqueue_mock = ut_rte_crypto_op_bulk_alloc = 1;
	task.cryop_submitted = 1;
	task.cryop_total = 2;
	task.cryop_completed = 0;
	g_aesni_qp.num_enqueued_ops = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	SPDK_CU_ASSERT_FATAL(src_mbufs[0] != NULL);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);
	CU_ASSERT(task.cryop_submitted == 2);
	CU_ASSERT(task.cryop_total == 2);
	CU_ASSERT(task.cryop_completed == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov.iov_base + task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 1);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* Process queued tasks, qp is full */
	g_dequeue_mock = g_enqueue_mock = 0;
	g_aesni_qp.num_enqueued_ops = g_aesni_crypto_dev.qp_desc_nr;
	task.cryop_submitted = 1;
	task.cryop_total = 2;
	task.cryop_completed = 1;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_tasks, &task, link);

	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);

	/* Try again when queue is empty, task should be submitted */
	g_enqueue_mock = 1;
	g_aesni_qp.num_enqueued_ops = 0;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);
	CU_ASSERT(task.cryop_submitted == 2);
	CU_ASSERT(task.cryop_total == 2);
	CU_ASSERT(task.cryop_completed == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov.iov_base + task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 1);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}

/* Helper function for accel_dpdk_cryptodev_assign_device_qps() */
static void
_check_expected_values(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
		       uint8_t expected_qat_index,
		       uint8_t next_qat_index)
{
	uint32_t num_qpairs;

	memset(crypto_ch->device_qp, 0, sizeof(crypto_ch->device_qp));

	num_qpairs = accel_dpdk_cryptodev_assign_device_qps(crypto_ch);
	CU_ASSERT(num_qpairs == 3);

	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]->index == expected_qat_index);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]->in_use == true);
	CU_ASSERT(g_next_qat_index == next_qat_index);
	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB]->in_use == true);
	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->in_use == true);
}

static void
test_assign_device_qp(void)
{
	struct accel_dpdk_cryptodev_device qat_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT,
		.qpairs = TAILQ_HEAD_INITIALIZER(qat_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_device aesni_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB,
		.qpairs = TAILQ_HEAD_INITIALIZER(aesni_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_device mlx5_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI,
		.qpairs = TAILQ_HEAD_INITIALIZER(mlx5_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_qp *qat_qps;
	struct accel_dpdk_cryptodev_qp aesni_qps[4] = {};
	struct accel_dpdk_cryptodev_qp mlx5_qps[4] = {};
	struct accel_dpdk_cryptodev_io_channel io_ch = {};
	TAILQ_HEAD(, accel_dpdk_cryptodev_device) devs_tmp = TAILQ_HEAD_INITIALIZER(devs_tmp);
	int i;

	g_qat_total_qp = 96;
	qat_qps = calloc(g_qat_total_qp, sizeof(*qat_qps));
	SPDK_CU_ASSERT_FATAL(qat_qps != NULL);

	for (i = 0; i < 4; i++) {
		aesni_qps[i].index = i;
		aesni_qps[i].device = &aesni_dev;
		TAILQ_INSERT_TAIL(&aesni_dev.qpairs, &aesni_qps[i], link);

		mlx5_qps[i].index = i;
		mlx5_qps[i].device = &mlx5_dev;
		TAILQ_INSERT_TAIL(&mlx5_dev.qpairs, &mlx5_qps[i], link);
	}
	for (i = 0; i < g_qat_total_qp; i++) {
		qat_qps[i].index = i;
		qat_qps[i].device = &qat_dev;
		TAILQ_INSERT_TAIL(&qat_dev.qpairs, &qat_qps[i], link);
	}

	/* Swap g_crypto_devices so that other tests are not affected */
	TAILQ_SWAP(&g_crypto_devices, &devs_tmp, accel_dpdk_cryptodev_device, link);

	TAILQ_INSERT_TAIL(&g_crypto_devices, &qat_dev, link);
	TAILQ_INSERT_TAIL(&g_crypto_devices, &aesni_dev, link);
	TAILQ_INSERT_TAIL(&g_crypto_devices, &mlx5_dev, link);

	/* QAT testing is more complex as the code under test load balances by
	 * assigning each subsequent channel a qp QAT_VF_SPREAD slots apart,
	 * modulo g_qat_total_qp. For the current latest QAT we'll have 48
	 * virtual functions, each with 2 qps, so the "spread" between
	 * assignments is 32. */
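	/* Worked example with the numbers set up above: with g_qat_total_qp = 96
	 * and a spread of 32, successive channels are assigned qp indexes 0, 32,
	 * 64; the counter then wraps with an offset of 1, so the following rounds
	 * get 1, 33, 65, and so on.
	 */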

	/* First assignment will assign to 0 and next at 32. */
	_check_expected_values(&io_ch, 0, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD);

	/* Second assignment will assign to 32 and next at 64. */
	_check_expected_values(&io_ch, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD,
			       ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD * 2);

	/* Third assignment will assign to 64 and next at 0. */
	_check_expected_values(&io_ch, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD * 2, 0);

	/* Fourth assignment will assign to 1 and next at 33. */
	_check_expected_values(&io_ch, 1, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD + 1);

	TAILQ_SWAP(&devs_tmp, &g_crypto_devices, accel_dpdk_cryptodev_device, link);

	free(qat_qps);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("dpdk_cryptodev", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_error_paths);
	CU_ADD_TEST(suite, test_simple_encrypt);
	CU_ADD_TEST(suite, test_simple_decrypt);
	CU_ADD_TEST(suite, test_large_enc_dec);
	CU_ADD_TEST(suite, test_dev_full);
	CU_ADD_TEST(suite, test_crazy_rw);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_supported_opcodes);
	CU_ADD_TEST(suite, test_poller);
	CU_ADD_TEST(suite, test_assign_device_qp);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}