/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "spdk_cunit.h"

#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"
#include "common/lib/ut_multithread.c"

#include <rte_crypto.h>
#include <rte_cryptodev.h>

#define MAX_TEST_BLOCKS 8192
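/* Arrays of fake crypto ops: g_test_crypto_ops backs the bulk-alloc and
 * dequeue mocks below, while g_test_dev_full_ops records what the enqueue
 * mock was handed so the dev_full test can assert on it.
 */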
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
unsigned ut_rte_crypto_op_bulk_alloc;
int ut_rte_crypto_op_attach_sym_session = 0;
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_MLX5 2
#define MOCK_INFO_GET_1QP_BOGUS_PMD 3
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;

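/* DPDK's mbuf and mempool allocation routines are rerouted to SPDK mempools
 * below, presumably so these unit tests can run without setting up a real
 * DPDK EAL environment.
 */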
void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void
mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
	spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
}

void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void
mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
	spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
}

void
rte_mempool_free(struct rte_mempool *mp)
{
	spdk_mempool_free((struct spdk_mempool *)mp);
}

int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
				unsigned count);
#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
int
mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
			    unsigned count)
{
	int rc;

	rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
	if (rc) {
		return rc;
	}
	for (unsigned i = 0; i < count; i++) {
		rte_pktmbuf_reset(mbufs[i]);
		mbufs[i]->pool = pool;
	}
	return rc;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
				      uint32_t elt_size, uint32_t cache_size,
				      uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, nb_elts, elt_size + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
			uint16_t priv_size, uint16_t data_room_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
			  unsigned nb_elts, unsigned cache_size,
			  uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);

	tmp = spdk_mempool_create(name, nb_elts,
				  sizeof(struct rte_crypto_op) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

/* These functions are defined as static inline in DPDK, so we can't
 * mock them directly. We use defines to redirect them to our custom
 * functions.
 */
static bool g_resubmit_test = false;
#define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
static inline uint16_t
mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < nb_ops; i++) {
		/* Use this (so far empty) array of pointers to store the
		 * enqueued operations so the dev_full test can assert on them.
		 */
		g_test_dev_full_ops[i] = *ops++;
		if (g_resubmit_test == true) {
			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
		}
	}

	return g_enqueue_mock;
}

#define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
static inline uint16_t
mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < g_dequeue_mock; i++) {
		*ops++ = g_test_crypto_ops[i];
	}

	return g_dequeue_mock;
}

/* Instead of allocating real memory, assign the allocations to our
 * test array for assertion in tests.
 */
#define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
static inline unsigned
mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
			      enum rte_crypto_op_type type,
			      struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	for (i = 0; i < nb_ops; i++) {
		*ops++ = g_test_crypto_ops[i];
	}
	return ut_rte_crypto_op_bulk_alloc;
}

#define rte_mempool_put_bulk mock_rte_mempool_put_bulk
static __rte_always_inline void
mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
			  unsigned int n)
{
	return;
}

#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
{
	return ut_rte_crypto_op_attach_sym_session;
}

#define rte_lcore_count mock_rte_lcore_count
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}

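/* Include the source file under test so the #defines above are in effect
 * when it is compiled, and so its static functions and globals (e.g.
 * g_mbuf_mp, g_driver_names) are directly reachable from the tests.
 */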
#include "accel/dpdk_cryptodev/accel_dpdk_cryptodev.c"

/* accel stubs */
DEFINE_STUB_V(spdk_accel_task_complete, (struct spdk_accel_task *task, int status));
DEFINE_STUB_V(spdk_accel_module_finish, (void));
DEFINE_STUB_V(spdk_accel_module_list_add, (struct spdk_accel_module_if *accel_module));

/* DPDK stubs */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
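/* The module stores a pointer back to the owning task in a per-mbuf dynamic
 * field; stubbing rte_mbuf_dynfield_register() to return a fixed offset into
 * dynfield1 makes the RTE_MBUF_DYNFIELD() checks in the tests deterministic.
 */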
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

struct rte_cryptodev *rte_cryptodevs;

/* global vars and setup/cleanup functions used for all test functions */
struct spdk_io_channel *g_io_ch;
struct accel_dpdk_cryptodev_io_channel *g_crypto_ch;
struct accel_dpdk_cryptodev_device g_aesni_crypto_dev;
struct accel_dpdk_cryptodev_qp g_aesni_qp;
struct accel_dpdk_cryptodev_key_handle g_key_handle;
struct accel_dpdk_cryptodev_key_priv g_key_priv;
struct spdk_accel_crypto_key g_key;

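/* rte_cryptodev_info_get() and rte_cryptodev_sym_get_private_session_size()
 * are defined here directly, presumably because they are regular (non-inline)
 * DPDK symbols and need no #define redirect; ut_rte_cryptodev_info_get
 * selects which driver name the fake device reports.
 */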
void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	dev_info->max_nb_queue_pairs = 1;
	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
		dev_info->driver_name = g_driver_names[0];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
		dev_info->driver_name = g_driver_names[1];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_MLX5) {
		dev_info->driver_name = g_driver_names[2];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
		dev_info->driver_name = "junk";
	}
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	return (unsigned int)dev_id;
}

/* Global setup shared by all test functions. */
static int
test_setup(void)
{
	int i, rc;

	/* Prepare essential variables for test routines */
	g_io_ch = calloc(1, sizeof(*g_io_ch) + sizeof(struct accel_dpdk_cryptodev_io_channel));
	g_crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);

	g_aesni_crypto_dev.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	g_aesni_crypto_dev.qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
	TAILQ_INIT(&g_aesni_crypto_dev.qpairs);

	g_aesni_qp.device = &g_aesni_crypto_dev;
	g_crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = &g_aesni_qp;

	g_key_handle.device = &g_aesni_crypto_dev;
	g_key_priv.driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	g_key_priv.cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC;
	TAILQ_INIT(&g_key_priv.dev_keys);
	TAILQ_INSERT_TAIL(&g_key_priv.dev_keys, &g_key_handle, link);
	g_key.priv = &g_key_priv;
	g_key.module_if = &g_accel_dpdk_cryptodev_module;

	/* Allocate a real mbuf pool so we can test error paths */
	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS,
					    (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
	/* Instead of creating real rte mempools for these, it's easier and gives
	 * the same coverage to allocate them directly here.
	 */
	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		size_t size = ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_IV_LENGTH +
			      ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH;
		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size);
		if (rc != 0) {
			assert(false);
		}
		memset(g_test_crypto_ops[i], 0,
		       ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH);
	}
	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	int i;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		free(g_test_crypto_ops[i]);
	}
	free(g_io_ch);
	return 0;
}

static void
test_error_paths(void)
{
	/* Single element block size encrypt, just to test error paths
	 * in accel_dpdk_cryptodev_submit_tasks() */
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 };
	struct iovec dst_iov = src_iov;
	struct accel_dpdk_cryptodev_task task = {};
	struct accel_dpdk_cryptodev_key_priv key_priv = {};
	struct spdk_accel_crypto_key key = {};
	int rc;

	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.nbytes = 512;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* case 1 - no crypto key */
	task.base.crypto_key = NULL;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.crypto_key = &g_key;

	/* case 2 - crypto key with wrong module_if */
	key_priv.driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	key_priv.cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC;
	TAILQ_INIT(&key_priv.dev_keys);
	key.priv = &key_priv;
	key.module_if = (struct spdk_accel_module_if *) 0x1;
	task.base.crypto_key = &key;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	key.module_if = &g_accel_dpdk_cryptodev_module;

	/* case 3 - nbytes too big */
	task.base.nbytes = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO + 512;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -E2BIG);
	task.base.nbytes = 512;

	/* case 4 - no key handle in the channel */
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.crypto_key = &g_key;

	/* case 5 - invalid op */
	task.base.op_code = ACCEL_OPC_COMPARE;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.op_code = ACCEL_OPC_ENCRYPT;

	/* case 6 - no entries in g_mbuf_mp */
	MOCK_SET(spdk_mempool_get, NULL);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -ENOMEM);
	MOCK_CLEAR(spdk_mempool_get);

	/* case 7 - vtophys error in accel_dpdk_cryptodev_mbuf_attach_buf */
	MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EFAULT);
	MOCK_CLEAR(spdk_vtophys);
}

static void
test_simple_encrypt(void)
{
	struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }};
	struct iovec dst_iov = src_iov[0];
	struct accel_dpdk_cryptodev_task task = {};
	struct rte_mbuf *mbuf;
	int rc, i;

	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.nbytes = 512;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* Inplace encryption */
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* Out-of-place encryption */
	task.cryop_cnt_remaining = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* Out-of-place encryption, fragmented payload */
	task.base.s.iovcnt = 4;
	for (i = 0; i < 4; i++) {
		src_iov[i].iov_base = (void *)0xDEADBEEF + i * 128;
		src_iov[i].iov_len = 128;
	}
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	CU_ASSERT(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == src_iov[0].iov_len);
	for (i = 1; i < 4; i++) {
		mbuf = mbuf->next;
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base);
		CU_ASSERT(mbuf->data_len == src_iov[i].iov_len);
		rte_pktmbuf_free(mbuf);
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
}

static void
test_simple_decrypt(void)
{
	struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }};
	struct iovec dst_iov = src_iov[0];
	struct accel_dpdk_cryptodev_task task = {};
	struct rte_mbuf *mbuf;
	int rc, i;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.nbytes = 512;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* Inplace decryption */
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* Out-of-place decryption */
	task.cryop_cnt_remaining = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* Out-of-place decryption, fragmented payload */
	task.base.s.iovcnt = 4;
	for (i = 0; i < 4; i++) {
		src_iov[i].iov_base = (void *)0xDEADBEEF + i * 128;
		src_iov[i].iov_len = 128;
	}
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	CU_ASSERT(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == src_iov[0].iov_len);
	for (i = 1; i < 4; i++) {
		mbuf = mbuf->next;
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base);
		CU_ASSERT(mbuf->data_len == src_iov[i].iov_len);
		rte_pktmbuf_free(mbuf);
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
}

static void
test_large_enc_dec(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	uint32_t block_len = 512;
	uint32_t num_blocks = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO / block_len;
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO };
	struct iovec dst_iov = src_iov;
	uint32_t i;
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.nbytes = ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Multi block size decryption, multi-element, inplace */
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size decryption, multi-element, out-of-place */
	task.cryop_cnt_remaining = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == num_blocks);
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}

	/* Multi block size encryption, multi-element, inplace */
	dst_iov = src_iov;
	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.cryop_cnt_remaining = 0;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size encryption, multi-element, out-of-place */
	task.cryop_cnt_remaining = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == num_blocks);
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}

static void
test_dev_full(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct accel_dpdk_cryptodev_queued_op *queued_op;
	struct rte_crypto_sym_op *sym_op;
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
	struct iovec dst_iov = src_iov;
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.nbytes = 1024;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Two element block size decryption */
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;

	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == 2);
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == src_iov.iov_base);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task);
	CU_ASSERT(sym_op->m_dst == NULL);

	/* Make sure one got queued and confirm its values */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
	sym_op = queued_op->crypto_op->sym;
	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
	CU_ASSERT(queued_op->task == &task);
	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF + 512);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task);
	CU_ASSERT(sym_op->m_dst == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[1]->sym->m_src);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
}

static void
test_crazy_rw(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct iovec src_iov[4] = {
		[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 },
		[1] = {.iov_base = (void *)0xDEADBEEF + 512, .iov_len = 1024 },
		[2] = {.iov_base = (void *)0xDEADBEEF + 512 + 1024, .iov_len = 512 }
	};
	struct iovec *dst_iov = src_iov;
	uint32_t block_len = 512, num_blocks = 4, i;
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 3;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 3;
	task.base.d.iovs = dst_iov;
	task.base.block_size = 512;
	task.base.nbytes = num_blocks * task.base.block_size;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Multi block size read, single element, strange IOV makeup */
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[0].iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element strange IOV makeup */
	num_blocks = 8;
	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.cryop_cnt_remaining = 0;
	task.base.nbytes = task.base.block_size * num_blocks;
	task.base.s.iovcnt = 4;
	task.base.d.iovcnt = 4;
	task.base.s.iovs[0].iov_len = 2048;
	task.base.s.iovs[0].iov_base = (void *)0xDEADBEEF;
	task.base.s.iovs[1].iov_len = 512;
	task.base.s.iovs[1].iov_base = (void *)0xDEADBEEF + 2048;
	task.base.s.iovs[2].iov_len = 512;
	task.base.s.iovs[2].iov_base = (void *)0xDEADBEEF + 2048 + 512;
	task.base.s.iovs[3].iov_len = 1024;
	task.base.s.iovs[3].iov_base = (void *)0xDEADBEEF + 2048 + 512 + 512;

	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[0].iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}
}

static void
init_cleanup(void)
{
	struct accel_dpdk_cryptodev_device *dev, *tmp;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	TAILQ_FOREACH_SAFE(dev, &g_crypto_devices, link, tmp) {
		TAILQ_REMOVE(&g_crypto_devices, dev, link);
		accel_dpdk_cryptodev_release(dev);
	}

	spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, NULL);
}

static void
test_initdrivers(void)
{
	int rc;
	static struct rte_mempool *orig_mbuf_mp;
	static struct rte_mempool *orig_session_mp;
	static struct rte_mempool *orig_session_mp_priv;

	/* accel_dpdk_cryptodev_init calls spdk_io_device_register, so we need a thread */
	allocate_threads(1);
	set_thread(0);

	/* These tests alloc and free our mempools, so save them off here and
	 * restore them once the tests are done.
	 */
	orig_mbuf_mp = g_mbuf_mp;
	orig_session_mp = g_session_mp;
	orig_session_mp_priv = g_session_mp_priv;

	g_session_mp_priv = NULL;
	g_session_mp = NULL;
	g_mbuf_mp = NULL;

	/* No drivers available, not an error though */
	MOCK_SET(rte_cryptodev_count, 0);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);

	/* Can't create session pool. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(spdk_mempool_create, NULL);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(spdk_mempool_create);

	/* Can't create op pool. */
	MOCK_SET(rte_crypto_op_pool_create, NULL);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(rte_crypto_op_pool_create);

	/* Check resources are not sufficient */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);

	/* Test crypto dev configure failure. */
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	MOCK_SET(rte_cryptodev_configure, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	MOCK_SET(rte_cryptodev_configure, 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test failure of qp setup. */
	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);

	/* Test failure of dev start. */
	MOCK_SET(rte_cryptodev_start, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_start, 0);

	/* Test bogus PMD */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test happy path QAT. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path AESNI. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path MLX5. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_MLX5);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test failure of DPDK dev init. This is no longer an error for the
	 * crypto framework as a whole. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_vdev_init, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	CU_ASSERT(g_session_mp_priv != NULL);
	init_cleanup();
	MOCK_SET(rte_vdev_init, 0);
	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);

	/* Restore our initial values. */
	g_mbuf_mp = orig_mbuf_mp;
	g_session_mp = orig_session_mp;
	g_session_mp_priv = orig_session_mp_priv;
	free_threads();
}

static void
test_supported_opcodes(void)
{
	bool rc = true;
	enum accel_opcode opc;

	for (opc = 0; opc < ACCEL_OPC_LAST; opc++) {
		rc = accel_dpdk_cryptodev_supports_opcode(opc);
		switch (opc) {
		case ACCEL_OPC_ENCRYPT:
		case ACCEL_OPC_DECRYPT:
			CU_ASSERT(rc == true);
			break;
		default:
			CU_ASSERT(rc == false);
		}
	}
}

static void
test_poller(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
	struct iovec dst_iov = src_iov;
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.nbytes = 1024;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	struct rte_mbuf *src_mbufs[2];
	struct accel_dpdk_cryptodev_queued_op *op_to_resubmit;

	/* Test regular 1 op to dequeue and complete */
	g_dequeue_mock = g_enqueue_mock = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	task.cryop_cnt_remaining = 1;
	task.base.op_code = ACCEL_OPC_DECRYPT;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);

	/* We have nothing dequeued but have some to resubmit */
	g_dequeue_mock = 0;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* Add an op to the queued list. */
	g_resubmit_test = true;
	op_to_resubmit = (struct accel_dpdk_cryptodev_queued_op *)((uint8_t *)g_test_crypto_ops[0] +
			 ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET);
	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
	op_to_resubmit->task = &task;
	op_to_resubmit->qp = &g_aesni_qp;
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
			  op_to_resubmit,
			  link);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	g_resubmit_test = false;
	CU_ASSERT(rc == 1);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* 2 to dequeue but 2nd one failed */
	g_dequeue_mock = g_enqueue_mock = 2;
	task.cryop_cnt_remaining = 2;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)&task;
	g_test_crypto_ops[1]->sym->m_dst = NULL;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(task.is_failed == true);
	CU_ASSERT(rc == 1);
}

/* Helper function for accel_dpdk_cryptodev_assign_device_qps() */
static void
_check_expected_values(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
		       uint8_t expected_qat_index,
		       uint8_t next_qat_index)
{
	uint32_t num_qpairs;

	memset(crypto_ch->device_qp, 0, sizeof(crypto_ch->device_qp));

	num_qpairs = accel_dpdk_cryptodev_assign_device_qps(crypto_ch);
	CU_ASSERT(num_qpairs == 3);

	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]->index == expected_qat_index);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]->in_use == true);
	CU_ASSERT(g_next_qat_index == next_qat_index);
	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB]->in_use == true);
	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->in_use == true);
}

static void
test_assign_device_qp(void)
{
	struct accel_dpdk_cryptodev_device qat_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT,
		.qpairs = TAILQ_HEAD_INITIALIZER(qat_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_device aesni_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB,
		.qpairs = TAILQ_HEAD_INITIALIZER(aesni_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_device mlx5_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI,
		.qpairs = TAILQ_HEAD_INITIALIZER(mlx5_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_qp *qat_qps;
	struct accel_dpdk_cryptodev_qp aesni_qps[4] = {};
	struct accel_dpdk_cryptodev_qp mlx5_qps[4] = {};
	struct accel_dpdk_cryptodev_io_channel io_ch = {};
	TAILQ_HEAD(, accel_dpdk_cryptodev_device) devs_tmp = TAILQ_HEAD_INITIALIZER(devs_tmp);
	int i;

	g_qat_total_qp = 96;
	qat_qps = calloc(g_qat_total_qp, sizeof(*qat_qps));
	SPDK_CU_ASSERT_FATAL(qat_qps != NULL);

	for (i = 0; i < 4; i++) {
		aesni_qps[i].index = i;
		aesni_qps[i].device = &aesni_dev;
		TAILQ_INSERT_TAIL(&aesni_dev.qpairs, &aesni_qps[i], link);

		mlx5_qps[i].index = i;
		mlx5_qps[i].device = &mlx5_dev;
		TAILQ_INSERT_TAIL(&mlx5_dev.qpairs, &mlx5_qps[i], link);
	}
	for (i = 0; i < g_qat_total_qp; i++) {
		qat_qps[i].index = i;
		qat_qps[i].device = &qat_dev;
		TAILQ_INSERT_TAIL(&qat_dev.qpairs, &qat_qps[i], link);
	}

	/* Swap g_crypto_devices so that other tests are not affected */
	TAILQ_SWAP(&g_crypto_devices, &devs_tmp, accel_dpdk_cryptodev_device, link);

	TAILQ_INSERT_TAIL(&g_crypto_devices, &qat_dev, link);
	TAILQ_INSERT_TAIL(&g_crypto_devices, &aesni_dev, link);
	TAILQ_INSERT_TAIL(&g_crypto_devices, &mlx5_dev, link);

	/* QAT testing is more complex because the code under test load balances by
	 * advancing each subsequent device/qp assignment by QAT_VF_SPREAD, modulo
	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions,
	 * each with 2 qps, so the "spread" between assignments is 32. */

	/* First assignment will assign to 0 and next at 32. */
	_check_expected_values(&io_ch, 0, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD);

	/* Second assignment will assign to 32 and next at 64. */
	_check_expected_values(&io_ch, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD,
			       ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD * 2);

	/* Third assignment will assign to 64 and next at 0. */
	_check_expected_values(&io_ch, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD * 2, 0);

	/* Fourth assignment will assign to 1 and next at 33. */
	_check_expected_values(&io_ch, 1, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD + 1);

	TAILQ_SWAP(&devs_tmp, &g_crypto_devices, accel_dpdk_cryptodev_device, link);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("dpdk_cryptodev", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_error_paths);
	CU_ADD_TEST(suite, test_simple_encrypt);
	CU_ADD_TEST(suite, test_simple_decrypt);
	CU_ADD_TEST(suite, test_large_enc_dec);
	CU_ADD_TEST(suite, test_dev_full);
	CU_ADD_TEST(suite, test_crazy_rw);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_supported_opcodes);
	CU_ADD_TEST(suite, test_poller);
	CU_ADD_TEST(suite, test_assign_device_qp);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}