/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "spdk_internal/cunit.h"

#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"
#include "common/lib/ut_multithread.c"

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_version.h>

#define MAX_TEST_BLOCKS 8192
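/* Backing storage for the crypto op mocks below: the bulk_alloc mock hands out
 * pointers from g_test_crypto_ops, while the enqueue mock records what it
 * received in g_test_dev_full_ops so tests can assert on submitted ops.
 */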
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
unsigned ut_rte_crypto_op_bulk_alloc;
int ut_rte_crypto_op_attach_sym_session = 0;
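/* Selector values consumed by the rte_cryptodev_info_get() mock below to pick
 * which PMD driver name gets reported to the module under test.
 */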
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_MLX5 2
#define MOCK_INFO_GET_1QP_BOGUS_PMD 3
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;

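/* The rte_pktmbuf/rte_mempool wrappers below are backed by spdk_mempool,
 * presumably so the tests can run without initializing the DPDK EAL.
 */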
void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void
mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
	spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
}

void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void
mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
	spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
}

void
rte_mempool_free(struct rte_mempool *mp)
{
	spdk_mempool_free((struct spdk_mempool *)mp);
}

int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
				unsigned count);
#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
int
mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
			    unsigned count)
{
	int rc;

	rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
	if (rc) {
		return rc;
	}
	for (unsigned i = 0; i < count; i++) {
		rte_pktmbuf_reset(mbufs[i]);
		mbufs[i]->pool = pool;
	}
	return rc;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
				      uint32_t elt_size, uint32_t cache_size,
				      uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, nb_elts, elt_size + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
			uint16_t priv_size, uint16_t data_room_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
			  unsigned nb_elts, unsigned cache_size,
			  uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);

	tmp = spdk_mempool_create(name, nb_elts,
				  sizeof(struct rte_crypto_op) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

/* These functions are defined as static inline in DPDK, so we can't mock
 * them directly. Instead, we use defines to redirect them into our custom
 * functions.
 */
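/* Note that the redirection only takes effect because accel_dpdk_cryptodev.c is
 * #include'd further down in this file, after these macros are defined.
 */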
static bool g_resubmit_test = false;
#define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
static inline uint16_t
mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < nb_ops; i++) {
		/* Use this (so far empty) array of pointers to store the
		 * enqueued operations for assertion in the dev_full test.
		 */
		g_test_dev_full_ops[i] = *ops++;
		if (g_resubmit_test == true) {
			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
		}
	}

	return g_enqueue_mock;
}

#define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
static inline uint16_t
mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < g_dequeue_mock; i++) {
		*ops++ = g_test_crypto_ops[i];
	}

	return g_dequeue_mock;
}

/* Instead of allocating real memory, hand out pointers from our test array
 * so that tests can assert on the allocated operations.
 */
#define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
static inline unsigned
mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
			      enum rte_crypto_op_type type,
			      struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	for (i = 0; i < nb_ops; i++) {
		*ops++ = g_test_crypto_ops[i];
	}
	return ut_rte_crypto_op_bulk_alloc;
}

#define rte_mempool_put_bulk mock_rte_mempool_put_bulk
static __rte_always_inline void
mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
			  unsigned int n)
{
	return;
}
#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op, void *sess)
#else
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
#endif
{
	return ut_rte_crypto_op_attach_sym_session;
}

#define rte_lcore_count mock_rte_lcore_count
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}

#include "accel/dpdk_cryptodev/accel_dpdk_cryptodev.c"

/* accel stubs */
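/* DEFINE_STUB(fn, ret, args, val) from spdk_internal/mock.h generates a
 * mockable stub of fn that returns val by default; DEFINE_STUB_V generates a
 * void variant.
 */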
DEFINE_STUB_V(spdk_accel_task_complete, (struct spdk_accel_task *task, int status));
DEFINE_STUB_V(spdk_accel_module_finish, (void));
DEFINE_STUB_V(spdk_accel_module_list_add, (struct spdk_accel_module_if *accel_module));

/* DPDK stubs */
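/* Fake a successful dynfield registration at the offset of the second
 * dynfield1 slot in struct rte_mbuf.
 */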
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
DEFINE_STUB(rte_cryptodev_sym_session_create, void *,
	    (uint8_t dev_id, struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), (void *)1);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (uint8_t dev_id, void *sess), 0);
#else
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (void *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
#endif

struct rte_cryptodev *rte_cryptodevs;

/* global vars and setup/cleanup functions used for all test functions */
struct spdk_io_channel *g_io_ch;
struct accel_dpdk_cryptodev_io_channel *g_crypto_ch;
struct accel_dpdk_cryptodev_device g_aesni_crypto_dev;
struct accel_dpdk_cryptodev_qp g_aesni_qp;
struct accel_dpdk_cryptodev_key_handle g_key_handle;
struct accel_dpdk_cryptodev_key_priv g_key_priv;
struct spdk_accel_crypto_key g_key;

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	dev_info->max_nb_queue_pairs = 1;
	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
		dev_info->driver_name = g_driver_names[0];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
		dev_info->driver_name = g_driver_names[1];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_MLX5) {
		dev_info->driver_name = g_driver_names[2];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
		dev_info->driver_name = "junk";
	}
}

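/* Echo dev_id back as the private session size, presumably so each device
 * reports a distinct, easily verifiable value.
 */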
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	return (unsigned int)dev_id;
}

/* Global setup shared by all tests; prepares the common state they rely on */
static int
test_setup(void)
{
	int i, rc;

	/* Prepare essential variables for test routines */
	g_io_ch = calloc(1, sizeof(*g_io_ch) + sizeof(struct accel_dpdk_cryptodev_io_channel));
	g_crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	TAILQ_INIT(&g_crypto_ch->queued_tasks);
	TAILQ_INIT(&g_crypto_ch->completed_tasks);

	g_aesni_crypto_dev.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	g_aesni_crypto_dev.qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
	TAILQ_INIT(&g_aesni_crypto_dev.qpairs);

	g_aesni_qp.device = &g_aesni_crypto_dev;
	g_crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = &g_aesni_qp;

	g_key_handle.device = &g_aesni_crypto_dev;
	g_key_priv.driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	g_key_priv.cipher = SPDK_ACCEL_CIPHER_AES_CBC;
	TAILQ_INIT(&g_key_priv.dev_keys);
	TAILQ_INSERT_TAIL(&g_key_priv.dev_keys, &g_key_handle, link);
	g_key.priv = &g_key_priv;
	g_key.module_if = &g_accel_dpdk_cryptodev_module;

	/* Allocate a real mbuf pool so we can test error paths */
	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS,
					    (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    0, 0, SPDK_ENV_NUMA_ID_ANY);
	/* Instead of allocating real rte mempools for these, it's easier and provides the
	 * same coverage to simply allocate the crypto ops here.
	 */
	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		size_t size = ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_IV_LENGTH;
		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size);
		if (rc != 0) {
			assert(false);
		}
		memset(g_test_crypto_ops[i], 0, ACCEL_DPDK_CRYPTODEV_IV_OFFSET);
	}
	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	int i;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		free(g_test_crypto_ops[i]);
	}
	free(g_io_ch);
	return 0;
}

static void
test_error_paths(void)
{
	/* Single element block size encrypt, just to test error paths
	 * in accel_dpdk_cryptodev_submit_tasks() */
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 };
	struct iovec dst_iov = src_iov;
	struct accel_dpdk_cryptodev_task task = {};
	struct accel_dpdk_cryptodev_key_priv key_priv = {};
	struct spdk_accel_crypto_key key = {};
	int rc;

	task.base.op_code = SPDK_ACCEL_OPC_ENCRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* case 1 - no crypto key */
	task.base.crypto_key = NULL;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.crypto_key = &g_key;

	/* case 2 - crypto key with wrong module_if */
	key_priv.driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	key_priv.cipher = SPDK_ACCEL_CIPHER_AES_CBC;
	TAILQ_INIT(&key_priv.dev_keys);
	key.priv = &key_priv;
	key.module_if = (struct spdk_accel_module_if *) 0x1;
	task.base.crypto_key = &key;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	key.module_if = &g_accel_dpdk_cryptodev_module;

	/* case 3 - no key handle in the channel */
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.crypto_key = &g_key;

	/* case 4 - invalid op */
	task.base.op_code = SPDK_ACCEL_OPC_COMPARE;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.op_code = SPDK_ACCEL_OPC_ENCRYPT;

	/* case 5 - no entries in g_mbuf_mp */
	MOCK_SET(spdk_mempool_get, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == true);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == false);
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);
	MOCK_CLEAR(spdk_mempool_get);
	TAILQ_INIT(&g_crypto_ch->queued_tasks);

	/* case 6 - vtophys error in accel_dpdk_cryptodev_mbuf_attach_buf */
	MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EFAULT);
	MOCK_CLEAR(spdk_vtophys);
}

static void
test_simple_encrypt(void)
{
	struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }};
	struct iovec dst_iov = src_iov[0];
	struct accel_dpdk_cryptodev_task task = {};
	struct rte_mbuf *mbuf, *next;
	int rc, i;

	task.base.op_code = SPDK_ACCEL_OPC_ENCRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* Inplace encryption */
	g_aesni_qp.num_enqueued_ops = 0;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* out-of-place encryption */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_submitted = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* out-of-place encryption, fragmented payload */
	g_aesni_qp.num_enqueued_ops = 0;
	task.base.s.iovcnt = 4;
	for (i = 0; i < 4; i++) {
		src_iov[i].iov_base = (void *)0xDEADBEEF + i * 128;
		src_iov[i].iov_len = 128;
	}
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == src_iov[0].iov_len);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base);
		CU_ASSERT(mbuf->data_len == src_iov[i].iov_len);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* Big logical block size, inplace encryption */
	src_iov[0].iov_len = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	dst_iov = src_iov[0];
	task.base.block_size = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	task.base.s.iovcnt = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == (char *)src_iov[0].iov_base + i * ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}

static void
test_simple_decrypt(void)
{
	struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }};
	struct iovec dst_iov = src_iov[0];
	struct accel_dpdk_cryptodev_task task = {};
	struct rte_mbuf *mbuf, *next;
	int rc, i;

	task.base.op_code = SPDK_ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* Inplace decryption */
	g_aesni_qp.num_enqueued_ops = 0;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* out-of-place decryption */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_submitted = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* out-of-place decryption, fragmented payload */
	g_aesni_qp.num_enqueued_ops = 0;
	task.base.s.iovcnt = 4;
	for (i = 0; i < 4; i++) {
		src_iov[i].iov_base = (void *)0xDEADBEEF + i * 128;
		src_iov[i].iov_len = 128;
	}
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == src_iov[0].iov_len);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base);
		CU_ASSERT(mbuf->data_len == src_iov[i].iov_len);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* Big logical block size, inplace decryption */
	src_iov[0].iov_len = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	dst_iov = src_iov[0];
	task.base.block_size = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	task.base.s.iovcnt = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == (char *)src_iov[0].iov_base + i * ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}

static void
test_large_enc_dec(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	uint32_t block_len = 512;
	uint32_t num_blocks = ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2;
	uint32_t iov_len = num_blocks * block_len / 16;
	uint32_t blocks_in_iov = num_blocks / 16;
	uint32_t iov_idx;
	struct iovec src_iov[16];
	struct iovec dst_iov[16];
	uint32_t i;
	int rc;

	for (i = 0; i < 16; i++) {
		src_iov[i].iov_base = (void *)0xDEADBEEF + i * iov_len;
		src_iov[i].iov_len = iov_len;

		dst_iov[i].iov_base = (void *)0xDEADBEEF + i * iov_len;
		dst_iov[i].iov_len = iov_len;
	}

	task.base.op_code = SPDK_ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 16;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 16;
	task.base.d.iovs = dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Test 1. Multi block size decryption, multi-element, inplace */
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
			ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == true);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Call accel_dpdk_cryptodev_process_task like it was called by completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov + 8;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Test 2. Multi block size decryption, multi-element, out-of-place */
	g_aesni_qp.num_enqueued_ops = 0;
	/* Modify dst to make payload out-of-place */
	dst_iov[0].iov_base -= 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
			ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == false);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}

	/* Call accel_dpdk_cryptodev_process_task like it was called by completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov + 8;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}

	/* Test 3. Multi block size encryption, multi-element, inplace */
	g_aesni_qp.num_enqueued_ops = 0;
	task.base.op_code = SPDK_ACCEL_OPC_ENCRYPT;
	task.cryop_submitted = 0;
	/* Modify dst to make the payload inplace again */
	dst_iov[0].iov_base += 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
			ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == true);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Call accel_dpdk_cryptodev_process_task like it was called by completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov + 8;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Test 4. Multi block size encryption, multi-element, out-of-place */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_submitted = 0;
	/* Modify dst to make payload out-of-place */
	dst_iov[0].iov_base -= 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc =
			ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == false);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}

	/* Call accel_dpdk_cryptodev_process_task like it was called by completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		iov_idx = i / blocks_in_iov + 8;
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov[iov_idx].iov_base + ((
					i % blocks_in_iov) * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}

static void
test_dev_full(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct rte_crypto_sym_op *sym_op;
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
	struct iovec dst_iov = src_iov;
	int rc;

	task.base.op_code = SPDK_ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Two element block size decryption, 2nd op was not submitted */
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(task.cryop_total == 2);
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == src_iov.iov_base);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task);
	CU_ASSERT(sym_op->m_dst == NULL);
	/* op which was not submitted is already released */
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	task.cryop_submitted = 0;

	/* Two element block size decryption, no ops were submitted, task should be queued */
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = 0;
	ut_rte_crypto_op_bulk_alloc = 2;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == true);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 0);
	CU_ASSERT(!TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);
	TAILQ_INIT(&g_crypto_ch->queued_tasks);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_aesni_qp.num_enqueued_ops = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);

	/* QP is full, task should be queued */
	g_aesni_qp.num_enqueued_ops = g_aesni_crypto_dev.qp_desc_nr;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == true);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);
	g_aesni_qp.num_enqueued_ops = 0;

	TAILQ_INIT(&g_crypto_ch->queued_tasks);

	/* Two element block size decryption, 2nd op was not submitted, but has RTE_CRYPTO_OP_STATUS_SUCCESS status */
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_total == 2);
	CU_ASSERT(task.cryop_submitted == 2);
	CU_ASSERT(task.cryop_completed == 1);
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == src_iov.iov_base);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task);
	CU_ASSERT(sym_op->m_dst == NULL);
	/* op which was not submitted is already released */
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* Two element block size decryption, 1st op was not submitted, but has RTE_CRYPTO_OP_STATUS_SUCCESS status */
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = 0;
	ut_rte_crypto_op_bulk_alloc = 2;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_total == 2);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(task.cryop_completed == 1);
	CU_ASSERT(!TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);
	TAILQ_INIT(&g_crypto_ch->queued_tasks);

	/* Single element block size decryption, 1st op was not submitted, but has RTE_CRYPTO_OP_STATUS_SUCCESS status.
	 * Task should be queued in the completed_tasks list */
	src_iov.iov_len = 512;
	dst_iov.iov_len = 512;
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = 0;
	ut_rte_crypto_op_bulk_alloc = 1;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_total == 1);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(task.cryop_completed == 1);
	CU_ASSERT(!TAILQ_EMPTY(&g_crypto_ch->completed_tasks));
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->completed_tasks) == &task);
	TAILQ_INIT(&g_crypto_ch->completed_tasks);
}

static void
test_crazy_rw(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct iovec src_iov[4] = {
		[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 },
		[1] = {.iov_base = (void *)0xDEADBEEF + 512, .iov_len = 1024 },
		[2] = {.iov_base = (void *)0xDEADBEEF + 512 + 1024, .iov_len = 512 }
	};
	struct iovec *dst_iov = src_iov;
	uint32_t block_len = 512, num_blocks = 4, i;
	int rc;

	task.base.op_code = SPDK_ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 3;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 3;
	task.base.d.iovs = dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Multi block size read, single element, strange IOV makeup */
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
	g_aesni_qp.num_enqueued_ops = 0;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[0].iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element, strange IOV makeup */
	num_blocks = 8;
	task.base.op_code = SPDK_ACCEL_OPC_ENCRYPT;
	task.cryop_submitted = 0;
	task.base.s.iovcnt = 4;
	task.base.d.iovcnt = 4;
	task.base.s.iovs[0].iov_len = 2048;
	task.base.s.iovs[0].iov_base = (void *)0xDEADBEEF;
	task.base.s.iovs[1].iov_len = 512;
	task.base.s.iovs[1].iov_base = (void *)0xDEADBEEF + 2048;
	task.base.s.iovs[2].iov_len = 512;
	task.base.s.iovs[2].iov_base = (void *)0xDEADBEEF + 2048 + 512;
	task.base.s.iovs[3].iov_len = 1024;
	task.base.s.iovs[3].iov_base = (void *)0xDEADBEEF + 2048 + 512 + 512;

	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
	g_aesni_qp.num_enqueued_ops = 0;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[0].iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}
}

static void
init_cleanup(void)
{
	struct accel_dpdk_cryptodev_device *dev, *tmp;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	TAILQ_FOREACH_SAFE(dev, &g_crypto_devices, link, tmp) {
		TAILQ_REMOVE(&g_crypto_devices, dev, link);
		accel_dpdk_cryptodev_release(dev);
	}

	spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, NULL);
}

static void
test_initdrivers(void)
{
	int rc;
	static struct rte_mempool *orig_mbuf_mp;
	static struct rte_mempool *orig_session_mp;
	static struct rte_mempool *orig_session_mp_priv;

	/* accel_dpdk_cryptodev_init() calls spdk_io_device_register(), so we need a thread */
	allocate_threads(1);
	set_thread(0);

	/* These tests will allocate and free our mempools,
	 * so save the original pointers here and restore them after the tests are over.
	 */
	orig_mbuf_mp = g_mbuf_mp;
	orig_session_mp = g_session_mp;
	orig_session_mp_priv = g_session_mp_priv;

	g_session_mp_priv = NULL;
	g_session_mp = NULL;
	g_mbuf_mp = NULL;

	/* No drivers available; -ENODEV is expected and not treated as a fatal error */
	MOCK_SET(rte_cryptodev_count, 0);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -ENODEV);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);

	/* Can't create session pool. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(spdk_mempool_create, NULL);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(spdk_mempool_create);

	/* Can't create op pool. */
	MOCK_SET(rte_crypto_op_pool_create, NULL);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(rte_crypto_op_pool_create);

	/* Check the case where resources are not sufficient */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);

	/* Test crypto dev configure failure. */
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	MOCK_SET(rte_cryptodev_configure, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	MOCK_SET(rte_cryptodev_configure, 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test failure of qp setup. */
	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);

	/* Test failure of dev start. */
	MOCK_SET(rte_cryptodev_start, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_start, 0);

	/* Test bogus PMD */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test happy path QAT. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path AESNI. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path MLX5. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_MLX5);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test failure of DPDK dev init. This is no longer an error
	 * situation for the entire crypto framework. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_vdev_init, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
	CU_ASSERT(g_session_mp_priv != NULL);
#endif
	init_cleanup();
	MOCK_SET(rte_vdev_init, 0);
	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);

	/* restore our initial values. */
	g_mbuf_mp = orig_mbuf_mp;
	g_session_mp = orig_session_mp;
	g_session_mp_priv = orig_session_mp_priv;
	free_threads();
}

static void
test_supported_opcodes(void)
{
	bool rc = true;
	enum spdk_accel_opcode opc;

	for (opc = 0; opc < SPDK_ACCEL_OPC_LAST; opc++) {
		rc = accel_dpdk_cryptodev_supports_opcode(opc);
		switch (opc) {
		case SPDK_ACCEL_OPC_ENCRYPT:
		case SPDK_ACCEL_OPC_DECRYPT:
			CU_ASSERT(rc == true);
			break;
		default:
			CU_ASSERT(rc == false);
		}
	}
}

static void
test_poller(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
	struct iovec dst_iov = src_iov;
	struct rte_mbuf *src_mbufs[2];
	int rc;

	task.base.op_code = SPDK_ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	task.inplace = true;

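	/* The module stamps the owning task pointer into each mbuf via the dynfield
	 * at g_mbuf_offset; the poller reads it back to find the task an op belongs
	 * to, so these tests set it up by hand.
	 */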
	/* Test regular 1 op to dequeue and complete */
	g_dequeue_mock = g_enqueue_mock = 1;
	g_aesni_qp.num_enqueued_ops = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	task.cryop_submitted = 1;
	task.cryop_total = 1;
	task.cryop_completed = 0;
	task.base.op_code = SPDK_ACCEL_OPC_DECRYPT;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);
	CU_ASSERT(task.cryop_completed == task.cryop_submitted);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 0);

	/* 2 ops to dequeue, but the 2nd one failed */
	g_dequeue_mock = g_enqueue_mock = 2;
	g_aesni_qp.num_enqueued_ops = 2;
	task.cryop_submitted = 2;
	task.cryop_total = 2;
	task.cryop_completed = 0;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)&task;
	g_test_crypto_ops[1]->sym->m_dst = NULL;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(task.is_failed == true);
	CU_ASSERT(rc == 1);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 0);

	/* Dequeue a task which needs to be submitted again */
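	/* Only 1 of the task's 2 crypto ops was submitted, so once the dequeued
	 * op completes, the poller is expected to build and enqueue the op for
	 * the remaining block: cryop_submitted goes to 2 with one op in flight. */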
	g_dequeue_mock = g_enqueue_mock = ut_rte_crypto_op_bulk_alloc = 1;
	task.cryop_submitted = 1;
	task.cryop_total = 2;
	task.cryop_completed = 0;
	g_aesni_qp.num_enqueued_ops = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	SPDK_CU_ASSERT_FATAL(src_mbufs[0] != NULL);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);
	CU_ASSERT(task.cryop_submitted == 2);
	CU_ASSERT(task.cryop_total == 2);
	CU_ASSERT(task.cryop_completed == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov.iov_base + task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uintptr_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 1);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* Process queued tasks, qp is full */
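	/* With no free qp descriptors, the queued task cannot be resubmitted;
	 * the poller should leave it on queued_tasks and report no work done. */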
	g_dequeue_mock = g_enqueue_mock = 0;
	g_aesni_qp.num_enqueued_ops = g_aesni_crypto_dev.qp_desc_nr;
	task.cryop_submitted = 1;
	task.cryop_total = 2;
	task.cryop_completed = 1;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_tasks, &task, link);

	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);

	/* Try again when the qp has room; the queued task should be submitted */
	g_enqueue_mock = 1;
	g_aesni_qp.num_enqueued_ops = 0;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);
	CU_ASSERT(task.cryop_submitted == 2);
	CU_ASSERT(task.cryop_total == 2);
	CU_ASSERT(task.cryop_completed == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov.iov_base + task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uintptr_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 1);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* Complete tasks in the dedicated list */
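	/* Tasks parked on completed_tasks need no qp interaction; the poller
	 * should simply finish them and count them in its return value. */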
	g_dequeue_mock = g_enqueue_mock = 0;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->completed_tasks));
	TAILQ_INSERT_TAIL(&g_crypto_ch->completed_tasks, &task, link);
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->completed_tasks));
}

/* Helper function for accel_dpdk_cryptodev_assign_device_qps() */
static void
_check_expected_values(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
		       uint8_t expected_qat_index,
		       uint8_t next_qat_index)
{
	uint32_t num_qpairs;

	memset(crypto_ch->device_qp, 0, sizeof(crypto_ch->device_qp));

	num_qpairs = accel_dpdk_cryptodev_assign_device_qps(crypto_ch);
	CU_ASSERT(num_qpairs == 3);

	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]->index == expected_qat_index);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]->in_use == true);
	CU_ASSERT(g_next_qat_index == next_qat_index);
	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB]->in_use == true);
	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->in_use == true);
}

static void
test_assign_device_qp(void)
{
	struct accel_dpdk_cryptodev_device qat_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT,
		.qpairs = TAILQ_HEAD_INITIALIZER(qat_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_device aesni_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB,
		.qpairs = TAILQ_HEAD_INITIALIZER(aesni_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_device mlx5_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI,
		.qpairs = TAILQ_HEAD_INITIALIZER(mlx5_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_qp *qat_qps;
	struct accel_dpdk_cryptodev_qp aesni_qps[4] = {};
	struct accel_dpdk_cryptodev_qp mlx5_qps[4] = {};
	struct accel_dpdk_cryptodev_io_channel io_ch = {};
	TAILQ_HEAD(, accel_dpdk_cryptodev_device) devs_tmp = TAILQ_HEAD_INITIALIZER(devs_tmp);
	int i;

	g_qat_total_qp = 96;
	qat_qps = calloc(g_qat_total_qp, sizeof(*qat_qps));
	SPDK_CU_ASSERT_FATAL(qat_qps != NULL);

	for (i = 0; i < 4; i++) {
		aesni_qps[i].index = i;
		aesni_qps[i].device = &aesni_dev;
		TAILQ_INSERT_TAIL(&aesni_dev.qpairs, &aesni_qps[i], link);

		mlx5_qps[i].index = i;
		mlx5_qps[i].device = &mlx5_dev;
		TAILQ_INSERT_TAIL(&mlx5_dev.qpairs, &mlx5_qps[i], link);
	}
	for (i = 0; i < g_qat_total_qp; i++) {
		qat_qps[i].index = i;
		qat_qps[i].device = &qat_dev;
		TAILQ_INSERT_TAIL(&qat_dev.qpairs, &qat_qps[i], link);
	}

	/* Swap g_crypto_devices so that other tests are not affected */
	TAILQ_SWAP(&g_crypto_devices, &devs_tmp, accel_dpdk_cryptodev_device, link);

	TAILQ_INSERT_TAIL(&g_crypto_devices, &qat_dev, link);
	TAILQ_INSERT_TAIL(&g_crypto_devices, &aesni_dev, link);
	TAILQ_INSERT_TAIL(&g_crypto_devices, &mlx5_dev, link);

	/* QAT testing is more complex as the code under test load balances by
	 * assigning each subsequent device/qp at a stride of QAT_VF_SPREAD,
	 * modulo g_qat_total_qp. For the current latest QAT we have 48 virtual
	 * functions, each with 2 qps, and the "spread" between assignments is 32. */
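	/* Expected progression given the setup above (g_qat_total_qp == 96,
	 * spread == 32): indices 0, 32 and 64 are handed out first, then the
	 * next slot wraps to 0; since already-assigned qps stay marked in_use,
	 * the fourth assignment skips ahead to index 1 and continues there. */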

	/* First assignment will assign to 0 and next at 32. */
	_check_expected_values(&io_ch, 0, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD);

	/* Second assignment will assign to 32 and next at 64. */
	_check_expected_values(&io_ch, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD,
			       ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD * 2);

	/* Third assignment will assign to 64 and next at 0. */
	_check_expected_values(&io_ch, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD * 2, 0);

	/* Fourth assignment will assign to 1 and next at 33. */
	_check_expected_values(&io_ch, 1, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD + 1);

	TAILQ_SWAP(&devs_tmp, &g_crypto_devices, accel_dpdk_cryptodev_device, link);

	free(qat_qps);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("dpdk_cryptodev", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_error_paths);
	CU_ADD_TEST(suite, test_simple_encrypt);
	CU_ADD_TEST(suite, test_simple_decrypt);
	CU_ADD_TEST(suite, test_large_enc_dec);
	CU_ADD_TEST(suite, test_dev_full);
	CU_ADD_TEST(suite, test_crazy_rw);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_supported_opcodes);
	CU_ADD_TEST(suite, test_poller);
	CU_ADD_TEST(suite, test_assign_device_qp);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}