xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision fecffda6ecf8853b82edccde429b68252f0a62c5)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk_cunit.h"
7 
8 #include "common/lib/test_env.c"
9 #include "spdk_internal/mock.h"
10 #include "thread/thread_internal.h"
11 #include "unit/lib/json_mock.c"
12 
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_version.h>
16 
/* Upper bound on crypto ops a single test IO can be split into. */
#define MAX_TEST_BLOCKS 8192
/* Static op arrays handed out by the alloc/dequeue mocks below. */
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

/* Values the burst mocks return; tests set these per scenario. */
uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
unsigned ut_rte_crypto_op_bulk_alloc;
/* Return code of the attach-session mock (0 = success). */
int ut_rte_crypto_op_attach_sym_session = 0;
/* Selector for which driver name rte_cryptodev_info_get() reports. */
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_MLX5 2
#define MOCK_INFO_GET_1QP_BOGUS_PMD 3
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;
31 
32 void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
33 #define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
34 void
35 mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
36 {
37 	spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
38 }
39 
40 void mock_rte_pktmbuf_free(struct rte_mbuf *m);
41 #define rte_pktmbuf_free mock_rte_pktmbuf_free
42 void
43 mock_rte_pktmbuf_free(struct rte_mbuf *m)
44 {
45 	spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
46 }
47 
/* Our rte mempools are really spdk mempools, so free them as such. */
void
rte_mempool_free(struct rte_mempool *mp)
{
	struct spdk_mempool *smp = (struct spdk_mempool *)mp;

	spdk_mempool_free(smp);
}
53 
54 int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
55 				unsigned count);
56 #define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
57 int
58 mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
59 			    unsigned count)
60 {
61 	int rc;
62 
63 	rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
64 	if (rc) {
65 		return rc;
66 	}
67 	for (unsigned i = 0; i < count; i++) {
68 		rte_pktmbuf_reset(mbufs[i]);
69 		mbufs[i]->pool = pool;
70 	}
71 	return rc;
72 }
73 
74 struct rte_mempool *
75 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
76 				      uint32_t elt_size, uint32_t cache_size,
77 				      uint16_t priv_size, int socket_id)
78 {
79 	struct spdk_mempool *tmp;
80 
81 	tmp = spdk_mempool_create(name, nb_elts, elt_size + priv_size,
82 				  cache_size, socket_id);
83 
84 	return (struct rte_mempool *)tmp;
85 
86 }
87 
88 struct rte_mempool *
89 rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
90 			uint16_t priv_size, uint16_t data_room_size, int socket_id)
91 {
92 	struct spdk_mempool *tmp;
93 
94 	tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
95 				  cache_size, socket_id);
96 
97 	return (struct rte_mempool *)tmp;
98 }
99 
100 struct rte_mempool *
101 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
102 		   unsigned cache_size, unsigned private_data_size,
103 		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
104 		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
105 		   int socket_id, unsigned flags)
106 {
107 	struct spdk_mempool *tmp;
108 
109 	tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
110 				  cache_size, socket_id);
111 
112 	return (struct rte_mempool *)tmp;
113 }
114 
115 DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
116 struct rte_mempool *
117 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
118 			  unsigned nb_elts, unsigned cache_size,
119 			  uint16_t priv_size, int socket_id)
120 {
121 	struct spdk_mempool *tmp;
122 
123 	HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);
124 
125 	tmp = spdk_mempool_create(name, nb_elts,
126 				  sizeof(struct rte_crypto_op) + priv_size,
127 				  cache_size, socket_id);
128 
129 	return (struct rte_mempool *)tmp;
130 
131 }
132 
133 /* Those functions are defined as static inline in DPDK, so we can't
134  * mock them straight away. We use defines to redirect them into
135  * our custom functions.
136  */
137 static bool g_resubmit_test = false;
138 #define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
139 static inline uint16_t
140 mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
141 				 struct rte_crypto_op **ops, uint16_t nb_ops)
142 {
143 	int i;
144 
145 	CU_ASSERT(nb_ops > 0);
146 
147 	for (i = 0; i < nb_ops; i++) {
148 		/* Use this empty (til now) array of pointers to store
149 		 * enqueued operations for assertion in dev_full test.
150 		 */
151 		g_test_dev_full_ops[i] = *ops++;
152 		if (g_resubmit_test == true) {
153 			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
154 		}
155 	}
156 
157 	return g_enqueue_mock;
158 }
159 
160 #define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
161 static inline uint16_t
162 mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
163 				 struct rte_crypto_op **ops, uint16_t nb_ops)
164 {
165 	int i;
166 
167 	CU_ASSERT(nb_ops > 0);
168 
169 	for (i = 0; i < g_dequeue_mock; i++) {
170 		*ops++ = g_test_crypto_ops[i];
171 	}
172 
173 	return g_dequeue_mock;
174 }
175 
176 /* Instead of allocating real memory, assign the allocations to our
177  * test array for assertion in tests.
178  */
179 #define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
180 static inline unsigned
181 mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
182 			      enum rte_crypto_op_type type,
183 			      struct rte_crypto_op **ops, uint16_t nb_ops)
184 {
185 	int i;
186 
187 	for (i = 0; i < nb_ops; i++) {
188 		*ops++ = g_test_crypto_ops[i];
189 	}
190 	return ut_rte_crypto_op_bulk_alloc;
191 }
192 
193 #define rte_mempool_put_bulk mock_rte_mempool_put_bulk
194 static __rte_always_inline void
195 mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
196 			  unsigned int n)
197 {
198 	return;
199 }
200 
/* Attach-session mock: returns ut_rte_crypto_op_attach_sym_session so
 * tests can force failure (-1) or success (0). The session parameter
 * became an opaque void * in DPDK 22.11, hence the dual prototypes.
 */
#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op, void *sess)
#else
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
#endif
{
	return ut_rte_crypto_op_attach_sym_session;
}
213 
/* Pretend exactly one lcore exists for pool sizing in the vbdev code. */
#define rte_lcore_count mock_rte_lcore_count
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}
220 
221 #include "bdev/crypto/vbdev_crypto.c"
222 
/* SPDK stubs: default no-op/success implementations for bdev-layer
 * calls the vbdev makes but these tests don't exercise directly.
 */
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_unregister_by_name, int, (const char *bdev_name,
		struct spdk_bdev_module *module,
		spdk_bdev_unregister_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
249 
/* DPDK stubs: cryptodev/vdev management calls succeed by default. */
/* Fixed dynfield offset handed back by the registration stub below. */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

/* Session create/free signatures changed in DPDK 22.11; both variants
 * return a non-NULL dummy session (void *)1.
 */
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
DEFINE_STUB(rte_cryptodev_sym_session_create, void *,
	    (uint8_t dev_id, struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), (void *)1);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (uint8_t dev_id, void *sess), 0);
#else
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (void *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
#endif

/* Definition for the global DPDK device table referenced by headers. */
struct rte_cryptodev *rte_cryptodevs;
280 
/* global vars and setup/cleanup functions used for all test functions */
struct spdk_bdev_io *g_bdev_io;		/* the single IO reused by every test */
struct crypto_bdev_io *g_io_ctx;	/* driver_ctx view of g_bdev_io */
struct crypto_io_channel *g_crypto_ch;	/* ctx portion of g_io_ch */
struct spdk_io_channel *g_io_ch;
struct vbdev_dev g_device;
struct vbdev_crypto g_crypto_bdev;
struct vbdev_crypto_opts g_crypto_bdev_opts;
struct device_qp g_dev_qp;
290 
291 void
292 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
293 {
294 	dev_info->max_nb_queue_pairs = 1;
295 	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
296 		dev_info->driver_name = g_driver_names[0];
297 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
298 		dev_info->driver_name = g_driver_names[1];
299 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_MLX5) {
300 		dev_info->driver_name = g_driver_names[2];
301 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
302 		dev_info->driver_name = "junk";
303 	}
304 }
305 
/* Echo dev_id back so tests can steer the private session size. */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	return dev_id;
}
311 
/* Immediately invoke the callback with a fixed fake aux buffer
 * (0xDEADBEEF) instead of performing a real allocation.
 */
void
spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
{
	cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
}
317 
/* Immediately invoke the callback reporting buffer-get success. */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}
323 
/* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;
bool ut_spdk_bdev_readv_blocks_mocked = false;
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/* Success flag is the negation of the mock rc: rc == 0 completes
	 * with success, any non-zero rc completes with failure.
	 */
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}
336 
int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/* Complete immediately; rc == 0 means success (see readv mock). */
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}
348 
int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/* Complete immediately; rc == 0 means success (see readv mock). */
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}
359 
int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	/* Complete immediately; rc == 0 means success (see readv mock). */
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}
370 
int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/* Complete immediately; rc == 0 means success (see readv mock). */
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}
380 
/* Record the completion status and that completion happened at all,
 * so tests can assert on both without a real bdev layer.
 */
bool g_completion_called = false;
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}
388 
389 /* Global setup for all tests that share a bunch of preparation... */
390 static int
391 test_setup(void)
392 {
393 	int i, rc;
394 
395 	/* Prepare essential variables for test routines */
396 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
397 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
398 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
399 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
400 	g_crypto_ch = (struct crypto_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
401 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
402 	memset(&g_device, 0, sizeof(struct vbdev_dev));
403 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
404 	memset(&g_crypto_bdev_opts, 0, sizeof(struct vbdev_crypto_opts));
405 	g_dev_qp.device = &g_device;
406 	g_io_ctx->crypto_ch = g_crypto_ch;
407 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
408 	g_io_ctx->crypto_bdev->qp_desc_nr = CRYPTO_QP_DESCRIPTORS;
409 	g_io_ctx->crypto_bdev->opts = &g_crypto_bdev_opts;
410 	g_crypto_ch->device_qp = &g_dev_qp;
411 	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
412 	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
413 
414 	/* Allocate a real mbuf pool so we can test error paths */
415 	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS,
416 					    (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
417 					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
418 	/* Instead of allocating real rte mempools for these, it's easier and provides the
419 	 * same coverage just calloc them here.
420 	 */
421 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
422 		size_t size = IV_OFFSET + IV_LENGTH + QUEUED_OP_LENGTH;
423 		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size);
424 		if (rc != 0) {
425 			assert(false);
426 		}
427 		memset(g_test_crypto_ops[i], 0, IV_OFFSET + QUEUED_OP_LENGTH);
428 	}
429 	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;
430 
431 	return 0;
432 }
433 
434 /* Global teardown for all tests */
435 static int
436 test_cleanup(void)
437 {
438 	int i;
439 
440 	if (g_crypto_op_mp) {
441 		rte_mempool_free(g_crypto_op_mp);
442 		g_crypto_op_mp = NULL;
443 	}
444 	if (g_mbuf_mp) {
445 		rte_mempool_free(g_mbuf_mp);
446 		g_mbuf_mp = NULL;
447 	}
448 	if (g_session_mp) {
449 		rte_mempool_free(g_session_mp);
450 		g_session_mp = NULL;
451 	}
452 	if (g_session_mp_priv != NULL) {
453 		/* g_session_mp_priv may or may not be set depending on the DPDK version */
454 		rte_mempool_free(g_session_mp_priv);
455 		g_session_mp_priv = NULL;
456 	}
457 
458 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
459 		free(g_test_crypto_ops[i]);
460 	}
461 	free(g_bdev_io->u.bdev.iovs);
462 	free(g_bdev_io);
463 	free(g_io_ch);
464 	return 0;
465 }
466 
/* Drive the failure branches of vbdev_crypto_submit_request() with a
 * single 512B IO: mempool exhaustion, readv failure, op-alloc failure
 * and attach-session failure.
 */
static void
test_error_paths(void)
{
	/* Single element block size write, just to test error paths
	 * in vbdev_crypto_submit_request().
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of spdk_mempool_get_bulk(), will result in success because it
	 * will get queued.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_mempool_get, NULL);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* same thing but switch to reads to test error path in _crypto_complete_io() */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* NOTE(review): the io is pre-inserted on pending_cry_ios, presumably
	 * so the failure path can unlink it — confirm against _crypto_complete_io().
	 */
	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Now with the read_blocks failing */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_bdev_readv_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_SET(spdk_bdev_readv_blocks, 0);
	MOCK_CLEAR(spdk_mempool_get);

	/* test failure of rte_crypto_op_bulk_alloc() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_bulk_alloc = 0;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of rte_crypto_op_attach_sym_session() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_attach_sym_session = -1;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_attach_sym_session = 0;
}
519 
/* Happy path: one 512B block write through the full submit path. */
static void
test_simple_write(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.offset_blocks = 0;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	/* The write path fills in the aux buf info and an m_dst mbuf. */
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
	CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
	CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	/* The bdev_io pointer is stashed in the mbuf dynfield. */
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
}
554 
/* Happy path: one 512B block read; reads decrypt in place (no m_dst). */
static void
test_simple_read(void)
{
	/* Single element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	/* The bdev_io pointer is stashed in the mbuf dynfield. */
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	/* No destination mbuf for reads. */
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}
582 
/* Largest single-iov IO (CRYPTO_MAX_IO bytes): verify it is split into
 * one crypto op per 512B block, first as a read then as a write.
 */
static void
test_large_rw(void)
{
	unsigned block_len = 512;
	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
	unsigned io_len = block_len * num_blocks;
	unsigned i;

	/* Multi block size read, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* Each op covers one block at consecutive offsets into the iov. */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* Writes additionally carry the aux buf info and an m_dst. */
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
		CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
		CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}
649 
/* Device-full handling: enqueue accepts only 1 of 2 ops, so the second
 * must land on the channel's queued_cry_ops list; then a non-busy
 * enqueue failure (0 accepted, status ERROR) must fail the IO.
 */
static void
test_dev_full(void)
{
	struct vbdev_crypto_op *queued_op;
	struct rte_crypto_sym_op *sym_op;
	struct crypto_bdev_io *io_ctx;

	/* Two element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* NOTE(review): iovcnt is 1 but two iovs are populated for a
	 * 2-block IO — presumably the split path walks iovs by remaining
	 * length rather than iovcnt; confirm against _crypto_operation().
	 */
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 2;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	/* Only 1 of the 2 ops will be accepted by the enqueue mock. */
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;

	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
	/* First op was accepted and describes the first 512B block. */
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);

	/* make sure one got queued and confirm its values */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
	sym_op = queued_op->crypto_op->sym;
	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
	CU_ASSERT(queued_op->bdev_io == g_bdev_io);
	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[1]->sym->m_src);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
	CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
}
710 
/* IOs whose iov boundaries don't line up with block boundaries: verify
 * the block splitter still produces one contiguous op per block.
 */
static void
test_crazy_rw(void)
{
	unsigned block_len = 512;
	int num_blocks = 4;
	int i;

	/* Multi block size read, single element, strange IOV makeup */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 3;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* NOTE(review): tautological assert (m_src compared to itself);
		 * likely meant to compare against an expected mbuf.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element strange IOV makeup */
	num_blocks = 8;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 4;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* NOTE(review): both asserts below are tautological (value
		 * compared to itself); likely meant to check expected mbufs.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}
786 
static void
test_passthru(void)
{
	/* IO types the crypto module does not transform (unmap, flush) are
	 * forwarded to the base bdev; the parent IO's final status must track
	 * the mocked submit result in both the success and failure cases.
	 */

	/* Make sure these follow our completion callback, test success & fail. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	MOCK_SET(spdk_bdev_unmap_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* A failing passthru submission must fail the parent IO. */
	MOCK_SET(spdk_bdev_unmap_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_unmap_blocks);

	/* Same success/failure checks for flush. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
	MOCK_SET(spdk_bdev_flush_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* We should never get a WZ command, we report that we don't support it. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
}
814 
static void
test_reset(void)
{
	/* TODO: Reset handling in the module is built on spdk_for_each_channel(),
	 * which allows several possible testing strategies. No unit test is
	 * provided here yet; coverage will be added in a follow-up change.
	 */
}
824 
825 static void
826 init_cleanup(void)
827 {
828 	if (g_crypto_op_mp) {
829 		rte_mempool_free(g_crypto_op_mp);
830 		g_crypto_op_mp = NULL;
831 	}
832 	if (g_mbuf_mp) {
833 		rte_mempool_free(g_mbuf_mp);
834 		g_mbuf_mp = NULL;
835 	}
836 	if (g_session_mp) {
837 		rte_mempool_free(g_session_mp);
838 		g_session_mp = NULL;
839 	}
840 	if (g_session_mp_priv != NULL) {
841 		/* g_session_mp_priv may or may not be set depending on the DPDK version */
842 		rte_mempool_free(g_session_mp_priv);
843 		g_session_mp_priv = NULL;
844 	}
845 }
846 
static void
test_initdrivers(void)
{
	/* Drive vbdev_crypto_init_crypto_drivers() through its failure paths
	 * (pool creation, configure, qp setup, start, unknown PMD) and its
	 * happy paths for QAT, AESNI and MLX5. Each case asserts that no
	 * partially created pools are leaked on failure. The mock sequencing
	 * below is strictly order-dependent.
	 */
	int rc;
	static struct rte_mempool *orig_mbuf_mp;
	static struct rte_mempool *orig_session_mp;
	static struct rte_mempool *orig_session_mp_priv;

	/* These tests will alloc and free our g_mbuf_mp
	 * so save that off here and restore it after each test is over.
	 */
	orig_mbuf_mp = g_mbuf_mp;
	orig_session_mp = g_session_mp;
	orig_session_mp_priv = g_session_mp_priv;

	g_session_mp_priv = NULL;
	g_session_mp = NULL;
	g_mbuf_mp = NULL;

	/* No drivers available, not an error though */
	MOCK_SET(rte_cryptodev_count, 0);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);

	/* Can't create session pool. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(spdk_mempool_create, NULL);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -ENOMEM);
	/* Failure must not leave any pool allocated behind. */
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(spdk_mempool_create);

	/* Can't create op pool. */
	MOCK_SET(rte_crypto_op_pool_create, NULL);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(rte_crypto_op_pool_create);

	/* Check resources are not sufficient */
	/* NOTE(review): MOCK_CLEARED_ASSERT presumably verifies the
	 * spdk_mempool_create mock was released by the previous case -- confirm
	 * against the mock macro definitions.
	 */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* Test crypto dev configure failure. */
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	MOCK_SET(rte_cryptodev_configure, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	/* Restore the configure mock before asserting so later cases are
	 * unaffected even if an assert fails.
	 */
	MOCK_SET(rte_cryptodev_configure, 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test failure of qp setup. */
	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);

	/* Test failure of dev start. */
	MOCK_SET(rte_cryptodev_start, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_start, 0);

	/* Test bogus PMD */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test happy path QAT. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path AESNI. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path MLX5. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_MLX5);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test failure of DPDK dev init. By now it is not longer an error
	 * situation for entire crypto framework. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_vdev_init, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	/* Before DPDK 22.11 a separate private session pool exists as well. */
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
	CU_ASSERT(g_session_mp_priv != NULL);
#endif
	init_cleanup();
	MOCK_SET(rte_vdev_init, 0);
	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);

	/* restore our initial values. */
	g_mbuf_mp = orig_mbuf_mp;
	g_session_mp = orig_session_mp;
	g_session_mp_priv = orig_session_mp_priv;
}
986 
static void
test_crypto_op_complete(void)
{
	/* Exercise _crypto_operation_complete() for each IO type and verify
	 * both the final bdev IO status and that the completion callback ran
	 * (g_completion_called flips to true in every case).
	 */

	/* Make sure completion code respects failure. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test read completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion success. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_writev_blocks, 0);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion failed. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	/* NOTE(review): the writev mock is left set to -1 after this case --
	 * confirm later tests reset it before depending on write submission.
	 */
	MOCK_SET(spdk_bdev_writev_blocks, -1);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test bogus type for this completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}
1031 
1032 static void
1033 test_supported_io(void)
1034 {
1035 	void *ctx = NULL;
1036 	bool rc = true;
1037 
1038 	/* Make sure we always report false to WZ, we need the bdev layer to
1039 	 * send real 0's so we can encrypt/decrypt them.
1040 	 */
1041 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
1042 	CU_ASSERT(rc == false);
1043 }
1044 
static void
test_poller(void)
{
	/* Exercise crypto_dev_poller(): a normal single-op dequeue, the
	 * resubmit path for previously queued ops, and a two-op dequeue where
	 * the second op failed (which must fail the parent IO).
	 */
	int rc;
	struct rte_mbuf *src_mbufs[2];
	struct vbdev_crypto_op *op_to_resubmit;

	/* test regular 1 op to dequeue and complete */
	g_dequeue_mock = g_enqueue_mock = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	/* The mbuf dynfield carries the back-pointer to the parent bdev_io
	 * that the poller reads on completion.
	 */
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_io_ctx->cryop_cnt_remaining = 1;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	rc = crypto_dev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);

	/* We have nothing dequeued but have some to resubmit */
	g_dequeue_mock = 0;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* add an op to the queued list. */
	g_resubmit_test = true;
	/* The queued-op bookkeeping struct lives at QUEUED_OP_OFFSET within
	 * the crypto op's private area -- presumably mirroring the module's
	 * layout; TODO confirm against vbdev_crypto.c.
	 */
	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
	op_to_resubmit->bdev_io = g_bdev_io;
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
			  op_to_resubmit,
			  link);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	/* The poller must drain the queued list even with nothing dequeued. */
	rc = crypto_dev_poller(g_crypto_ch);
	g_resubmit_test = false;
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* 2 to dequeue but 2nd one failed */
	g_dequeue_mock = g_enqueue_mock = 2;
	g_io_ctx->cryop_cnt_remaining = 2;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_test_crypto_ops[0]->status =  RTE_CRYPTO_OP_STATUS_SUCCESS;
	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)g_bdev_io;
	g_test_crypto_ops[1]->sym->m_dst = NULL;
	/* Second op not processed: the whole parent IO must end up FAILED. */
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	rc = crypto_dev_poller(g_crypto_ch);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(rc == 2);
}
1101 
1102 /* Helper function for test_assign_device_qp() */
1103 static void
1104 _clear_device_qp_lists(void)
1105 {
1106 	struct device_qp *device_qp = NULL;
1107 
1108 	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
1109 		device_qp = TAILQ_FIRST(&g_device_qp_qat);
1110 		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
1111 		free(device_qp);
1112 
1113 	}
1114 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
1115 	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
1116 		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
1117 		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
1118 		free(device_qp);
1119 	}
1120 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
1121 	while (!TAILQ_EMPTY(&g_device_qp_mlx5)) {
1122 		device_qp = TAILQ_FIRST(&g_device_qp_mlx5);
1123 		TAILQ_REMOVE(&g_device_qp_mlx5, device_qp, link);
1124 		free(device_qp);
1125 	}
1126 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_mlx5) == true);
1127 }
1128 
1129 /* Helper function for test_assign_device_qp() */
1130 static void
1131 _check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
1132 		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
1133 		       uint8_t current_index)
1134 {
1135 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1136 	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
1137 	CU_ASSERT(g_next_qat_index == current_index);
1138 }
1139 
1140 static void
1141 test_assign_device_qp(void)
1142 {
1143 	struct device_qp *device_qp = NULL;
1144 	int i;
1145 
1146 	/* start with a known state, clear the device/qp lists */
1147 	_clear_device_qp_lists();
1148 
1149 	/* make sure that one AESNI_MB qp is found */
1150 	device_qp = calloc(1, sizeof(struct device_qp));
1151 	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
1152 	g_crypto_ch->device_qp = NULL;
1153 	g_crypto_bdev.opts->drv_name = AESNI_MB;
1154 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1155 	CU_ASSERT(g_crypto_ch->device_qp != NULL);
1156 
1157 	/* QAT testing is more complex as the code under test load balances by
1158 	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
1159 	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
1160 	 * each with 2 qp so the "spread" between assignments is 32.
1161 	 */
1162 	g_qat_total_qp = 96;
1163 	for (i = 0; i < g_qat_total_qp; i++) {
1164 		device_qp = calloc(1, sizeof(struct device_qp));
1165 		device_qp->index = i;
1166 		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
1167 	}
1168 	g_crypto_ch->device_qp = NULL;
1169 	g_crypto_bdev.opts->drv_name = QAT;
1170 
1171 	/* First assignment will assign to 0 and next at 32. */
1172 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1173 			       0, QAT_VF_SPREAD);
1174 
1175 	/* Second assignment will assign to 32 and next at 64. */
1176 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1177 			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
1178 
1179 	/* Third assignment will assign to 64 and next at 0. */
1180 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1181 			       QAT_VF_SPREAD * 2, 0);
1182 
1183 	/* Fourth assignment will assign to 1 and next at 33. */
1184 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1185 			       1, QAT_VF_SPREAD + 1);
1186 
1187 	/* make sure that one MLX5 qp is found */
1188 	device_qp = calloc(1, sizeof(struct device_qp));
1189 	TAILQ_INSERT_TAIL(&g_device_qp_mlx5, device_qp, link);
1190 	g_crypto_ch->device_qp = NULL;
1191 	g_crypto_bdev.opts->drv_name = MLX5;
1192 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1193 	CU_ASSERT(g_crypto_ch->device_qp == device_qp);
1194 
1195 	_clear_device_qp_lists();
1196 }
1197 
1198 int
1199 main(int argc, char **argv)
1200 {
1201 	CU_pSuite	suite = NULL;
1202 	unsigned int	num_failures;
1203 
1204 	CU_set_error_action(CUEA_ABORT);
1205 	CU_initialize_registry();
1206 
1207 	suite = CU_add_suite("crypto", test_setup, test_cleanup);
1208 	CU_ADD_TEST(suite, test_error_paths);
1209 	CU_ADD_TEST(suite, test_simple_write);
1210 	CU_ADD_TEST(suite, test_simple_read);
1211 	CU_ADD_TEST(suite, test_large_rw);
1212 	CU_ADD_TEST(suite, test_dev_full);
1213 	CU_ADD_TEST(suite, test_crazy_rw);
1214 	CU_ADD_TEST(suite, test_passthru);
1215 	CU_ADD_TEST(suite, test_initdrivers);
1216 	CU_ADD_TEST(suite, test_crypto_op_complete);
1217 	CU_ADD_TEST(suite, test_supported_io);
1218 	CU_ADD_TEST(suite, test_reset);
1219 	CU_ADD_TEST(suite, test_poller);
1220 	CU_ADD_TEST(suite, test_assign_device_qp);
1221 
1222 	CU_basic_set_mode(CU_BRM_VERBOSE);
1223 	CU_basic_run_tests();
1224 	num_failures = CU_get_number_of_failures();
1225 	CU_cleanup_registry();
1226 	return num_failures;
1227 }
1228