xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision a76bbe355313bd013960ee6874311a4af0ec46ae)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/test_env.c"
37 #include "spdk_internal/mock.h"
38 #include "thread/thread_internal.h"
39 #include "unit/lib/json_mock.c"
40 
41 #include <rte_crypto.h>
42 #include <rte_cryptodev.h>
43 
#define MAX_TEST_BLOCKS 8192
/* Crypto ops handed out by the bulk-alloc mock and returned by the dequeue mock. */
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
/* Ops captured by the enqueue mock so the dev_full test can inspect them. */
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

/* Knobs controlling how many ops the burst/alloc mocks report as handled. */
uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
unsigned ut_rte_crypto_op_bulk_alloc;
/* Return value for the attach-session mock; tests set -1 to force failure. */
int ut_rte_crypto_op_attach_sym_session = 0;
/* Selector values for the rte_cryptodev_info_get() mock further below. */
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_BOGUS_PMD 2
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;
57 
/* Redirect rte_pktmbuf_free_bulk() to return the mbufs to the SPDK mempool
 * they were drawn from; all mbufs in a batch share m[0]'s pool.
 */
void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
	spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
}
64 
/* Redirect rte_pktmbuf_free() to put the single mbuf back into its SPDK mempool. */
void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
	spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
}
71 
/* The "rte" mempools in these tests are really SPDK mempools (see the
 * pool-create wrappers below), so free them through the SPDK API.
 */
void rte_mempool_free(struct rte_mempool *mp)
{
	spdk_mempool_free((struct spdk_mempool *)mp);
}
76 
77 int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
78 				unsigned count);
79 #define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
80 int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
81 				unsigned count)
82 {
83 	int rc;
84 
85 	rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
86 	if (rc) {
87 		return rc;
88 	}
89 	for (unsigned i = 0; i < count; i++) {
90 		rte_pktmbuf_reset(mbufs[i]);
91 		mbufs[i]->pool = pool;
92 	}
93 	return rc;
94 }
95 
/* Create an "rte" session pool backed by an SPDK mempool; each element
 * holds the session header plus priv_size bytes of private data.
 */
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
				      uint32_t elt_size, uint32_t cache_size,
				      uint16_t priv_size, int socket_id)
{
	return (struct rte_mempool *)spdk_mempool_create(name, nb_elts,
			elt_size + priv_size, cache_size, socket_id);
}
109 
110 struct rte_mempool *
111 rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
112 			uint16_t priv_size, uint16_t data_room_size, int socket_id)
113 {
114 	struct spdk_mempool *tmp;
115 
116 	tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
117 				  cache_size, socket_id);
118 
119 	return (struct rte_mempool *)tmp;
120 }
121 
122 struct rte_mempool *
123 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
124 		   unsigned cache_size, unsigned private_data_size,
125 		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
126 		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
127 		   int socket_id, unsigned flags)
128 {
129 	struct spdk_mempool *tmp;
130 
131 	tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
132 				  cache_size, socket_id);
133 
134 	return (struct rte_mempool *)tmp;
135 }
136 
137 DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
138 struct rte_mempool *
139 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
140 			  unsigned nb_elts, unsigned cache_size,
141 			  uint16_t priv_size, int socket_id)
142 {
143 	struct spdk_mempool *tmp;
144 
145 	HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);
146 
147 	tmp = spdk_mempool_create(name, nb_elts,
148 				  sizeof(struct rte_crypto_op) + priv_size,
149 				  cache_size, socket_id);
150 
151 	return (struct rte_mempool *)tmp;
152 
153 }
154 
155 /* Those functions are defined as static inline in DPDK, so we can't
156  * mock them straight away. We use defines to redirect them into
157  * our custom functions.
158  */
159 static bool g_resubmit_test = false;
160 #define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
161 static inline uint16_t
162 mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
163 				 struct rte_crypto_op **ops, uint16_t nb_ops)
164 {
165 	int i;
166 
167 	CU_ASSERT(nb_ops > 0);
168 
169 	for (i = 0; i < nb_ops; i++) {
170 		/* Use this empty (til now) array of pointers to store
171 		 * enqueued operations for assertion in dev_full test.
172 		 */
173 		g_test_dev_full_ops[i] = *ops++;
174 		if (g_resubmit_test == true) {
175 			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
176 		}
177 	}
178 
179 	return g_enqueue_mock;
180 }
181 
182 #define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
183 static inline uint16_t
184 mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
185 				 struct rte_crypto_op **ops, uint16_t nb_ops)
186 {
187 	int i;
188 
189 	CU_ASSERT(nb_ops > 0);
190 
191 	for (i = 0; i < g_dequeue_mock; i++) {
192 		*ops++ = g_test_crypto_ops[i];
193 	}
194 
195 	return g_dequeue_mock;
196 }
197 
198 /* Instead of allocating real memory, assign the allocations to our
199  * test array for assertion in tests.
200  */
201 #define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
202 static inline unsigned
203 mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
204 			      enum rte_crypto_op_type type,
205 			      struct rte_crypto_op **ops, uint16_t nb_ops)
206 {
207 	int i;
208 
209 	for (i = 0; i < nb_ops; i++) {
210 		*ops++ = g_test_crypto_ops[i];
211 	}
212 	return ut_rte_crypto_op_bulk_alloc;
213 }
214 
/* No-op: the test crypto ops are posix_memalign'd in test_setup(), not real
 * mempool elements, so there is nothing to return to a pool here.
 */
#define rte_mempool_put_bulk mock_rte_mempool_put_bulk
static __rte_always_inline void
mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
			  unsigned int n)
{
	return;
}
222 
/* Attach-session mock: returns the ut_ knob so tests can force the
 * session-attach step to fail (see test_error_paths()).
 */
#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
{
	return ut_rte_crypto_op_attach_sym_session;
}
230 
/* Report a single lcore to the code under test. */
#define rte_lcore_count mock_rte_lcore_count
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}
237 
238 #include "bdev/crypto/vbdev_crypto.c"
239 
/* SPDK stubs */
/* bdev-layer entry points called by the vbdev; all succeed (or are no-ops)
 * unless a test overrides them via MOCK_SET().
 */
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
263 
/* DPDK stubs */
/* Fixed mbuf dynfield offset used by both the stub below and test_setup(). */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
/* Session create returns a non-NULL sentinel so callers see "success". */
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);
285 
struct rte_cryptodev *rte_cryptodevs;

/* global vars and setup/cleanup functions used for all test functions */
struct spdk_bdev_io *g_bdev_io;		/* single bdev_io reused by every test */
struct crypto_bdev_io *g_io_ctx;	/* points at g_bdev_io->driver_ctx */
struct crypto_io_channel *g_crypto_ch;	/* ctx region inside g_io_ch */
struct spdk_io_channel *g_io_ch;
struct vbdev_dev g_device;
struct vbdev_crypto g_crypto_bdev;
struct device_qp g_dev_qp;
296 
297 void
298 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
299 {
300 	dev_info->max_nb_queue_pairs = 1;
301 	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
302 		dev_info->driver_name = g_driver_names[0];
303 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
304 		dev_info->driver_name = g_driver_names[1];
305 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
306 		dev_info->driver_name = "junk";
307 	}
308 }
309 
/* Echo dev_id back as the "private session size" so tests can observe
 * which device was queried.
 */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	unsigned int size = dev_id;

	return size;
}
315 
/* Immediately invoke the aux-buf callback with a sentinel buffer pointer
 * (0xDEADBEEF) instead of allocating real memory.
 */
void
spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
{
	cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
}
321 
/* Immediately invoke the get-buf callback, reporting success. */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}
327 
/* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;
bool ut_spdk_bdev_readv_blocks_mocked = false;
/* Completes immediately: callback success flag is true iff the knob is 0,
 * then the knob is returned as the submit result.
 */
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}
340 
int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
/* Completes immediately: success iff the knob is 0; returns the knob. */
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}
352 
int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
/* Completes immediately: success iff the knob is 0; returns the knob. */
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}
363 
int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
/* Completes immediately: success iff the knob is 0; returns the knob. */
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}
374 
int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
/* Completes immediately: success iff the knob is 0; returns the knob. */
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}
384 
bool g_completion_called = false;
/* Record the completion status on the io and note that completion ran,
 * so tests can assert on bdev_io->internal.status afterwards.
 */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}
392 
393 /* Global setup for all tests that share a bunch of preparation... */
394 static int
395 test_setup(void)
396 {
397 	int i, rc;
398 
399 	/* Prepare essential variables for test routines */
400 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
401 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
402 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
403 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
404 	g_crypto_ch = (struct crypto_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
405 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
406 	memset(&g_device, 0, sizeof(struct vbdev_dev));
407 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
408 	g_dev_qp.device = &g_device;
409 	g_io_ctx->crypto_ch = g_crypto_ch;
410 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
411 	g_io_ctx->crypto_bdev->qp_desc_nr = CRYPTO_QP_DESCRIPTORS;
412 	g_crypto_ch->device_qp = &g_dev_qp;
413 	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
414 	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
415 
416 	/* Allocate a real mbuf pool so we can test error paths */
417 	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS,
418 					    (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
419 					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
420 	/* Instead of allocating real rte mempools for these, it's easier and provides the
421 	 * same coverage just calloc them here.
422 	 */
423 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
424 		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64,
425 				    sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op) +
426 				    IV_LENGTH + QUEUED_OP_LENGTH);
427 		if (rc != 0) {
428 			assert(false);
429 		}
430 		memset(g_test_crypto_ops[i], 0, sizeof(struct rte_crypto_op) +
431 		       sizeof(struct rte_crypto_sym_op) + QUEUED_OP_LENGTH);
432 	}
433 	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;
434 
435 	return 0;
436 }
437 
/* Global teardown for all tests */
static int
test_cleanup(void)
{
	int i;

	/* Not every pool is guaranteed to exist; each is freed only if it was
	 * created, and NULLed to guard against double-free on re-entry.
	 */
	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	/* Release the posix_memalign'd test ops and the per-suite io objects. */
	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		free(g_test_crypto_ops[i]);
	}
	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);
	return 0;
}
470 
/* Drive vbdev_crypto_submit_request() through its failure branches by
 * toggling the mock knobs one at a time: mempool exhaustion (queued, so
 * still "success"), readv completion failure, readv submit failure,
 * crypto-op allocation failure, and session-attach failure.
 */
static void
test_error_paths(void)
{
	/* Single element block size write, just to test error paths
	 * in vbdev_crypto_submit_request().
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of spdk_mempool_get_bulk(), will result in success because it
	 * will get queued.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_mempool_get, NULL);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* same thing but switch to reads to test error path in _crypto_complete_io() */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Now with the read_blocks failing */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_bdev_readv_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_SET(spdk_bdev_readv_blocks, 0);
	MOCK_CLEAR(spdk_mempool_get);

	/* test failure of rte_crypto_op_bulk_alloc() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_bulk_alloc = 0;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of rte_crypto_op_attach_sym_session() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_attach_sym_session = -1;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_attach_sym_session = 0;
}
523 
/* Happy-path single-block write: verify the crypto op's source mbuf maps
 * the caller's buffer, a destination mbuf (aux buf) was set up for the
 * encrypted output, and the bdev_io is stashed in the mbuf dynfield.
 */
static void
test_simple_write(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.offset_blocks = 0;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	/* Any valid address works; the function's own address is handy. */
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
	CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
	CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);

	/* Return the mbufs taken from g_mbuf_mp during submit. */
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
}
558 
/* Happy-path single-block read: decrypt-in-place, so only a source mbuf is
 * expected (m_dst stays NULL) and the bdev_io is stashed in the dynfield.
 */
static void
test_simple_read(void)
{
	/* Single element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}
586 
/* Largest-allowed IO (CRYPTO_MAX_IO bytes) split into per-block crypto ops,
 * first as a read (no m_dst) then as a write (m_dst from the aux buf).
 */
static void
test_large_rw(void)
{
	unsigned block_len = 512;
	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
	unsigned io_len = block_len * num_blocks;
	unsigned i;

	/* Multi block size read, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* One op per block, each mapping a block_len slice of the buffer. */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* Writes encrypt into the aux buffer, so m_dst must be populated. */
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
		CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
		CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}
653 
/* Simulate a full device: two ops are built but the enqueue mock accepts
 * only one, so the second must land on the channel's queued_cry_ops list
 * with its state intact. Then force a non-busy enqueue failure and verify
 * the io is failed outright.
 */
static void
test_dev_full(void)
{
	struct vbdev_crypto_op *queued_op;
	struct rte_crypto_sym_op *sym_op;
	struct crypto_bdev_io *io_ctx;

	/* Two element block size read */
	/* NOTE(review): iovcnt is 1 while two iovs are populated below —
	 * presumably the code under test walks by total length rather than
	 * iovcnt; confirm this setup is intentional.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 2;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;

	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
	/* The first op was accepted by the "device". */
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);

	/* make sure one got queued and confirm its values */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
	sym_op = queued_op->crypto_op->sym;
	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
	CU_ASSERT(queued_op->bdev_io == g_bdev_io);
	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[1]->sym->m_src);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
	CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
}
714 
/* IOs whose iovec boundaries do not line up with crypto block boundaries:
 * the splitter must still emit one op per 512-byte block, addressed as if
 * the iovecs were one contiguous buffer.
 */
static void
test_crazy_rw(void)
{
	unsigned block_len = 512;
	int num_blocks = 4;
	int i;

	/* Multi block size read, single element, strange IOV makeup */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 3;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* NOTE(review): this assertion compares m_src with itself and is
		 * always true — probably intended to compare against something
		 * else; worth fixing upstream.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element strange IOV makeup */
	num_blocks = 8;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 4;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* NOTE(review): the two self-comparisons below are tautological
		 * (always true); they likely should reference expected values.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}
790 
/* Verify that non-R/W I/O types (UNMAP, FLUSH) are passed straight through to
 * the base bdev and that their success/failure propagates via our completion
 * callback. Also verify WRITE_ZEROES fails since we report it unsupported.
 */
static void
test_passthru(void)
{
	/* Make sure these follow our completion callback, test success & fail. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	MOCK_SET(spdk_bdev_unmap_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_unmap_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_unmap_blocks);

	/* Same pattern for FLUSH: stub the base bdev call to succeed then fail. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
	MOCK_SET(spdk_bdev_flush_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* We should never get a WZ command, we report that we don't support it. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
}
818 
/* Placeholder: reset handling runs through spdk_for_each_channel() and is not
 * yet covered here (see TODO below).
 */
static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting w/o UT for this function for now and
	 * will follow up with something shortly.
	 */
}
828 
829 static void
830 init_cleanup(void)
831 {
832 	if (g_crypto_op_mp) {
833 		rte_mempool_free(g_crypto_op_mp);
834 		g_crypto_op_mp = NULL;
835 	}
836 	if (g_mbuf_mp) {
837 		rte_mempool_free(g_mbuf_mp);
838 		g_mbuf_mp = NULL;
839 	}
840 	if (g_session_mp) {
841 		rte_mempool_free(g_session_mp);
842 		g_session_mp = NULL;
843 	}
844 	if (g_session_mp_priv != NULL) {
845 		/* g_session_mp_priv may or may not be set depending on the DPDK version */
846 		rte_mempool_free(g_session_mp_priv);
847 		g_session_mp_priv = NULL;
848 	}
849 }
850 
851 static void
852 test_initdrivers(void)
853 {
854 	int rc;
855 	static struct rte_mempool *orig_mbuf_mp;
856 	static struct rte_mempool *orig_session_mp;
857 	static struct rte_mempool *orig_session_mp_priv;
858 
859 	/* These tests will alloc and free our g_mbuf_mp
860 	 * so save that off here and restore it after each test is over.
861 	 */
862 	orig_mbuf_mp = g_mbuf_mp;
863 	orig_session_mp = g_session_mp;
864 	orig_session_mp_priv = g_session_mp_priv;
865 
866 	g_session_mp_priv = NULL;
867 	g_session_mp = NULL;
868 	g_mbuf_mp = NULL;
869 
870 	/* No drivers available, not an error though */
871 	MOCK_SET(rte_cryptodev_count, 0);
872 	rc = vbdev_crypto_init_crypto_drivers();
873 	CU_ASSERT(rc == 0);
874 	CU_ASSERT(g_mbuf_mp == NULL);
875 	CU_ASSERT(g_session_mp == NULL);
876 	CU_ASSERT(g_session_mp_priv == NULL);
877 
878 	/* Can't create session pool. */
879 	MOCK_SET(rte_cryptodev_count, 2);
880 	MOCK_SET(spdk_mempool_create, NULL);
881 	rc = vbdev_crypto_init_crypto_drivers();
882 	CU_ASSERT(rc == -ENOMEM);
883 	CU_ASSERT(g_mbuf_mp == NULL);
884 	CU_ASSERT(g_session_mp == NULL);
885 	CU_ASSERT(g_session_mp_priv == NULL);
886 	MOCK_CLEAR(spdk_mempool_create);
887 
888 	/* Can't create op pool. */
889 	MOCK_SET(rte_crypto_op_pool_create, NULL);
890 	rc = vbdev_crypto_init_crypto_drivers();
891 	CU_ASSERT(rc == -ENOMEM);
892 	CU_ASSERT(g_mbuf_mp == NULL);
893 	CU_ASSERT(g_session_mp == NULL);
894 	CU_ASSERT(g_session_mp_priv == NULL);
895 	MOCK_CLEAR(rte_crypto_op_pool_create);
896 
897 	/* Check resources are not sufficient */
898 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
899 	rc = vbdev_crypto_init_crypto_drivers();
900 	CU_ASSERT(rc == -EINVAL);
901 
902 	/* Test crypto dev configure failure. */
903 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
904 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
905 	MOCK_SET(rte_cryptodev_configure, -1);
906 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
907 	rc = vbdev_crypto_init_crypto_drivers();
908 	MOCK_SET(rte_cryptodev_configure, 0);
909 	CU_ASSERT(g_mbuf_mp == NULL);
910 	CU_ASSERT(g_session_mp == NULL);
911 	CU_ASSERT(g_session_mp_priv == NULL);
912 	CU_ASSERT(rc == -EINVAL);
913 
914 	/* Test failure of qp setup. */
915 	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
916 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
917 	rc = vbdev_crypto_init_crypto_drivers();
918 	CU_ASSERT(rc == -EINVAL);
919 	CU_ASSERT(g_mbuf_mp == NULL);
920 	CU_ASSERT(g_session_mp == NULL);
921 	CU_ASSERT(g_session_mp_priv == NULL);
922 	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
923 
924 	/* Test failure of dev start. */
925 	MOCK_SET(rte_cryptodev_start, -1);
926 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
927 	rc = vbdev_crypto_init_crypto_drivers();
928 	CU_ASSERT(rc == -EINVAL);
929 	CU_ASSERT(g_mbuf_mp == NULL);
930 	CU_ASSERT(g_session_mp == NULL);
931 	CU_ASSERT(g_session_mp_priv == NULL);
932 	MOCK_SET(rte_cryptodev_start, 0);
933 
934 	/* Test bogus PMD */
935 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
936 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
937 	rc = vbdev_crypto_init_crypto_drivers();
938 	CU_ASSERT(g_mbuf_mp == NULL);
939 	CU_ASSERT(g_session_mp == NULL);
940 	CU_ASSERT(rc == -EINVAL);
941 
942 	/* Test happy path QAT. */
943 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
944 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
945 	rc = vbdev_crypto_init_crypto_drivers();
946 	CU_ASSERT(g_mbuf_mp != NULL);
947 	CU_ASSERT(g_session_mp != NULL);
948 	init_cleanup();
949 	CU_ASSERT(rc == 0);
950 
951 	/* Test happy path AESNI. */
952 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
953 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
954 	rc = vbdev_crypto_init_crypto_drivers();
955 	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
956 	init_cleanup();
957 	CU_ASSERT(rc == 0);
958 
959 	/* Test failure of DPDK dev init. By now it is not longer an error
960 	 * situation for entire crypto framework. */
961 	MOCK_SET(rte_cryptodev_count, 2);
962 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
963 	MOCK_SET(rte_vdev_init, -1);
964 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
965 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
966 	rc = vbdev_crypto_init_crypto_drivers();
967 	CU_ASSERT(rc == 0);
968 	CU_ASSERT(g_mbuf_mp != NULL);
969 	CU_ASSERT(g_session_mp != NULL);
970 	CU_ASSERT(g_session_mp_priv != NULL);
971 	init_cleanup();
972 	MOCK_SET(rte_vdev_init, 0);
973 	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);
974 
975 	/* restore our initial values. */
976 	g_mbuf_mp = orig_mbuf_mp;
977 	g_session_mp = orig_session_mp;
978 	g_session_mp_priv = orig_session_mp_priv;
979 }
980 
981 static void
982 test_crypto_op_complete(void)
983 {
984 	/* Make sure completion code respects failure. */
985 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
986 	g_completion_called = false;
987 	_crypto_operation_complete(g_bdev_io);
988 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
989 	CU_ASSERT(g_completion_called == true);
990 
991 	/* Test read completion. */
992 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
993 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
994 	g_completion_called = false;
995 	_crypto_operation_complete(g_bdev_io);
996 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
997 	CU_ASSERT(g_completion_called == true);
998 
999 	/* Test write completion success. */
1000 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1001 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
1002 	g_completion_called = false;
1003 	MOCK_SET(spdk_bdev_writev_blocks, 0);
1004 	_crypto_operation_complete(g_bdev_io);
1005 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
1006 	CU_ASSERT(g_completion_called == true);
1007 
1008 	/* Test write completion failed. */
1009 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1010 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
1011 	g_completion_called = false;
1012 	MOCK_SET(spdk_bdev_writev_blocks, -1);
1013 	_crypto_operation_complete(g_bdev_io);
1014 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1015 	CU_ASSERT(g_completion_called == true);
1016 
1017 	/* Test bogus type for this completion. */
1018 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1019 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
1020 	g_completion_called = false;
1021 	_crypto_operation_complete(g_bdev_io);
1022 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1023 	CU_ASSERT(g_completion_called == true);
1024 }
1025 
1026 static void
1027 test_supported_io(void)
1028 {
1029 	void *ctx = NULL;
1030 	bool rc = true;
1031 
1032 	/* Make sure we always report false to WZ, we need the bdev layer to
1033 	 * send real 0's so we can encrypt/decrypt them.
1034 	 */
1035 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
1036 	CU_ASSERT(rc == false);
1037 }
1038 
1039 static void
1040 test_poller(void)
1041 {
1042 	int rc;
1043 	struct rte_mbuf *src_mbufs[2];
1044 	struct vbdev_crypto_op *op_to_resubmit;
1045 
1046 	/* test regular 1 op to dequeue and complete */
1047 	g_dequeue_mock = g_enqueue_mock = 1;
1048 	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
1049 	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
1050 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
1051 			   uint64_t *) = (uintptr_t)g_bdev_io;
1052 	g_test_crypto_ops[0]->sym->m_dst = NULL;
1053 	g_io_ctx->cryop_cnt_remaining = 1;
1054 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
1055 	rc = crypto_dev_poller(g_crypto_ch);
1056 	CU_ASSERT(rc == 1);
1057 
1058 	/* We have nothing dequeued but have some to resubmit */
1059 	g_dequeue_mock = 0;
1060 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
1061 
1062 	/* add an op to the queued list. */
1063 	g_resubmit_test = true;
1064 	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
1065 	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
1066 	op_to_resubmit->bdev_io = g_bdev_io;
1067 	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
1068 			  op_to_resubmit,
1069 			  link);
1070 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
1071 	rc = crypto_dev_poller(g_crypto_ch);
1072 	g_resubmit_test = false;
1073 	CU_ASSERT(rc == 0);
1074 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
1075 
1076 	/* 2 to dequeue but 2nd one failed */
1077 	g_dequeue_mock = g_enqueue_mock = 2;
1078 	g_io_ctx->cryop_cnt_remaining = 2;
1079 	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
1080 	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
1081 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
1082 			   uint64_t *) = (uint64_t)g_bdev_io;
1083 	g_test_crypto_ops[0]->sym->m_dst = NULL;
1084 	g_test_crypto_ops[0]->status =  RTE_CRYPTO_OP_STATUS_SUCCESS;
1085 	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
1086 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
1087 			   uint64_t *) = (uint64_t)g_bdev_io;
1088 	g_test_crypto_ops[1]->sym->m_dst = NULL;
1089 	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1090 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1091 	rc = crypto_dev_poller(g_crypto_ch);
1092 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1093 	CU_ASSERT(rc == 2);
1094 }
1095 
1096 /* Helper function for test_assign_device_qp() */
1097 static void
1098 _clear_device_qp_lists(void)
1099 {
1100 	struct device_qp *device_qp = NULL;
1101 
1102 	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
1103 		device_qp = TAILQ_FIRST(&g_device_qp_qat);
1104 		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
1105 		free(device_qp);
1106 
1107 	}
1108 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
1109 	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
1110 		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
1111 		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
1112 		free(device_qp);
1113 	}
1114 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
1115 }
1116 
1117 /* Helper function for test_assign_device_qp() */
1118 static void
1119 _check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
1120 		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
1121 		       uint8_t current_index)
1122 {
1123 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1124 	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
1125 	CU_ASSERT(g_next_qat_index == current_index);
1126 }
1127 
1128 static void
1129 test_assign_device_qp(void)
1130 {
1131 	struct device_qp *device_qp = NULL;
1132 	int i;
1133 
1134 	/* start with a known state, clear the device/qp lists */
1135 	_clear_device_qp_lists();
1136 
1137 	/* make sure that one AESNI_MB qp is found */
1138 	device_qp = calloc(1, sizeof(struct device_qp));
1139 	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
1140 	g_crypto_ch->device_qp = NULL;
1141 	g_crypto_bdev.drv_name = AESNI_MB;
1142 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1143 	CU_ASSERT(g_crypto_ch->device_qp != NULL);
1144 
1145 	/* QAT testing is more complex as the code under test load balances by
1146 	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
1147 	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
1148 	 * each with 2 qp so the "spread" between assignments is 32.
1149 	 */
1150 	g_qat_total_qp = 96;
1151 	for (i = 0; i < g_qat_total_qp; i++) {
1152 		device_qp = calloc(1, sizeof(struct device_qp));
1153 		device_qp->index = i;
1154 		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
1155 	}
1156 	g_crypto_ch->device_qp = NULL;
1157 	g_crypto_bdev.drv_name = QAT;
1158 
1159 	/* First assignment will assign to 0 and next at 32. */
1160 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1161 			       0, QAT_VF_SPREAD);
1162 
1163 	/* Second assignment will assign to 32 and next at 64. */
1164 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1165 			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
1166 
1167 	/* Third assignment will assign to 64 and next at 0. */
1168 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1169 			       QAT_VF_SPREAD * 2, 0);
1170 
1171 	/* Fourth assignment will assign to 1 and next at 33. */
1172 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1173 			       1, QAT_VF_SPREAD + 1);
1174 
1175 	_clear_device_qp_lists();
1176 }
1177 
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	/* Abort on any CUnit framework error (e.g. registry/suite failures). */
	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	/* test_setup/test_cleanup run once before/after the whole suite. */
	suite = CU_add_suite("crypto", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_error_paths);
	CU_ADD_TEST(suite, test_simple_write);
	CU_ADD_TEST(suite, test_simple_read);
	CU_ADD_TEST(suite, test_large_rw);
	CU_ADD_TEST(suite, test_dev_full);
	CU_ADD_TEST(suite, test_crazy_rw);
	CU_ADD_TEST(suite, test_passthru);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_crypto_op_complete);
	CU_ADD_TEST(suite, test_supported_io);
	CU_ADD_TEST(suite, test_reset);
	CU_ADD_TEST(suite, test_poller);
	CU_ADD_TEST(suite, test_assign_device_qp);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	/* Exit status is the failure count so CI can detect test failures. */
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
1208