xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision 88e3ffd7b6c5ec1ea1a660354d25f02c766092e1)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/test_env.c"
37 #include "spdk_internal/mock.h"
38 #include "unit/lib/json_mock.c"
39 
40 #include <rte_crypto.h>
41 #include <rte_cryptodev.h>
42 #include <rte_cryptodev_pmd.h>
43 
#define MAX_TEST_BLOCKS 8192
/* Arrays backing the crypto-op mocks: the bulk_alloc mock hands out entries of
 * g_test_crypto_ops, and the enqueue mock records what it received in
 * g_test_dev_full_ops so tests can assert on it.
 */
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

/* Return values for the DPDK burst/alloc mocks; each test sets these per case. */
uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
unsigned ut_rte_crypto_op_bulk_alloc;
int ut_rte_crypto_op_attach_sym_session = 0;
/* Selector values for the rte_cryptodev_info_get() mock's reported driver name. */
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_BOGUS_PMD 2
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;
57 
58 /* Those functions are defined as static inline in DPDK, so we can't
59  * mock them straight away. We use defines to redirect them into
60  * our custom functions.
61  */
62 static bool g_resubmit_test = false;
63 #define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
64 static inline uint16_t
65 mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
66 				 struct rte_crypto_op **ops, uint16_t nb_ops)
67 {
68 	int i;
69 
70 	CU_ASSERT(nb_ops > 0);
71 
72 	for (i = 0; i < nb_ops; i++) {
73 		/* Use this empty (til now) array of pointers to store
74 		 * enqueued operations for assertion in dev_full test.
75 		 */
76 		g_test_dev_full_ops[i] = *ops++;
77 		if (g_resubmit_test == true) {
78 			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
79 		}
80 	}
81 
82 	return g_enqueue_mock;
83 }
84 
85 #define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
86 static inline uint16_t
87 mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
88 				 struct rte_crypto_op **ops, uint16_t nb_ops)
89 {
90 	int i;
91 
92 	CU_ASSERT(nb_ops > 0);
93 
94 	for (i = 0; i < g_dequeue_mock; i++) {
95 		*ops++ = g_test_crypto_ops[i];
96 	}
97 
98 	return g_dequeue_mock;
99 }
100 
101 /* Instead of allocating real memory, assign the allocations to our
102  * test array for assertion in tests.
103  */
104 #define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
105 static inline unsigned
106 mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
107 			      enum rte_crypto_op_type type,
108 			      struct rte_crypto_op **ops, uint16_t nb_ops)
109 {
110 	int i;
111 
112 	for (i = 0; i < nb_ops; i++) {
113 		*ops++ = g_test_crypto_ops[i];
114 	}
115 	return ut_rte_crypto_op_bulk_alloc;
116 }
117 
118 #define rte_mempool_put_bulk mock_rte_mempool_put_bulk
119 static __rte_always_inline void
120 mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
121 			  unsigned int n)
122 {
123 	return;
124 }
125 
126 #define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
127 static inline int
128 mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
129 				      struct rte_cryptodev_sym_session *sess)
130 {
131 	return ut_rte_crypto_op_attach_sym_session;
132 }
133 
#define rte_lcore_count mock_rte_lcore_count
/* Pretend exactly one lcore is available. */
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1U;
}
140 
141 #include "bdev/crypto/vbdev_crypto.c"
142 
/* SPDK stubs */
/* All bdev-layer entry points the vbdev calls during registration, claim and
 * teardown are stubbed to fixed return values; tests only exercise the I/O path.
 */
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);

/* DPDK stubs */
/* Non-NULL fake mempool pointers ((struct rte_mempool *)1) let init code treat
 * pool creation as successful without allocating anything real.
 */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
DEFINE_STUB(rte_mempool_create, struct rte_mempool *, (const char *name, unsigned n,
		unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags), (struct rte_mempool *)1);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
	    (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
	     unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_sym_session_pool_create, struct rte_mempool *, (const char *name,
		uint32_t nb_elts,
		uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
		int socket_id), (struct rte_mempool *)1);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

/* Referenced by DPDK headers; never dereferenced by these tests. */
struct rte_cryptodev *rte_cryptodevs;

/* global vars and setup/cleanup functions used for all test functions */
struct spdk_bdev_io *g_bdev_io;
struct crypto_bdev_io *g_io_ctx;
struct crypto_io_channel *g_crypto_ch;
struct spdk_io_channel *g_io_ch;
struct vbdev_dev g_device;
struct vbdev_crypto g_crypto_bdev;
struct device_qp g_dev_qp;
212 
213 void
214 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
215 {
216 	dev_info->max_nb_queue_pairs = 1;
217 	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
218 		dev_info->driver_name = g_driver_names[0];
219 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
220 		dev_info->driver_name = g_driver_names[1];
221 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
222 		dev_info->driver_name = "junk";
223 	}
224 }
225 
226 unsigned int
227 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
228 {
229 	return (unsigned int)dev_id;
230 }
231 
232 void
233 spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
234 {
235 	cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
236 }
237 
238 void
239 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
240 {
241 	cb(g_io_ch, g_bdev_io, true);
242 }
243 
244 /* Mock these functions to call the callback and then return the value we require */
245 int ut_spdk_bdev_readv_blocks = 0;
246 bool ut_spdk_bdev_readv_blocks_mocked = false;
247 int
248 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
249 		       struct iovec *iov, int iovcnt,
250 		       uint64_t offset_blocks, uint64_t num_blocks,
251 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
252 {
253 	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
254 	return ut_spdk_bdev_readv_blocks;
255 }
256 
257 int ut_spdk_bdev_writev_blocks = 0;
258 bool ut_spdk_bdev_writev_blocks_mocked = false;
259 int
260 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
261 			struct iovec *iov, int iovcnt,
262 			uint64_t offset_blocks, uint64_t num_blocks,
263 			spdk_bdev_io_completion_cb cb, void *cb_arg)
264 {
265 	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
266 	return ut_spdk_bdev_writev_blocks;
267 }
268 
269 int ut_spdk_bdev_unmap_blocks = 0;
270 bool ut_spdk_bdev_unmap_blocks_mocked = false;
271 int
272 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
273 		       uint64_t offset_blocks, uint64_t num_blocks,
274 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
275 {
276 	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
277 	return ut_spdk_bdev_unmap_blocks;
278 }
279 
280 int ut_spdk_bdev_flush_blocks = 0;
281 bool ut_spdk_bdev_flush_blocks_mocked = false;
282 int
283 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
284 		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
285 		       void *cb_arg)
286 {
287 	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
288 	return ut_spdk_bdev_flush_blocks;
289 }
290 
291 int ut_spdk_bdev_reset = 0;
292 bool ut_spdk_bdev_reset_mocked = false;
293 int
294 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
295 		spdk_bdev_io_completion_cb cb, void *cb_arg)
296 {
297 	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
298 	return ut_spdk_bdev_reset;
299 }
300 
301 bool g_completion_called = false;
302 void
303 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
304 {
305 	bdev_io->internal.status = status;
306 	g_completion_called = true;
307 }
308 
309 /* Global setup for all tests that share a bunch of preparation... */
310 static int
311 test_setup(void)
312 {
313 	int i, rc;
314 
315 	/* Prepare essential variables for test routines */
316 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
317 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
318 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
319 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
320 	g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
321 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
322 	memset(&g_device, 0, sizeof(struct vbdev_dev));
323 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
324 	g_dev_qp.device = &g_device;
325 	g_io_ctx->crypto_ch = g_crypto_ch;
326 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
327 	g_crypto_ch->device_qp = &g_dev_qp;
328 	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
329 	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
330 
331 	/* Allocate a real mbuf pool so we can test error paths */
332 	g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
333 					SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
334 					SPDK_ENV_SOCKET_ID_ANY);
335 
336 	/* Instead of allocating real rte mempools for these, it's easier and provides the
337 	 * same coverage just calloc them here.
338 	 */
339 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
340 		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64,
341 				    sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op) +
342 				    AES_CBC_IV_LENGTH + QUEUED_OP_LENGTH);
343 		if (rc != 0) {
344 			assert(false);
345 		}
346 		memset(g_test_crypto_ops[i], 0, sizeof(struct rte_crypto_op) +
347 		       sizeof(struct rte_crypto_sym_op) + QUEUED_OP_LENGTH);
348 	}
349 	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;
350 
351 	return 0;
352 }
353 
354 /* Global teardown for all tests */
355 static int
356 test_cleanup(void)
357 {
358 	int i;
359 
360 	spdk_mempool_free(g_mbuf_mp);
361 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
362 		free(g_test_crypto_ops[i]);
363 	}
364 	free(g_bdev_io->u.bdev.iovs);
365 	free(g_bdev_io);
366 	free(g_io_ch);
367 	return 0;
368 }
369 
/* Exercise the error paths in vbdev_crypto_submit_request() and the
 * completion path, using a single 512B block write/read. The MOCK_SET /
 * MOCK_CLEAR ordering is significant: each mock stays in effect for every
 * submit until it is reset.
 */
static void
test_error_paths(void)
{
	/* Single element block size write, just to test error paths
	 * in vbdev_crypto_submit_request().
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of spdk_mempool_get_bulk(), will result in success because it
	 * will get queued.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_mempool_get, NULL);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* same thing but switch to reads to test error path in _crypto_complete_io() */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* Pre-insert the I/O so the failure path can find and remove it. */
	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Now with the read_blocks failing */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_bdev_readv_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_SET(spdk_bdev_readv_blocks, 0);
	MOCK_CLEAR(spdk_mempool_get);

	/* test failure of rte_crypto_op_bulk_alloc() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_bulk_alloc = 0;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of rte_crypto_op_attach_sym_session() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_attach_sym_session = -1;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_attach_sym_session = 0;
}
421 
422 static void
423 test_simple_write(void)
424 {
425 	/* Single element block size write */
426 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
427 	g_bdev_io->u.bdev.iovcnt = 1;
428 	g_bdev_io->u.bdev.num_blocks = 1;
429 	g_bdev_io->u.bdev.offset_blocks = 0;
430 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
431 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
432 	g_crypto_bdev.crypto_bdev.blocklen = 512;
433 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
434 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
435 
436 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
437 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
438 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
439 	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
440 	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
441 	CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
442 	CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
443 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
444 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
445 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
446 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
447 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
448 	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
449 				     uint64_t *) == (uint64_t)g_bdev_io);
450 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
451 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);
452 
453 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
454 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
455 }
456 
457 static void
458 test_simple_read(void)
459 {
460 	/* Single element block size read */
461 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
462 	g_bdev_io->u.bdev.iovcnt = 1;
463 	g_bdev_io->u.bdev.num_blocks = 1;
464 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
465 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
466 	g_crypto_bdev.crypto_bdev.blocklen = 512;
467 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
468 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
469 
470 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
471 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
472 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
473 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
474 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
475 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
476 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
477 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
478 	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
479 				     uint64_t *) == (uint64_t)g_bdev_io);
480 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
481 
482 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
483 }
484 
/* Submit the largest I/O the vbdev handles in one shot (CRYPTO_MAX_IO bytes)
 * as a single-iov read and then a single-iov write, and verify one crypto op
 * per block in each direction.
 */
static void
test_large_rw(void)
{
	unsigned block_len = 512;
	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
	unsigned io_len = block_len * num_blocks;
	unsigned i;

	/* Multi block size read, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* Each op should cover one 512B slice of the single large iov;
	 * reads decrypt in place so m_dst stays NULL.
	 */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* Writes additionally allocate a dst mbuf per block and record the
	 * aux (encrypted-data) buffer covering the whole I/O.
	 */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
		CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
		CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
	}
}
551 
/* Simulate a full crypto device: of 2 ops only 1 is accepted by enqueue, so
 * the other must be placed on the channel's queued_cry_ops list for resubmit.
 * Then simulate a non-busy enqueue failure where all ops are rejected.
 */
static void
test_dev_full(void)
{
	struct vbdev_crypto_op *queued_op;
	struct rte_crypto_sym_op *sym_op;
	struct crypto_bdev_io *io_ctx;

	/* Two element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* NOTE(review): iovcnt is 1 although two iovs are populated below;
	 * the submit path appears to walk iovs by num_blocks * blocklen rather
	 * than by iovcnt, so the asserts still pass — presumably intentional,
	 * but confirm against _crypto_operation().
	 */
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 2;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	/* Device "accepts" only 1 of the 2 allocated ops. */
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;

	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
	/* The first op went through; verify its contents. */
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);

	/* make sure one got queued and confirm its values */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
	sym_op = queued_op->crypto_op->sym;
	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
	CU_ASSERT(queued_op->bdev_io == g_bdev_io);
	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[1]->sym->m_src);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
	CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
}
612 
613 static void
614 test_crazy_rw(void)
615 {
616 	unsigned block_len = 512;
617 	int num_blocks = 4;
618 	int i;
619 
620 	/* Multi block size read, single element, strange IOV makeup */
621 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
622 	g_bdev_io->u.bdev.iovcnt = 3;
623 	g_bdev_io->u.bdev.num_blocks = num_blocks;
624 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
625 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
626 	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
627 	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
628 	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
629 	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;
630 
631 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
632 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
633 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
634 
635 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
636 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
637 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
638 
639 	for (i = 0; i < num_blocks; i++) {
640 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
641 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
642 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
643 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
644 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
645 		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
646 					     uint64_t *) == (uint64_t)g_bdev_io);
647 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
648 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
649 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
650 	}
651 
652 	/* Multi block size write, single element strange IOV makeup */
653 	num_blocks = 8;
654 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
655 	g_bdev_io->u.bdev.iovcnt = 4;
656 	g_bdev_io->u.bdev.num_blocks = num_blocks;
657 	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
658 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
659 	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
660 	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
661 	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
662 	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
663 	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
664 	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;
665 
666 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
667 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
668 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
669 
670 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
671 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
672 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
673 
674 	for (i = 0; i < num_blocks; i++) {
675 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
676 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
677 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
678 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
679 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
680 		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
681 					     uint64_t *) == (uint64_t)g_bdev_io);
682 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
683 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
684 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
685 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
686 	}
687 }
688 
/* I/O types the vbdev passes straight through (unmap, flush) must still go
 * through our completion callback — test both success and failure — while
 * write-zeroes must fail because the vbdev reports it unsupported.
 */
static void
test_passthru(void)
{
	/* Make sure these follow our completion callback, test success & fail. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	MOCK_SET(spdk_bdev_unmap_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_unmap_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_unmap_blocks);

	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
	MOCK_SET(spdk_bdev_flush_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* We should never get a WZ command, we report that we don't support it. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
}
716 
/* Intentionally empty placeholder for reset testing. */
static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting w/o UT for this function for now and
	 * will follow up with something shortly.
	 */
}
726 
727 static void
728 init_cleanup(void)
729 {
730 	spdk_mempool_free(g_mbuf_mp);
731 	rte_mempool_free(g_session_mp);
732 	g_mbuf_mp = NULL;
733 	g_session_mp = NULL;
734 	if (g_session_mp_priv != NULL) {
735 		/* g_session_mp_priv may or may not be set depending on the DPDK version */
736 		rte_mempool_free(g_session_mp_priv);
737 	}
738 }
739 
740 static void
741 test_initdrivers(void)
742 {
743 	int rc;
744 	static struct spdk_mempool *orig_mbuf_mp;
745 	static struct rte_mempool *orig_session_mp;
746 	static struct rte_mempool *orig_session_mp_priv;
747 
748 	/* These tests will alloc and free our g_mbuf_mp
749 	 * so save that off here and restore it after each test is over.
750 	 */
751 	orig_mbuf_mp = g_mbuf_mp;
752 	orig_session_mp = g_session_mp;
753 	orig_session_mp_priv = g_session_mp_priv;
754 
755 	g_session_mp_priv = NULL;
756 	g_session_mp = NULL;
757 	g_mbuf_mp = NULL;
758 
759 	/* No drivers available, not an error though */
760 	MOCK_SET(rte_cryptodev_count, 0);
761 	rc = vbdev_crypto_init_crypto_drivers();
762 	CU_ASSERT(rc == 0);
763 	CU_ASSERT(g_mbuf_mp == NULL);
764 	CU_ASSERT(g_session_mp == NULL);
765 	CU_ASSERT(g_session_mp_priv == NULL);
766 
767 	/* Test failure of DPDK dev init. */
768 	MOCK_SET(rte_cryptodev_count, 2);
769 	MOCK_SET(rte_vdev_init, -1);
770 	rc = vbdev_crypto_init_crypto_drivers();
771 	CU_ASSERT(rc == -EINVAL);
772 	CU_ASSERT(g_mbuf_mp == NULL);
773 	CU_ASSERT(g_session_mp == NULL);
774 	CU_ASSERT(g_session_mp_priv == NULL);
775 	MOCK_SET(rte_vdev_init, 0);
776 
777 	/* Can't create session pool. */
778 	MOCK_SET(spdk_mempool_create, NULL);
779 	rc = vbdev_crypto_init_crypto_drivers();
780 	CU_ASSERT(rc == -ENOMEM);
781 	CU_ASSERT(g_mbuf_mp == NULL);
782 	CU_ASSERT(g_session_mp == NULL);
783 	CU_ASSERT(g_session_mp_priv == NULL);
784 	MOCK_CLEAR(spdk_mempool_create);
785 
786 	/* Can't create op pool. */
787 	MOCK_SET(rte_crypto_op_pool_create, NULL);
788 	rc = vbdev_crypto_init_crypto_drivers();
789 	CU_ASSERT(rc == -ENOMEM);
790 	CU_ASSERT(g_mbuf_mp == NULL);
791 	CU_ASSERT(g_session_mp == NULL);
792 	CU_ASSERT(g_session_mp_priv == NULL);
793 	MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
794 
795 	/* Check resources are not sufficient */
796 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
797 	rc = vbdev_crypto_init_crypto_drivers();
798 	CU_ASSERT(rc == -EINVAL);
799 
800 	/* Test crypto dev configure failure. */
801 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
802 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
803 	MOCK_SET(rte_cryptodev_configure, -1);
804 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
805 	rc = vbdev_crypto_init_crypto_drivers();
806 	MOCK_SET(rte_cryptodev_configure, 0);
807 	CU_ASSERT(g_mbuf_mp == NULL);
808 	CU_ASSERT(g_session_mp == NULL);
809 	CU_ASSERT(g_session_mp_priv == NULL);
810 	CU_ASSERT(rc == -EINVAL);
811 
812 	/* Test failure of qp setup. */
813 	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
814 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
815 	rc = vbdev_crypto_init_crypto_drivers();
816 	CU_ASSERT(rc == -EINVAL);
817 	CU_ASSERT(g_mbuf_mp == NULL);
818 	CU_ASSERT(g_session_mp == NULL);
819 	CU_ASSERT(g_session_mp_priv == NULL);
820 	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
821 
822 	/* Test failure of dev start. */
823 	MOCK_SET(rte_cryptodev_start, -1);
824 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
825 	rc = vbdev_crypto_init_crypto_drivers();
826 	CU_ASSERT(rc == -EINVAL);
827 	CU_ASSERT(g_mbuf_mp == NULL);
828 	CU_ASSERT(g_session_mp == NULL);
829 	CU_ASSERT(g_session_mp_priv == NULL);
830 	MOCK_SET(rte_cryptodev_start, 0);
831 
832 	/* Test bogus PMD */
833 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
834 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
835 	rc = vbdev_crypto_init_crypto_drivers();
836 	CU_ASSERT(g_mbuf_mp == NULL);
837 	CU_ASSERT(g_session_mp == NULL);
838 	CU_ASSERT(rc == -EINVAL);
839 
840 	/* Test happy path QAT. */
841 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
842 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
843 	rc = vbdev_crypto_init_crypto_drivers();
844 	CU_ASSERT(g_mbuf_mp != NULL);
845 	CU_ASSERT(g_session_mp != NULL);
846 	init_cleanup();
847 	CU_ASSERT(rc == 0);
848 
849 	/* Test happy path AESNI. */
850 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
851 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
852 	rc = vbdev_crypto_init_crypto_drivers();
853 	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
854 	init_cleanup();
855 	CU_ASSERT(rc == 0);
856 
857 	/* restore our initial values. */
858 	g_mbuf_mp = orig_mbuf_mp;
859 	g_session_mp = orig_session_mp;
860 	g_session_mp_priv = orig_session_mp_priv;
861 }
862 
/* Drive _crypto_operation_complete() through each IO type and verify the
 * resulting bdev IO status and that the user completion callback fired
 * (tracked via g_completion_called set by the mocked completion).
 */
static void
test_crypto_op_complete(void)
{
	/* Make sure completion code respects failure. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test read completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion success. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_writev_blocks, 0);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion failed. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	/* NOTE(review): this mock is never cleared/reset afterwards — presumably
	 * later tests don't hit the writev path; confirm ordering assumptions.
	 */
	MOCK_SET(spdk_bdev_writev_blocks, -1);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test bogus type for this completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}
907 
908 static void
909 test_supported_io(void)
910 {
911 	void *ctx = NULL;
912 	bool rc = true;
913 
914 	/* Make sure we always report false to WZ, we need the bdev layer to
915 	 * send real 0's so we can encrypt/decrypt them.
916 	 */
917 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
918 	CU_ASSERT(rc == false);
919 }
920 
921 static void
922 test_poller(void)
923 {
924 	int rc;
925 	struct rte_mbuf *src_mbufs[2];
926 	struct vbdev_crypto_op *op_to_resubmit;
927 
928 	/* test regular 1 op to dequeue and complete */
929 	g_dequeue_mock = g_enqueue_mock = 1;
930 	spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 1);
931 	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
932 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
933 			   uint64_t *) = (uintptr_t)g_bdev_io;
934 	g_test_crypto_ops[0]->sym->m_dst = NULL;
935 	g_io_ctx->cryop_cnt_remaining = 1;
936 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
937 	rc = crypto_dev_poller(g_crypto_ch);
938 	CU_ASSERT(rc == 1);
939 
940 	/* We have nothing dequeued but have some to resubmit */
941 	g_dequeue_mock = 0;
942 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
943 
944 	/* add an op to the queued list. */
945 	g_resubmit_test = true;
946 	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
947 	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
948 	op_to_resubmit->bdev_io = g_bdev_io;
949 	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
950 			  op_to_resubmit,
951 			  link);
952 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
953 	rc = crypto_dev_poller(g_crypto_ch);
954 	g_resubmit_test = false;
955 	CU_ASSERT(rc == 0);
956 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
957 
958 	/* 2 to dequeue but 2nd one failed */
959 	g_dequeue_mock = g_enqueue_mock = 2;
960 	g_io_ctx->cryop_cnt_remaining = 2;
961 	spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 2);
962 	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
963 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
964 			   uint64_t *) = (uint64_t)g_bdev_io;
965 	g_test_crypto_ops[0]->sym->m_dst = NULL;
966 	g_test_crypto_ops[0]->status =  RTE_CRYPTO_OP_STATUS_SUCCESS;
967 	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
968 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
969 			   uint64_t *) = (uint64_t)g_bdev_io;
970 	g_test_crypto_ops[1]->sym->m_dst = NULL;
971 	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
972 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
973 	rc = crypto_dev_poller(g_crypto_ch);
974 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
975 	CU_ASSERT(rc == 2);
976 }
977 
978 /* Helper function for test_assign_device_qp() */
979 static void
980 _clear_device_qp_lists(void)
981 {
982 	struct device_qp *device_qp = NULL;
983 
984 	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
985 		device_qp = TAILQ_FIRST(&g_device_qp_qat);
986 		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
987 		free(device_qp);
988 
989 	}
990 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
991 	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
992 		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
993 		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
994 		free(device_qp);
995 	}
996 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
997 }
998 
999 /* Helper function for test_assign_device_qp() */
1000 static void
1001 _check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
1002 		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
1003 		       uint8_t current_index)
1004 {
1005 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1006 	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
1007 	CU_ASSERT(g_next_qat_index == current_index);
1008 }
1009 
1010 static void
1011 test_assign_device_qp(void)
1012 {
1013 	struct device_qp *device_qp = NULL;
1014 	int i;
1015 
1016 	/* start with a known state, clear the device/qp lists */
1017 	_clear_device_qp_lists();
1018 
1019 	/* make sure that one AESNI_MB qp is found */
1020 	device_qp = calloc(1, sizeof(struct device_qp));
1021 	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
1022 	g_crypto_ch->device_qp = NULL;
1023 	g_crypto_bdev.drv_name = AESNI_MB;
1024 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1025 	CU_ASSERT(g_crypto_ch->device_qp != NULL);
1026 
1027 	/* QAT testing is more complex as the code under test load balances by
1028 	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
1029 	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
1030 	 * each with 2 qp so the "spread" betwen assignments is 32.
1031 	 */
1032 	g_qat_total_qp = 96;
1033 	for (i = 0; i < g_qat_total_qp; i++) {
1034 		device_qp = calloc(1, sizeof(struct device_qp));
1035 		device_qp->index = i;
1036 		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
1037 	}
1038 	g_crypto_ch->device_qp = NULL;
1039 	g_crypto_bdev.drv_name = QAT;
1040 
1041 	/* First assignment will assign to 0 and next at 32. */
1042 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1043 			       0, QAT_VF_SPREAD);
1044 
1045 	/* Second assignment will assign to 32 and next at 64. */
1046 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1047 			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
1048 
1049 	/* Third assignment will assign to 64 and next at 0. */
1050 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1051 			       QAT_VF_SPREAD * 2, 0);
1052 
1053 	/* Fourth assignment will assign to 1 and next at 33. */
1054 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1055 			       1, QAT_VF_SPREAD + 1);
1056 
1057 	_clear_device_qp_lists();
1058 }
1059 
/* Test-suite entry point: registers every crypto vbdev unit test with CUnit,
 * runs them in verbose mode, and returns the failure count as the exit code.
 */
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	/* Abort immediately on a CUnit framework error (not test failures). */
	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	/* test_setup/test_cleanup run once before/after the whole suite. */
	suite = CU_add_suite("crypto", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_error_paths);
	CU_ADD_TEST(suite, test_simple_write);
	CU_ADD_TEST(suite, test_simple_read);
	CU_ADD_TEST(suite, test_large_rw);
	CU_ADD_TEST(suite, test_dev_full);
	CU_ADD_TEST(suite, test_crazy_rw);
	CU_ADD_TEST(suite, test_passthru);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_crypto_op_complete);
	CU_ADD_TEST(suite, test_supported_io);
	CU_ADD_TEST(suite, test_reset);
	CU_ADD_TEST(suite, test_poller);
	CU_ADD_TEST(suite, test_assign_device_qp);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	/* Non-zero exit signals test failures to the harness. */
	return num_failures;
}
1090