xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision 2f5c602574a98ede645991abe279a96e19c50196)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/test_env.c"
37 #include "spdk_internal/mock.h"
38 #include "thread/thread_internal.h"
39 #include "unit/lib/json_mock.c"
40 
41 #include <rte_crypto.h>
42 #include <rte_cryptodev.h>
43 #include <rte_cryptodev_pmd.h>
44 
#define MAX_TEST_BLOCKS 8192
/* Ops handed out by the mocked bulk-alloc; tests inspect them after submit. */
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
/* Ops captured by the mocked enqueue_burst for the dev_full/resubmit tests. */
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

/* Return values for the dequeue/enqueue burst mocks, set per-test. */
uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
/* Return value for the mocked rte_crypto_op_bulk_alloc(). */
unsigned ut_rte_crypto_op_bulk_alloc;
/* Return value for the mocked rte_crypto_op_attach_sym_session(). */
int ut_rte_crypto_op_attach_sym_session = 0;
/* Selector for which driver name rte_cryptodev_info_get() reports. */
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_BOGUS_PMD 2
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;
58 
59 /* Those functions are defined as static inline in DPDK, so we can't
60  * mock them straight away. We use defines to redirect them into
61  * our custom functions.
62  */
63 static bool g_resubmit_test = false;
64 #define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
65 static inline uint16_t
66 mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
67 				 struct rte_crypto_op **ops, uint16_t nb_ops)
68 {
69 	int i;
70 
71 	CU_ASSERT(nb_ops > 0);
72 
73 	for (i = 0; i < nb_ops; i++) {
74 		/* Use this empty (til now) array of pointers to store
75 		 * enqueued operations for assertion in dev_full test.
76 		 */
77 		g_test_dev_full_ops[i] = *ops++;
78 		if (g_resubmit_test == true) {
79 			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
80 		}
81 	}
82 
83 	return g_enqueue_mock;
84 }
85 
86 #define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
87 static inline uint16_t
88 mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
89 				 struct rte_crypto_op **ops, uint16_t nb_ops)
90 {
91 	int i;
92 
93 	CU_ASSERT(nb_ops > 0);
94 
95 	for (i = 0; i < g_dequeue_mock; i++) {
96 		*ops++ = g_test_crypto_ops[i];
97 	}
98 
99 	return g_dequeue_mock;
100 }
101 
102 /* Instead of allocating real memory, assign the allocations to our
103  * test array for assertion in tests.
104  */
105 #define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
106 static inline unsigned
107 mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
108 			      enum rte_crypto_op_type type,
109 			      struct rte_crypto_op **ops, uint16_t nb_ops)
110 {
111 	int i;
112 
113 	for (i = 0; i < nb_ops; i++) {
114 		*ops++ = g_test_crypto_ops[i];
115 	}
116 	return ut_rte_crypto_op_bulk_alloc;
117 }
118 
119 #define rte_mempool_put_bulk mock_rte_mempool_put_bulk
120 static __rte_always_inline void
121 mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
122 			  unsigned int n)
123 {
124 	return;
125 }
126 
#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
/* Mocked session attach: returns whatever the test configured via
 * ut_rte_crypto_op_attach_sym_session (0 = success, -1 = failure path).
 */
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
{
	return ut_rte_crypto_op_attach_sym_session;
}
134 
#define rte_lcore_count mock_rte_lcore_count
/* Pretend a single lcore so the vbdev sizes its per-core resources to 1. */
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}
141 
142 #include "bdev/crypto/vbdev_crypto.c"
143 
144 /* SPDK stubs */
/* Bdev API entry points vbdev_crypto calls but whose behavior is either
 * irrelevant here or overridden per-test with MOCK_SET().
 */
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
167 
168 /* DPDK stubs */
/* Fixed dynfield offset used by both the stub below and test_setup(). */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
/* Pool-creation stubs return a non-NULL sentinel so init paths see success. */
DEFINE_STUB(rte_mempool_create, struct rte_mempool *, (const char *name, unsigned n,
		unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags), (struct rte_mempool *)1);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
	    (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
	     unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_sym_session_pool_create, struct rte_mempool *, (const char *name,
		uint32_t nb_elts,
		uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
		int socket_id), (struct rte_mempool *)1);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

/* Definition for the symbol DPDK headers declare; never dereferenced here. */
struct rte_cryptodev *rte_cryptodevs;
204 
205 /* global vars and setup/cleanup functions used for all test functions */
struct spdk_bdev_io *g_bdev_io;		/* single IO reused by every test */
struct crypto_bdev_io *g_io_ctx;	/* driver_ctx of g_bdev_io */
struct crypto_io_channel *g_crypto_ch;	/* ctx portion of g_io_ch */
struct spdk_io_channel *g_io_ch;	/* channel passed to submit_request */
struct vbdev_dev g_device;		/* fake DPDK crypto device */
struct vbdev_crypto g_crypto_bdev;	/* crypto vbdev under test */
struct device_qp g_dev_qp;		/* queue pair linking channel to device */
213 
214 void
215 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
216 {
217 	dev_info->max_nb_queue_pairs = 1;
218 	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
219 		dev_info->driver_name = g_driver_names[0];
220 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
221 		dev_info->driver_name = g_driver_names[1];
222 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
223 		dev_info->driver_name = "junk";
224 	}
225 }
226 
/* Mock: echoes dev_id back as the "private session size" so tests can
 * verify which device the size was queried for.
 */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	return (unsigned int)dev_id;
}
232 
/* Mock: invokes the callback immediately with 0xDEADBEEF as the aux buffer
 * pointer (a recognizable sentinel; tests only compare it, never dereference).
 */
void
spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
{
	cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
}
238 
/* Mock: invokes the callback immediately, reporting the buffer as available. */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}
244 
245 /* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;
bool ut_spdk_bdev_readv_blocks_mocked = false;
/* Mock: fires the completion callback synchronously (success iff the mocked
 * return value is 0), then returns the mocked value.
 */
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}
257 
int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
/* Mock: same synchronous-completion pattern as the readv mock above. */
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}
269 
int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
/* Mock: synchronous completion; success iff the mocked return value is 0. */
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}
280 
int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
/* Mock: synchronous completion; success iff the mocked return value is 0. */
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}
291 
int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
/* Mock: synchronous completion; success iff the mocked return value is 0. */
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}
301 
bool g_completion_called = false;
/* Mock: records the completion status on the IO and flags that completion
 * happened, so tests can assert on both.
 */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}
309 
310 /* Global setup for all tests that share a bunch of preparation... */
311 static int
312 test_setup(void)
313 {
314 	int i, rc;
315 
316 	/* Prepare essential variables for test routines */
317 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
318 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
319 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
320 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
321 	g_crypto_ch = (struct crypto_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
322 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
323 	memset(&g_device, 0, sizeof(struct vbdev_dev));
324 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
325 	g_dev_qp.device = &g_device;
326 	g_io_ctx->crypto_ch = g_crypto_ch;
327 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
328 	g_crypto_ch->device_qp = &g_dev_qp;
329 	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
330 	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
331 
332 	/* Allocate a real mbuf pool so we can test error paths */
333 	g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
334 					SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
335 					SPDK_ENV_SOCKET_ID_ANY);
336 
337 	/* Instead of allocating real rte mempools for these, it's easier and provides the
338 	 * same coverage just calloc them here.
339 	 */
340 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
341 		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64,
342 				    sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op) +
343 				    AES_CBC_IV_LENGTH + QUEUED_OP_LENGTH);
344 		if (rc != 0) {
345 			assert(false);
346 		}
347 		memset(g_test_crypto_ops[i], 0, sizeof(struct rte_crypto_op) +
348 		       sizeof(struct rte_crypto_sym_op) + QUEUED_OP_LENGTH);
349 	}
350 	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;
351 
352 	return 0;
353 }
354 
355 /* Global teardown for all tests */
356 static int
357 test_cleanup(void)
358 {
359 	int i;
360 
361 	spdk_mempool_free(g_mbuf_mp);
362 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
363 		free(g_test_crypto_ops[i]);
364 	}
365 	free(g_bdev_io->u.bdev.iovs);
366 	free(g_bdev_io);
367 	free(g_io_ch);
368 	return 0;
369 }
370 
static void
test_error_paths(void)
{
	/* Single element block size write, just to test error paths
	 * in vbdev_crypto_submit_request().
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of spdk_mempool_get_bulk(), will result in success because it
	 * will get queued.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_mempool_get, NULL);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* same thing but switch to reads to test error path in _crypto_complete_io() */
	/* NOTE: spdk_mempool_get is still mocked to NULL from the block above. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Now with the read_blocks failing */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_bdev_readv_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_SET(spdk_bdev_readv_blocks, 0);
	MOCK_CLEAR(spdk_mempool_get);

	/* test failure of rte_crypto_op_bulk_alloc() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_bulk_alloc = 0;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of rte_crypto_op_attach_sym_session() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_attach_sym_session = -1;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_attach_sym_session = 0;
}
422 
static void
test_simple_write(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.offset_blocks = 0;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	/* The function's own address is used only as a recognizable pointer
	 * value for the buf_addr assertion below; it is never dereferenced.
	 */
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	/* Writes stage encrypted data in an aux buffer; check its bookkeeping. */
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
	CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
	CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	/* The bdev_io pointer is stashed in the mbuf dynfield for completion. */
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);

	/* Return the mbufs the submit path pulled from the real pool. */
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
}
457 
static void
test_simple_read(void)
{
	/* Single element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	/* Recognizable pointer value only; never dereferenced. */
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	/* Reads decrypt in place, so no destination mbuf is allocated. */
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
}
485 
static void
test_large_rw(void)
{
	unsigned block_len = 512;
	/* Size the IO so it spans the maximum number of blocks per IO. */
	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
	unsigned io_len = block_len * num_blocks;
	unsigned i;

	/* Multi block size read, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* One crypto op per block, each covering one block-sized slice. */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* Writes use one aux buffer for the whole IO plus an m_dst per op. */
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
		CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
		CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
	}
}
552 
static void
test_dev_full(void)
{
	struct vbdev_crypto_op *queued_op;
	struct rte_crypto_sym_op *sym_op;
	struct crypto_bdev_io *io_ctx;

	/* Two element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* NOTE(review): iovcnt is 1 even though two iovs are populated below
	 * and the comment says "two element" — looks like it should be 2;
	 * verify against how _crypto_operation() walks the iovs.
	 */
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 2;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	/* Device accepts only 1 of the 2 ops, so the second must get queued. */
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;

	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
	/* First op went through to the device. */
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);

	/* make sure one got queued and confirm its values */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
	sym_op = queued_op->crypto_op->sym;
	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
	CU_ASSERT(queued_op->bdev_io == g_bdev_io);
	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[1]->sym->m_src);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
	CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
}
613 
614 static void
615 test_crazy_rw(void)
616 {
617 	unsigned block_len = 512;
618 	int num_blocks = 4;
619 	int i;
620 
621 	/* Multi block size read, single element, strange IOV makeup */
622 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
623 	g_bdev_io->u.bdev.iovcnt = 3;
624 	g_bdev_io->u.bdev.num_blocks = num_blocks;
625 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
626 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
627 	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
628 	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
629 	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
630 	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;
631 
632 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
633 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
634 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
635 
636 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
637 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
638 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
639 
640 	for (i = 0; i < num_blocks; i++) {
641 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
642 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
643 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
644 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
645 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
646 		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
647 					     uint64_t *) == (uint64_t)g_bdev_io);
648 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
649 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
650 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
651 	}
652 
653 	/* Multi block size write, single element strange IOV makeup */
654 	num_blocks = 8;
655 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
656 	g_bdev_io->u.bdev.iovcnt = 4;
657 	g_bdev_io->u.bdev.num_blocks = num_blocks;
658 	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
659 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
660 	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
661 	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
662 	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
663 	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
664 	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
665 	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;
666 
667 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
668 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
669 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
670 
671 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
672 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
673 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
674 
675 	for (i = 0; i < num_blocks; i++) {
676 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
677 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
678 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
679 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
680 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
681 		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
682 					     uint64_t *) == (uint64_t)g_bdev_io);
683 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
684 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
685 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
686 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
687 	}
688 }
689 
static void
test_passthru(void)
{
	/* Make sure these follow our completion callback, test success & fail. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	MOCK_SET(spdk_bdev_unmap_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_unmap_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_unmap_blocks);

	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
	MOCK_SET(spdk_bdev_flush_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* We should never get a WZ command, we report that we don't support it. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
}
717 
/* Placeholder: reset handling is not covered yet (see TODO below). */
static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting w/o UT for this function for now and
	 * will follow up with something shortly.
	 */
}
727 
728 static void
729 init_cleanup(void)
730 {
731 	spdk_mempool_free(g_mbuf_mp);
732 	rte_mempool_free(g_session_mp);
733 	g_mbuf_mp = NULL;
734 	g_session_mp = NULL;
735 	if (g_session_mp_priv != NULL) {
736 		/* g_session_mp_priv may or may not be set depending on the DPDK version */
737 		rte_mempool_free(g_session_mp_priv);
738 	}
739 }
740 
/* Walk vbdev_crypto_init_crypto_drivers() through every failure path
 * (no devices, vdev init, mempool/op-pool creation, dev configure, qp setup,
 * dev start, bogus PMD) and then through the happy paths for QAT and AESNI.
 * The global mempool pointers are saved and restored so the other tests,
 * which rely on pools created in test_setup(), are unaffected.
 */
static void
test_initdrivers(void)
{
	int rc;
	static struct spdk_mempool *orig_mbuf_mp;
	static struct rte_mempool *orig_session_mp;
	static struct rte_mempool *orig_session_mp_priv;

	/* These tests will alloc and free our g_mbuf_mp
	 * so save that off here and restore it after each test is over.
	 */
	orig_mbuf_mp = g_mbuf_mp;
	orig_session_mp = g_session_mp;
	orig_session_mp_priv = g_session_mp_priv;

	g_session_mp_priv = NULL;
	g_session_mp = NULL;
	g_mbuf_mp = NULL;

	/* No drivers available, not an error though */
	MOCK_SET(rte_cryptodev_count, 0);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);

	/* Test failure of DPDK dev init. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(rte_vdev_init, -1);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_vdev_init, 0);

	/* Can't create session pool. */
	MOCK_SET(spdk_mempool_create, NULL);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(spdk_mempool_create);

	/* Can't create op pool. */
	MOCK_SET(rte_crypto_op_pool_create, NULL);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);

	/* Check resources are not sufficient */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* Test crypto dev configure failure. */
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	MOCK_SET(rte_cryptodev_configure, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	/* Restore the mock before asserting so a failed assert can't leak it. */
	MOCK_SET(rte_cryptodev_configure, 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test failure of qp setup. */
	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);

	/* Test failure of dev start. */
	MOCK_SET(rte_cryptodev_start, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_start, 0);

	/* Test bogus PMD */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test happy path QAT. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path AESNI. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* restore our initial values. */
	g_mbuf_mp = orig_mbuf_mp;
	g_session_mp = orig_session_mp;
	g_session_mp_priv = orig_session_mp_priv;
}
863 
/* Verify _crypto_operation_complete() for each IO type: an already-failed IO
 * stays failed, reads complete directly, writes complete via
 * spdk_bdev_writev_blocks() (success and failure), and unexpected IO types
 * are failed. In every case the upper-layer completion callback must fire.
 */
static void
test_crypto_op_complete(void)
{
	/* Make sure completion code respects failure. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test read completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion success. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_writev_blocks, 0);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion failed. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	/* NOTE(review): this mock is left set to -1 on function exit — confirm
	 * no later test depends on spdk_bdev_writev_blocks succeeding.
	 */
	MOCK_SET(spdk_bdev_writev_blocks, -1);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test bogus type for this completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}
908 
909 static void
910 test_supported_io(void)
911 {
912 	void *ctx = NULL;
913 	bool rc = true;
914 
915 	/* Make sure we always report false to WZ, we need the bdev layer to
916 	 * send real 0's so we can encrypt/decrypt them.
917 	 */
918 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
919 	CU_ASSERT(rc == false);
920 }
921 
922 static void
923 test_poller(void)
924 {
925 	int rc;
926 	struct rte_mbuf *src_mbufs[2];
927 	struct vbdev_crypto_op *op_to_resubmit;
928 
929 	/* test regular 1 op to dequeue and complete */
930 	g_dequeue_mock = g_enqueue_mock = 1;
931 	spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 1);
932 	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
933 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
934 			   uint64_t *) = (uintptr_t)g_bdev_io;
935 	g_test_crypto_ops[0]->sym->m_dst = NULL;
936 	g_io_ctx->cryop_cnt_remaining = 1;
937 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
938 	rc = crypto_dev_poller(g_crypto_ch);
939 	CU_ASSERT(rc == 1);
940 
941 	/* We have nothing dequeued but have some to resubmit */
942 	g_dequeue_mock = 0;
943 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
944 
945 	/* add an op to the queued list. */
946 	g_resubmit_test = true;
947 	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
948 	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
949 	op_to_resubmit->bdev_io = g_bdev_io;
950 	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
951 			  op_to_resubmit,
952 			  link);
953 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
954 	rc = crypto_dev_poller(g_crypto_ch);
955 	g_resubmit_test = false;
956 	CU_ASSERT(rc == 0);
957 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
958 
959 	/* 2 to dequeue but 2nd one failed */
960 	g_dequeue_mock = g_enqueue_mock = 2;
961 	g_io_ctx->cryop_cnt_remaining = 2;
962 	spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 2);
963 	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
964 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
965 			   uint64_t *) = (uint64_t)g_bdev_io;
966 	g_test_crypto_ops[0]->sym->m_dst = NULL;
967 	g_test_crypto_ops[0]->status =  RTE_CRYPTO_OP_STATUS_SUCCESS;
968 	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
969 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
970 			   uint64_t *) = (uint64_t)g_bdev_io;
971 	g_test_crypto_ops[1]->sym->m_dst = NULL;
972 	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
973 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
974 	rc = crypto_dev_poller(g_crypto_ch);
975 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
976 	CU_ASSERT(rc == 2);
977 }
978 
979 /* Helper function for test_assign_device_qp() */
980 static void
981 _clear_device_qp_lists(void)
982 {
983 	struct device_qp *device_qp = NULL;
984 
985 	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
986 		device_qp = TAILQ_FIRST(&g_device_qp_qat);
987 		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
988 		free(device_qp);
989 
990 	}
991 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
992 	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
993 		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
994 		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
995 		free(device_qp);
996 	}
997 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
998 }
999 
1000 /* Helper function for test_assign_device_qp() */
1001 static void
1002 _check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
1003 		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
1004 		       uint8_t current_index)
1005 {
1006 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1007 	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
1008 	CU_ASSERT(g_next_qat_index == current_index);
1009 }
1010 
1011 static void
1012 test_assign_device_qp(void)
1013 {
1014 	struct device_qp *device_qp = NULL;
1015 	int i;
1016 
1017 	/* start with a known state, clear the device/qp lists */
1018 	_clear_device_qp_lists();
1019 
1020 	/* make sure that one AESNI_MB qp is found */
1021 	device_qp = calloc(1, sizeof(struct device_qp));
1022 	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
1023 	g_crypto_ch->device_qp = NULL;
1024 	g_crypto_bdev.drv_name = AESNI_MB;
1025 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1026 	CU_ASSERT(g_crypto_ch->device_qp != NULL);
1027 
1028 	/* QAT testing is more complex as the code under test load balances by
1029 	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
1030 	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
1031 	 * each with 2 qp so the "spread" betwen assignments is 32.
1032 	 */
1033 	g_qat_total_qp = 96;
1034 	for (i = 0; i < g_qat_total_qp; i++) {
1035 		device_qp = calloc(1, sizeof(struct device_qp));
1036 		device_qp->index = i;
1037 		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
1038 	}
1039 	g_crypto_ch->device_qp = NULL;
1040 	g_crypto_bdev.drv_name = QAT;
1041 
1042 	/* First assignment will assign to 0 and next at 32. */
1043 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1044 			       0, QAT_VF_SPREAD);
1045 
1046 	/* Second assignment will assign to 32 and next at 64. */
1047 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1048 			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
1049 
1050 	/* Third assignment will assign to 64 and next at 0. */
1051 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1052 			       QAT_VF_SPREAD * 2, 0);
1053 
1054 	/* Fourth assignment will assign to 1 and next at 33. */
1055 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1056 			       1, QAT_VF_SPREAD + 1);
1057 
1058 	_clear_device_qp_lists();
1059 }
1060 
/* Register all crypto vbdev unit tests with CUnit, run them, and return the
 * number of failed assertions as the process exit code (0 == all passed).
 */
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	/* test_setup/test_cleanup run once around the whole suite. */
	suite = CU_add_suite("crypto", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_error_paths);
	CU_ADD_TEST(suite, test_simple_write);
	CU_ADD_TEST(suite, test_simple_read);
	CU_ADD_TEST(suite, test_large_rw);
	CU_ADD_TEST(suite, test_dev_full);
	CU_ADD_TEST(suite, test_crazy_rw);
	CU_ADD_TEST(suite, test_passthru);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_crypto_op_complete);
	CU_ADD_TEST(suite, test_supported_io);
	CU_ADD_TEST(suite, test_reset);
	CU_ADD_TEST(suite, test_poller);
	CU_ADD_TEST(suite, test_assign_device_qp);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
1091