xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision 03e3fc4f5835983a4e6602b4e770922e798ce263)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/test_env.c"
37 #include "spdk_internal/mock.h"
38 #include "unit/lib/json_mock.c"
39 
40 #include <rte_crypto.h>
41 #include <rte_cryptodev.h>
42 
/* Upper bound on crypto ops a single UT submission can generate. */
#define MAX_TEST_BLOCKS 8192
/* Canned ops handed out by the bulk-alloc mock, one per block. */
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
/* Ops captured by the enqueue mock for inspection in the dev_full test. */
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

/* Knobs that control how many ops the dequeue/enqueue mocks report. */
uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
/* Return value of the bulk-alloc mock; 0 simulates allocation failure. */
unsigned ut_rte_crypto_op_bulk_alloc;
/* Return value of the attach-session mock; non-zero simulates failure. */
int ut_rte_crypto_op_attach_sym_session = 0;
/* Selects which driver name rte_cryptodev_info_get() reports. */
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_BOGUS_PMD 2
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;

/* Those functions are defined as static inline in DPDK, so we can't
 * mock them straight away. We use defines to redirect them into
 * our custom functions.
 */
/* When true, the enqueue mock asserts every op is the resubmitted sentinel. */
static bool g_resubmit_test = false;
62 #define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
63 static inline uint16_t
64 mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
65 				 struct rte_crypto_op **ops, uint16_t nb_ops)
66 {
67 	int i;
68 
69 	CU_ASSERT(nb_ops > 0);
70 
71 	for (i = 0; i < nb_ops; i++) {
72 		/* Use this empty (til now) array of pointers to store
73 		 * enqueued operations for assertion in dev_full test.
74 		 */
75 		g_test_dev_full_ops[i] = *ops++;
76 		if (g_resubmit_test == true) {
77 			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
78 		}
79 	}
80 
81 	return g_enqueue_mock;
82 }
83 
84 #define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
85 static inline uint16_t
86 mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
87 				 struct rte_crypto_op **ops, uint16_t nb_ops)
88 {
89 	int i;
90 
91 	CU_ASSERT(nb_ops > 0);
92 
93 	for (i = 0; i < g_dequeue_mock; i++) {
94 		*ops++ = g_test_crypto_ops[i];
95 	}
96 
97 	return g_dequeue_mock;
98 }
99 
100 /* Instead of allocating real memory, assign the allocations to our
101  * test array for assertion in tests.
102  */
103 #define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
104 static inline unsigned
105 mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
106 			      enum rte_crypto_op_type type,
107 			      struct rte_crypto_op **ops, uint16_t nb_ops)
108 {
109 	int i;
110 
111 	for (i = 0; i < nb_ops; i++) {
112 		*ops++ = g_test_crypto_ops[i];
113 	}
114 	return ut_rte_crypto_op_bulk_alloc;
115 }
116 
#define rte_mempool_put_bulk mock_rte_mempool_put_bulk
/* No-op: the ops come from our static test arrays, not a real mempool,
 * so there is nothing to return to a pool.
 */
static __rte_always_inline void
mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
			  unsigned int n)
{
	return;
}
124 
#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
/* Return code is driven by ut_rte_crypto_op_attach_sym_session so tests
 * can force session-attach failures (-1) or success (0).
 */
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
{
	return ut_rte_crypto_op_attach_sym_session;
}
132 
#define rte_lcore_count mock_rte_lcore_count
/* Pretend exactly one lcore exists so pool sizing in the vbdev is deterministic. */
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}
139 
140 #include "bdev/crypto/vbdev_crypto.c"
141 
/* SPDK stubs */
/* DEFINE_STUB(name, ret, args, val) emits a mock that returns 'val';
 * DEFINE_STUB_V emits a void mock. See spdk_internal/mock.h.
 */
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
	    (struct spdk_conf *cp, const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
				  spdk_bdev_remove_cb_t remove_cb,
				  void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
170 
/* DPDK stubs */
/* Pool-creation stubs return (struct rte_mempool *)1 — a non-NULL sentinel
 * that must never be dereferenced, only compared against NULL by the vbdev.
 */
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
DEFINE_STUB(rte_mempool_create, struct rte_mempool *, (const char *name, unsigned n,
		unsigned elt_size,
		unsigned cache_size, unsigned private_data_size,
		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		int socket_id, unsigned flags), (struct rte_mempool *)1);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
	    (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
	     unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
/* The qp-setup signature changed in DPDK 19.02; stub whichever one applies. */
#if RTE_VERSION >= RTE_VERSION_NUM(19, 02, 0, 0)
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_sym_session_pool_create, struct rte_mempool *, (const char *name,
		uint32_t nb_elts,
		uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
		int socket_id), (struct rte_mempool *)1);
#else
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, struct rte_mempool *session_pool), 0);
#endif
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);
208 
/* Referenced by the included vbdev code; never populated by these tests. */
struct rte_cryptodev *rte_cryptodevs;

/* global vars and setup/cleanup functions used for all test functions */
struct spdk_bdev_io *g_bdev_io;		/* shared bdev_io fixture, driver_ctx = g_io_ctx */
struct crypto_bdev_io *g_io_ctx;	/* alias into g_bdev_io->driver_ctx */
struct crypto_io_channel *g_crypto_ch;	/* alias into g_io_ch's ctx area */
struct spdk_io_channel *g_io_ch;	/* shared io channel fixture */
struct vbdev_dev g_device;		/* fake crypto device */
struct vbdev_crypto g_crypto_bdev;	/* fake crypto vbdev */
struct device_qp g_dev_qp;		/* fake device queue pair */
219 
220 void
221 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
222 {
223 	dev_info->max_nb_queue_pairs = 1;
224 	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
225 		dev_info->driver_name = g_driver_names[0];
226 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
227 		dev_info->driver_name = g_driver_names[1];
228 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
229 		dev_info->driver_name = "junk";
230 	}
231 }
232 
/* Echo the device id back as the "private session size" so tests can tell
 * which device the vbdev queried.
 */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	unsigned int mocked_size = dev_id;

	return mocked_size;
}
238 
/* Immediately invoke the callback with a sentinel aux buffer (0xDEADBEEF)
 * so the encrypted-write path proceeds without a real buffer pool.
 */
void
spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
{
	cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
}
244 
/* Immediately invoke the callback claiming the data buffer is present. */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}
250 
/* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;
bool ut_spdk_bdev_readv_blocks_mocked = false;
/* Completion callback sees success iff the mocked return code is 0. */
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}
263 
int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
/* Completion callback sees success iff the mocked return code is 0. */
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}
275 
int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
/* Completion callback sees success iff the mocked return code is 0. */
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}
286 
int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
/* Completion callback sees success iff the mocked return code is 0. */
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}
297 
int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
/* Completion callback sees success iff the mocked return code is 0. */
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}
307 
bool g_completion_called = false;
/* Record the completion status on the io and flag that completion happened,
 * so tests can assert both the status and that the path completed at all.
 */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}
315 
316 /* Global setup for all tests that share a bunch of preparation... */
317 static int
318 test_setup(void)
319 {
320 	int i, rc;
321 
322 	/* Prepare essential variables for test routines */
323 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
324 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
325 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
326 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
327 	g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
328 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
329 	memset(&g_device, 0, sizeof(struct vbdev_dev));
330 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
331 	g_dev_qp.device = &g_device;
332 	g_io_ctx->crypto_ch = g_crypto_ch;
333 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
334 	g_crypto_ch->device_qp = &g_dev_qp;
335 	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
336 	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
337 
338 	/* Allocate a real mbuf pool so we can test error paths */
339 	g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
340 					SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
341 					SPDK_ENV_SOCKET_ID_ANY);
342 
343 	/* Instead of allocating real rte mempools for these, it's easier and provides the
344 	 * same coverage just calloc them here.
345 	 */
346 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
347 		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64,
348 				    sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op) +
349 				    AES_CBC_IV_LENGTH + QUEUED_OP_LENGTH);
350 		if (rc != 0) {
351 			assert(false);
352 		}
353 		memset(g_test_crypto_ops[i], 0, sizeof(struct rte_crypto_op) +
354 		       sizeof(struct rte_crypto_sym_op) + QUEUED_OP_LENGTH);
355 	}
356 	return 0;
357 }
358 
359 /* Global teardown for all tests */
360 static int
361 test_cleanup(void)
362 {
363 	int i;
364 
365 	spdk_mempool_free(g_mbuf_mp);
366 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
367 		free(g_test_crypto_ops[i]);
368 	}
369 	free(g_bdev_io->u.bdev.iovs);
370 	free(g_bdev_io);
371 	free(g_io_ch);
372 	return 0;
373 }
374 
/* Exercise the failure branches of vbdev_crypto_submit_request(): mbuf pool
 * exhaustion (write queues, read fails), readv submission failure, crypto-op
 * allocation failure, and session-attach failure. Order matters: each step
 * relies on the mock state set/cleared by the previous one.
 */
static void
test_error_paths(void)
{
	/* Single element block size write, just to test error paths
	 * in vbdev_crypto_submit_request().
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of spdk_mempool_get_bulk(), will result in success because it
	 * will get queued.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_mempool_get, NULL);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* same thing but switch to reads to test error path in _crypto_complete_io() */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* Pre-insert so the failing read path has an entry to remove. */
	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Now with the read_blocks failing */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_bdev_readv_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_SET(spdk_bdev_readv_blocks, 0);
	MOCK_CLEAR(spdk_mempool_get);

	/* test failure of rte_crypto_op_bulk_alloc() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_bulk_alloc = 0;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of rte_crypto_op_attach_sym_session() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_attach_sym_session = -1;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_attach_sym_session = 0;
}
426 
/* Happy-path single-block write: one iovec, one block, one crypto op.
 * Verifies the src mbuf wraps the caller's buffer, the dst mbuf wraps the
 * aux buffer (writes encrypt into a separate buffer), and cipher params.
 */
static void
test_simple_write(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.offset_blocks = 0;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
	CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
	CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);

	/* Return the mbufs the submit path pulled from the real pool. */
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
}
460 
/* Happy-path single-block read: one iovec, one block, one crypto op.
 * Reads decrypt in place, so m_dst must be NULL.
 */
static void
test_simple_read(void)
{
	/* Single element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	/* Return the mbuf the submit path pulled from the real pool. */
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
}
487 
/* Largest single-shot IO (CRYPTO_MAX_IO bytes) as both read and write:
 * one iovec spanning many blocks must produce one crypto op per block,
 * each wrapping the right 512-byte slice of the caller's buffer.
 */
static void
test_large_rw(void)
{
	unsigned block_len = 512;
	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
	unsigned io_len = block_len * num_blocks;
	unsigned i;

	/* Multi block size read, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
		/* Writes land in one contiguous aux buffer covering the whole IO. */
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
		CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
		CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
	}
}
552 
553 static void
554 test_dev_full(void)
555 {
556 	struct vbdev_crypto_op *queued_op;
557 	struct rte_crypto_sym_op *sym_op;
558 	struct crypto_bdev_io *io_ctx;
559 
560 	/* Two element block size read */
561 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
562 	g_bdev_io->u.bdev.iovcnt = 1;
563 	g_bdev_io->u.bdev.num_blocks = 2;
564 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
565 	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
566 	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
567 	g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
568 	g_crypto_bdev.crypto_bdev.blocklen = 512;
569 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
570 	g_enqueue_mock = g_dequeue_mock = 1;
571 	ut_rte_crypto_op_bulk_alloc = 2;
572 
573 	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
574 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
575 
576 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
577 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
578 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
579 	sym_op = g_test_crypto_ops[0]->sym;
580 	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
581 	CU_ASSERT(sym_op->m_src->data_len == 512);
582 	CU_ASSERT(sym_op->m_src->next == NULL);
583 	CU_ASSERT(sym_op->cipher.data.length == 512);
584 	CU_ASSERT(sym_op->cipher.data.offset == 0);
585 	CU_ASSERT(sym_op->m_src->userdata == g_bdev_io);
586 	CU_ASSERT(sym_op->m_dst == NULL);
587 
588 	/* make sure one got queued and confirm its values */
589 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
590 	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
591 	sym_op = queued_op->crypto_op->sym;
592 	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
593 	CU_ASSERT(queued_op->bdev_io == g_bdev_io);
594 	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
595 	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
596 	CU_ASSERT(sym_op->m_src->data_len == 512);
597 	CU_ASSERT(sym_op->m_src->next == NULL);
598 	CU_ASSERT(sym_op->cipher.data.length == 512);
599 	CU_ASSERT(sym_op->cipher.data.offset == 0);
600 	CU_ASSERT(sym_op->m_src->userdata == g_bdev_io);
601 	CU_ASSERT(sym_op->m_dst == NULL);
602 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
603 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
604 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[1]->sym->m_src);
605 
606 	/* Non-busy reason for enqueue failure, all were rejected. */
607 	g_enqueue_mock = 0;
608 	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
609 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
610 	io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
611 	CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
612 }
613 
614 static void
615 test_crazy_rw(void)
616 {
617 	unsigned block_len = 512;
618 	int num_blocks = 4;
619 	int i;
620 
621 	/* Multi block size read, single element, strange IOV makeup */
622 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
623 	g_bdev_io->u.bdev.iovcnt = 3;
624 	g_bdev_io->u.bdev.num_blocks = num_blocks;
625 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
626 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
627 	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
628 	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
629 	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
630 	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;
631 
632 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
633 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
634 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
635 
636 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
637 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
638 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
639 
640 	for (i = 0; i < num_blocks; i++) {
641 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
642 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
643 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
644 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
645 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
646 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
647 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
648 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
649 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
650 	}
651 
652 	/* Multi block size write, single element strange IOV makeup */
653 	num_blocks = 8;
654 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
655 	g_bdev_io->u.bdev.iovcnt = 4;
656 	g_bdev_io->u.bdev.num_blocks = num_blocks;
657 	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
658 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
659 	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
660 	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
661 	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
662 	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
663 	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
664 	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;
665 
666 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
667 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
668 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
669 
670 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
671 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
672 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
673 
674 	for (i = 0; i < num_blocks; i++) {
675 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
676 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
677 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
678 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
679 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
680 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
681 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
682 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
683 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
684 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
685 	}
686 }
687 
/* IO types the vbdev passes straight to the base bdev (unmap, flush):
 * success and failure of the base call must be reflected on the io.
 * WRITE_ZEROES is unsupported and must always fail.
 */
static void
test_passthru(void)
{
	/* Make sure these follow our completion callback, test success & fail. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	MOCK_SET(spdk_bdev_unmap_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_unmap_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_unmap_blocks);

	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
	MOCK_SET(spdk_bdev_flush_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* We should never get a WZ command, we report that we don't support it. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
}
715 
/* Intentionally empty placeholder — see TODO below. */
static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting w/o UT for this function for now and
	 * will follow up with something shortly.
	 */
}
725 
726 static void
727 init_cleanup(void)
728 {
729 	spdk_mempool_free(g_mbuf_mp);
730 	rte_mempool_free(g_session_mp);
731 	g_mbuf_mp = NULL;
732 	g_session_mp = NULL;
733 	if (g_session_mp_priv != NULL) {
734 		/* g_session_mp_priv may or may not be set depending on the DPDK version */
735 		rte_mempool_free(g_session_mp_priv);
736 	}
737 }
738 
739 static void
740 test_initdrivers(void)
741 {
742 	int rc;
743 	static struct spdk_mempool *orig_mbuf_mp;
744 	static struct rte_mempool *orig_session_mp;
745 	static struct rte_mempool *orig_session_mp_priv;
746 
747 	/* These tests will alloc and free our g_mbuf_mp
748 	 * so save that off here and restore it after each test is over.
749 	 */
750 	orig_mbuf_mp = g_mbuf_mp;
751 	orig_session_mp = g_session_mp;
752 	orig_session_mp_priv = g_session_mp_priv;
753 
754 	g_session_mp_priv = NULL;
755 	g_session_mp = NULL;
756 	g_mbuf_mp = NULL;
757 
758 	/* No drivers available, not an error though */
759 	MOCK_SET(rte_cryptodev_count, 0);
760 	rc = vbdev_crypto_init_crypto_drivers();
761 	CU_ASSERT(rc == 0);
762 	CU_ASSERT(g_mbuf_mp == NULL);
763 	CU_ASSERT(g_session_mp == NULL);
764 	CU_ASSERT(g_session_mp_priv == NULL);
765 
766 	/* Test failure of DPDK dev init. */
767 	MOCK_SET(rte_cryptodev_count, 2);
768 	MOCK_SET(rte_vdev_init, -1);
769 	rc = vbdev_crypto_init_crypto_drivers();
770 	CU_ASSERT(rc == -EINVAL);
771 	CU_ASSERT(g_mbuf_mp == NULL);
772 	CU_ASSERT(g_session_mp == NULL);
773 	CU_ASSERT(g_session_mp_priv == NULL);
774 	MOCK_SET(rte_vdev_init, 0);
775 
776 	/* Can't create session pool. */
777 	MOCK_SET(spdk_mempool_create, NULL);
778 	rc = vbdev_crypto_init_crypto_drivers();
779 	CU_ASSERT(rc == -ENOMEM);
780 	CU_ASSERT(g_mbuf_mp == NULL);
781 	CU_ASSERT(g_session_mp == NULL);
782 	CU_ASSERT(g_session_mp_priv == NULL);
783 	MOCK_CLEAR(spdk_mempool_create);
784 
785 	/* Can't create op pool. */
786 	MOCK_SET(rte_crypto_op_pool_create, NULL);
787 	rc = vbdev_crypto_init_crypto_drivers();
788 	CU_ASSERT(rc == -ENOMEM);
789 	CU_ASSERT(g_mbuf_mp == NULL);
790 	CU_ASSERT(g_session_mp == NULL);
791 	CU_ASSERT(g_session_mp_priv == NULL);
792 	MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
793 
794 	/* Check resources are not sufficient */
795 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
796 	rc = vbdev_crypto_init_crypto_drivers();
797 	CU_ASSERT(rc == -EINVAL);
798 
799 	/* Test crypto dev configure failure. */
800 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
801 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
802 	MOCK_SET(rte_cryptodev_configure, -1);
803 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
804 	rc = vbdev_crypto_init_crypto_drivers();
805 	MOCK_SET(rte_cryptodev_configure, 0);
806 	CU_ASSERT(g_mbuf_mp == NULL);
807 	CU_ASSERT(g_session_mp == NULL);
808 	CU_ASSERT(g_session_mp_priv == NULL);
809 	CU_ASSERT(rc == -EINVAL);
810 
811 	/* Test failure of qp setup. */
812 	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
813 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
814 	rc = vbdev_crypto_init_crypto_drivers();
815 	CU_ASSERT(rc == -EINVAL);
816 	CU_ASSERT(g_mbuf_mp == NULL);
817 	CU_ASSERT(g_session_mp == NULL);
818 	CU_ASSERT(g_session_mp_priv == NULL);
819 	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
820 
821 	/* Test failure of dev start. */
822 	MOCK_SET(rte_cryptodev_start, -1);
823 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
824 	rc = vbdev_crypto_init_crypto_drivers();
825 	CU_ASSERT(rc == -EINVAL);
826 	CU_ASSERT(g_mbuf_mp == NULL);
827 	CU_ASSERT(g_session_mp == NULL);
828 	CU_ASSERT(g_session_mp_priv == NULL);
829 	MOCK_SET(rte_cryptodev_start, 0);
830 
831 	/* Test bogus PMD */
832 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
833 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
834 	rc = vbdev_crypto_init_crypto_drivers();
835 	CU_ASSERT(g_mbuf_mp == NULL);
836 	CU_ASSERT(g_session_mp == NULL);
837 	CU_ASSERT(rc == -EINVAL);
838 
839 	/* Test happy path QAT. */
840 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
841 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
842 	rc = vbdev_crypto_init_crypto_drivers();
843 	CU_ASSERT(g_mbuf_mp != NULL);
844 	CU_ASSERT(g_session_mp != NULL);
845 	init_cleanup();
846 	CU_ASSERT(rc == 0);
847 
848 	/* Test happy path AESNI. */
849 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
850 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
851 	rc = vbdev_crypto_init_crypto_drivers();
852 	init_cleanup();
853 	CU_ASSERT(rc == 0);
854 
855 	/* restore our initial values. */
856 	g_mbuf_mp = orig_mbuf_mp;
857 	g_session_mp = orig_session_mp;
858 	g_session_mp_priv = orig_session_mp_priv;
859 }
860 
/* Drive _crypto_operation_complete() through every bdev_io type branch and
 * verify both the resulting status and that the completion callback fired
 * (g_completion_called is set by the mocked completion path).
 */
static void
test_crypto_op_complete(void)
{
	/* Make sure completion code respects failure. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test read completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion success. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_writev_blocks, 0);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion failed. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	/* NOTE(review): this mock is left set to -1 past the end of the test;
	 * later tests that hit the writev path would inherit it — confirm none do.
	 */
	MOCK_SET(spdk_bdev_writev_blocks, -1);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test bogus type for this completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}
905 
906 static void
907 test_supported_io(void)
908 {
909 	void *ctx = NULL;
910 	bool rc = true;
911 
912 	/* Make sure we always report false to WZ, we need the bdev layer to
913 	 * send real 0's so we can encrypt/decrypt them.
914 	 */
915 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
916 	CU_ASSERT(rc == false);
917 }
918 
/* Exercise crypto_dev_poller(): a normal single-op dequeue/complete, the
 * resubmission of previously queued ops when nothing is dequeued, and a
 * two-op dequeue where the second op failed.
 */
static void
test_poller(void)
{
	int rc;
	struct rte_mbuf *src_mbufs[2];
	struct vbdev_crypto_op *op_to_resubmit;

	/* test regular 1 op to dequeue and complete */
	g_dequeue_mock = g_enqueue_mock = 1;
	spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 1);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_io_ctx->cryop_cnt_remaining = 1;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	rc = crypto_dev_poller(g_crypto_ch);
	/* poller returns the number of ops it processed */
	CU_ASSERT(rc == 1);

	/* We have nothing dequeued but have some to resubmit */
	g_dequeue_mock = 0;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* add an op to the queued list. */
	g_resubmit_test = true;
	/* queued-op bookkeeping lives at a fixed offset past the crypto op */
	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
	op_to_resubmit->bdev_io = g_bdev_io;
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
			  op_to_resubmit,
			  link);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	rc = crypto_dev_poller(g_crypto_ch);
	g_resubmit_test = false;
	/* nothing dequeued, but the queued op must have been resubmitted */
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* 2 to dequeue but 2nd one failed */
	g_dequeue_mock = g_enqueue_mock = 2;
	g_io_ctx->cryop_cnt_remaining = 2;
	spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 2);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_test_crypto_ops[0]->status =  RTE_CRYPTO_OP_STATUS_SUCCESS;
	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
	g_test_crypto_ops[1]->sym->m_src->userdata = g_bdev_io;
	g_test_crypto_ops[1]->sym->m_dst = NULL;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	rc = crypto_dev_poller(g_crypto_ch);
	/* one failed op fails the whole bdev_io */
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(rc == 2);
}
972 
973 /* Helper function for test_assign_device_qp() */
974 static void
975 _clear_device_qp_lists(void)
976 {
977 	struct device_qp *device_qp = NULL;
978 
979 	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
980 		device_qp = TAILQ_FIRST(&g_device_qp_qat);
981 		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
982 		free(device_qp);
983 
984 	}
985 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
986 	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
987 		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
988 		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
989 		free(device_qp);
990 	}
991 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
992 }
993 
994 /* Helper function for test_assign_device_qp() */
995 static void
996 _check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
997 		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
998 		       uint8_t current_index)
999 {
1000 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1001 	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
1002 	CU_ASSERT(g_next_qat_index == current_index);
1003 }
1004 
/* Verify _assign_device_qp(): a single AESNI_MB qp is found directly, and
 * QAT assignments are spread across virtual functions in QAT_VF_SPREAD
 * steps modulo g_qat_total_qp.
 */
static void
test_assign_device_qp(void)
{
	struct device_qp *device_qp = NULL;
	int i;

	/* start with a known state, clear the device/qp lists */
	_clear_device_qp_lists();

	/* make sure that one AESNI_MB qp is found */
	device_qp = calloc(1, sizeof(struct device_qp));
	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
	g_crypto_ch->device_qp = NULL;
	g_crypto_bdev.drv_name = AESNI_MB;
	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
	CU_ASSERT(g_crypto_ch->device_qp != NULL);

	/* QAT testing is more complex as the code under test load balances by
	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
	 * each with 2 qp so the "spread" betwen assignments is 32.
	 */
	g_qat_total_qp = 96;
	for (i = 0; i < g_qat_total_qp; i++) {
		device_qp = calloc(1, sizeof(struct device_qp));
		device_qp->index = i;
		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
	}
	g_crypto_ch->device_qp = NULL;
	g_crypto_bdev.drv_name = QAT;

	/* First assignment will assign to 0 and next at 32. */
	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
			       0, QAT_VF_SPREAD);

	/* Second assignment will assign to 32 and next at 64. */
	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);

	/* Third assignment will assign to 64 and next at 0. */
	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
			       QAT_VF_SPREAD * 2, 0);

	/* Fourth assignment will assign to 1 and next at 33. */
	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
			       1, QAT_VF_SPREAD + 1);

	/* frees everything inserted above, including the AESNI_MB entry */
	_clear_device_qp_lists();
}
1054 
1055 int
1056 main(int argc, char **argv)
1057 {
1058 	CU_pSuite	suite = NULL;
1059 	unsigned int	num_failures;
1060 
1061 	CU_set_error_action(CUEA_ABORT);
1062 	CU_initialize_registry();
1063 
1064 	suite = CU_add_suite("crypto", test_setup, test_cleanup);
1065 	CU_ADD_TEST(suite, test_error_paths);
1066 	CU_ADD_TEST(suite, test_simple_write);
1067 	CU_ADD_TEST(suite, test_simple_read);
1068 	CU_ADD_TEST(suite, test_large_rw);
1069 	CU_ADD_TEST(suite, test_dev_full);
1070 	CU_ADD_TEST(suite, test_crazy_rw);
1071 	CU_ADD_TEST(suite, test_passthru);
1072 	CU_ADD_TEST(suite, test_initdrivers);
1073 	CU_ADD_TEST(suite, test_crypto_op_complete);
1074 	CU_ADD_TEST(suite, test_supported_io);
1075 	CU_ADD_TEST(suite, test_reset);
1076 	CU_ADD_TEST(suite, test_poller);
1077 	CU_ADD_TEST(suite, test_assign_device_qp);
1078 
1079 	CU_basic_set_mode(CU_BRM_VERBOSE);
1080 	CU_basic_run_tests();
1081 	num_failures = CU_get_number_of_failures();
1082 	CU_cleanup_registry();
1083 	return num_failures;
1084 }
1085