xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision 4237d2d8cb14e5e52ede3ed9718019488b445afe)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/test_env.c"
37 #include "spdk_internal/mock.h"
38 #include "unit/lib/json_mock.c"
39 
40 #include <rte_crypto.h>
41 #include <rte_cryptodev.h>
42 
#define MAX_TEST_BLOCKS 8192
/* Arrays the mocks below hand out/record in place of real DPDK allocations;
 * the tests assert directly against their contents.
 */
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

/* Counts the enqueue/dequeue burst mocks report back to the code under test */
uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
/* Return value for mock_rte_crypto_op_bulk_alloc() */
unsigned ut_rte_crypto_op_bulk_alloc;
/* Return value for mock_rte_crypto_op_attach_sym_session() */
int ut_rte_crypto_op_attach_sym_session = 0;
/* Selects which driver name the rte_cryptodev_info_get() mock reports */
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_BOGUS_PMD 2
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;
56 
57 /* Those functions are defined as static inline in DPDK, so we can't
58  * mock them straight away. We use defines to redirect them into
59  * our custom functions.
60  */
61 static bool g_resubmit_test = false;
62 #define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
63 static inline uint16_t
64 mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
65 				 struct rte_crypto_op **ops, uint16_t nb_ops)
66 {
67 	int i;
68 
69 	CU_ASSERT(nb_ops > 0);
70 
71 	for (i = 0; i < nb_ops; i++) {
72 		/* Use this empty (til now) array of pointers to store
73 		 * enqueued operations for assertion in dev_full test.
74 		 */
75 		g_test_dev_full_ops[i] = *ops++;
76 		if (g_resubmit_test == true) {
77 			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
78 		}
79 	}
80 
81 	return g_enqueue_mock;
82 }
83 
84 #define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
85 static inline uint16_t
86 mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
87 				 struct rte_crypto_op **ops, uint16_t nb_ops)
88 {
89 	int i;
90 
91 	CU_ASSERT(nb_ops > 0);
92 
93 	for (i = 0; i < g_dequeue_mock; i++) {
94 		*ops++ = g_test_crypto_ops[i];
95 	}
96 
97 	return g_dequeue_mock;
98 }
99 
100 /* Instead of allocating real memory, assign the allocations to our
101  * test array for assertion in tests.
102  */
103 #define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
104 static inline unsigned
105 mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
106 			      enum rte_crypto_op_type type,
107 			      struct rte_crypto_op **ops, uint16_t nb_ops)
108 {
109 	int i;
110 
111 	for (i = 0; i < nb_ops; i++) {
112 		*ops++ = g_test_crypto_ops[i];
113 	}
114 	return ut_rte_crypto_op_bulk_alloc;
115 }
116 
117 #define rte_mempool_put_bulk mock_rte_mempool_put_bulk
118 static __rte_always_inline void
119 mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
120 			  unsigned int n)
121 {
122 	return;
123 }
124 
125 #define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
126 static inline int
127 mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
128 				      struct rte_cryptodev_sym_session *sess)
129 {
130 	return ut_rte_crypto_op_attach_sym_session;
131 }
132 
133 #define rte_lcore_count mock_rte_lcore_count
static inline unsigned
mock_rte_lcore_count(void)
{
	/* Pretend the test process runs on exactly one lcore. */
	return 1;
}
139 
140 #include "bdev/crypto/vbdev_crypto.c"
141 
142 /* SPDK stubs */
143 DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
144 		struct spdk_bdev_io_wait_entry *entry), 0);
145 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
146 DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
147 DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
148 DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
149 		enum spdk_bdev_io_type io_type), 0);
150 DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
151 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
152 DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
153 DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
154 DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
155 DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
156 				     void *cb_arg));
157 DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
158 				      spdk_bdev_event_cb_t event_cb,
159 				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
160 DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
161 DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
162 		struct spdk_bdev_module *module), 0);
163 DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
164 DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
165 
166 /* DPDK stubs */
167 DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
168 DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
169 DEFINE_STUB(rte_mempool_create, struct rte_mempool *, (const char *name, unsigned n,
170 		unsigned elt_size,
171 		unsigned cache_size, unsigned private_data_size,
172 		rte_mempool_ctor_t *mp_init, void *mp_init_arg,
173 		rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
174 		int socket_id, unsigned flags), (struct rte_mempool *)1);
175 DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
176 DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
177 	    (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
178 	     unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
179 DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
180 DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
181 #if RTE_VERSION >= RTE_VERSION_NUM(19, 02, 0, 0)
182 DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
183 		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
184 DEFINE_STUB(rte_cryptodev_sym_session_pool_create, struct rte_mempool *, (const char *name,
185 		uint32_t nb_elts,
186 		uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
187 		int socket_id), (struct rte_mempool *)1);
188 #else
189 DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
190 		const struct rte_cryptodev_qp_conf *qp_conf,
191 		int socket_id, struct rte_mempool *session_pool), 0);
192 #endif
193 DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
194 DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
195 DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
196 	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
197 DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
198 		struct rte_cryptodev_sym_session *sess,
199 		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
200 DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
201 DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
202 DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);
203 
struct rte_cryptodev *rte_cryptodevs;

/* global vars and setup/cleanup functions used for all test functions */
struct spdk_bdev_io *g_bdev_io;		/* single bdev_io reused by every test */
struct crypto_bdev_io *g_io_ctx;	/* alias of g_bdev_io->driver_ctx */
struct crypto_io_channel *g_crypto_ch;	/* ctx area appended to g_io_ch */
struct spdk_io_channel *g_io_ch;
struct vbdev_dev g_device;
struct vbdev_crypto g_crypto_bdev;
struct device_qp g_dev_qp;
214 
215 void
216 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
217 {
218 	dev_info->max_nb_queue_pairs = 1;
219 	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
220 		dev_info->driver_name = g_driver_names[0];
221 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
222 		dev_info->driver_name = g_driver_names[1];
223 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
224 		dev_info->driver_name = "junk";
225 	}
226 }
227 
/* Echo dev_id back as the private session size so tests can verify the
 * value is plumbed through unchanged.
 */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	unsigned int size = dev_id;

	return size;
}
233 
234 void
235 spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
236 {
237 	cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
238 }
239 
240 void
241 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
242 {
243 	cb(g_io_ch, g_bdev_io, true);
244 }
245 
246 /* Mock these functions to call the callback and then return the value we require */
247 int ut_spdk_bdev_readv_blocks = 0;
248 bool ut_spdk_bdev_readv_blocks_mocked = false;
249 int
250 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
251 		       struct iovec *iov, int iovcnt,
252 		       uint64_t offset_blocks, uint64_t num_blocks,
253 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
254 {
255 	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
256 	return ut_spdk_bdev_readv_blocks;
257 }
258 
259 int ut_spdk_bdev_writev_blocks = 0;
260 bool ut_spdk_bdev_writev_blocks_mocked = false;
261 int
262 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
263 			struct iovec *iov, int iovcnt,
264 			uint64_t offset_blocks, uint64_t num_blocks,
265 			spdk_bdev_io_completion_cb cb, void *cb_arg)
266 {
267 	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
268 	return ut_spdk_bdev_writev_blocks;
269 }
270 
271 int ut_spdk_bdev_unmap_blocks = 0;
272 bool ut_spdk_bdev_unmap_blocks_mocked = false;
273 int
274 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
275 		       uint64_t offset_blocks, uint64_t num_blocks,
276 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
277 {
278 	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
279 	return ut_spdk_bdev_unmap_blocks;
280 }
281 
282 int ut_spdk_bdev_flush_blocks = 0;
283 bool ut_spdk_bdev_flush_blocks_mocked = false;
284 int
285 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
286 		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
287 		       void *cb_arg)
288 {
289 	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
290 	return ut_spdk_bdev_flush_blocks;
291 }
292 
293 int ut_spdk_bdev_reset = 0;
294 bool ut_spdk_bdev_reset_mocked = false;
295 int
296 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
297 		spdk_bdev_io_completion_cb cb, void *cb_arg)
298 {
299 	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
300 	return ut_spdk_bdev_reset;
301 }
302 
303 bool g_completion_called = false;
304 void
305 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
306 {
307 	bdev_io->internal.status = status;
308 	g_completion_called = true;
309 }
310 
311 /* Global setup for all tests that share a bunch of preparation... */
312 static int
313 test_setup(void)
314 {
315 	int i, rc;
316 
317 	/* Prepare essential variables for test routines */
318 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
319 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
320 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
321 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
322 	g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
323 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
324 	memset(&g_device, 0, sizeof(struct vbdev_dev));
325 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
326 	g_dev_qp.device = &g_device;
327 	g_io_ctx->crypto_ch = g_crypto_ch;
328 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
329 	g_crypto_ch->device_qp = &g_dev_qp;
330 	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
331 	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
332 
333 	/* Allocate a real mbuf pool so we can test error paths */
334 	g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
335 					SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
336 					SPDK_ENV_SOCKET_ID_ANY);
337 
338 	/* Instead of allocating real rte mempools for these, it's easier and provides the
339 	 * same coverage just calloc them here.
340 	 */
341 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
342 		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64,
343 				    sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op) +
344 				    AES_CBC_IV_LENGTH + QUEUED_OP_LENGTH);
345 		if (rc != 0) {
346 			assert(false);
347 		}
348 		memset(g_test_crypto_ops[i], 0, sizeof(struct rte_crypto_op) +
349 		       sizeof(struct rte_crypto_sym_op) + QUEUED_OP_LENGTH);
350 	}
351 	return 0;
352 }
353 
354 /* Global teardown for all tests */
355 static int
356 test_cleanup(void)
357 {
358 	int i;
359 
360 	spdk_mempool_free(g_mbuf_mp);
361 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
362 		free(g_test_crypto_ops[i]);
363 	}
364 	free(g_bdev_io->u.bdev.iovs);
365 	free(g_bdev_io);
366 	free(g_io_ch);
367 	return 0;
368 }
369 
/* Walk the failure paths of vbdev_crypto_submit_request() for a single
 * 512 byte block I/O, forcing each allocation/attach step to fail in turn
 * via the mocks and checking the resulting bdev_io status.
 */
static void
test_error_paths(void)
{
	/* Single element block size write, just to test error paths
	 * in vbdev_crypto_submit_request().
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of spdk_mempool_get_bulk(), will result in success because it
	 * will get queued.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_mempool_get, NULL);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* same thing but switch to reads to test error path in _crypto_complete_io() */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* The failing read path removes the io from the pending list, so it
	 * has to be inserted here first.
	 */
	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Now with the read_blocks failing */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_bdev_readv_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_SET(spdk_bdev_readv_blocks, 0);
	MOCK_CLEAR(spdk_mempool_get);

	/* test failure of rte_crypto_op_bulk_alloc() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_bulk_alloc = 0;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of rte_crypto_op_attach_sym_session() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_attach_sym_session = -1;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_attach_sym_session = 0;
}
421 
/* Submit a single 512 byte, single-iov write and verify the generated
 * crypto op: source mbuf wraps the caller's buffer, and a separate
 * destination mbuf plus the aux buffer fields are set up for the
 * encrypted output.
 */
static void
test_simple_write(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.offset_blocks = 0;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
	CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
	CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);

	/* The mocks do not consume the mbufs, so return them to the pool. */
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
}
455 
/* Submit a single 512 byte, single-iov read and verify the generated
 * crypto op: source mbuf wraps the caller's buffer and no destination
 * mbuf is used (m_dst is NULL, i.e. in-place decryption).
 */
static void
test_simple_read(void)
{
	/* Single element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	/* The mocks do not consume the mbuf, so return it to the pool. */
	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
}
482 
/* Submit a CRYPTO_MAX_IO sized single-iov read and then write, and verify
 * that one crypto op is built per 512 byte block, each mapping the correct
 * slice of the caller's buffer.
 */
static void
test_large_rw(void)
{
	unsigned block_len = 512;
	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
	unsigned io_len = block_len * num_blocks;
	unsigned i;

	/* Multi block size read, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* Each op should cover one block-sized slice; reads use no m_dst. */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* Writes additionally get an aux buffer and a per-op m_dst. */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
		CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
		CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
	}
}
547 
548 static void
549 test_dev_full(void)
550 {
551 	struct vbdev_crypto_op *queued_op;
552 	struct rte_crypto_sym_op *sym_op;
553 	struct crypto_bdev_io *io_ctx;
554 
555 	/* Two element block size read */
556 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
557 	g_bdev_io->u.bdev.iovcnt = 1;
558 	g_bdev_io->u.bdev.num_blocks = 2;
559 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
560 	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
561 	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
562 	g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
563 	g_crypto_bdev.crypto_bdev.blocklen = 512;
564 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
565 	g_enqueue_mock = g_dequeue_mock = 1;
566 	ut_rte_crypto_op_bulk_alloc = 2;
567 
568 	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
569 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
570 
571 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
572 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
573 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
574 	sym_op = g_test_crypto_ops[0]->sym;
575 	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
576 	CU_ASSERT(sym_op->m_src->data_len == 512);
577 	CU_ASSERT(sym_op->m_src->next == NULL);
578 	CU_ASSERT(sym_op->cipher.data.length == 512);
579 	CU_ASSERT(sym_op->cipher.data.offset == 0);
580 	CU_ASSERT(sym_op->m_src->userdata == g_bdev_io);
581 	CU_ASSERT(sym_op->m_dst == NULL);
582 
583 	/* make sure one got queued and confirm its values */
584 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
585 	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
586 	sym_op = queued_op->crypto_op->sym;
587 	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
588 	CU_ASSERT(queued_op->bdev_io == g_bdev_io);
589 	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
590 	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
591 	CU_ASSERT(sym_op->m_src->data_len == 512);
592 	CU_ASSERT(sym_op->m_src->next == NULL);
593 	CU_ASSERT(sym_op->cipher.data.length == 512);
594 	CU_ASSERT(sym_op->cipher.data.offset == 0);
595 	CU_ASSERT(sym_op->m_src->userdata == g_bdev_io);
596 	CU_ASSERT(sym_op->m_dst == NULL);
597 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
598 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
599 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[1]->sym->m_src);
600 
601 	/* Non-busy reason for enqueue failure, all were rejected. */
602 	g_enqueue_mock = 0;
603 	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
604 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
605 	io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
606 	CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
607 }
608 
/* Submit reads and writes whose iovs do not line up with block boundaries
 * and verify one correctly-sliced crypto op is still built per block.
 */
static void
test_crazy_rw(void)
{
	unsigned block_len = 512;
	int num_blocks = 4;
	int i;

	/* Multi block size read, single element, strange IOV makeup */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 3;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
		/* NOTE(review): this assertion compares m_src with itself and can
		 * never fail — presumably a different operand was intended.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element strange IOV makeup */
	num_blocks = 8;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 4;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
		/* NOTE(review): these two assertions compare a field with itself and
		 * can never fail — presumably different operands were intended.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
	}
}
682 
/* I/O types the crypto vbdev does not transform (unmap, flush) must pass
 * straight through and reflect the base bdev's success/failure; write
 * zeroes is reported unsupported and must fail.
 */
static void
test_passthru(void)
{
	/* Make sure these follow our completion callback, test success & fail. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	MOCK_SET(spdk_bdev_unmap_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_unmap_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_unmap_blocks);

	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
	MOCK_SET(spdk_bdev_flush_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* We should never get a WZ command, we report that we don't support it. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
}
710 
/* Intentionally empty placeholder for reset handling coverage. */
static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting w/o UT for this function for now and
	 * will follow up with something shortly.
	 */
}
720 
721 static void
722 init_cleanup(void)
723 {
724 	spdk_mempool_free(g_mbuf_mp);
725 	rte_mempool_free(g_session_mp);
726 	g_mbuf_mp = NULL;
727 	g_session_mp = NULL;
728 	if (g_session_mp_priv != NULL) {
729 		/* g_session_mp_priv may or may not be set depending on the DPDK version */
730 		rte_mempool_free(g_session_mp_priv);
731 	}
732 }
733 
734 static void
735 test_initdrivers(void)
736 {
737 	int rc;
738 	static struct spdk_mempool *orig_mbuf_mp;
739 	static struct rte_mempool *orig_session_mp;
740 	static struct rte_mempool *orig_session_mp_priv;
741 
742 	/* These tests will alloc and free our g_mbuf_mp
743 	 * so save that off here and restore it after each test is over.
744 	 */
745 	orig_mbuf_mp = g_mbuf_mp;
746 	orig_session_mp = g_session_mp;
747 	orig_session_mp_priv = g_session_mp_priv;
748 
749 	g_session_mp_priv = NULL;
750 	g_session_mp = NULL;
751 	g_mbuf_mp = NULL;
752 
753 	/* No drivers available, not an error though */
754 	MOCK_SET(rte_cryptodev_count, 0);
755 	rc = vbdev_crypto_init_crypto_drivers();
756 	CU_ASSERT(rc == 0);
757 	CU_ASSERT(g_mbuf_mp == NULL);
758 	CU_ASSERT(g_session_mp == NULL);
759 	CU_ASSERT(g_session_mp_priv == NULL);
760 
761 	/* Test failure of DPDK dev init. */
762 	MOCK_SET(rte_cryptodev_count, 2);
763 	MOCK_SET(rte_vdev_init, -1);
764 	rc = vbdev_crypto_init_crypto_drivers();
765 	CU_ASSERT(rc == -EINVAL);
766 	CU_ASSERT(g_mbuf_mp == NULL);
767 	CU_ASSERT(g_session_mp == NULL);
768 	CU_ASSERT(g_session_mp_priv == NULL);
769 	MOCK_SET(rte_vdev_init, 0);
770 
771 	/* Can't create session pool. */
772 	MOCK_SET(spdk_mempool_create, NULL);
773 	rc = vbdev_crypto_init_crypto_drivers();
774 	CU_ASSERT(rc == -ENOMEM);
775 	CU_ASSERT(g_mbuf_mp == NULL);
776 	CU_ASSERT(g_session_mp == NULL);
777 	CU_ASSERT(g_session_mp_priv == NULL);
778 	MOCK_CLEAR(spdk_mempool_create);
779 
780 	/* Can't create op pool. */
781 	MOCK_SET(rte_crypto_op_pool_create, NULL);
782 	rc = vbdev_crypto_init_crypto_drivers();
783 	CU_ASSERT(rc == -ENOMEM);
784 	CU_ASSERT(g_mbuf_mp == NULL);
785 	CU_ASSERT(g_session_mp == NULL);
786 	CU_ASSERT(g_session_mp_priv == NULL);
787 	MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
788 
789 	/* Check resources are not sufficient */
790 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
791 	rc = vbdev_crypto_init_crypto_drivers();
792 	CU_ASSERT(rc == -EINVAL);
793 
794 	/* Test crypto dev configure failure. */
795 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
796 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
797 	MOCK_SET(rte_cryptodev_configure, -1);
798 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
799 	rc = vbdev_crypto_init_crypto_drivers();
800 	MOCK_SET(rte_cryptodev_configure, 0);
801 	CU_ASSERT(g_mbuf_mp == NULL);
802 	CU_ASSERT(g_session_mp == NULL);
803 	CU_ASSERT(g_session_mp_priv == NULL);
804 	CU_ASSERT(rc == -EINVAL);
805 
806 	/* Test failure of qp setup. */
807 	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
808 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
809 	rc = vbdev_crypto_init_crypto_drivers();
810 	CU_ASSERT(rc == -EINVAL);
811 	CU_ASSERT(g_mbuf_mp == NULL);
812 	CU_ASSERT(g_session_mp == NULL);
813 	CU_ASSERT(g_session_mp_priv == NULL);
814 	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
815 
816 	/* Test failure of dev start. */
817 	MOCK_SET(rte_cryptodev_start, -1);
818 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
819 	rc = vbdev_crypto_init_crypto_drivers();
820 	CU_ASSERT(rc == -EINVAL);
821 	CU_ASSERT(g_mbuf_mp == NULL);
822 	CU_ASSERT(g_session_mp == NULL);
823 	CU_ASSERT(g_session_mp_priv == NULL);
824 	MOCK_SET(rte_cryptodev_start, 0);
825 
826 	/* Test bogus PMD */
827 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
828 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
829 	rc = vbdev_crypto_init_crypto_drivers();
830 	CU_ASSERT(g_mbuf_mp == NULL);
831 	CU_ASSERT(g_session_mp == NULL);
832 	CU_ASSERT(rc == -EINVAL);
833 
834 	/* Test happy path QAT. */
835 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
836 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
837 	rc = vbdev_crypto_init_crypto_drivers();
838 	CU_ASSERT(g_mbuf_mp != NULL);
839 	CU_ASSERT(g_session_mp != NULL);
840 	init_cleanup();
841 	CU_ASSERT(rc == 0);
842 
843 	/* Test happy path AESNI. */
844 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
845 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
846 	rc = vbdev_crypto_init_crypto_drivers();
847 	init_cleanup();
848 	CU_ASSERT(rc == 0);
849 
850 	/* restore our initial values. */
851 	g_mbuf_mp = orig_mbuf_mp;
852 	g_session_mp = orig_session_mp;
853 	g_session_mp_priv = orig_session_mp_priv;
854 }
855 
/* Exercise _crypto_operation_complete() for every I/O type it handles.
 * All state flows through the shared g_bdev_io fixture; the stubbed bdev
 * completion callback sets g_completion_called, which each stanza resets
 * and then checks to prove the callback actually fired.
 */
static void
test_crypto_op_complete(void)
{
	/* Make sure completion code respects failure. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test read completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion success: a completed crypto op on a write
	 * triggers the actual writev to the base bdev, mocked here to succeed.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_writev_blocks, 0);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion failed. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_writev_blocks, -1);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test bogus type for this completion.
	 * NOTE(review): the spdk_bdev_writev_blocks mock is left set to -1 on
	 * exit here — presumably harmless since later tests re-set it, but
	 * worth confirming no other test depends on its cleared state.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}
900 
901 static void
902 test_supported_io(void)
903 {
904 	void *ctx = NULL;
905 	bool rc = true;
906 
907 	/* Make sure we always report false to WZ, we need the bdev layer to
908 	 * send real 0's so we can encrypt/decrypt them.
909 	 */
910 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
911 	CU_ASSERT(rc == false);
912 }
913 
/* Drive crypto_dev_poller() through its three interesting paths: a normal
 * single-op completion, a resubmission of a previously queued op, and a
 * batch where one op comes back failed. g_dequeue_mock / g_enqueue_mock
 * steer the stubbed DPDK enqueue/dequeue routines, and the asserts show the
 * poller returns the number of ops dequeued.
 */
static void
test_poller(void)
{
	int rc;
	struct rte_mbuf *src_mbufs[2];
	struct vbdev_crypto_op *op_to_resubmit;

	/* test regular 1 op to dequeue and complete */
	g_dequeue_mock = g_enqueue_mock = 1;
	spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 1);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	/* The poller recovers the owning bdev_io from the mbuf's userdata. */
	g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_io_ctx->cryop_cnt_remaining = 1;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	rc = crypto_dev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);

	/* We have nothing dequeued but have some to resubmit */
	g_dequeue_mock = 0;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* add an op to the queued list.
	 * NOTE(review): QUEUED_OP_OFFSET appears to locate the vbdev_crypto_op
	 * bookkeeping struct co-allocated after each crypto op — confirm
	 * against the module's op-pool layout if this ever changes.
	 */
	g_resubmit_test = true;
	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
	/* Sentinel pointer: g_resubmit_test makes the enqueue stub check it. */
	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
	op_to_resubmit->bdev_io = g_bdev_io;
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
			  op_to_resubmit,
			  link);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	rc = crypto_dev_poller(g_crypto_ch);
	g_resubmit_test = false;
	CU_ASSERT(rc == 0);
	/* The queued op must have been drained and resubmitted. */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* 2 to dequeue but 2nd one failed */
	g_dequeue_mock = g_enqueue_mock = 2;
	g_io_ctx->cryop_cnt_remaining = 2;
	spdk_mempool_get_bulk(g_mbuf_mp, (void **)&src_mbufs[0], 2);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	g_test_crypto_ops[0]->sym->m_src->userdata = g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_test_crypto_ops[0]->status =  RTE_CRYPTO_OP_STATUS_SUCCESS;
	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
	g_test_crypto_ops[1]->sym->m_src->userdata = g_bdev_io;
	g_test_crypto_ops[1]->sym->m_dst = NULL;
	/* One bad op status is enough to fail the whole bdev_io. */
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	rc = crypto_dev_poller(g_crypto_ch);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(rc == 2);
}
967 
968 /* Helper function for test_assign_device_qp() */
969 static void
970 _clear_device_qp_lists(void)
971 {
972 	struct device_qp *device_qp = NULL;
973 
974 	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
975 		device_qp = TAILQ_FIRST(&g_device_qp_qat);
976 		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
977 		free(device_qp);
978 
979 	}
980 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
981 	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
982 		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
983 		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
984 		free(device_qp);
985 	}
986 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
987 }
988 
989 /* Helper function for test_assign_device_qp() */
990 static void
991 _check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
992 		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
993 		       uint8_t current_index)
994 {
995 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
996 	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
997 	CU_ASSERT(g_next_qat_index == current_index);
998 }
999 
1000 static void
1001 test_assign_device_qp(void)
1002 {
1003 	struct device_qp *device_qp = NULL;
1004 	int i;
1005 
1006 	/* start with a known state, clear the device/qp lists */
1007 	_clear_device_qp_lists();
1008 
1009 	/* make sure that one AESNI_MB qp is found */
1010 	device_qp = calloc(1, sizeof(struct device_qp));
1011 	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
1012 	g_crypto_ch->device_qp = NULL;
1013 	g_crypto_bdev.drv_name = AESNI_MB;
1014 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1015 	CU_ASSERT(g_crypto_ch->device_qp != NULL);
1016 
1017 	/* QAT testing is more complex as the code under test load balances by
1018 	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
1019 	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
1020 	 * each with 2 qp so the "spread" betwen assignments is 32.
1021 	 */
1022 	g_qat_total_qp = 96;
1023 	for (i = 0; i < g_qat_total_qp; i++) {
1024 		device_qp = calloc(1, sizeof(struct device_qp));
1025 		device_qp->index = i;
1026 		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
1027 	}
1028 	g_crypto_ch->device_qp = NULL;
1029 	g_crypto_bdev.drv_name = QAT;
1030 
1031 	/* First assignment will assign to 0 and next at 32. */
1032 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1033 			       0, QAT_VF_SPREAD);
1034 
1035 	/* Second assignment will assign to 32 and next at 64. */
1036 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1037 			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
1038 
1039 	/* Third assignment will assign to 64 and next at 0. */
1040 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1041 			       QAT_VF_SPREAD * 2, 0);
1042 
1043 	/* Fourth assignment will assign to 1 and next at 33. */
1044 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1045 			       1, QAT_VF_SPREAD + 1);
1046 
1047 	_clear_device_qp_lists();
1048 }
1049 
1050 int
1051 main(int argc, char **argv)
1052 {
1053 	CU_pSuite	suite = NULL;
1054 	unsigned int	num_failures;
1055 
1056 	CU_set_error_action(CUEA_ABORT);
1057 	CU_initialize_registry();
1058 
1059 	suite = CU_add_suite("crypto", test_setup, test_cleanup);
1060 	CU_ADD_TEST(suite, test_error_paths);
1061 	CU_ADD_TEST(suite, test_simple_write);
1062 	CU_ADD_TEST(suite, test_simple_read);
1063 	CU_ADD_TEST(suite, test_large_rw);
1064 	CU_ADD_TEST(suite, test_dev_full);
1065 	CU_ADD_TEST(suite, test_crazy_rw);
1066 	CU_ADD_TEST(suite, test_passthru);
1067 	CU_ADD_TEST(suite, test_initdrivers);
1068 	CU_ADD_TEST(suite, test_crypto_op_complete);
1069 	CU_ADD_TEST(suite, test_supported_io);
1070 	CU_ADD_TEST(suite, test_reset);
1071 	CU_ADD_TEST(suite, test_poller);
1072 	CU_ADD_TEST(suite, test_assign_device_qp);
1073 
1074 	CU_basic_set_mode(CU_BRM_VERBOSE);
1075 	CU_basic_run_tests();
1076 	num_failures = CU_get_number_of_failures();
1077 	CU_cleanup_registry();
1078 	return num_failures;
1079 }
1080