xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision 927f1fd57bd004df581518466ec4c1b8083e5d23)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/test_env.c"
37 #include "spdk_internal/mock.h"
38 #include "thread/thread_internal.h"
39 #include "unit/lib/json_mock.c"
40 
41 #include <rte_crypto.h>
42 #include <rte_cryptodev.h>
43 
#define MAX_TEST_BLOCKS 8192
/* Static arrays of crypto ops the mocked alloc/enqueue/dequeue helpers hand
 * out to the vbdev; tests assert directly on their contents.
 */
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

/* Knobs controlling how many ops the burst mocks report as processed. */
uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
/* Return value for mock_rte_crypto_op_bulk_alloc(). */
unsigned ut_rte_crypto_op_bulk_alloc;
/* Return value for mock_rte_crypto_op_attach_sym_session(). */
int ut_rte_crypto_op_attach_sym_session = 0;
/* Selector values telling rte_cryptodev_info_get() which PMD to report. */
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_MLX5 2
#define MOCK_INFO_GET_1QP_BOGUS_PMD 3
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;
58 
/* Redirect rte_pktmbuf_free_bulk() to a mock that returns mbufs to the SPDK
 * mempool they came from; mock_rte_pktmbuf_alloc_bulk() below stores the pool
 * pointer in each mbuf, so m[0]->pool is the backing spdk_mempool.
 */
void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
	spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
}

/* Single-mbuf variant of the free mock above. */
void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
	spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
}

/* Every "rte_mempool" in this UT is really an spdk_mempool in disguise
 * (see the pool-create mocks below), so freeing one frees the SPDK pool.
 */
void rte_mempool_free(struct rte_mempool *mp)
{
	spdk_mempool_free((struct spdk_mempool *)mp);
}
77 
/* Mock mbuf bulk allocation: pull raw objects from the backing SPDK mempool,
 * reset them so they look like freshly allocated mbufs, and remember the pool
 * so the free mocks can return them later. Returns 0 on success, non-zero if
 * the pool is exhausted (or spdk_mempool_get_bulk is mocked to fail).
 */
int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
				unsigned count);
#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
				unsigned count)
{
	int rc;

	rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
	if (rc) {
		return rc;
	}
	for (unsigned i = 0; i < count; i++) {
		rte_pktmbuf_reset(mbufs[i]);
		/* Stash the pool so mock_rte_pktmbuf_free*() knows where to put it back. */
		mbufs[i]->pool = pool;
	}
	return rc;
}
96 
/* Mock of DPDK's session pool creator: back the "rte_mempool" with a plain
 * SPDK mempool whose elements are big enough for the session header plus its
 * private data. Returns NULL if the SPDK pool cannot be created.
 */
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
				      uint32_t elt_size, uint32_t cache_size,
				      uint16_t priv_size, int socket_id)
{
	uint32_t total_elt_size = elt_size + priv_size;
	struct spdk_mempool *pool;

	pool = spdk_mempool_create(name, nb_elts, total_elt_size, cache_size, socket_id);

	return (struct rte_mempool *)pool;
}
110 
/* Mock mbuf pool creator. Elements cover only the mbuf header plus priv_size;
 * data_room_size is intentionally ignored because these tests never touch an
 * mbuf's data room — buf_addr is pointed at test buffers directly.
 */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
			uint16_t priv_size, uint16_t data_room_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

/* Generic mempool creator mock; the ctor/obj-init callbacks are ignored since
 * the tests only need raw objects of the right size.
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}
137 
/* Crypto-op pool creator mock; MOCK_SET(rte_crypto_op_pool_create, ...) lets
 * tests force a NULL return to exercise init failure paths.
 */
DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
			  unsigned nb_elts, unsigned cache_size,
			  uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);

	tmp = spdk_mempool_create(name, nb_elts,
				  sizeof(struct rte_crypto_op) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;

}
155 
/* Those functions are defined as static inline in DPDK, so we can't
 * mock them straight away. We use defines to redirect them into
 * our custom functions.
 */
static bool g_resubmit_test = false;
#define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
/* Enqueue mock: record every submitted op in g_test_dev_full_ops so tests can
 * inspect them, then report g_enqueue_mock ops as accepted. Returning fewer
 * than nb_ops simulates a full device. During the resubmit test each op must
 * be the 0xDEADBEEF sentinel planted by the test.
 */
static inline uint16_t
mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < nb_ops; i++) {
		/* Use this empty (til now) array of pointers to store
		 * enqueued operations for assertion in dev_full test.
		 */
		g_test_dev_full_ops[i] = *ops++;
		if (g_resubmit_test == true) {
			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
		}
	}

	return g_enqueue_mock;
}

#define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
/* Dequeue mock: hand back the first g_dequeue_mock ops from g_test_crypto_ops.
 * NOTE(review): nb_ops is not honored as an upper bound here — callers/tests
 * are assumed to keep g_dequeue_mock <= nb_ops.
 */
static inline uint16_t
mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < g_dequeue_mock; i++) {
		*ops++ = g_test_crypto_ops[i];
	}

	return g_dequeue_mock;
}
198 
/* Instead of allocating real memory, assign the allocations to our
 * test array for assertion in tests.
 */
#define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
/* Always fills ops[] from g_test_crypto_ops, but reports only
 * ut_rte_crypto_op_bulk_alloc as allocated so tests can simulate a short
 * (or failed, when 0) allocation.
 */
static inline unsigned
mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
			      enum rte_crypto_op_type type,
			      struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	for (i = 0; i < nb_ops; i++) {
		*ops++ = g_test_crypto_ops[i];
	}
	return ut_rte_crypto_op_bulk_alloc;
}
215 
216 #define rte_mempool_put_bulk mock_rte_mempool_put_bulk
217 static __rte_always_inline void
218 mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
219 			  unsigned int n)
220 {
221 	return;
222 }
223 
#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
/* Session-attach mock; tests set ut_rte_crypto_op_attach_sym_session to a
 * negative value to drive the attach-failure path.
 */
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
{
	return ut_rte_crypto_op_attach_sym_session;
}
231 
#define rte_lcore_count mock_rte_lcore_count
/* Pretend exactly one lcore exists. */
static inline unsigned
mock_rte_lcore_count(void)
{
	const unsigned mocked_lcores = 1;

	return mocked_lcores;
}
238 
239 #include "bdev/crypto/vbdev_crypto.c"
240 
/* SPDK stubs: default do-nothing implementations for the bdev-layer entry
 * points that vbdev_crypto.c references but these tests don't exercise.
 */
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_unregister_by_name, int, (const char *bdev_name,
		struct spdk_bdev_module *module,
		spdk_bdev_unregister_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
267 
/* DPDK stubs: do-nothing cryptodev/vdev entry points plus the dynfield offset
 * used to stash the bdev_io pointer inside each mbuf.
 */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

/* Definition for the rte_cryptodevs symbol DPDK normally provides. */
struct rte_cryptodev *rte_cryptodevs;
291 
/* global vars and setup/cleanup functions used for all test functions */
struct spdk_bdev_io *g_bdev_io;		/* fake I/O, allocated in test_setup() */
struct crypto_bdev_io *g_io_ctx;	/* points into g_bdev_io->driver_ctx */
struct crypto_io_channel *g_crypto_ch;	/* ctx portion of g_io_ch */
struct spdk_io_channel *g_io_ch;	/* fake channel, allocated in test_setup() */
struct vbdev_dev g_device;
struct vbdev_crypto g_crypto_bdev;
struct vbdev_crypto_opts g_crypto_bdev_opts;
struct device_qp g_dev_qp;
301 
302 void
303 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
304 {
305 	dev_info->max_nb_queue_pairs = 1;
306 	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
307 		dev_info->driver_name = g_driver_names[0];
308 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
309 		dev_info->driver_name = g_driver_names[1];
310 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_MLX5) {
311 		dev_info->driver_name = g_driver_names[2];
312 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
313 		dev_info->driver_name = "junk";
314 	}
315 }
316 
/* Echo dev_id back as the private session size so tests can steer the value
 * simply by choosing the device id they pass in.
 */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	unsigned int mocked_size = dev_id;

	return mocked_size;
}
322 
/* Synchronously hand back a well-known sentinel aux buffer (0xDEADBEEF is
 * asserted on elsewhere in this file) instead of deferring the callback.
 */
void
spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
{
	cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
}

/* Immediately invoke the get_buf callback reporting success. */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}
334 
/* Mock these functions to call the callback and then return the value we require.
 * Pattern: the "success" flag handed to cb() is the negation of the mocked rc,
 * so MOCK_SET(fn, -1) makes both the call fail and the completion report failure.
 */
int ut_spdk_bdev_readv_blocks = 0;
bool ut_spdk_bdev_readv_blocks_mocked = false;
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}

int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}

int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}

int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}

int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}
391 
bool g_completion_called = false;
/* Completion mock: just record the final status on the io and note that
 * completion happened so tests can assert on both.
 */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}
399 
400 /* Global setup for all tests that share a bunch of preparation... */
401 static int
402 test_setup(void)
403 {
404 	int i, rc;
405 
406 	/* Prepare essential variables for test routines */
407 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
408 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
409 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
410 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
411 	g_crypto_ch = (struct crypto_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
412 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
413 	memset(&g_device, 0, sizeof(struct vbdev_dev));
414 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
415 	memset(&g_crypto_bdev_opts, 0, sizeof(struct vbdev_crypto_opts));
416 	g_dev_qp.device = &g_device;
417 	g_io_ctx->crypto_ch = g_crypto_ch;
418 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
419 	g_io_ctx->crypto_bdev->qp_desc_nr = CRYPTO_QP_DESCRIPTORS;
420 	g_io_ctx->crypto_bdev->opts = &g_crypto_bdev_opts;
421 	g_crypto_ch->device_qp = &g_dev_qp;
422 	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
423 	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
424 
425 	/* Allocate a real mbuf pool so we can test error paths */
426 	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS,
427 					    (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
428 					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
429 	/* Instead of allocating real rte mempools for these, it's easier and provides the
430 	 * same coverage just calloc them here.
431 	 */
432 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
433 		size_t size = IV_OFFSET + IV_LENGTH + QUEUED_OP_LENGTH;
434 		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size);
435 		if (rc != 0) {
436 			assert(false);
437 		}
438 		memset(g_test_crypto_ops[i], 0, IV_OFFSET + QUEUED_OP_LENGTH);
439 	}
440 	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;
441 
442 	return 0;
443 }
444 
/* Global teardown for all tests: release the pools, the per-block crypto op
 * allocations, and the fake bdev_io/channel created in test_setup().
 * Always returns 0 (CUnit suite-cleanup convention).
 */
static int
test_cleanup(void)
{
	int i;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		free(g_test_crypto_ops[i]);
	}
	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);
	return 0;
}
477 
/* Drive vbdev_crypto_submit_request() through its failure branches using a
 * single 512B I/O and the mock knobs declared at the top of this file.
 */
static void
test_error_paths(void)
{
	/* Single element block size write, just to test error paths
	 * in vbdev_crypto_submit_request().
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of spdk_mempool_get_bulk(), will result in success because it
	 * will get queued.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_mempool_get, NULL);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* same thing but switch to reads to test error path in _crypto_complete_io() */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Now with the read_blocks failing */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_bdev_readv_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_SET(spdk_bdev_readv_blocks, 0);
	MOCK_CLEAR(spdk_mempool_get);

	/* test failure of rte_crypto_op_bulk_alloc() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_bulk_alloc = 0;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of rte_crypto_op_attach_sym_session() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_attach_sym_session = -1;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_attach_sym_session = 0;
}
530 
/* Happy-path single-block write: one 512B iov becomes one crypto op whose
 * m_src points at the caller buffer and whose m_dst points at the aux buffer
 * allocated for the encrypted copy.
 */
static void
test_simple_write(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.offset_blocks = 0;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
	CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
	CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	/* The originating bdev_io is stashed in the mbuf dynfield for completion. */
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
}
565 
/* Happy-path single-block read: decrypt happens in place, so the single
 * crypto op has an m_src over the caller buffer and no m_dst.
 */
static void
test_simple_read(void)
{
	/* Single element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	/* Reads decrypt in place: no destination mbuf. */
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}
593 
/* Maximum-size I/O in one iov: verify the request is split into one crypto op
 * per 512B block, for both the read (in-place) and write (aux-buffer) paths.
 */
static void
test_large_rw(void)
{
	unsigned block_len = 512;
	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
	unsigned io_len = block_len * num_blocks;
	unsigned i;

	/* Multi block size read, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* Each op must cover exactly one block, in order, with no chaining. */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* Writes get a single aux buffer covering the whole I/O... */
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
		CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
		CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
		/* ...and one m_dst per block pointing into it. */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}
660 
/* Simulate a full device: the enqueue mock accepts only one of two ops, so the
 * second must land on the channel's queued_cry_ops list for later resubmission.
 * Then simulate a non-busy enqueue failure where all ops are rejected.
 */
static void
test_dev_full(void)
{
	struct vbdev_crypto_op *queued_op;
	struct rte_crypto_sym_op *sym_op;
	struct crypto_bdev_io *io_ctx;

	/* Two element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* NOTE(review): iovcnt is 1 although two iovs are populated below; the
	 * block-splitting walks iovs by remaining length so this works, but
	 * confirm it wasn't meant to be 2.
	 */
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 2;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	/* Alloc 2 ops but let the device accept only 1. */
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;

	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);

	/* make sure one got queued and confirm its values */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
	sym_op = queued_op->crypto_op->sym;
	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
	CU_ASSERT(queued_op->bdev_io == g_bdev_io);
	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[1]->sym->m_src);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
	CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
}
721 
/* Irregular iov layouts (iov sizes that don't match the block size): verify
 * the splitter still produces exactly one contiguous 512B crypto op per block.
 */
static void
test_crazy_rw(void)
{
	unsigned block_len = 512;
	int num_blocks = 4;
	int i;

	/* Multi block size read, single element, strange IOV makeup */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 3;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* NOTE(review): this assertion compares a value with itself and is
		 * always true — likely intended to check something else.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element strange IOV makeup */
	num_blocks = 8;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 4;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* NOTE(review): both comparisons below are tautologies (x == x) and
		 * always pass — likely intended to assert on different operands.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}
797 
/* Verify that IO types the crypto vbdev does not transform (UNMAP, FLUSH) are
 * passed through to the base bdev, with both success and failure of the
 * underlying call reflected in our IO status, and that WRITE_ZEROES is failed
 * outright since this module reports it as unsupported.
 */
static void
test_passthru(void)
{
	/* Make sure these follow our completion callback, test success & fail. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	MOCK_SET(spdk_bdev_unmap_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Same IO, but the base bdev call fails: our status must become FAILED. */
	MOCK_SET(spdk_bdev_unmap_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_unmap_blocks);

	/* Repeat the success/failure pair for flush. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
	MOCK_SET(spdk_bdev_flush_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* We should never get a WZ command, we report that we don't support it. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
}
825 
/* Placeholder: reset handling is not yet covered by unit tests. */
static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting w/o UT for this function for now and
	 * will follow up with something shortly.
	 */
}
835 
836 static void
837 init_cleanup(void)
838 {
839 	if (g_crypto_op_mp) {
840 		rte_mempool_free(g_crypto_op_mp);
841 		g_crypto_op_mp = NULL;
842 	}
843 	if (g_mbuf_mp) {
844 		rte_mempool_free(g_mbuf_mp);
845 		g_mbuf_mp = NULL;
846 	}
847 	if (g_session_mp) {
848 		rte_mempool_free(g_session_mp);
849 		g_session_mp = NULL;
850 	}
851 	if (g_session_mp_priv != NULL) {
852 		/* g_session_mp_priv may or may not be set depending on the DPDK version */
853 		rte_mempool_free(g_session_mp_priv);
854 		g_session_mp_priv = NULL;
855 	}
856 }
857 
858 static void
859 test_initdrivers(void)
860 {
861 	int rc;
862 	static struct rte_mempool *orig_mbuf_mp;
863 	static struct rte_mempool *orig_session_mp;
864 	static struct rte_mempool *orig_session_mp_priv;
865 
866 	/* These tests will alloc and free our g_mbuf_mp
867 	 * so save that off here and restore it after each test is over.
868 	 */
869 	orig_mbuf_mp = g_mbuf_mp;
870 	orig_session_mp = g_session_mp;
871 	orig_session_mp_priv = g_session_mp_priv;
872 
873 	g_session_mp_priv = NULL;
874 	g_session_mp = NULL;
875 	g_mbuf_mp = NULL;
876 
877 	/* No drivers available, not an error though */
878 	MOCK_SET(rte_cryptodev_count, 0);
879 	rc = vbdev_crypto_init_crypto_drivers();
880 	CU_ASSERT(rc == 0);
881 	CU_ASSERT(g_mbuf_mp == NULL);
882 	CU_ASSERT(g_session_mp == NULL);
883 	CU_ASSERT(g_session_mp_priv == NULL);
884 
885 	/* Can't create session pool. */
886 	MOCK_SET(rte_cryptodev_count, 2);
887 	MOCK_SET(spdk_mempool_create, NULL);
888 	rc = vbdev_crypto_init_crypto_drivers();
889 	CU_ASSERT(rc == -ENOMEM);
890 	CU_ASSERT(g_mbuf_mp == NULL);
891 	CU_ASSERT(g_session_mp == NULL);
892 	CU_ASSERT(g_session_mp_priv == NULL);
893 	MOCK_CLEAR(spdk_mempool_create);
894 
895 	/* Can't create op pool. */
896 	MOCK_SET(rte_crypto_op_pool_create, NULL);
897 	rc = vbdev_crypto_init_crypto_drivers();
898 	CU_ASSERT(rc == -ENOMEM);
899 	CU_ASSERT(g_mbuf_mp == NULL);
900 	CU_ASSERT(g_session_mp == NULL);
901 	CU_ASSERT(g_session_mp_priv == NULL);
902 	MOCK_CLEAR(rte_crypto_op_pool_create);
903 
904 	/* Check resources are not sufficient */
905 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
906 	rc = vbdev_crypto_init_crypto_drivers();
907 	CU_ASSERT(rc == -EINVAL);
908 
909 	/* Test crypto dev configure failure. */
910 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
911 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
912 	MOCK_SET(rte_cryptodev_configure, -1);
913 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
914 	rc = vbdev_crypto_init_crypto_drivers();
915 	MOCK_SET(rte_cryptodev_configure, 0);
916 	CU_ASSERT(g_mbuf_mp == NULL);
917 	CU_ASSERT(g_session_mp == NULL);
918 	CU_ASSERT(g_session_mp_priv == NULL);
919 	CU_ASSERT(rc == -EINVAL);
920 
921 	/* Test failure of qp setup. */
922 	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
923 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
924 	rc = vbdev_crypto_init_crypto_drivers();
925 	CU_ASSERT(rc == -EINVAL);
926 	CU_ASSERT(g_mbuf_mp == NULL);
927 	CU_ASSERT(g_session_mp == NULL);
928 	CU_ASSERT(g_session_mp_priv == NULL);
929 	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
930 
931 	/* Test failure of dev start. */
932 	MOCK_SET(rte_cryptodev_start, -1);
933 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
934 	rc = vbdev_crypto_init_crypto_drivers();
935 	CU_ASSERT(rc == -EINVAL);
936 	CU_ASSERT(g_mbuf_mp == NULL);
937 	CU_ASSERT(g_session_mp == NULL);
938 	CU_ASSERT(g_session_mp_priv == NULL);
939 	MOCK_SET(rte_cryptodev_start, 0);
940 
941 	/* Test bogus PMD */
942 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
943 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
944 	rc = vbdev_crypto_init_crypto_drivers();
945 	CU_ASSERT(g_mbuf_mp == NULL);
946 	CU_ASSERT(g_session_mp == NULL);
947 	CU_ASSERT(rc == -EINVAL);
948 
949 	/* Test happy path QAT. */
950 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
951 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
952 	rc = vbdev_crypto_init_crypto_drivers();
953 	CU_ASSERT(g_mbuf_mp != NULL);
954 	CU_ASSERT(g_session_mp != NULL);
955 	init_cleanup();
956 	CU_ASSERT(rc == 0);
957 
958 	/* Test happy path AESNI. */
959 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
960 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
961 	rc = vbdev_crypto_init_crypto_drivers();
962 	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
963 	init_cleanup();
964 	CU_ASSERT(rc == 0);
965 
966 	/* Test happy path MLX5. */
967 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
968 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_MLX5);
969 	rc = vbdev_crypto_init_crypto_drivers();
970 	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
971 	init_cleanup();
972 	CU_ASSERT(rc == 0);
973 
974 	/* Test failure of DPDK dev init. By now it is not longer an error
975 	 * situation for entire crypto framework. */
976 	MOCK_SET(rte_cryptodev_count, 2);
977 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
978 	MOCK_SET(rte_vdev_init, -1);
979 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
980 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
981 	rc = vbdev_crypto_init_crypto_drivers();
982 	CU_ASSERT(rc == 0);
983 	CU_ASSERT(g_mbuf_mp != NULL);
984 	CU_ASSERT(g_session_mp != NULL);
985 	CU_ASSERT(g_session_mp_priv != NULL);
986 	init_cleanup();
987 	MOCK_SET(rte_vdev_init, 0);
988 	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);
989 
990 	/* restore our initial values. */
991 	g_mbuf_mp = orig_mbuf_mp;
992 	g_session_mp = orig_session_mp;
993 	g_session_mp_priv = orig_session_mp_priv;
994 }
995 
/* Drive _crypto_operation_complete() through each IO-type branch and confirm
 * the final bdev IO status and that the completion callback fired
 * (tracked by the g_completion_called test flag).
 */
static void
test_crypto_op_complete(void)
{
	/* Make sure completion code respects failure. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test read completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion success: the completion path issues the
	 * encrypted write via spdk_bdev_writev_blocks(), mocked here to 0.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_writev_blocks, 0);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion failed: mocked writev returns -1 so the IO
	 * must be failed.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_writev_blocks, -1);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test bogus type for this completion: RESET is not handled here and
	 * must fail the IO.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}
1040 
1041 static void
1042 test_supported_io(void)
1043 {
1044 	void *ctx = NULL;
1045 	bool rc = true;
1046 
1047 	/* Make sure we always report false to WZ, we need the bdev layer to
1048 	 * send real 0's so we can encrypt/decrypt them.
1049 	 */
1050 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
1051 	CU_ASSERT(rc == false);
1052 }
1053 
/* Exercise crypto_dev_poller(): a normal single-op dequeue/complete, a pass
 * with nothing dequeued but a queued op to resubmit, and a two-op dequeue
 * where the second op failed (which must fail the whole bdev IO).
 */
static void
test_poller(void)
{
	int rc;
	struct rte_mbuf *src_mbufs[2];
	struct vbdev_crypto_op *op_to_resubmit;

	/* test regular 1 op to dequeue and complete */
	g_dequeue_mock = g_enqueue_mock = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	/* The dynfield carries the owning bdev_io so the poller can find it. */
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_io_ctx->cryop_cnt_remaining = 1;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	rc = crypto_dev_poller(g_crypto_ch);
	/* poller return is the number of ops it handled this pass */
	CU_ASSERT(rc == 1);

	/* We have nothing dequeued but have some to resubmit */
	g_dequeue_mock = 0;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* add an op to the queued list. */
	g_resubmit_test = true;
	/* queued-op bookkeeping lives at a fixed offset past the crypto op */
	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
	op_to_resubmit->bdev_io = g_bdev_io;
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
			  op_to_resubmit,
			  link);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	rc = crypto_dev_poller(g_crypto_ch);
	g_resubmit_test = false;
	/* nothing dequeued, and the queued op must have been drained */
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* 2 to dequeue but 2nd one failed */
	g_dequeue_mock = g_enqueue_mock = 2;
	g_io_ctx->cryop_cnt_remaining = 2;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_test_crypto_ops[0]->status =  RTE_CRYPTO_OP_STATUS_SUCCESS;
	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)g_bdev_io;
	g_test_crypto_ops[1]->sym->m_dst = NULL;
	/* second op did not complete successfully */
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	rc = crypto_dev_poller(g_crypto_ch);
	/* one bad op fails the whole IO, but both ops count as processed */
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(rc == 2);
}
1110 
1111 /* Helper function for test_assign_device_qp() */
1112 static void
1113 _clear_device_qp_lists(void)
1114 {
1115 	struct device_qp *device_qp = NULL;
1116 
1117 	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
1118 		device_qp = TAILQ_FIRST(&g_device_qp_qat);
1119 		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
1120 		free(device_qp);
1121 
1122 	}
1123 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
1124 	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
1125 		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
1126 		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
1127 		free(device_qp);
1128 	}
1129 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
1130 	while (!TAILQ_EMPTY(&g_device_qp_mlx5)) {
1131 		device_qp = TAILQ_FIRST(&g_device_qp_mlx5);
1132 		TAILQ_REMOVE(&g_device_qp_mlx5, device_qp, link);
1133 		free(device_qp);
1134 	}
1135 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_mlx5) == true);
1136 }
1137 
/* Helper function for test_assign_device_qp(): run one assignment and check
 * both the qp index that was selected and the next-index counter.
 *
 * NOTE(review): the crypto_bdev and crypto_ch parameters are accepted but
 * ignored — the body operates on the globals g_crypto_bdev/g_crypto_ch.
 * Callers currently pass exactly those globals so behavior is unaffected,
 * but the signature is misleading; consider routing through the parameters.
 *
 * expected_index: qp index the assignment should land on.
 * current_index:  value g_next_qat_index should hold after the assignment.
 */
static void
_check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
		       uint8_t current_index)
{
	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
	CU_ASSERT(g_next_qat_index == current_index);
}
1148 
1149 static void
1150 test_assign_device_qp(void)
1151 {
1152 	struct device_qp *device_qp = NULL;
1153 	int i;
1154 
1155 	/* start with a known state, clear the device/qp lists */
1156 	_clear_device_qp_lists();
1157 
1158 	/* make sure that one AESNI_MB qp is found */
1159 	device_qp = calloc(1, sizeof(struct device_qp));
1160 	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
1161 	g_crypto_ch->device_qp = NULL;
1162 	g_crypto_bdev.opts->drv_name = AESNI_MB;
1163 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1164 	CU_ASSERT(g_crypto_ch->device_qp != NULL);
1165 
1166 	/* QAT testing is more complex as the code under test load balances by
1167 	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
1168 	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
1169 	 * each with 2 qp so the "spread" between assignments is 32.
1170 	 */
1171 	g_qat_total_qp = 96;
1172 	for (i = 0; i < g_qat_total_qp; i++) {
1173 		device_qp = calloc(1, sizeof(struct device_qp));
1174 		device_qp->index = i;
1175 		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
1176 	}
1177 	g_crypto_ch->device_qp = NULL;
1178 	g_crypto_bdev.opts->drv_name = QAT;
1179 
1180 	/* First assignment will assign to 0 and next at 32. */
1181 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1182 			       0, QAT_VF_SPREAD);
1183 
1184 	/* Second assignment will assign to 32 and next at 64. */
1185 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1186 			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
1187 
1188 	/* Third assignment will assign to 64 and next at 0. */
1189 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1190 			       QAT_VF_SPREAD * 2, 0);
1191 
1192 	/* Fourth assignment will assign to 1 and next at 33. */
1193 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1194 			       1, QAT_VF_SPREAD + 1);
1195 
1196 	/* make sure that one MLX5 qp is found */
1197 	device_qp = calloc(1, sizeof(struct device_qp));
1198 	TAILQ_INSERT_TAIL(&g_device_qp_mlx5, device_qp, link);
1199 	g_crypto_ch->device_qp = NULL;
1200 	g_crypto_bdev.opts->drv_name = MLX5;
1201 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1202 	CU_ASSERT(g_crypto_ch->device_qp == device_qp);
1203 
1204 	_clear_device_qp_lists();
1205 }
1206 
1207 int
1208 main(int argc, char **argv)
1209 {
1210 	CU_pSuite	suite = NULL;
1211 	unsigned int	num_failures;
1212 
1213 	CU_set_error_action(CUEA_ABORT);
1214 	CU_initialize_registry();
1215 
1216 	suite = CU_add_suite("crypto", test_setup, test_cleanup);
1217 	CU_ADD_TEST(suite, test_error_paths);
1218 	CU_ADD_TEST(suite, test_simple_write);
1219 	CU_ADD_TEST(suite, test_simple_read);
1220 	CU_ADD_TEST(suite, test_large_rw);
1221 	CU_ADD_TEST(suite, test_dev_full);
1222 	CU_ADD_TEST(suite, test_crazy_rw);
1223 	CU_ADD_TEST(suite, test_passthru);
1224 	CU_ADD_TEST(suite, test_initdrivers);
1225 	CU_ADD_TEST(suite, test_crypto_op_complete);
1226 	CU_ADD_TEST(suite, test_supported_io);
1227 	CU_ADD_TEST(suite, test_reset);
1228 	CU_ADD_TEST(suite, test_poller);
1229 	CU_ADD_TEST(suite, test_assign_device_qp);
1230 
1231 	CU_basic_set_mode(CU_BRM_VERBOSE);
1232 	CU_basic_run_tests();
1233 	num_failures = CU_get_number_of_failures();
1234 	CU_cleanup_registry();
1235 	return num_failures;
1236 }
1237