xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision 488570ebd418ba07c9e69e65106dcc964f3bb41b)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk_cunit.h"
7 
8 #include "common/lib/test_env.c"
9 #include "spdk_internal/mock.h"
10 #include "thread/thread_internal.h"
11 #include "unit/lib/json_mock.c"
12 
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 
16 #define MAX_TEST_BLOCKS 8192
17 struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
18 struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];
19 
20 uint16_t g_dequeue_mock;
21 uint16_t g_enqueue_mock;
22 unsigned ut_rte_crypto_op_bulk_alloc;
23 int ut_rte_crypto_op_attach_sym_session = 0;
24 #define MOCK_INFO_GET_1QP_AESNI 0
25 #define MOCK_INFO_GET_1QP_QAT 1
26 #define MOCK_INFO_GET_1QP_MLX5 2
27 #define MOCK_INFO_GET_1QP_BOGUS_PMD 3
28 int ut_rte_cryptodev_info_get = 0;
29 bool ut_rte_cryptodev_info_get_mocked = false;
30 
/* rte_pktmbuf_free_bulk() is redirected here via the define below.
 * Return the batch to the SPDK mempool that backs the mocked mbuf pool.
 */
void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
	/* All mbufs in the batch are assumed to come from m[0]'s pool. */
	spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
}
37 
/* rte_pktmbuf_free() is redirected here via the define below.
 * Return a single mbuf to the SPDK mempool it was drawn from.
 */
void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
	spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
}
44 
/* Real-symbol override: the mocked *_pool_create() functions below hand out
 * SPDK mempools cast to rte_mempool, so destruction must go back through
 * spdk_mempool_free().
 */
void rte_mempool_free(struct rte_mempool *mp)
{
	spdk_mempool_free((struct spdk_mempool *)mp);
}
49 
50 int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
51 				unsigned count);
52 #define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
53 int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
54 				unsigned count)
55 {
56 	int rc;
57 
58 	rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
59 	if (rc) {
60 		return rc;
61 	}
62 	for (unsigned i = 0; i < count; i++) {
63 		rte_pktmbuf_reset(mbufs[i]);
64 		mbufs[i]->pool = pool;
65 	}
66 	return rc;
67 }
68 
69 struct rte_mempool *
70 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
71 				      uint32_t elt_size, uint32_t cache_size,
72 				      uint16_t priv_size, int socket_id)
73 {
74 	struct spdk_mempool *tmp;
75 
76 	tmp = spdk_mempool_create(name, nb_elts, elt_size + priv_size,
77 				  cache_size, socket_id);
78 
79 	return (struct rte_mempool *)tmp;
80 
81 }
82 
83 struct rte_mempool *
84 rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
85 			uint16_t priv_size, uint16_t data_room_size, int socket_id)
86 {
87 	struct spdk_mempool *tmp;
88 
89 	tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
90 				  cache_size, socket_id);
91 
92 	return (struct rte_mempool *)tmp;
93 }
94 
95 struct rte_mempool *
96 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
97 		   unsigned cache_size, unsigned private_data_size,
98 		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
99 		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
100 		   int socket_id, unsigned flags)
101 {
102 	struct spdk_mempool *tmp;
103 
104 	tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
105 				  cache_size, socket_id);
106 
107 	return (struct rte_mempool *)tmp;
108 }
109 
110 DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
111 struct rte_mempool *
112 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
113 			  unsigned nb_elts, unsigned cache_size,
114 			  uint16_t priv_size, int socket_id)
115 {
116 	struct spdk_mempool *tmp;
117 
118 	HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);
119 
120 	tmp = spdk_mempool_create(name, nb_elts,
121 				  sizeof(struct rte_crypto_op) + priv_size,
122 				  cache_size, socket_id);
123 
124 	return (struct rte_mempool *)tmp;
125 
126 }
127 
128 /* Those functions are defined as static inline in DPDK, so we can't
129  * mock them straight away. We use defines to redirect them into
130  * our custom functions.
131  */
132 static bool g_resubmit_test = false;
133 #define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
134 static inline uint16_t
135 mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
136 				 struct rte_crypto_op **ops, uint16_t nb_ops)
137 {
138 	int i;
139 
140 	CU_ASSERT(nb_ops > 0);
141 
142 	for (i = 0; i < nb_ops; i++) {
143 		/* Use this empty (til now) array of pointers to store
144 		 * enqueued operations for assertion in dev_full test.
145 		 */
146 		g_test_dev_full_ops[i] = *ops++;
147 		if (g_resubmit_test == true) {
148 			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
149 		}
150 	}
151 
152 	return g_enqueue_mock;
153 }
154 
155 #define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
156 static inline uint16_t
157 mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
158 				 struct rte_crypto_op **ops, uint16_t nb_ops)
159 {
160 	int i;
161 
162 	CU_ASSERT(nb_ops > 0);
163 
164 	for (i = 0; i < g_dequeue_mock; i++) {
165 		*ops++ = g_test_crypto_ops[i];
166 	}
167 
168 	return g_dequeue_mock;
169 }
170 
171 /* Instead of allocating real memory, assign the allocations to our
172  * test array for assertion in tests.
173  */
174 #define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
175 static inline unsigned
176 mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
177 			      enum rte_crypto_op_type type,
178 			      struct rte_crypto_op **ops, uint16_t nb_ops)
179 {
180 	int i;
181 
182 	for (i = 0; i < nb_ops; i++) {
183 		*ops++ = g_test_crypto_ops[i];
184 	}
185 	return ut_rte_crypto_op_bulk_alloc;
186 }
187 
#define rte_mempool_put_bulk mock_rte_mempool_put_bulk
/* No-op: the crypto ops used in these tests are plain posix_memalign'd
 * buffers (see test_setup()), so there is no real mempool to return to.
 */
static __rte_always_inline void
mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
			  unsigned int n)
{
	return;
}
195 
#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
/* Return the test-controlled value; tests set it non-zero to exercise the
 * session-attach failure path.
 */
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
{
	return ut_rte_crypto_op_attach_sym_session;
}
203 
#define rte_lcore_count mock_rte_lcore_count
/* Pretend exactly one lcore is available. */
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}
210 
211 #include "bdev/crypto/vbdev_crypto.c"
212 
/* SPDK stubs: no-op/fixed-return implementations of the bdev-layer symbols
 * referenced by vbdev_crypto.c that the tests do not need to observe.
 */
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_unregister_by_name, int, (const char *bdev_name,
		struct spdk_bdev_module *module,
		spdk_bdev_unregister_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);

/* DPDK stubs: cryptodev/vdev symbols stubbed to succeed; the dynfield
 * registration returns a fixed offset inside the mbuf used to stash the
 * owning bdev_io pointer.
 */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

/* Referenced by vbdev_crypto.c; never dereferenced meaningfully here. */
struct rte_cryptodev *rte_cryptodevs;
263 
/* global vars and setup/cleanup functions used for all test functions */
struct spdk_bdev_io *g_bdev_io;			/* shared bdev_io, allocated in test_setup() */
struct crypto_bdev_io *g_io_ctx;		/* == g_bdev_io->driver_ctx */
struct crypto_io_channel *g_crypto_ch;		/* ctx region of g_io_ch */
struct spdk_io_channel *g_io_ch;		/* fake io channel, allocated in test_setup() */
struct vbdev_dev g_device;
struct vbdev_crypto g_crypto_bdev;
struct vbdev_crypto_opts g_crypto_bdev_opts;
struct device_qp g_dev_qp;
274 void
275 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
276 {
277 	dev_info->max_nb_queue_pairs = 1;
278 	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
279 		dev_info->driver_name = g_driver_names[0];
280 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
281 		dev_info->driver_name = g_driver_names[1];
282 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_MLX5) {
283 		dev_info->driver_name = g_driver_names[2];
284 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
285 		dev_info->driver_name = "junk";
286 	}
287 }
288 
/* Mocked: echoes dev_id back as the "private session size" so tests can
 * make the reported size vary per device.
 */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	return (unsigned int)dev_id;
}
294 
/* Mocked: immediately invoke the callback with a marker aux buffer instead
 * of allocating one.
 */
void
spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
{
	cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
}
300 
/* Mocked: immediately invoke the callback reporting buffer-get success. */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}
306 
/* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;	/* mocked rc; non-zero also makes the cb report failure */
bool ut_spdk_bdev_readv_blocks_mocked = false;
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/* Complete synchronously: success flag is true iff the mocked rc is 0. */
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}
319 
int ut_spdk_bdev_writev_blocks = 0;	/* mocked rc; non-zero also makes the cb report failure */
bool ut_spdk_bdev_writev_blocks_mocked = false;
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/* Complete synchronously: success flag is true iff the mocked rc is 0. */
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}
331 
int ut_spdk_bdev_unmap_blocks = 0;	/* mocked rc; non-zero also makes the cb report failure */
bool ut_spdk_bdev_unmap_blocks_mocked = false;
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/* Complete synchronously: success flag is true iff the mocked rc is 0. */
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}
342 
int ut_spdk_bdev_flush_blocks = 0;	/* mocked rc; non-zero also makes the cb report failure */
bool ut_spdk_bdev_flush_blocks_mocked = false;
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	/* Complete synchronously: success flag is true iff the mocked rc is 0. */
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}
353 
int ut_spdk_bdev_reset = 0;		/* mocked rc; non-zero also makes the cb report failure */
bool ut_spdk_bdev_reset_mocked = false;
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	/* Complete synchronously: success flag is true iff the mocked rc is 0. */
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}
363 
bool g_completion_called = false;	/* lets tests detect that completion ran */
/* Mocked: record the completion status on the io and note that completion
 * happened instead of running the real bdev completion machinery.
 */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}
371 
372 /* Global setup for all tests that share a bunch of preparation... */
373 static int
374 test_setup(void)
375 {
376 	int i, rc;
377 
378 	/* Prepare essential variables for test routines */
379 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
380 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
381 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
382 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
383 	g_crypto_ch = (struct crypto_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
384 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
385 	memset(&g_device, 0, sizeof(struct vbdev_dev));
386 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
387 	memset(&g_crypto_bdev_opts, 0, sizeof(struct vbdev_crypto_opts));
388 	g_dev_qp.device = &g_device;
389 	g_io_ctx->crypto_ch = g_crypto_ch;
390 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
391 	g_io_ctx->crypto_bdev->qp_desc_nr = CRYPTO_QP_DESCRIPTORS;
392 	g_io_ctx->crypto_bdev->opts = &g_crypto_bdev_opts;
393 	g_crypto_ch->device_qp = &g_dev_qp;
394 	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
395 	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
396 
397 	/* Allocate a real mbuf pool so we can test error paths */
398 	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS,
399 					    (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
400 					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
401 	/* Instead of allocating real rte mempools for these, it's easier and provides the
402 	 * same coverage just calloc them here.
403 	 */
404 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
405 		size_t size = IV_OFFSET + IV_LENGTH + QUEUED_OP_LENGTH;
406 		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size);
407 		if (rc != 0) {
408 			assert(false);
409 		}
410 		memset(g_test_crypto_ops[i], 0, IV_OFFSET + QUEUED_OP_LENGTH);
411 	}
412 	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;
413 
414 	return 0;
415 }
416 
/* Global teardown for all tests */
static int
test_cleanup(void)
{
	int i;

	/* Release the mempools created by the driver / test_setup(). */
	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	/* Free the posix_memalign'd fake crypto ops and the shared IO objects. */
	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		free(g_test_crypto_ops[i]);
	}
	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);
	return 0;
}
449 
static void
test_error_paths(void)
{
	/* Single element block size write, just to test error paths
	 * in vbdev_crypto_submit_request().
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of spdk_mempool_get_bulk(), will result in success because it
	 * will get queued.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_mempool_get, NULL);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* same thing but switch to reads to test error path in _crypto_complete_io() */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* Put the IO on the pending list so the failure path has one to remove. */
	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Now with the read_blocks failing */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_bdev_readv_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Restore the mocks so later cases exercise only their own failure. */
	MOCK_SET(spdk_bdev_readv_blocks, 0);
	MOCK_CLEAR(spdk_mempool_get);

	/* test failure of rte_crypto_op_bulk_alloc() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_bulk_alloc = 0;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of rte_crypto_op_attach_sym_session() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_attach_sym_session = -1;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_attach_sym_session = 0;
}
502 
static void
test_simple_write(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.offset_blocks = 0;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	/* The write path records the aux buffer and IO geometry in the io ctx. */
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
	CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
	CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
	/* One crypto op: source is the caller's buffer, dest is the aux buffer. */
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	/* The owning bdev_io is stashed in the mbuf dynfield. */
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
}
537 
static void
test_simple_read(void)
{
	/* Single element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	/* Reads decrypt in place: source is the caller's buffer, no dest mbuf. */
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	/* The owning bdev_io is stashed in the mbuf dynfield. */
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}
565 
static void
test_large_rw(void)
{
	unsigned block_len = 512;
	/* Largest IO the vbdev accepts, split into one crypto op per block. */
	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
	unsigned io_len = block_len * num_blocks;
	unsigned i;

	/* Multi block size read, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* Each op must cover one block-sized slice of the single iov. */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* Writes additionally carry a dest mbuf into the aux buffer. */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
		CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
		CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}
632 
static void
test_dev_full(void)
{
	struct vbdev_crypto_op *queued_op;
	struct rte_crypto_sym_op *sym_op;
	struct crypto_bdev_io *io_ctx;

	/* Two element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* NOTE(review): iovcnt is 1 although iovs[0] and iovs[1] are both
	 * populated - presumably the submit path iterates per block rather
	 * than honoring iovcnt here; confirm against vbdev_crypto.c.
	 */
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 2;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	/* Device accepts only 1 of the 2 ops -> the second must be queued. */
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;

	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
	/* The first op went to the "device" and describes iovs[0]. */
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);

	/* make sure one got queued and confirm its values */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
	sym_op = queued_op->crypto_op->sym;
	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
	CU_ASSERT(queued_op->bdev_io == g_bdev_io);
	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[1]->sym->m_src);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
	CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
}
693 
static void
test_crazy_rw(void)
{
	unsigned block_len = 512;
	int num_blocks = 4;
	int i;

	/* Multi block size read, single element, strange IOV makeup */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 3;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	/* 3 iovs (512 + 1024 + 512 bytes) covering 4 contiguous 512B blocks. */
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* NOTE(review): this assert compares m_src with itself and is
		 * always true - likely meant to check something else.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element strange IOV makeup */
	num_blocks = 8;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 4;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	/* 4 iovs (2048 + 512 + 512 + 1024 bytes) covering 8 contiguous blocks. */
	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* NOTE(review): the next two asserts compare a field with
		 * itself and are always true - likely meant to check
		 * something else.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}
769 
static void
test_passthru(void)
{
	/* Make sure these follow our completion callback, test success & fail. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	MOCK_SET(spdk_bdev_unmap_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_unmap_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_unmap_blocks);

	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
	MOCK_SET(spdk_bdev_flush_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* We should never get a WZ command, we report that we don't support it. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
}
797 
/* Intentionally empty: reset handling is not yet unit tested. */
static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting w/o UT for this function for now and
	 * will follow up with something shortly.
	 */
}
807 
808 static void
809 init_cleanup(void)
810 {
811 	if (g_crypto_op_mp) {
812 		rte_mempool_free(g_crypto_op_mp);
813 		g_crypto_op_mp = NULL;
814 	}
815 	if (g_mbuf_mp) {
816 		rte_mempool_free(g_mbuf_mp);
817 		g_mbuf_mp = NULL;
818 	}
819 	if (g_session_mp) {
820 		rte_mempool_free(g_session_mp);
821 		g_session_mp = NULL;
822 	}
823 	if (g_session_mp_priv != NULL) {
824 		/* g_session_mp_priv may or may not be set depending on the DPDK version */
825 		rte_mempool_free(g_session_mp_priv);
826 		g_session_mp_priv = NULL;
827 	}
828 }
829 
830 static void
831 test_initdrivers(void)
832 {
833 	int rc;
834 	static struct rte_mempool *orig_mbuf_mp;
835 	static struct rte_mempool *orig_session_mp;
836 	static struct rte_mempool *orig_session_mp_priv;
837 
838 	/* These tests will alloc and free our g_mbuf_mp
839 	 * so save that off here and restore it after each test is over.
840 	 */
841 	orig_mbuf_mp = g_mbuf_mp;
842 	orig_session_mp = g_session_mp;
843 	orig_session_mp_priv = g_session_mp_priv;
844 
845 	g_session_mp_priv = NULL;
846 	g_session_mp = NULL;
847 	g_mbuf_mp = NULL;
848 
849 	/* No drivers available, not an error though */
850 	MOCK_SET(rte_cryptodev_count, 0);
851 	rc = vbdev_crypto_init_crypto_drivers();
852 	CU_ASSERT(rc == 0);
853 	CU_ASSERT(g_mbuf_mp == NULL);
854 	CU_ASSERT(g_session_mp == NULL);
855 	CU_ASSERT(g_session_mp_priv == NULL);
856 
857 	/* Can't create session pool. */
858 	MOCK_SET(rte_cryptodev_count, 2);
859 	MOCK_SET(spdk_mempool_create, NULL);
860 	rc = vbdev_crypto_init_crypto_drivers();
861 	CU_ASSERT(rc == -ENOMEM);
862 	CU_ASSERT(g_mbuf_mp == NULL);
863 	CU_ASSERT(g_session_mp == NULL);
864 	CU_ASSERT(g_session_mp_priv == NULL);
865 	MOCK_CLEAR(spdk_mempool_create);
866 
867 	/* Can't create op pool. */
868 	MOCK_SET(rte_crypto_op_pool_create, NULL);
869 	rc = vbdev_crypto_init_crypto_drivers();
870 	CU_ASSERT(rc == -ENOMEM);
871 	CU_ASSERT(g_mbuf_mp == NULL);
872 	CU_ASSERT(g_session_mp == NULL);
873 	CU_ASSERT(g_session_mp_priv == NULL);
874 	MOCK_CLEAR(rte_crypto_op_pool_create);
875 
876 	/* Check resources are not sufficient */
877 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
878 	rc = vbdev_crypto_init_crypto_drivers();
879 	CU_ASSERT(rc == -EINVAL);
880 
881 	/* Test crypto dev configure failure. */
882 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
883 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
884 	MOCK_SET(rte_cryptodev_configure, -1);
885 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
886 	rc = vbdev_crypto_init_crypto_drivers();
887 	MOCK_SET(rte_cryptodev_configure, 0);
888 	CU_ASSERT(g_mbuf_mp == NULL);
889 	CU_ASSERT(g_session_mp == NULL);
890 	CU_ASSERT(g_session_mp_priv == NULL);
891 	CU_ASSERT(rc == -EINVAL);
892 
893 	/* Test failure of qp setup. */
894 	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
895 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
896 	rc = vbdev_crypto_init_crypto_drivers();
897 	CU_ASSERT(rc == -EINVAL);
898 	CU_ASSERT(g_mbuf_mp == NULL);
899 	CU_ASSERT(g_session_mp == NULL);
900 	CU_ASSERT(g_session_mp_priv == NULL);
901 	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
902 
903 	/* Test failure of dev start. */
904 	MOCK_SET(rte_cryptodev_start, -1);
905 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
906 	rc = vbdev_crypto_init_crypto_drivers();
907 	CU_ASSERT(rc == -EINVAL);
908 	CU_ASSERT(g_mbuf_mp == NULL);
909 	CU_ASSERT(g_session_mp == NULL);
910 	CU_ASSERT(g_session_mp_priv == NULL);
911 	MOCK_SET(rte_cryptodev_start, 0);
912 
913 	/* Test bogus PMD */
914 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
915 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
916 	rc = vbdev_crypto_init_crypto_drivers();
917 	CU_ASSERT(g_mbuf_mp == NULL);
918 	CU_ASSERT(g_session_mp == NULL);
919 	CU_ASSERT(rc == -EINVAL);
920 
921 	/* Test happy path QAT. */
922 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
923 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
924 	rc = vbdev_crypto_init_crypto_drivers();
925 	CU_ASSERT(g_mbuf_mp != NULL);
926 	CU_ASSERT(g_session_mp != NULL);
927 	init_cleanup();
928 	CU_ASSERT(rc == 0);
929 
930 	/* Test happy path AESNI. */
931 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
932 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
933 	rc = vbdev_crypto_init_crypto_drivers();
934 	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
935 	init_cleanup();
936 	CU_ASSERT(rc == 0);
937 
938 	/* Test happy path MLX5. */
939 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
940 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_MLX5);
941 	rc = vbdev_crypto_init_crypto_drivers();
942 	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
943 	init_cleanup();
944 	CU_ASSERT(rc == 0);
945 
946 	/* Test failure of DPDK dev init. By now it is not longer an error
947 	 * situation for entire crypto framework. */
948 	MOCK_SET(rte_cryptodev_count, 2);
949 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
950 	MOCK_SET(rte_vdev_init, -1);
951 	MOCK_CLEARED_ASSERT(spdk_mempool_create);
952 	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
953 	rc = vbdev_crypto_init_crypto_drivers();
954 	CU_ASSERT(rc == 0);
955 	CU_ASSERT(g_mbuf_mp != NULL);
956 	CU_ASSERT(g_session_mp != NULL);
957 	CU_ASSERT(g_session_mp_priv != NULL);
958 	init_cleanup();
959 	MOCK_SET(rte_vdev_init, 0);
960 	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);
961 
962 	/* restore our initial values. */
963 	g_mbuf_mp = orig_mbuf_mp;
964 	g_session_mp = orig_session_mp;
965 	g_session_mp_priv = orig_session_mp_priv;
966 }
967 
968 static void
969 test_crypto_op_complete(void)
970 {
971 	/* Make sure completion code respects failure. */
972 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
973 	g_completion_called = false;
974 	_crypto_operation_complete(g_bdev_io);
975 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
976 	CU_ASSERT(g_completion_called == true);
977 
978 	/* Test read completion. */
979 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
980 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
981 	g_completion_called = false;
982 	_crypto_operation_complete(g_bdev_io);
983 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
984 	CU_ASSERT(g_completion_called == true);
985 
986 	/* Test write completion success. */
987 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
988 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
989 	g_completion_called = false;
990 	MOCK_SET(spdk_bdev_writev_blocks, 0);
991 	_crypto_operation_complete(g_bdev_io);
992 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
993 	CU_ASSERT(g_completion_called == true);
994 
995 	/* Test write completion failed. */
996 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
997 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
998 	g_completion_called = false;
999 	MOCK_SET(spdk_bdev_writev_blocks, -1);
1000 	_crypto_operation_complete(g_bdev_io);
1001 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1002 	CU_ASSERT(g_completion_called == true);
1003 
1004 	/* Test bogus type for this completion. */
1005 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1006 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
1007 	g_completion_called = false;
1008 	_crypto_operation_complete(g_bdev_io);
1009 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1010 	CU_ASSERT(g_completion_called == true);
1011 }
1012 
1013 static void
1014 test_supported_io(void)
1015 {
1016 	void *ctx = NULL;
1017 	bool rc = true;
1018 
1019 	/* Make sure we always report false to WZ, we need the bdev layer to
1020 	 * send real 0's so we can encrypt/decrypt them.
1021 	 */
1022 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
1023 	CU_ASSERT(rc == false);
1024 }
1025 
/* Exercise crypto_dev_poller(): a normal single-op dequeue/complete, the
 * resubmission path for ops queued while the device was full, and a partial
 * failure where only one of two dequeued ops processed successfully.
 */
static void
test_poller(void)
{
	int rc;
	struct rte_mbuf *src_mbufs[2];
	struct vbdev_crypto_op *op_to_resubmit;

	/* test regular 1 op to dequeue and complete */
	g_dequeue_mock = g_enqueue_mock = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	/* The mbuf dynfield carries the owning bdev_io so the poller can
	 * complete it on dequeue.
	 */
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_io_ctx->cryop_cnt_remaining = 1;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	rc = crypto_dev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);

	/* We have nothing dequeued but have some to resubmit */
	g_dequeue_mock = 0;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* add an op to the queued list. */
	g_resubmit_test = true;
	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
	/* Bogus pointer is fine here: the enqueue mock only checks it is passed
	 * through when g_resubmit_test is set.
	 */
	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
	op_to_resubmit->bdev_io = g_bdev_io;
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
			  op_to_resubmit,
			  link);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	rc = crypto_dev_poller(g_crypto_ch);
	g_resubmit_test = false;
	CU_ASSERT(rc == 0);
	/* Poller should have drained the queued-op list by resubmitting. */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* 2 to dequeue but 2nd one failed */
	g_dequeue_mock = g_enqueue_mock = 2;
	g_io_ctx->cryop_cnt_remaining = 2;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_test_crypto_ops[0]->status =  RTE_CRYPTO_OP_STATUS_SUCCESS;
	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)g_bdev_io;
	g_test_crypto_ops[1]->sym->m_dst = NULL;
	/* NOT_PROCESSED on op[1] must fail the whole bdev_io. */
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	rc = crypto_dev_poller(g_crypto_ch);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(rc == 2);
}
1082 
1083 /* Helper function for test_assign_device_qp() */
1084 static void
1085 _clear_device_qp_lists(void)
1086 {
1087 	struct device_qp *device_qp = NULL;
1088 
1089 	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
1090 		device_qp = TAILQ_FIRST(&g_device_qp_qat);
1091 		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
1092 		free(device_qp);
1093 
1094 	}
1095 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
1096 	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
1097 		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
1098 		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
1099 		free(device_qp);
1100 	}
1101 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
1102 	while (!TAILQ_EMPTY(&g_device_qp_mlx5)) {
1103 		device_qp = TAILQ_FIRST(&g_device_qp_mlx5);
1104 		TAILQ_REMOVE(&g_device_qp_mlx5, device_qp, link);
1105 		free(device_qp);
1106 	}
1107 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_mlx5) == true);
1108 }
1109 
1110 /* Helper function for test_assign_device_qp() */
1111 static void
1112 _check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
1113 		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
1114 		       uint8_t current_index)
1115 {
1116 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1117 	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
1118 	CU_ASSERT(g_next_qat_index == current_index);
1119 }
1120 
1121 static void
1122 test_assign_device_qp(void)
1123 {
1124 	struct device_qp *device_qp = NULL;
1125 	int i;
1126 
1127 	/* start with a known state, clear the device/qp lists */
1128 	_clear_device_qp_lists();
1129 
1130 	/* make sure that one AESNI_MB qp is found */
1131 	device_qp = calloc(1, sizeof(struct device_qp));
1132 	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
1133 	g_crypto_ch->device_qp = NULL;
1134 	g_crypto_bdev.opts->drv_name = AESNI_MB;
1135 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1136 	CU_ASSERT(g_crypto_ch->device_qp != NULL);
1137 
1138 	/* QAT testing is more complex as the code under test load balances by
1139 	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
1140 	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
1141 	 * each with 2 qp so the "spread" between assignments is 32.
1142 	 */
1143 	g_qat_total_qp = 96;
1144 	for (i = 0; i < g_qat_total_qp; i++) {
1145 		device_qp = calloc(1, sizeof(struct device_qp));
1146 		device_qp->index = i;
1147 		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
1148 	}
1149 	g_crypto_ch->device_qp = NULL;
1150 	g_crypto_bdev.opts->drv_name = QAT;
1151 
1152 	/* First assignment will assign to 0 and next at 32. */
1153 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1154 			       0, QAT_VF_SPREAD);
1155 
1156 	/* Second assignment will assign to 32 and next at 64. */
1157 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1158 			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
1159 
1160 	/* Third assignment will assign to 64 and next at 0. */
1161 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1162 			       QAT_VF_SPREAD * 2, 0);
1163 
1164 	/* Fourth assignment will assign to 1 and next at 33. */
1165 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1166 			       1, QAT_VF_SPREAD + 1);
1167 
1168 	/* make sure that one MLX5 qp is found */
1169 	device_qp = calloc(1, sizeof(struct device_qp));
1170 	TAILQ_INSERT_TAIL(&g_device_qp_mlx5, device_qp, link);
1171 	g_crypto_ch->device_qp = NULL;
1172 	g_crypto_bdev.opts->drv_name = MLX5;
1173 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1174 	CU_ASSERT(g_crypto_ch->device_qp == device_qp);
1175 
1176 	_clear_device_qp_lists();
1177 }
1178 
/* Test runner entry point: registers the crypto suite (with per-suite
 * setup/teardown) and returns the number of CUnit failures as the exit code.
 */
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	/* Abort immediately on framework errors (e.g. registration failure). */
	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("crypto", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_error_paths);
	CU_ADD_TEST(suite, test_simple_write);
	CU_ADD_TEST(suite, test_simple_read);
	CU_ADD_TEST(suite, test_large_rw);
	CU_ADD_TEST(suite, test_dev_full);
	CU_ADD_TEST(suite, test_crazy_rw);
	CU_ADD_TEST(suite, test_passthru);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_crypto_op_complete);
	CU_ADD_TEST(suite, test_supported_io);
	CU_ADD_TEST(suite, test_reset);
	CU_ADD_TEST(suite, test_poller);
	CU_ADD_TEST(suite, test_assign_device_qp);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
1209