/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_version.h>

#define MAX_TEST_BLOCKS 8192
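/* Fake crypto op arrays handed out by the mocked alloc/enqueue/dequeue paths
 * below, so tests can assert on the ops the vbdev built without a real
 * cryptodev.
 */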
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
unsigned ut_rte_crypto_op_bulk_alloc;
int ut_rte_crypto_op_attach_sym_session = 0;
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_MLX5 2
#define MOCK_INFO_GET_1QP_BOGUS_PMD 3
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;
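/* The g_*_mock and ut_* globals above control what the DPDK mocks below
 * return, letting each test dial in how many ops get "enqueued", "dequeued"
 * or "allocated" on a given call.
 */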

void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void
mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
	spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
}

void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void
mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
	spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
}

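/* Route DPDK mempool frees to the SPDK mempool that actually backs them in
 * these tests.
 */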
void
rte_mempool_free(struct rte_mempool *mp)
{
	spdk_mempool_free((struct spdk_mempool *)mp);
}

int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
				unsigned count);
#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
int
mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
			    unsigned count)
{
	int rc;

	rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
	if (rc) {
		return rc;
	}
	for (unsigned i = 0; i < count; i++) {
		rte_pktmbuf_reset(mbufs[i]);
		mbufs[i]->pool = pool;
	}
	return rc;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
				      uint32_t elt_size, uint32_t cache_size,
				      uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, nb_elts, elt_size + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
			uint16_t priv_size, uint16_t data_room_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
			  unsigned nb_elts, unsigned cache_size,
			  uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);

	tmp = spdk_mempool_create(name, nb_elts,
				  sizeof(struct rte_crypto_op) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

/* These functions are defined as static inline in DPDK, so we can't mock
 * them directly. Instead, we use defines to redirect them to our own
 * implementations.
 */
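/* With the define below in effect, a call such as
 *     rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 * in the included vbdev_crypto.c compiles into a call to
 *     mock_rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 */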
static bool g_resubmit_test = false;
#define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
static inline uint16_t
mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < nb_ops; i++) {
		/* Use this so-far-empty array of pointers to store the
		 * enqueued operations so the dev_full test can assert on them.
		 */
		g_test_dev_full_ops[i] = *ops++;
		if (g_resubmit_test == true) {
			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
		}
	}

	return g_enqueue_mock;
}

#define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
static inline uint16_t
mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < g_dequeue_mock; i++) {
		*ops++ = g_test_crypto_ops[i];
	}

	return g_dequeue_mock;
}

/* Instead of allocating real memory, hand out entries from our test array so
 * tests can assert on the "allocated" ops.
 */
#define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
static inline unsigned
mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
			      enum rte_crypto_op_type type,
			      struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	for (i = 0; i < nb_ops; i++) {
		*ops++ = g_test_crypto_ops[i];
	}
	return ut_rte_crypto_op_bulk_alloc;
}

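/* The test crypto ops come from posix_memalign() in test_setup(), not from a
 * real mempool, so returning them to a pool must be a no-op here.
 */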
#define rte_mempool_put_bulk mock_rte_mempool_put_bulk
static __rte_always_inline void
mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
			  unsigned int n)
{
	return;
}

#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op, void *sess)
#else
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
#endif
{
	return ut_rte_crypto_op_attach_sym_session;
}

#define rte_lcore_count mock_rte_lcore_count
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}

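/* Include the source under test directly so the #define redirects above take
 * effect inside it and the tests can reach its static symbols.
 */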
#include "bdev/crypto/vbdev_crypto.c"

/* SPDK stubs */
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_unregister_by_name, int, (const char *bdev_name,
		struct spdk_bdev_module *module,
		spdk_bdev_unregister_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);

/* DPDK stubs */
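/* Have the mocked rte_mbuf_dynfield_register() hand back a fixed offset
 * inside the mbuf's dynfield1 area; the tests stash the bdev_io pointer
 * there via RTE_MBUF_DYNFIELD().
 */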
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (void *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

struct rte_cryptodev *rte_cryptodevs;

/* global vars and setup/cleanup functions used for all test functions */
struct spdk_bdev_io *g_bdev_io;
struct crypto_bdev_io *g_io_ctx;
struct crypto_io_channel *g_crypto_ch;
struct spdk_io_channel *g_io_ch;
struct vbdev_dev g_device;
struct vbdev_crypto g_crypto_bdev;
struct vbdev_crypto_opts g_crypto_bdev_opts;
struct device_qp g_dev_qp;

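/* Report a single qp and select the driver name according to the
 * ut_rte_cryptodev_info_get mock-control value.
 */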
void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	dev_info->max_nb_queue_pairs = 1;
	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
		dev_info->driver_name = g_driver_names[0];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
		dev_info->driver_name = g_driver_names[1];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_MLX5) {
		dev_info->driver_name = g_driver_names[2];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
		dev_info->driver_name = "junk";
	}
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	return (unsigned int)dev_id;
}

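/* Invoke the get-buf callbacks synchronously: hand back a sentinel aux
 * buffer, or report success, instead of doing real buffer allocation.
 */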
void
spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
{
	cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}

/* Mock these functions to call the callback and then return the value we require */
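/* Convention: the ut_* variable doubles as the mocked return code, and the
 * callback's success flag is its logical negation, so 0 means success.
 */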
int ut_spdk_bdev_readv_blocks = 0;
bool ut_spdk_bdev_readv_blocks_mocked = false;
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}

int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}

int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}

int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}

int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}

bool g_completion_called = false;
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}

/* Global setup for all tests; prepares the state they all share. */
static int
test_setup(void)
{
	int i, rc;

	/* Prepare essential variables for test routines */
	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
	g_crypto_ch = (struct crypto_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
	memset(&g_device, 0, sizeof(struct vbdev_dev));
	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
	memset(&g_crypto_bdev_opts, 0, sizeof(struct vbdev_crypto_opts));
	g_dev_qp.device = &g_device;
	g_io_ctx->crypto_ch = g_crypto_ch;
	g_io_ctx->crypto_bdev = &g_crypto_bdev;
	g_io_ctx->crypto_bdev->qp_desc_nr = CRYPTO_QP_DESCRIPTORS;
	g_io_ctx->crypto_bdev->opts = &g_crypto_bdev_opts;
	g_crypto_ch->device_qp = &g_dev_qp;
	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);

	/* Allocate a real mbuf pool so we can test error paths */
	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS,
					    (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
	/* Instead of allocating real rte mempools for these, it's easier, and
	 * provides the same coverage, to just allocate them here ourselves.
	 */
	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		size_t size = IV_OFFSET + IV_LENGTH + QUEUED_OP_LENGTH;
		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size);
		if (rc != 0) {
			assert(false);
		}
		memset(g_test_crypto_ops[i], 0, IV_OFFSET + QUEUED_OP_LENGTH);
	}
	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	int i;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		free(g_test_crypto_ops[i]);
	}
	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);
	return 0;
}

static void
test_error_paths(void)
{
	/* Single element block size write, just to test error paths
	 * in vbdev_crypto_submit_request().
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* Test failure of spdk_mempool_get_bulk(); the request gets queued, so
	 * the I/O still reports success.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_mempool_get, NULL);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Same thing, but switch to reads to test the error path in _crypto_complete_io() */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Now with the read_blocks failing */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_bdev_readv_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_SET(spdk_bdev_readv_blocks, 0);
	MOCK_CLEAR(spdk_mempool_get);

	/* Test failure of rte_crypto_op_bulk_alloc() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_bulk_alloc = 0;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_bulk_alloc = 1;

	/* Test failure of rte_crypto_op_attach_sym_session() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_attach_sym_session = -1;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_attach_sym_session = 0;
}

static void
test_simple_write(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.offset_blocks = 0;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
	CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
	CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
}

static void
test_simple_read(void)
{
	/* Single element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}

static void
test_large_rw(void)
{
	unsigned block_len = 512;
	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
	unsigned io_len = block_len * num_blocks;
	unsigned i;

	/* Multi block size read, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
		CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
		CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}

static void
test_dev_full(void)
{
	struct vbdev_crypto_op *queued_op;
	struct rte_crypto_sym_op *sym_op;
	struct crypto_bdev_io *io_ctx;

	/* Two element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 2;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;

	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);

	/* Make sure one got queued and confirm its values. */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
	sym_op = queued_op->crypto_op->sym;
	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
	CU_ASSERT(queued_op->bdev_io == g_bdev_io);
	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[1]->sym->m_src);

	/* Non-busy reason for enqueue failure; all ops were rejected. */
	g_enqueue_mock = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
	CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
}

static void
test_crazy_rw(void)
{
	unsigned block_len = 512;
	int num_blocks = 4;
	int i;

	/* Multi block size read, single element, strange IOV makeup */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 3;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element, strange IOV makeup */
	num_blocks = 8;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 4;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}

static void
test_passthru(void)
{
	/* Make sure these go through our completion callback; test both success and failure. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	MOCK_SET(spdk_bdev_unmap_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_unmap_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_unmap_blocks);

	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
	MOCK_SET(spdk_bdev_flush_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* We should never get a write-zeroes command, because we report that we don't support it. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
}

static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting without a unit test for this function for
	 * now; will follow up with something shortly.
	 */
}

static void
init_cleanup(void)
{
	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}
}

static void
test_initdrivers(void)
{
	int rc;
	static struct rte_mempool *orig_mbuf_mp;
	static struct rte_mempool *orig_session_mp;
	static struct rte_mempool *orig_session_mp_priv;

	/* These tests alloc and free our global mempools, so save them off
	 * here and restore them after the tests are over.
	 */
	orig_mbuf_mp = g_mbuf_mp;
	orig_session_mp = g_session_mp;
	orig_session_mp_priv = g_session_mp_priv;

	g_session_mp_priv = NULL;
	g_session_mp = NULL;
	g_mbuf_mp = NULL;

	/* No drivers available, not an error though */
	MOCK_SET(rte_cryptodev_count, 0);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);

	/* Can't create session pool. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(spdk_mempool_create, NULL);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(spdk_mempool_create);

	/* Can't create op pool. */
	MOCK_SET(rte_crypto_op_pool_create, NULL);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(rte_crypto_op_pool_create);

	/* Check the case where resources are not sufficient. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* Test crypto dev configure failure. */
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	MOCK_SET(rte_cryptodev_configure, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	MOCK_SET(rte_cryptodev_configure, 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test failure of qp setup. */
	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);

	/* Test failure of dev start. */
	MOCK_SET(rte_cryptodev_start, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_start, 0);

	/* Test bogus PMD */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test happy path QAT. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path AESNI. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path MLX5. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_MLX5);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test failure of DPDK dev init. By now this is no longer an error
	 * for the crypto framework as a whole.
	 */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_vdev_init, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	CU_ASSERT(g_session_mp_priv != NULL);
	init_cleanup();
	MOCK_SET(rte_vdev_init, 0);
	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);

	/* Restore our initial values. */
	g_mbuf_mp = orig_mbuf_mp;
	g_session_mp = orig_session_mp;
	g_session_mp_priv = orig_session_mp_priv;
}

static void
test_crypto_op_complete(void)
{
	/* Make sure completion code respects failure. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test read completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion success. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_writev_blocks, 0);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test write completion failure. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	MOCK_SET(spdk_bdev_writev_blocks, -1);
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test a bogus type for this completion. */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
	g_completion_called = false;
	_crypto_operation_complete(g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}

static void
test_supported_io(void)
{
	void *ctx = NULL;
	bool rc = true;

	/* Make sure we always report false for WRITE_ZEROES; we need the bdev
	 * layer to send real zeroes so we can encrypt/decrypt them.
	 */
	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
	CU_ASSERT(rc == false);
}

static void
test_poller(void)
{
	int rc;
	struct rte_mbuf *src_mbufs[2];
	struct vbdev_crypto_op *op_to_resubmit;

	/* Test the regular case: one op to dequeue and complete. */
	g_dequeue_mock = g_enqueue_mock = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_io_ctx->cryop_cnt_remaining = 1;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	rc = crypto_dev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);

	/* We have nothing dequeued but have some ops to resubmit. */
	g_dequeue_mock = 0;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* Add an op to the queued list. */
	g_resubmit_test = true;
	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
	op_to_resubmit->bdev_io = g_bdev_io;
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
			  op_to_resubmit,
			  link);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	rc = crypto_dev_poller(g_crypto_ch);
	g_resubmit_test = false;
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	/* Two ops to dequeue, but the second one failed. */
	g_dequeue_mock = g_enqueue_mock = 2;
	g_io_ctx->cryop_cnt_remaining = 2;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)g_bdev_io;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)g_bdev_io;
	g_test_crypto_ops[1]->sym->m_dst = NULL;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	rc = crypto_dev_poller(g_crypto_ch);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(rc == 2);
}

/* Helper function for test_assign_device_qp() */
static void
_clear_device_qp_lists(void)
{
	struct device_qp *device_qp = NULL;

	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
		device_qp = TAILQ_FIRST(&g_device_qp_qat);
		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
		free(device_qp);
	}
	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
		free(device_qp);
	}
	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
	while (!TAILQ_EMPTY(&g_device_qp_mlx5)) {
		device_qp = TAILQ_FIRST(&g_device_qp_mlx5);
		TAILQ_REMOVE(&g_device_qp_mlx5, device_qp, link);
		free(device_qp);
	}
	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_mlx5) == true);
}

/* Helper function for test_assign_device_qp() */
static void
_check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
		       uint8_t current_index)
{
	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
	CU_ASSERT(g_next_qat_index == current_index);
}

static void
test_assign_device_qp(void)
{
	struct device_qp *device_qp = NULL;
	int i;

	/* Start with a known state: clear the device/qp lists. */
	_clear_device_qp_lists();

	/* Make sure that one AESNI_MB qp is found. */
	device_qp = calloc(1, sizeof(struct device_qp));
	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
	g_crypto_ch->device_qp = NULL;
	g_crypto_bdev.opts->drv_name = AESNI_MB;
	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
	CU_ASSERT(g_crypto_ch->device_qp != NULL);

	/* QAT testing is more complex: the code under test load balances by
	 * advancing the next qp assignment by QAT_VF_SPREAD each time, modulo
	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual
	 * functions, each with 2 qps, so the "spread" between assignments is 32.
	 */
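	/* For example, with g_qat_total_qp = 96 and QAT_VF_SPREAD = 32, the
	 * assignment sequence verified below is 0, 32, 64, then it wraps to
	 * 1, 33, 65, and so on.
	 */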
	g_qat_total_qp = 96;
	for (i = 0; i < g_qat_total_qp; i++) {
		device_qp = calloc(1, sizeof(struct device_qp));
		device_qp->index = i;
		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
	}
	g_crypto_ch->device_qp = NULL;
	g_crypto_bdev.opts->drv_name = QAT;

	/* First assignment will assign to 0 and next at 32. */
	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
			       0, QAT_VF_SPREAD);

	/* Second assignment will assign to 32 and next at 64. */
	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);

	/* Third assignment will assign to 64 and next at 0. */
	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
			       QAT_VF_SPREAD * 2, 0);

	/* Fourth assignment will assign to 1 and next at 33. */
	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
			       1, QAT_VF_SPREAD + 1);

	/* Make sure that one MLX5 qp is found. */
	device_qp = calloc(1, sizeof(struct device_qp));
	TAILQ_INSERT_TAIL(&g_device_qp_mlx5, device_qp, link);
	g_crypto_ch->device_qp = NULL;
	g_crypto_bdev.opts->drv_name = MLX5;
	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
	CU_ASSERT(g_crypto_ch->device_qp == device_qp);

	_clear_device_qp_lists();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("crypto", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_error_paths);
	CU_ADD_TEST(suite, test_simple_write);
	CU_ADD_TEST(suite, test_simple_read);
	CU_ADD_TEST(suite, test_large_rw);
	CU_ADD_TEST(suite, test_dev_full);
	CU_ADD_TEST(suite, test_crazy_rw);
	CU_ADD_TEST(suite, test_passthru);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_crypto_op_complete);
	CU_ADD_TEST(suite, test_supported_io);
	CU_ADD_TEST(suite, test_reset);
	CU_ADD_TEST(suite, test_poller);
	CU_ADD_TEST(suite, test_assign_device_qp);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}