xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision 0098e636761237b77c12c30c2408263a5d2260cc)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk_cunit.h"
7 
8 #include "common/lib/test_env.c"
9 #include "spdk_internal/mock.h"
10 #include "thread/thread_internal.h"
11 #include "unit/lib/json_mock.c"
12 
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 
/* Upper bound on the number of crypto ops a single test IO may produce. */
#define MAX_TEST_BLOCKS 8192
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];	/* ops handed out by the bulk_alloc mock */
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];	/* ops captured by the enqueue mock */

uint16_t g_dequeue_mock;	/* how many ops the dequeue mock returns */
uint16_t g_enqueue_mock;	/* how many ops the enqueue mock reports accepted */
unsigned ut_rte_crypto_op_bulk_alloc;	/* return value of the bulk_alloc mock */
int ut_rte_crypto_op_attach_sym_session = 0;	/* return value of the attach-session mock */
/* Selector values for the rte_cryptodev_info_get() mock below. */
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_MLX5 2
#define MOCK_INFO_GET_1QP_BOGUS_PMD 3
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;
30 
31 void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
32 #define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
33 void
34 mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
35 {
36 	spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
37 }
38 
39 void mock_rte_pktmbuf_free(struct rte_mbuf *m);
40 #define rte_pktmbuf_free mock_rte_pktmbuf_free
41 void
42 mock_rte_pktmbuf_free(struct rte_mbuf *m)
43 {
44 	spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
45 }
46 
/* rte_mempool_free() stand-in: every rte_mempool in this UT is really an
 * spdk_mempool, so free it through the SPDK API.
 */
void
rte_mempool_free(struct rte_mempool *mp)
{
	struct spdk_mempool *pool = (struct spdk_mempool *)mp;

	spdk_mempool_free(pool);
}
52 
53 int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
54 				unsigned count);
55 #define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
56 int
57 mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
58 			    unsigned count)
59 {
60 	int rc;
61 
62 	rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
63 	if (rc) {
64 		return rc;
65 	}
66 	for (unsigned i = 0; i < count; i++) {
67 		rte_pktmbuf_reset(mbufs[i]);
68 		mbufs[i]->pool = pool;
69 	}
70 	return rc;
71 }
72 
73 struct rte_mempool *
74 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
75 				      uint32_t elt_size, uint32_t cache_size,
76 				      uint16_t priv_size, int socket_id)
77 {
78 	struct spdk_mempool *tmp;
79 
80 	tmp = spdk_mempool_create(name, nb_elts, elt_size + priv_size,
81 				  cache_size, socket_id);
82 
83 	return (struct rte_mempool *)tmp;
84 
85 }
86 
87 struct rte_mempool *
88 rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
89 			uint16_t priv_size, uint16_t data_room_size, int socket_id)
90 {
91 	struct spdk_mempool *tmp;
92 
93 	tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
94 				  cache_size, socket_id);
95 
96 	return (struct rte_mempool *)tmp;
97 }
98 
99 struct rte_mempool *
100 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
101 		   unsigned cache_size, unsigned private_data_size,
102 		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
103 		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
104 		   int socket_id, unsigned flags)
105 {
106 	struct spdk_mempool *tmp;
107 
108 	tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
109 				  cache_size, socket_id);
110 
111 	return (struct rte_mempool *)tmp;
112 }
113 
114 DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
115 struct rte_mempool *
116 rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
117 			  unsigned nb_elts, unsigned cache_size,
118 			  uint16_t priv_size, int socket_id)
119 {
120 	struct spdk_mempool *tmp;
121 
122 	HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);
123 
124 	tmp = spdk_mempool_create(name, nb_elts,
125 				  sizeof(struct rte_crypto_op) + priv_size,
126 				  cache_size, socket_id);
127 
128 	return (struct rte_mempool *)tmp;
129 
130 }
131 
132 /* Those functions are defined as static inline in DPDK, so we can't
133  * mock them straight away. We use defines to redirect them into
134  * our custom functions.
135  */
136 static bool g_resubmit_test = false;
137 #define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
138 static inline uint16_t
139 mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
140 				 struct rte_crypto_op **ops, uint16_t nb_ops)
141 {
142 	int i;
143 
144 	CU_ASSERT(nb_ops > 0);
145 
146 	for (i = 0; i < nb_ops; i++) {
147 		/* Use this empty (til now) array of pointers to store
148 		 * enqueued operations for assertion in dev_full test.
149 		 */
150 		g_test_dev_full_ops[i] = *ops++;
151 		if (g_resubmit_test == true) {
152 			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
153 		}
154 	}
155 
156 	return g_enqueue_mock;
157 }
158 
159 #define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
160 static inline uint16_t
161 mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
162 				 struct rte_crypto_op **ops, uint16_t nb_ops)
163 {
164 	int i;
165 
166 	CU_ASSERT(nb_ops > 0);
167 
168 	for (i = 0; i < g_dequeue_mock; i++) {
169 		*ops++ = g_test_crypto_ops[i];
170 	}
171 
172 	return g_dequeue_mock;
173 }
174 
175 /* Instead of allocating real memory, assign the allocations to our
176  * test array for assertion in tests.
177  */
178 #define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
179 static inline unsigned
180 mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
181 			      enum rte_crypto_op_type type,
182 			      struct rte_crypto_op **ops, uint16_t nb_ops)
183 {
184 	int i;
185 
186 	for (i = 0; i < nb_ops; i++) {
187 		*ops++ = g_test_crypto_ops[i];
188 	}
189 	return ut_rte_crypto_op_bulk_alloc;
190 }
191 
192 #define rte_mempool_put_bulk mock_rte_mempool_put_bulk
193 static __rte_always_inline void
194 mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
195 			  unsigned int n)
196 {
197 	return;
198 }
199 
#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
/* Return whatever result the current test configured (0 = success,
 * negative = attach failure path).
 */
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
{
	return ut_rte_crypto_op_attach_sym_session;
}
207 
#define rte_lcore_count mock_rte_lcore_count
/* Pretend exactly one lcore exists so qp sizing in vbdev_crypto.c is fixed. */
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}
214 
215 #include "bdev/crypto/vbdev_crypto.c"
216 
/* SPDK stubs */
/* Minimal stubs so vbdev_crypto.c links without the real bdev layer; each
 * returns a fixed value (last DEFINE_STUB argument) and ignores its inputs.
 */
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_io_put_aux_buf, (struct spdk_bdev_io *bdev_io, void *aux_buf));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 64);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_unregister_by_name, int, (const char *bdev_name,
		struct spdk_bdev_module *module,
		spdk_bdev_unregister_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *vbdev), 0);
243 
/* DPDK stubs */
/* Dynfield registration returns a fixed offset inside rte_mbuf; the rest of
 * the cryptodev API is stubbed to trivial successes.
 */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

/* Referenced by vbdev_crypto.c; never populated in these tests. */
struct rte_cryptodev *rte_cryptodevs;
267 
/* global vars and setup/cleanup functions used for all test functions */
struct spdk_bdev_io *g_bdev_io;		/* shared bdev_io (crypto ctx appended) */
struct crypto_bdev_io *g_io_ctx;	/* points at g_bdev_io->driver_ctx */
struct crypto_io_channel *g_crypto_ch;	/* ctx region of g_io_ch */
struct spdk_io_channel *g_io_ch;	/* shared io channel */
struct vbdev_dev g_device;		/* fake cryptodev device */
struct vbdev_crypto g_crypto_bdev;	/* fake crypto vbdev */
struct vbdev_crypto_opts g_crypto_bdev_opts;	/* opts attached to g_crypto_bdev */
struct device_qp g_dev_qp;		/* fake qp wired to g_device */
277 
278 void
279 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
280 {
281 	dev_info->max_nb_queue_pairs = 1;
282 	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
283 		dev_info->driver_name = g_driver_names[0];
284 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
285 		dev_info->driver_name = g_driver_names[1];
286 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_MLX5) {
287 		dev_info->driver_name = g_driver_names[2];
288 	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
289 		dev_info->driver_name = "junk";
290 	}
291 }
292 
/* Echo the dev_id as the private session size so tests can verify which
 * device the code asked about.
 */
unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	return (unsigned int)dev_id;
}
298 
/* Immediately invoke the callback with a sentinel aux buffer; ignores the
 * passed bdev_io in favor of the shared test globals.
 */
void
spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
{
	cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
}
304 
/* Immediately invoke the get-buf callback with success, using the shared
 * test globals rather than the passed bdev_io.
 */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}
310 
/* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;
bool ut_spdk_bdev_readv_blocks_mocked = false;
/* Fire the completion callback immediately (success when the mocked return
 * value is 0, failure otherwise), then return the mocked value.
 */
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}
323 
int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
/* Fire the completion callback immediately (success when the mocked return
 * value is 0, failure otherwise), then return the mocked value.
 */
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}
335 
int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
/* Fire the completion callback immediately (success when the mocked return
 * value is 0, failure otherwise), then return the mocked value.
 */
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}
346 
int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
/* Fire the completion callback immediately (success when the mocked return
 * value is 0, failure otherwise), then return the mocked value.
 */
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}
357 
int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
/* Fire the completion callback immediately (success when the mocked return
 * value is 0, failure otherwise), then return the mocked value.
 */
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}
367 
bool g_completion_called = false;
/* Record the completion status on the io itself and note that completion
 * happened, so tests can assert on both.
 */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}
375 
376 /* Global setup for all tests that share a bunch of preparation... */
377 static int
378 test_setup(void)
379 {
380 	int i, rc;
381 
382 	/* Prepare essential variables for test routines */
383 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
384 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
385 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
386 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
387 	g_crypto_ch = (struct crypto_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
388 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
389 	memset(&g_device, 0, sizeof(struct vbdev_dev));
390 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
391 	memset(&g_crypto_bdev_opts, 0, sizeof(struct vbdev_crypto_opts));
392 	g_dev_qp.device = &g_device;
393 	g_io_ctx->crypto_ch = g_crypto_ch;
394 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
395 	g_io_ctx->crypto_bdev->qp_desc_nr = CRYPTO_QP_DESCRIPTORS;
396 	g_io_ctx->crypto_bdev->opts = &g_crypto_bdev_opts;
397 	g_crypto_ch->device_qp = &g_dev_qp;
398 	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
399 	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
400 
401 	/* Allocate a real mbuf pool so we can test error paths */
402 	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", NUM_MBUFS,
403 					    (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
404 					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
405 	/* Instead of allocating real rte mempools for these, it's easier and provides the
406 	 * same coverage just calloc them here.
407 	 */
408 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
409 		size_t size = IV_OFFSET + IV_LENGTH + QUEUED_OP_LENGTH;
410 		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size);
411 		if (rc != 0) {
412 			assert(false);
413 		}
414 		memset(g_test_crypto_ops[i], 0, IV_OFFSET + QUEUED_OP_LENGTH);
415 	}
416 	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;
417 
418 	return 0;
419 }
420 
/* Global teardown for all tests */
/* Frees every pool and buffer created by test_setup() (and any pools the
 * driver created during the tests). NOTE(review): the pool-freeing half
 * duplicates init_cleanup() below; they could be consolidated.
 */
static int
test_cleanup(void)
{
	int i;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		free(g_test_crypto_ops[i]);
	}
	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);
	return 0;
}
453 
/* Exercise the error paths of vbdev_crypto_submit_request() with a
 * single-iov, single-block 512B IO, toggling one mock failure at a time.
 */
static void
test_error_paths(void)
{
	/* Single element block size write, just to test error paths
	 * in vbdev_crypto_submit_request().
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of spdk_mempool_get_bulk(), will result in success because it
	 * will get queued.
	 */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_mempool_get, NULL);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* same thing but switch to reads to test error path in _crypto_complete_io() */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Now with the read_blocks failing */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	MOCK_SET(spdk_bdev_readv_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	/* Restore the mocks before moving to the next scenario. */
	MOCK_SET(spdk_bdev_readv_blocks, 0);
	MOCK_CLEAR(spdk_mempool_get);

	/* test failure of rte_crypto_op_bulk_alloc() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_bulk_alloc = 0;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_bulk_alloc = 1;

	/* test failure of rte_crypto_op_attach_sym_session() */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	ut_rte_crypto_op_attach_sym_session = -1;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	ut_rte_crypto_op_attach_sym_session = 0;
}
506 
/* Happy-path write of one 512B block: verifies the crypto op, source mbuf,
 * the aux (encrypted-data) buffer bookkeeping, and the per-mbuf dynfield
 * linking the op back to the bdev_io.
 */
static void
test_simple_write(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.offset_blocks = 0;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == 512);
	CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
	CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
	CU_ASSERT(g_io_ctx->aux_num_blocks == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	/* The dynfield must point back at the originating bdev_io. */
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	/* Writes encrypt into a separate destination mbuf. */
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
}
541 
/* Happy-path read of one 512B block: decryption happens in place, so there
 * must be no destination mbuf.
 */
static void
test_simple_read(void)
{
	/* Single element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 1;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	/* The dynfield must point back at the originating bdev_io. */
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)g_bdev_io);
	/* Reads decrypt in place: no destination mbuf. */
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}
569 
/* Largest supported IO (CRYPTO_MAX_IO bytes) split into 512B blocks: one
 * crypto op and one mbuf per block, for both the read (in-place) and write
 * (separate dst mbuf) paths.
 */
static void
test_large_rw(void)
{
	unsigned block_len = 512;
	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
	unsigned io_len = block_len * num_blocks;
	unsigned i;

	/* Multi block size read, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	/* Each op should cover one block, advancing through the single iov. */
	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, multi-element */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* One aux buffer spans the whole write. */
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_len == io_len);
		CU_ASSERT(g_io_ctx->aux_buf_iov.iov_base != NULL);
		CU_ASSERT(g_io_ctx->aux_offset_blocks == 0);
		CU_ASSERT(g_io_ctx->aux_num_blocks == num_blocks);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}
636 
/* Simulate a full cryptodev: of two ops, the device "accepts" only one
 * (g_enqueue_mock = 1); the second must land on queued_cry_ops for later
 * resubmission. Then an enqueue returning 0 with a non-busy op status must
 * fail the IO outright.
 */
static void
test_dev_full(void)
{
	struct vbdev_crypto_op *queued_op;
	struct rte_crypto_sym_op *sym_op;
	struct crypto_bdev_io *io_ctx;

	/* Two element block size read */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	/* NOTE(review): iovcnt is 1 but two iovs are populated below —
	 * looks like the submit path walks iovs by remaining length rather
	 * than iovcnt; confirm against _crypto_operation() in vbdev_crypto.c.
	 */
	g_bdev_io->u.bdev.iovcnt = 1;
	g_bdev_io->u.bdev.num_blocks = 2;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = (void *)0xDEADBEEF;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = (void *)0xFEEDBEEF;
	g_crypto_bdev.crypto_bdev.blocklen = 512;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;

	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 2);
	/* First op was accepted by the device. */
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);

	/* make sure one got queued and confirm its values */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
	sym_op = queued_op->crypto_op->sym;
	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
	CU_ASSERT(queued_op->bdev_io == g_bdev_io);
	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xFEEDBEEF);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)g_bdev_io);
	CU_ASSERT(sym_op->m_dst == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[1]->sym->m_src);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
	CU_ASSERT(io_ctx->bdev_io_status == SPDK_BDEV_IO_STATUS_FAILED);
}
697 
/* Irregular iov layouts (iov lengths that don't match the block size):
 * every crypto op must still cover exactly one block in buffer order.
 */
static void
test_crazy_rw(void)
{
	unsigned block_len = 512;
	int num_blocks = 4;
	int i;

	/* Multi block size read, single element, strange IOV makeup */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 3;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* NOTE(review): this assert compares m_src with itself and is
		 * always true — likely a leftover; no behavior depends on it.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element strange IOV makeup */
	num_blocks = 8;
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_bdev_io->u.bdev.iovcnt = 4;
	g_bdev_io->u.bdev.num_blocks = num_blocks;
	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;

	g_crypto_bdev.crypto_bdev.blocklen = block_len;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)g_bdev_io);
		/* NOTE(review): the two self-comparisons below are always true —
		 * likely leftovers; no behavior depends on them.
		 */
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}
773 
/* IO types the crypto vbdev passes straight to the base bdev (unmap, flush):
 * success and failure both propagate through our completion callback.
 * Write-zeroes is unsupported and must fail immediately.
 */
static void
test_passthru(void)
{
	/* Make sure these follow our completion callback, test success & fail. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	MOCK_SET(spdk_bdev_unmap_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_unmap_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_unmap_blocks);

	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
	MOCK_SET(spdk_bdev_flush_blocks, 0);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	MOCK_SET(spdk_bdev_flush_blocks, -1);
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	MOCK_CLEAR(spdk_bdev_flush_blocks);

	/* We should never get a WZ command, we report that we don't support it. */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
}
801 
static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting w/o UT for this function for now and
	 * will follow up with something shortly.
	 */
}
811 
812 static void
813 init_cleanup(void)
814 {
815 	if (g_crypto_op_mp) {
816 		rte_mempool_free(g_crypto_op_mp);
817 		g_crypto_op_mp = NULL;
818 	}
819 	if (g_mbuf_mp) {
820 		rte_mempool_free(g_mbuf_mp);
821 		g_mbuf_mp = NULL;
822 	}
823 	if (g_session_mp) {
824 		rte_mempool_free(g_session_mp);
825 		g_session_mp = NULL;
826 	}
827 	if (g_session_mp_priv != NULL) {
828 		/* g_session_mp_priv may or may not be set depending on the DPDK version */
829 		rte_mempool_free(g_session_mp_priv);
830 		g_session_mp_priv = NULL;
831 	}
832 }
833 
/* Exercise vbdev_crypto_init_crypto_drivers() through its failure paths
 * (no devices, pool-creation failures, configure/qp/start failures, bogus
 * PMD) and its happy paths for each supported PMD.
 */
static void
test_initdrivers(void)
{
	int rc;
	static struct rte_mempool *orig_mbuf_mp;
	static struct rte_mempool *orig_session_mp;
	static struct rte_mempool *orig_session_mp_priv;

	/* These tests will alloc and free our g_mbuf_mp
	 * so save that off here and restore it after each test is over.
	 */
	orig_mbuf_mp = g_mbuf_mp;
	orig_session_mp = g_session_mp;
	orig_session_mp_priv = g_session_mp_priv;

	g_session_mp_priv = NULL;
	g_session_mp = NULL;
	g_mbuf_mp = NULL;

	/* No drivers available, not an error though */
	MOCK_SET(rte_cryptodev_count, 0);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);

	/* Can't create session pool. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(spdk_mempool_create, NULL);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(spdk_mempool_create);

	/* Can't create op pool. */
	MOCK_SET(rte_crypto_op_pool_create, NULL);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(rte_crypto_op_pool_create);

	/* Check resources are not sufficient */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* Test crypto dev configure failure. */
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	MOCK_SET(rte_cryptodev_configure, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	/* Restore the configure mock before asserting so a failed assert
	 * doesn't leave it poisoned for the remaining cases.
	 */
	MOCK_SET(rte_cryptodev_configure, 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test failure of qp setup. */
	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);

	/* Test failure of dev start. */
	MOCK_SET(rte_cryptodev_start, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_start, 0);

	/* Test bogus PMD */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test happy path QAT. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path AESNI. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path MLX5. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_MLX5);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test failure of DPDK dev init. By now it is no longer an error
	 * situation for entire crypto framework. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_vdev_init, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = vbdev_crypto_init_crypto_drivers();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	CU_ASSERT(g_session_mp_priv != NULL);
	init_cleanup();
	MOCK_SET(rte_vdev_init, 0);
	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);

	/* restore our initial values. */
	g_mbuf_mp = orig_mbuf_mp;
	g_session_mp = orig_session_mp;
	g_session_mp_priv = orig_session_mp_priv;
}
971 
972 static void
973 test_crypto_op_complete(void)
974 {
975 	/* Make sure completion code respects failure. */
976 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
977 	g_completion_called = false;
978 	_crypto_operation_complete(g_bdev_io);
979 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
980 	CU_ASSERT(g_completion_called == true);
981 
982 	/* Test read completion. */
983 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
984 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
985 	g_completion_called = false;
986 	_crypto_operation_complete(g_bdev_io);
987 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
988 	CU_ASSERT(g_completion_called == true);
989 
990 	/* Test write completion success. */
991 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
992 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
993 	g_completion_called = false;
994 	MOCK_SET(spdk_bdev_writev_blocks, 0);
995 	_crypto_operation_complete(g_bdev_io);
996 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
997 	CU_ASSERT(g_completion_called == true);
998 
999 	/* Test write completion failed. */
1000 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1001 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
1002 	g_completion_called = false;
1003 	MOCK_SET(spdk_bdev_writev_blocks, -1);
1004 	_crypto_operation_complete(g_bdev_io);
1005 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1006 	CU_ASSERT(g_completion_called == true);
1007 
1008 	/* Test bogus type for this completion. */
1009 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1010 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
1011 	g_completion_called = false;
1012 	_crypto_operation_complete(g_bdev_io);
1013 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1014 	CU_ASSERT(g_completion_called == true);
1015 }
1016 
1017 static void
1018 test_supported_io(void)
1019 {
1020 	void *ctx = NULL;
1021 	bool rc = true;
1022 
1023 	/* Make sure we always report false to WZ, we need the bdev layer to
1024 	 * send real 0's so we can encrypt/decrypt them.
1025 	 */
1026 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
1027 	CU_ASSERT(rc == false);
1028 }
1029 
1030 static void
1031 test_poller(void)
1032 {
1033 	int rc;
1034 	struct rte_mbuf *src_mbufs[2];
1035 	struct vbdev_crypto_op *op_to_resubmit;
1036 
1037 	/* test regular 1 op to dequeue and complete */
1038 	g_dequeue_mock = g_enqueue_mock = 1;
1039 	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
1040 	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
1041 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
1042 			   uint64_t *) = (uintptr_t)g_bdev_io;
1043 	g_test_crypto_ops[0]->sym->m_dst = NULL;
1044 	g_io_ctx->cryop_cnt_remaining = 1;
1045 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
1046 	rc = crypto_dev_poller(g_crypto_ch);
1047 	CU_ASSERT(rc == 1);
1048 
1049 	/* We have nothing dequeued but have some to resubmit */
1050 	g_dequeue_mock = 0;
1051 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
1052 
1053 	/* add an op to the queued list. */
1054 	g_resubmit_test = true;
1055 	op_to_resubmit = (struct vbdev_crypto_op *)((uint8_t *)g_test_crypto_ops[0] + QUEUED_OP_OFFSET);
1056 	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
1057 	op_to_resubmit->bdev_io = g_bdev_io;
1058 	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
1059 			  op_to_resubmit,
1060 			  link);
1061 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
1062 	rc = crypto_dev_poller(g_crypto_ch);
1063 	g_resubmit_test = false;
1064 	CU_ASSERT(rc == 0);
1065 	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
1066 
1067 	/* 2 to dequeue but 2nd one failed */
1068 	g_dequeue_mock = g_enqueue_mock = 2;
1069 	g_io_ctx->cryop_cnt_remaining = 2;
1070 	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
1071 	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
1072 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
1073 			   uint64_t *) = (uint64_t)g_bdev_io;
1074 	g_test_crypto_ops[0]->sym->m_dst = NULL;
1075 	g_test_crypto_ops[0]->status =  RTE_CRYPTO_OP_STATUS_SUCCESS;
1076 	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
1077 	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
1078 			   uint64_t *) = (uint64_t)g_bdev_io;
1079 	g_test_crypto_ops[1]->sym->m_dst = NULL;
1080 	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1081 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
1082 	rc = crypto_dev_poller(g_crypto_ch);
1083 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
1084 	CU_ASSERT(rc == 2);
1085 }
1086 
1087 /* Helper function for test_assign_device_qp() */
1088 static void
1089 _clear_device_qp_lists(void)
1090 {
1091 	struct device_qp *device_qp = NULL;
1092 
1093 	while (!TAILQ_EMPTY(&g_device_qp_qat)) {
1094 		device_qp = TAILQ_FIRST(&g_device_qp_qat);
1095 		TAILQ_REMOVE(&g_device_qp_qat, device_qp, link);
1096 		free(device_qp);
1097 
1098 	}
1099 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_qat) == true);
1100 	while (!TAILQ_EMPTY(&g_device_qp_aesni_mb)) {
1101 		device_qp = TAILQ_FIRST(&g_device_qp_aesni_mb);
1102 		TAILQ_REMOVE(&g_device_qp_aesni_mb, device_qp, link);
1103 		free(device_qp);
1104 	}
1105 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_aesni_mb) == true);
1106 	while (!TAILQ_EMPTY(&g_device_qp_mlx5)) {
1107 		device_qp = TAILQ_FIRST(&g_device_qp_mlx5);
1108 		TAILQ_REMOVE(&g_device_qp_mlx5, device_qp, link);
1109 		free(device_qp);
1110 	}
1111 	CU_ASSERT(TAILQ_EMPTY(&g_device_qp_mlx5) == true);
1112 }
1113 
1114 /* Helper function for test_assign_device_qp() */
1115 static void
1116 _check_expected_values(struct vbdev_crypto *crypto_bdev, struct device_qp *device_qp,
1117 		       struct crypto_io_channel *crypto_ch, uint8_t expected_index,
1118 		       uint8_t current_index)
1119 {
1120 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1121 	CU_ASSERT(g_crypto_ch->device_qp->index == expected_index);
1122 	CU_ASSERT(g_next_qat_index == current_index);
1123 }
1124 
1125 static void
1126 test_assign_device_qp(void)
1127 {
1128 	struct device_qp *device_qp = NULL;
1129 	int i;
1130 
1131 	/* start with a known state, clear the device/qp lists */
1132 	_clear_device_qp_lists();
1133 
1134 	/* make sure that one AESNI_MB qp is found */
1135 	device_qp = calloc(1, sizeof(struct device_qp));
1136 	TAILQ_INSERT_TAIL(&g_device_qp_aesni_mb, device_qp, link);
1137 	g_crypto_ch->device_qp = NULL;
1138 	g_crypto_bdev.opts->drv_name = AESNI_MB;
1139 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1140 	CU_ASSERT(g_crypto_ch->device_qp != NULL);
1141 
1142 	/* QAT testing is more complex as the code under test load balances by
1143 	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
1144 	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions
1145 	 * each with 2 qp so the "spread" between assignments is 32.
1146 	 */
1147 	g_qat_total_qp = 96;
1148 	for (i = 0; i < g_qat_total_qp; i++) {
1149 		device_qp = calloc(1, sizeof(struct device_qp));
1150 		device_qp->index = i;
1151 		TAILQ_INSERT_TAIL(&g_device_qp_qat, device_qp, link);
1152 	}
1153 	g_crypto_ch->device_qp = NULL;
1154 	g_crypto_bdev.opts->drv_name = QAT;
1155 
1156 	/* First assignment will assign to 0 and next at 32. */
1157 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1158 			       0, QAT_VF_SPREAD);
1159 
1160 	/* Second assignment will assign to 32 and next at 64. */
1161 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1162 			       QAT_VF_SPREAD, QAT_VF_SPREAD * 2);
1163 
1164 	/* Third assignment will assign to 64 and next at 0. */
1165 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1166 			       QAT_VF_SPREAD * 2, 0);
1167 
1168 	/* Fourth assignment will assign to 1 and next at 33. */
1169 	_check_expected_values(&g_crypto_bdev, device_qp, g_crypto_ch,
1170 			       1, QAT_VF_SPREAD + 1);
1171 
1172 	/* make sure that one MLX5 qp is found */
1173 	device_qp = calloc(1, sizeof(struct device_qp));
1174 	TAILQ_INSERT_TAIL(&g_device_qp_mlx5, device_qp, link);
1175 	g_crypto_ch->device_qp = NULL;
1176 	g_crypto_bdev.opts->drv_name = MLX5;
1177 	_assign_device_qp(&g_crypto_bdev, device_qp, g_crypto_ch);
1178 	CU_ASSERT(g_crypto_ch->device_qp == device_qp);
1179 
1180 	_clear_device_qp_lists();
1181 }
1182 
1183 int
1184 main(int argc, char **argv)
1185 {
1186 	CU_pSuite	suite = NULL;
1187 	unsigned int	num_failures;
1188 
1189 	CU_set_error_action(CUEA_ABORT);
1190 	CU_initialize_registry();
1191 
1192 	suite = CU_add_suite("crypto", test_setup, test_cleanup);
1193 	CU_ADD_TEST(suite, test_error_paths);
1194 	CU_ADD_TEST(suite, test_simple_write);
1195 	CU_ADD_TEST(suite, test_simple_read);
1196 	CU_ADD_TEST(suite, test_large_rw);
1197 	CU_ADD_TEST(suite, test_dev_full);
1198 	CU_ADD_TEST(suite, test_crazy_rw);
1199 	CU_ADD_TEST(suite, test_passthru);
1200 	CU_ADD_TEST(suite, test_initdrivers);
1201 	CU_ADD_TEST(suite, test_crypto_op_complete);
1202 	CU_ADD_TEST(suite, test_supported_io);
1203 	CU_ADD_TEST(suite, test_reset);
1204 	CU_ADD_TEST(suite, test_poller);
1205 	CU_ADD_TEST(suite, test_assign_device_qp);
1206 
1207 	CU_basic_set_mode(CU_BRM_VERBOSE);
1208 	CU_basic_run_tests();
1209 	num_failures = CU_get_number_of_failures();
1210 	CU_cleanup_registry();
1211 	return num_failures;
1212 }
1213