xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision c10f8e160e42a2a642e8a593b60c2f84561d5eba)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/test_env.c"
37 #include "spdk_internal/mock.h"
38 #include "unit/lib/json_mock.c"
39 
40 /* These rte_ headers are our local copies of the DPDK headers, hacked to mock some functions
41  * declared in them that can't be mocked with our mock library.
42  */
43 #include "rte_crypto.h"
44 #include "rte_cryptodev.h"
45 DEFINE_STUB_V(rte_crypto_op_free, (struct rte_crypto_op *op));
46 #include "bdev/crypto/vbdev_crypto.c"
47 
48 /* SPDK stubs */
49 DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
50 	    (struct spdk_conf *cp, const char *name), NULL);
51 DEFINE_STUB(spdk_conf_section_get_nval, char *,
52 	    (struct spdk_conf_section *sp, const char *key, int idx), NULL);
53 DEFINE_STUB(spdk_conf_section_get_nmval, char *,
54 	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
55 
56 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
57 DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
58 DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
59 		enum spdk_bdev_io_type io_type), 0);
60 DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
61 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
62 DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
63 DEFINE_STUB(spdk_env_get_current_core, uint32_t, (void), 0);
64 DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
65 DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
66 				     void *cb_arg));
67 DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
68 				  spdk_bdev_remove_cb_t remove_cb,
69 				  void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
70 DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
71 		struct spdk_bdev_module *module), 0);
72 DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
73 DEFINE_STUB(spdk_vbdev_register, int, (struct spdk_bdev *vbdev, struct spdk_bdev **base_bdevs,
74 				       int base_bdev_count), 0);
75 DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
76 DEFINE_STUB(spdk_env_get_socket_id, uint32_t, (uint32_t core), 0);
77 
78 /* DPDK stubs */
79 DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
80 DEFINE_STUB(rte_eal_get_configuration, struct rte_config *, (void), NULL);
81 DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
82 DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
83 DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
84 	    (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
85 	     unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
86 DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
87 DEFINE_STUB(rte_cryptodev_socket_id, int, (uint8_t dev_id), 0);
88 DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
89 DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
90 		const struct rte_cryptodev_qp_conf *qp_conf,
91 		int socket_id, struct rte_mempool *session_pool), 0);
92 DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
93 DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
94 DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
95 	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
96 DEFINE_STUB(rte_cryptodev_sym_session_clear, int, (uint8_t dev_id,
97 		struct rte_cryptodev_sym_session *sess), 0);
98 DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
99 DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
100 		struct rte_cryptodev_sym_session *sess,
101 		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
102 DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
103 void __attribute__((noreturn)) __rte_panic(const char *funcname, const char *format, ...)
104 {
105 	abort();
106 }
107 struct rte_mempool_ops_table rte_mempool_ops_table;
108 struct rte_cryptodev *rte_cryptodevs;
109 __thread unsigned per_lcore__lcore_id = 0;
110 
111 /* global vars and setup/cleanup functions used for all test functions */
112 struct spdk_bdev_io *g_bdev_io;
113 struct crypto_bdev_io *g_io_ctx;
114 struct crypto_io_channel *g_crypto_ch;
115 struct spdk_io_channel *g_io_ch;
116 struct vbdev_dev g_device;
117 struct vbdev_crypto g_crypto_bdev;
118 struct rte_config *g_test_config;
119 struct device_qp g_dev_qp;
120 
121 #define MAX_TEST_BLOCKS 8192
122 struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
123 struct rte_crypto_op *g_test_dequeued_ops[MAX_TEST_BLOCKS];
124 struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];
125 
126 /* These globals are declared extern in our local rte_ header files so that we
127  * can control the behavior of specific mocked functions.
128  */
129 uint16_t g_dequeue_mock;
130 uint16_t g_enqueue_mock;
131 unsigned ut_rte_crypto_op_bulk_alloc;
132 int ut_rte_crypto_op_attach_sym_session = 0;
133 
134 int ut_rte_cryptodev_info_get = 0;
135 bool ut_rte_cryptodev_info_get_mocked = false;
136 void
137 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
138 {
139 	dev_info->max_nb_queue_pairs = ut_rte_cryptodev_info_get;
140 }
141 
142 unsigned int
143 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
144 {
145 	return (unsigned int)dev_id;
146 }
147 
148 void
149 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
150 {
151 	cb(g_io_ch, g_bdev_io);
152 }
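
/* For read IO the vbdev requests a buffer through this call; invoking the
 * callback inline keeps the submit path synchronous in these tests, which is
 * why a test can assert immediately after submitting, e.g.:
 *
 *     vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
 *     CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
 */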
153 
154 /* Mock these functions to call the callback and then return the value we require */
155 int ut_spdk_bdev_readv_blocks = 0;
156 bool ut_spdk_bdev_readv_blocks_mocked = false;
157 int
158 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
159 		       struct iovec *iov, int iovcnt,
160 		       uint64_t offset_blocks, uint64_t num_blocks,
161 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
162 {
163 	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
164 	return ut_spdk_bdev_readv_blocks;
165 }
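
/* Usage sketch: the ut_<fn>/ut_<fn>_mocked pairs above and below exist so the
 * MOCK_SET()/MOCK_CLEAR() macros from spdk_internal/mock.h can drive these
 * hand-written mocks; the macros are assumed to expand to roughly
 *
 *     ut_spdk_bdev_readv_blocks_mocked = true;
 *     ut_spdk_bdev_readv_blocks = -1;
 *
 * which is how test_error_paths() below forces a readv failure via
 * MOCK_SET(spdk_bdev_readv_blocks, -1).
 */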
166 
167 int ut_spdk_bdev_writev_blocks = 0;
168 bool ut_spdk_bdev_writev_blocks_mocked = false;
169 int
170 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
171 			struct iovec *iov, int iovcnt,
172 			uint64_t offset_blocks, uint64_t num_blocks,
173 			spdk_bdev_io_completion_cb cb, void *cb_arg)
174 {
175 	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
176 	return ut_spdk_bdev_writev_blocks;
177 }
178 
179 int ut_spdk_bdev_unmap_blocks = 0;
180 bool ut_spdk_bdev_unmap_blocks_mocked = false;
181 int
182 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
183 		       uint64_t offset_blocks, uint64_t num_blocks,
184 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
185 {
186 	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
187 	return ut_spdk_bdev_unmap_blocks;
188 }
189 
190 int ut_spdk_bdev_flush_blocks = 0;
191 bool ut_spdk_bdev_flush_blocks_mocked = false;
192 int
193 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
194 		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
195 		       void *cb_arg)
196 {
197 	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
198 	return ut_spdk_bdev_flush_blocks;
199 }
200 
201 int ut_spdk_bdev_reset = 0;
202 bool ut_spdk_bdev_reset_mocked = false;
203 int
204 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
205 		spdk_bdev_io_completion_cb cb, void *cb_arg)
206 {
207 	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
208 	return ut_spdk_bdev_reset;
209 }
210 
211 bool g_completion_called = false;
212 void
213 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
214 {
215 	bdev_io->internal.status = status;
216 	g_completion_called = true;
217 }
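
/* Tests assert on both pieces of state recorded here, e.g. in
 * test_crypto_op_complete():
 *
 *     CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
 *     CU_ASSERT(g_completion_called == true);
 */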
218 
219 /* Used in testing the device-full condition */
220 static inline uint16_t
221 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
222 			    struct rte_crypto_op **ops, uint16_t nb_ops)
223 {
224 	int i;
225 
226 	CU_ASSERT(nb_ops > 0);
227 
228 	for (i = 0; i < nb_ops; i++) {
229 		/* Use this (empty until now) array of pointers to store the
230 		 * enqueued operations for assertion in the dev_full test.
231 		 */
232 		g_test_dev_full_ops[i] = *ops++;
233 	}
234 
235 	return g_enqueue_mock;
236 }
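
/* Example of how a test drives this mock to simulate a full device (see
 * test_dev_full() below): two blocks are submitted but only one enqueue is
 * reported as accepted, so the vbdev has to drain via the poller.
 *
 *     g_enqueue_mock = g_dequeue_mock = 1;
 *     ut_rte_crypto_op_bulk_alloc = num_blocks;    // num_blocks == 2
 */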
237 
238 /* This is pretty ugly, but in order to complete an IO via the
239  * poller in the submit path, the first call to this function needs
240  * to return the dequeued value and also decrement it.  On the subsequent
241  * call it needs to return 0 to indicate to the caller that there are
242  * no more IOs to drain.
243  */
244 int g_test_overflow = 0;
245 static inline uint16_t
246 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
247 			    struct rte_crypto_op **ops, uint16_t nb_ops)
248 {
249 	CU_ASSERT(nb_ops > 0);
250 
251 	/* A crypto device can be full on enqueue; the driver is designed to drain
252 	 * the device when that happens by calling the poller until it's empty, then
253 	 * submitting the remaining crypto ops.
254 	 */
255 	if (g_test_overflow) {
256 		if (g_dequeue_mock == 0) {
257 			return 0;
258 		}
259 		*ops = g_test_crypto_ops[g_enqueue_mock];
260 		(*ops)->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
261 		g_dequeue_mock -= 1;
262 	}
263 	return (g_dequeue_mock + 1);
264 }
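
/* Trace of the overflow path above with g_dequeue_mock starting at 1 (the
 * value set by test_dev_full()): the first call hands back one completed op,
 * decrements g_dequeue_mock to 0 and returns 1; the second call sees
 * g_dequeue_mock == 0 and returns 0, telling the poller the drain is done.
 */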
265 
266 /* Instead of allocating real memory, assign the allocations to our
267  * test array for assertion in tests.
268  */
269 static inline unsigned
270 rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
271 			 enum rte_crypto_op_type type,
272 			 struct rte_crypto_op **ops, uint16_t nb_ops)
273 {
274 	int i;
275 
276 	for (i = 0; i < nb_ops; i++) {
277 		*ops++ = g_test_crypto_ops[i];
278 	}
279 	return ut_rte_crypto_op_bulk_alloc;
280 }
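
/* Tests control the simulated allocation result through
 * ut_rte_crypto_op_bulk_alloc; e.g. test_error_paths() sets it to 0 to make
 * the vbdev treat the bulk allocation as failed and then restores it to 1:
 *
 *     ut_rte_crypto_op_bulk_alloc = 0;
 *     vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
 *     CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
 *     ut_rte_crypto_op_bulk_alloc = 1;
 */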
281 
282 static __rte_always_inline void
283 rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
284 		     unsigned int n)
285 {
286 	return;
287 }
288 
289 static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
290 {
291 	return NULL;
292 }
293 
294 
295 static inline int
296 rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
297 				 struct rte_cryptodev_sym_session *sess)
298 {
299 	return ut_rte_crypto_op_attach_sym_session;
300 }
301 
302 /* Global setup for all tests that share a bunch of common preparation. */
303 static int
304 test_setup(void)
305 {
306 	int i;
307 
308 	/* Prepare essential variables for test routines */
309 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
310 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
311 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
312 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
313 	g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
314 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
315 	memset(&g_device, 0, sizeof(struct vbdev_dev));
316 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
317 	g_dev_qp.device = &g_device;
318 	g_io_ctx->crypto_ch = g_crypto_ch;
319 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
320 	g_crypto_ch->device_qp = &g_dev_qp;
321 	g_test_config = calloc(1, sizeof(struct rte_config));
322 	g_test_config->lcore_count = 1;
323 
324 	/* Allocate a real mbuf pool so we can test error paths */
325 	g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
326 					SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
327 					SPDK_ENV_SOCKET_ID_ANY);
328 
329 	/* Instead of allocating real rte mempools for these, it's easier, and provides the
330 	 * same coverage, to just calloc them here.
331 	 */
332 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
333 		g_test_crypto_ops[i] = calloc(1, sizeof(struct rte_crypto_op) +
334 					      sizeof(struct rte_crypto_sym_op));
335 		g_test_dequeued_ops[i] = calloc(1, sizeof(struct rte_crypto_op) +
336 						sizeof(struct rte_crypto_sym_op));
337 	}
338 	return 0;
339 }
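
/* Note on the layout faked in test_setup(): SPDK's spdk_io_channel_get_ctx()
 * is assumed to return the memory immediately following the
 * struct spdk_io_channel header, roughly
 *
 *     (void *)((uint8_t *)ch + sizeof(struct spdk_io_channel));
 *
 * so calloc'ing header + ctx in one block and computing g_crypto_ch by hand
 * mirrors what the vbdev sees for a real channel. Likewise g_io_ctx points at
 * the driver_ctx area that trails struct spdk_bdev_io.
 */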
340 
341 /* Global teardown for all tests */
342 static int
343 test_cleanup(void)
344 {
345 	int i;
346 
347 	free(g_test_config);
348 	spdk_mempool_free(g_mbuf_mp);
349 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
350 		free(g_test_crypto_ops[i]);
351 		free(g_test_dequeued_ops[i]);
352 	}
353 	free(g_bdev_io->u.bdev.iovs);
354 	free(g_bdev_io);
355 	free(g_io_ch);
356 	return 0;
357 }
358 
359 static void
360 test_error_paths(void)
361 {
362 	/* Single element block size write, just to test error paths
363 	 * in vbdev_crypto_submit_request().
364 	 */
365 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
366 	g_bdev_io->u.bdev.iovcnt = 1;
367 	g_bdev_io->u.bdev.num_blocks = 1;
368 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
369 	g_crypto_bdev.crypto_bdev.blocklen = 512;
370 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
371 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
372 
373 	/* test failure of spdk_mempool_get_bulk() */
374 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
375 	MOCK_SET(spdk_mempool_get, NULL);
376 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
377 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
378 
379 	/* same thing but switch to reads to test error path in _crypto_complete_io() */
380 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
381 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
382 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
383 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
384 	/* Now with the read_blocks failing */
385 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
386 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
387 	MOCK_SET(spdk_bdev_readv_blocks, -1);
388 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
389 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
390 	MOCK_SET(spdk_bdev_readv_blocks, 0);
391 	MOCK_CLEAR(spdk_mempool_get);
392 
393 	/* test failure of rte_crypto_op_bulk_alloc() */
394 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
395 	ut_rte_crypto_op_bulk_alloc = 0;
396 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
397 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
398 	ut_rte_crypto_op_bulk_alloc = 1;
399 
400 	/* test failure of rte_cryptodev_sym_session_create() */
401 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
402 	MOCK_SET(rte_cryptodev_sym_session_create, NULL);
403 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
404 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
405 	MOCK_SET(rte_cryptodev_sym_session_create, (struct rte_cryptodev_sym_session *)1);
406 
407 	/* test failure of rte_cryptodev_sym_session_init() */
408 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
409 	MOCK_SET(rte_cryptodev_sym_session_init, -1);
410 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
411 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
412 	MOCK_SET(rte_cryptodev_sym_session_init, 0);
413 
414 	/* test failure of rte_crypto_op_attach_sym_session() */
415 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
416 	ut_rte_crypto_op_attach_sym_session = -1;
417 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
418 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
419 	ut_rte_crypto_op_attach_sym_session = 0;
420 }
421 
422 static void
423 test_simple_write(void)
424 {
425 	/* Single element block size write */
426 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
427 	g_bdev_io->u.bdev.iovcnt = 1;
428 	g_bdev_io->u.bdev.num_blocks = 1;
429 	g_bdev_io->u.bdev.offset_blocks = 0;
430 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
431 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
432 	g_crypto_bdev.crypto_bdev.blocklen = 512;
433 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
434 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
435 
436 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
437 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
438 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
439 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
440 	CU_ASSERT(g_io_ctx->cry_iov.iov_len == 512);
441 	CU_ASSERT(g_io_ctx->cry_iov.iov_base != NULL);
442 	CU_ASSERT(g_io_ctx->cry_offset_blocks == 0);
443 	CU_ASSERT(g_io_ctx->cry_num_blocks == 1);
444 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
445 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
446 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
447 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
448 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
449 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
450 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
451 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);
452 
453 	spdk_dma_free(g_io_ctx->cry_iov.iov_base);
454 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
455 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
456 }
457 
458 static void
459 test_simple_read(void)
460 {
461 	/* Single element block size read */
462 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
463 	g_bdev_io->u.bdev.iovcnt = 1;
464 	g_bdev_io->u.bdev.num_blocks = 1;
465 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
466 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
467 	g_crypto_bdev.crypto_bdev.blocklen = 512;
468 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
469 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
470 
471 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
472 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
473 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
474 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
475 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
476 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
477 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
478 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
479 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
480 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
481 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
482 
483 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
484 }
485 
486 static void
487 test_large_rw(void)
488 {
489 	unsigned block_len = 512;
490 	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
491 	unsigned io_len = block_len * num_blocks;
492 	unsigned i;
493 
494 	/* Multi block size read, multi-element */
495 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
496 	g_bdev_io->u.bdev.iovcnt = 1;
497 	g_bdev_io->u.bdev.num_blocks = num_blocks;
498 	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
499 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
500 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
501 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
502 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
503 
504 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
505 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
506 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
507 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
508 
509 	for (i = 0; i < num_blocks; i++) {
510 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
511 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
512 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
513 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
514 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
515 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
516 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
517 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
518 	}
519 
520 	/* Multi block size write, multi-element */
521 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
522 	g_bdev_io->u.bdev.iovcnt = 1;
523 	g_bdev_io->u.bdev.num_blocks = num_blocks;
524 	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
525 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
526 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
527 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
528 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
529 
530 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
531 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
532 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
533 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
534 
535 	for (i = 0; i < num_blocks; i++) {
536 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
537 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
538 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
539 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
540 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
541 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
542 		CU_ASSERT(g_io_ctx->cry_iov.iov_len == io_len);
543 		CU_ASSERT(g_io_ctx->cry_iov.iov_base != NULL);
544 		CU_ASSERT(g_io_ctx->cry_offset_blocks == 0);
545 		CU_ASSERT(g_io_ctx->cry_num_blocks == num_blocks);
546 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
547 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
548 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
549 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
550 	}
551 	spdk_dma_free(g_io_ctx->cry_iov.iov_base);
552 }
553 
554 static void
555 test_dev_full(void)
556 {
557 	unsigned block_len = 512;
558 	unsigned num_blocks = 2;
559 	unsigned io_len = block_len * num_blocks;
560 	unsigned i;
561 
562 	g_test_overflow = 1;
563 
564 	/* Multi block size read, multi-element */
565 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
566 	g_bdev_io->u.bdev.iovcnt = 1;
567 	g_bdev_io->u.bdev.num_blocks = num_blocks;
568 	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
569 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_dev_full;
570 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
571 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
572 	g_enqueue_mock = g_dequeue_mock = 1;
573 	ut_rte_crypto_op_bulk_alloc = num_blocks;
574 
575 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
576 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
577 
578 	/* this test only completes one of the 2 IOs (in the drain path) */
579 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
580 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
581 
582 	for (i = 0; i < num_blocks; i++) {
583 		/* One of the src_mbufs was freed because of the device-full condition, so
584 		 * we can't assert its value here.
585 		 */
586 		CU_ASSERT(g_test_dev_full_ops[i]->sym->cipher.data.length == block_len);
587 		CU_ASSERT(g_test_dev_full_ops[i]->sym->cipher.data.offset == 0);
588 		CU_ASSERT(g_test_dev_full_ops[i]->sym->m_src == g_test_dev_full_ops[i]->sym->m_src);
589 		CU_ASSERT(g_test_dev_full_ops[i]->sym->m_dst == NULL);
590 	}
591 
592 	/* Only one of the 2 blocks in the test was freed on completion by design, so
593 	 * we need to free the other one here.
594 	 */
595 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
596 	g_test_overflow = 0;
597 }
598 
599 static void
600 test_crazy_rw(void)
601 {
602 	unsigned block_len = 512;
603 	int num_blocks = 4;
604 	int i;
605 
606 	/* Multi block size read, multi-element, strange IOV makeup */
607 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
608 	g_bdev_io->u.bdev.iovcnt = 3;
609 	g_bdev_io->u.bdev.num_blocks = num_blocks;
610 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
611 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
612 	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
613 	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
614 	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
615 	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;
616 
617 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
618 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
619 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
620 
621 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
622 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
623 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
624 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
625 
626 	for (i = 0; i < num_blocks; i++) {
627 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
628 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
629 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
630 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
631 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
632 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
633 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
634 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
635 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
636 	}
637 
638 	/* Multi block size write, multi-element, strange IOV makeup */
639 	num_blocks = 8;
640 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
641 	g_bdev_io->u.bdev.iovcnt = 4;
642 	g_bdev_io->u.bdev.num_blocks = num_blocks;
643 	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
644 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
645 	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
646 	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
647 	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
648 	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
649 	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
650 	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;
651 
652 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
653 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
654 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
655 
656 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
657 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
658 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
659 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
660 
661 	for (i = 0; i < num_blocks; i++) {
662 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
663 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
664 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
665 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
666 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
667 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
668 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
669 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
670 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
671 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
672 	}
673 	spdk_dma_free(g_io_ctx->cry_iov.iov_base);
674 }
675 
676 static void
677 test_passthru(void)
678 {
679 	/* Make sure these follow our completion callback; test both success and failure. */
680 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
681 	MOCK_SET(spdk_bdev_unmap_blocks, 0);
682 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
683 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
684 	MOCK_SET(spdk_bdev_unmap_blocks, -1);
685 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
686 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
687 	MOCK_CLEAR(spdk_bdev_unmap_blocks);
688 
689 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
690 	MOCK_SET(spdk_bdev_flush_blocks, 0);
691 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
692 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
693 	MOCK_SET(spdk_bdev_flush_blocks, -1);
694 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
695 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
696 	MOCK_CLEAR(spdk_bdev_flush_blocks);
697 
698 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
699 	MOCK_SET(spdk_bdev_reset, 0);
700 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
701 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
702 	MOCK_SET(spdk_bdev_reset, -1);
703 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
704 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
705 	MOCK_CLEAR(spdk_bdev_reset);
706 
707 	/* We should never get a WZ command, since we report that we don't support it. */
708 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
709 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
710 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
711 }
712 
713 static void
714 test_initdrivers(void)
715 {
716 	int rc;
717 	static struct spdk_mempool *orig_mbuf_mp;
718 	static struct spdk_mempool *orig_session_mp;
719 
720 	/* No drivers available, not an error though */
721 	MOCK_SET(rte_eal_get_configuration, g_test_config);
722 	MOCK_SET(rte_cryptodev_count, 0);
723 	rc = vbdev_crypto_init_crypto_drivers();
724 	CU_ASSERT(rc == 0);
725 
726 	/* Test failure of DPDK dev init. */
727 	MOCK_SET(rte_cryptodev_count, 2);
728 	MOCK_SET(rte_vdev_init, -1);
729 	rc = vbdev_crypto_init_crypto_drivers();
730 	CU_ASSERT(rc == -EINVAL);
731 	MOCK_SET(rte_vdev_init, 0);
732 
733 	/* Can't create session pool. */
734 	MOCK_SET(spdk_mempool_create, NULL);
735 	orig_mbuf_mp = g_mbuf_mp;
736 	orig_session_mp = g_session_mp;
737 	rc = vbdev_crypto_init_crypto_drivers();
738 	g_mbuf_mp = orig_mbuf_mp;
739 	g_session_mp = orig_session_mp;
740 	CU_ASSERT(rc == -ENOMEM);
741 	MOCK_CLEAR(spdk_mempool_create);
742 
743 	/* Can't create op pool. These tests will alloc and free our g_mbuf_mp,
744 	 * so save it off here and restore it after each test is over.
745 	 */
746 	orig_mbuf_mp = g_mbuf_mp;
747 	orig_session_mp = g_session_mp;
748 	MOCK_SET(rte_crypto_op_pool_create, NULL);
749 	rc = vbdev_crypto_init_crypto_drivers();
750 	g_mbuf_mp = orig_mbuf_mp;
751 	g_session_mp = orig_session_mp;
752 	CU_ASSERT(rc == -ENOMEM);
753 	MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
754 
755 	/* Test failure of the "resources are sufficient" check. */
756 	orig_mbuf_mp = g_mbuf_mp;
757 	orig_session_mp = g_session_mp;
758 	rc = vbdev_crypto_init_crypto_drivers();
759 	g_mbuf_mp = orig_mbuf_mp;
760 	g_session_mp = orig_session_mp;
761 	CU_ASSERT(rc == -EINVAL);
762 
763 	/* Test crypto dev configure failure. */
764 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
765 	MOCK_SET(rte_cryptodev_info_get, 1);
766 	MOCK_SET(rte_cryptodev_configure, -1);
767 	orig_mbuf_mp = g_mbuf_mp;
768 	orig_session_mp = g_session_mp;
769 	rc = vbdev_crypto_init_crypto_drivers();
770 	g_mbuf_mp = orig_mbuf_mp;
771 	g_session_mp = orig_session_mp;
772 	MOCK_SET(rte_cryptodev_configure, 0);
773 	CU_ASSERT(rc == -EINVAL);
774 
775 	/* Test failure of qp setup. */
776 	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
777 	orig_mbuf_mp = g_mbuf_mp;
778 	orig_session_mp = g_session_mp;
779 	rc = vbdev_crypto_init_crypto_drivers();
780 	g_mbuf_mp = orig_mbuf_mp;
781 	g_session_mp = orig_session_mp;
782 	CU_ASSERT(rc == -EINVAL);
783 	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
784 
785 	/* Test failure of dev start. */
786 	MOCK_SET(rte_cryptodev_start, -1);
787 	orig_mbuf_mp = g_mbuf_mp;
788 	orig_session_mp = g_session_mp;
789 	rc = vbdev_crypto_init_crypto_drivers();
790 	g_mbuf_mp = orig_mbuf_mp;
791 	g_session_mp = orig_session_mp;
792 	CU_ASSERT(rc == -EINVAL);
793 	MOCK_SET(rte_cryptodev_start, 0);
794 
795 	/* Test happy path. */
796 	rc = vbdev_crypto_init_crypto_drivers();
797 	CU_ASSERT(rc == 0);
798 }
799 
800 static void
801 test_crypto_op_complete(void)
802 {
803 	/* Make sure completion code respects failure. */
804 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
805 	g_completion_called = false;
806 	_crypto_operation_complete(g_bdev_io);
807 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
808 	CU_ASSERT(g_completion_called == true);
809 
810 	/* Test read completion. */
811 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
812 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
813 	g_completion_called = false;
814 	_crypto_operation_complete(g_bdev_io);
815 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
816 	CU_ASSERT(g_completion_called == true);
817 
818 	/* Test write completion success. */
819 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
820 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
821 	g_completion_called = false;
822 	MOCK_SET(spdk_bdev_writev_blocks, 0);
823 	/* Code under test will free this; if not, ASAN will complain. */
824 	g_io_ctx->cry_iov.iov_base = spdk_dma_malloc(16, 0x10, NULL);
825 	_crypto_operation_complete(g_bdev_io);
826 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
827 	CU_ASSERT(g_completion_called == true);
828 
829 	/* Test write completion failed. */
830 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
831 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
832 	g_completion_called = false;
833 	MOCK_SET(spdk_bdev_writev_blocks, -1);
834 	/* Code under test will free this; if not, ASAN will complain. */
835 	g_io_ctx->cry_iov.iov_base = spdk_dma_malloc(16, 0x10, NULL);
836 	_crypto_operation_complete(g_bdev_io);
837 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
838 	CU_ASSERT(g_completion_called == true);
839 
840 	/* Test bogus type for this completion. */
841 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
842 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
843 	g_completion_called = false;
844 	_crypto_operation_complete(g_bdev_io);
845 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
846 	CU_ASSERT(g_completion_called == true);
847 }
848 
849 static void
850 test_supported_io(void)
851 {
852 	void *ctx = NULL;
853 	bool rc = true;
854 
855 	/* Make sure we always report false for WZ; we need the bdev layer to
856 	 * send real 0's so we can encrypt/decrypt them.
857 	 */
858 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
859 	CU_ASSERT(rc == false);
860 }
861 
862 int
863 main(int argc, char **argv)
864 {
865 	CU_pSuite	suite = NULL;
866 	unsigned int	num_failures;
867 
868 	if (CU_initialize_registry() != CUE_SUCCESS) {
869 		return CU_get_error();
870 	}
871 
872 	suite = CU_add_suite("crypto", test_setup, test_cleanup);
873 	if (suite == NULL) {
874 		CU_cleanup_registry();
875 		return CU_get_error();
876 	}
877 
878 	if (CU_add_test(suite, "test_error_paths",
879 			test_error_paths) == NULL ||
880 	    CU_add_test(suite, "test_simple_write",
881 			test_simple_write) == NULL ||
882 	    CU_add_test(suite, "test_simple_read",
883 			test_simple_read) == NULL ||
884 	    CU_add_test(suite, "test_large_rw",
885 			test_large_rw) == NULL ||
886 	    CU_add_test(suite, "test_dev_full",
887 			test_dev_full) == NULL ||
888 	    CU_add_test(suite, "test_crazy_rw",
889 			test_crazy_rw) == NULL ||
890 	    CU_add_test(suite, "test_passthru",
891 			test_passthru) == NULL ||
892 	    CU_add_test(suite, "test_initdrivers",
893 			test_initdrivers) == NULL ||
894 	    CU_add_test(suite, "test_crypto_op_complete",
895 			test_crypto_op_complete) == NULL ||
896 	    CU_add_test(suite, "test_supported_io",
897 			test_supported_io) == NULL
898 	   ) {
899 		CU_cleanup_registry();
900 		return CU_get_error();
901 	}
902 
903 	CU_basic_set_mode(CU_BRM_VERBOSE);
904 	CU_basic_run_tests();
905 	num_failures = CU_get_number_of_failures();
906 	CU_cleanup_registry();
907 	return num_failures;
908 }
909