xref: /spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c (revision 5d0f262073248d5a8e349fbfb48fad2fe00387d9)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/test_env.c"
37 #include "spdk_internal/mock.h"
38 #include "unit/lib/json_mock.c"
39 
40 /* These rte_ headers are our local copies of the DPDK headers, hacked to mock some
41  * of the functions defined in them that can't be mocked with our mock library.
42  */
43 #include "rte_crypto.h"
44 #include "rte_cryptodev.h"
45 DEFINE_STUB_V(rte_crypto_op_free, (struct rte_crypto_op *op));
46 #include "bdev/crypto/vbdev_crypto.c"
47 
48 /* SPDK stubs */
49 DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
50 	    (struct spdk_conf *cp, const char *name), NULL);
51 DEFINE_STUB(spdk_conf_section_get_nval, char *,
52 	    (struct spdk_conf_section *sp, const char *key, int idx), NULL);
53 DEFINE_STUB(spdk_conf_section_get_nmval, char *,
54 	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
55 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
56 DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
57 DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
58 		enum spdk_bdev_io_type io_type), 0);
59 DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
60 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
61 DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
62 DEFINE_STUB(spdk_env_get_current_core, uint32_t, (void), 0);
63 DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
64 DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
65 				     void *cb_arg));
66 DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
67 				  spdk_bdev_remove_cb_t remove_cb,
68 				  void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
69 DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
70 		struct spdk_bdev_module *module), 0);
71 DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
72 DEFINE_STUB(spdk_vbdev_register, int, (struct spdk_bdev *vbdev, struct spdk_bdev **base_bdevs,
73 				       int base_bdev_count), 0);
74 DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
75 DEFINE_STUB(spdk_env_get_socket_id, uint32_t, (uint32_t core), 0);
76 
77 /* DPDK stubs */
78 DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
79 DEFINE_STUB(rte_eal_get_configuration, struct rte_config *, (void), NULL);
80 DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
81 DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
82 DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
83 	    (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
84 	     unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
85 DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
86 DEFINE_STUB(rte_cryptodev_socket_id, int, (uint8_t dev_id), 0);
87 DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
88 DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
89 		const struct rte_cryptodev_qp_conf *qp_conf,
90 		int socket_id, struct rte_mempool *session_pool), 0);
91 DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
92 DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
93 DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
94 	    (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
95 DEFINE_STUB(rte_cryptodev_sym_session_clear, int, (uint8_t dev_id,
96 		struct rte_cryptodev_sym_session *sess), 0);
97 DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
98 DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
99 		struct rte_cryptodev_sym_session *sess,
100 		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
101 DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
102 void __attribute__((noreturn)) __rte_panic(const char *funcname, const char *format, ...)
103 {
104 	abort();
105 }
106 struct rte_mempool_ops_table rte_mempool_ops_table;
107 struct rte_cryptodev *rte_cryptodevs;
108 __thread unsigned per_lcore__lcore_id = 0;
109 
110 /* global vars and setup/cleanup functions used for all test functions */
111 struct spdk_bdev_io *g_bdev_io;
112 struct crypto_bdev_io *g_io_ctx;
113 struct crypto_io_channel *g_crypto_ch;
114 struct spdk_io_channel *g_io_ch;
115 struct vbdev_dev g_device;
116 struct vbdev_crypto g_crypto_bdev;
117 struct rte_config *g_test_config;
118 struct device_qp g_dev_qp;
119 
120 #define MAX_TEST_BLOCKS 8192
121 struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
122 struct rte_crypto_op *g_test_dequeued_ops[MAX_TEST_BLOCKS];
123 struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];
124 
125 /* These globals are externs in our local rte_ header files so the tests can
126  * control the behavior of the mocked functions.
127  */
128 uint16_t g_dequeue_mock;
129 uint16_t g_enqueue_mock;
130 unsigned ut_rte_crypto_op_bulk_alloc;
131 int ut_rte_crypto_op_attach_sym_session = 0;
132 int ut_rte_cryptodev_info_get = 0;
133 bool ut_rte_cryptodev_info_get_mocked = false;
134 
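/* Mocked by hand rather than with DEFINE_STUB so that tests can control the
 * reported max_nb_queue_pairs via ut_rte_cryptodev_info_get.
 */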
135 void
136 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
137 {
138 	dev_info->max_nb_queue_pairs = ut_rte_cryptodev_info_get;
139 }
140 
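/* Simply return the dev_id as the private session size. */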
141 unsigned int
142 rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
143 {
144 	return (unsigned int)dev_id;
145 }
146 
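/* Mock this to immediately invoke the callback with our global channel and
 * bdev_io rather than actually getting a buffer.
 */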
147 void
148 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
149 {
150 	cb(g_io_ch, g_bdev_io);
151 }
152 
153 /* Mock these functions to call the callback and then return the value we require */
154 int ut_spdk_bdev_readv_blocks = 0;
155 bool ut_spdk_bdev_readv_blocks_mocked = false;
156 int
157 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
158 		       struct iovec *iov, int iovcnt,
159 		       uint64_t offset_blocks, uint64_t num_blocks,
160 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
161 {
162 	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
163 	return ut_spdk_bdev_readv_blocks;
164 }
165 
166 int ut_spdk_bdev_writev_blocks = 0;
167 bool ut_spdk_bdev_writev_blocks_mocked = false;
168 int
169 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
170 			struct iovec *iov, int iovcnt,
171 			uint64_t offset_blocks, uint64_t num_blocks,
172 			spdk_bdev_io_completion_cb cb, void *cb_arg)
173 {
174 	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
175 	return ut_spdk_bdev_writev_blocks;
176 }
177 
178 int ut_spdk_bdev_unmap_blocks = 0;
179 bool ut_spdk_bdev_unmap_blocks_mocked = false;
180 int
181 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
182 		       uint64_t offset_blocks, uint64_t num_blocks,
183 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
184 {
185 	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
186 	return ut_spdk_bdev_unmap_blocks;
187 }
188 
189 int ut_spdk_bdev_flush_blocks = 0;
190 bool ut_spdk_bdev_flush_blocks_mocked = false;
191 int
192 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
193 		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
194 		       void *cb_arg)
195 {
196 	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
197 	return ut_spdk_bdev_flush_blocks;
198 }
199 
200 int ut_spdk_bdev_reset = 0;
201 bool ut_spdk_bdev_reset_mocked = false;
202 int
203 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
204 		spdk_bdev_io_completion_cb cb, void *cb_arg)
205 {
206 	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
207 	return ut_spdk_bdev_reset;
208 }
209 
210 bool g_completion_called = false;
211 void
212 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
213 {
214 	bdev_io->internal.status = status;
215 	g_completion_called = true;
216 }
217 
218 /* Used in testing device full condition */
219 static inline uint16_t
220 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
221 			    struct rte_crypto_op **ops, uint16_t nb_ops)
222 {
223 	int i;
224 
225 	CU_ASSERT(nb_ops > 0);
226 
227 	for (i = 0; i < nb_ops; i++) {
228 		/* Use this so-far empty array of pointers to store the
229 		 * enqueued operations for assertion in the dev_full test.
230 		 */
231 		g_test_dev_full_ops[i] = *ops++;
232 	}
233 
234 	return g_enqueue_mock;
235 }
236 
237 /* This is pretty ugly, but in order to complete an IO via the
238  * poller in the submit path, the first call to this function needs
239  * to return the dequeued value and also decrement it.  The subsequent
240  * call needs to return 0 to indicate to the caller that there are
241  * no more IOs to drain.
242  */
243 int g_test_overflow = 0;
244 static inline uint16_t
245 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
246 			    struct rte_crypto_op **ops, uint16_t nb_ops)
247 {
248 	CU_ASSERT(nb_ops > 0);
249 
250 	/* A crypto device can be full on enqueue; in that case the driver drains
251 	 * the device by calling the poller until it's empty, then submits the
252 	 * remaining crypto ops.
253 	 */
254 	if (g_test_overflow) {
255 		if (g_dequeue_mock == 0) {
256 			return 0;
257 		}
258 		*ops = g_test_crypto_ops[g_enqueue_mock];
259 		(*ops)->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
260 		g_dequeue_mock -= 1;
261 	}
262 	return (g_dequeue_mock + 1);
263 }
264 
265 /* Instead of allocating real memory, assign the allocations to our
266  * test array for assertion in tests.
267  */
268 static inline unsigned
269 rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
270 			 enum rte_crypto_op_type type,
271 			 struct rte_crypto_op **ops, uint16_t nb_ops)
272 {
273 	int i;
274 
275 	for (i = 0; i < nb_ops; i++) {
276 		*ops++ = g_test_crypto_ops[i];
277 	}
278 	return ut_rte_crypto_op_bulk_alloc;
279 }
280 
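/* No-op; the tests return mbufs to the pool themselves via spdk_mempool_put(). */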
281 static __rte_always_inline void
282 rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
283 		     unsigned int n)
284 {
285 	return;
286 }
287 
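/* The tests don't need any mempool private data. */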
288 static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
289 {
290 	return NULL;
291 }
292 
293 
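/* Return whatever the test configured in ut_rte_crypto_op_attach_sym_session. */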
294 static inline int
295 rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
296 				 struct rte_cryptodev_sym_session *sess)
297 {
298 	return ut_rte_crypto_op_attach_sym_session;
299 }
300 
301 /* Global setup for all tests; prepares the state shared by every test function. */
302 static int
303 test_setup(void)
304 {
305 	int i;
306 
307 	/* Prepare essential variables for test routines */
308 	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
309 	g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
310 	g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
311 	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
312 	g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
313 	g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
314 	memset(&g_device, 0, sizeof(struct vbdev_dev));
315 	memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
316 	g_dev_qp.device = &g_device;
317 	g_io_ctx->crypto_ch = g_crypto_ch;
318 	g_io_ctx->crypto_bdev = &g_crypto_bdev;
319 	g_crypto_ch->device_qp = &g_dev_qp;
320 	g_test_config = calloc(1, sizeof(struct rte_config));
321 	g_test_config->lcore_count = 1;
322 	TAILQ_INIT(&g_crypto_ch->pending_cry_ios);
323 
324 	/* Allocate a real mbuf pool so we can test error paths */
325 	g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
326 					SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
327 					SPDK_ENV_SOCKET_ID_ANY);
328 
329 	/* Instead of allocating real rte mempools for these, it's easier, and provides
330 	 * the same coverage, to just calloc them here.
331 	 */
332 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
333 		g_test_crypto_ops[i] = calloc(1, sizeof(struct rte_crypto_op) +
334 					      sizeof(struct rte_crypto_sym_op));
335 		g_test_dequeued_ops[i] = calloc(1, sizeof(struct rte_crypto_op) +
336 						sizeof(struct rte_crypto_sym_op));
337 	}
338 	return 0;
339 }
340 
341 /* Global teardown for all tests */
342 static int
343 test_cleanup(void)
344 {
345 	int i;
346 
347 	free(g_test_config);
348 	spdk_mempool_free(g_mbuf_mp);
349 	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
350 		free(g_test_crypto_ops[i]);
351 		free(g_test_dequeued_ops[i]);
352 	}
353 	free(g_bdev_io->u.bdev.iovs);
354 	free(g_bdev_io);
355 	free(g_io_ch);
356 	return 0;
357 }
358 
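/* Drive vbdev_crypto_submit_request() through its allocation and session setup
 * failure paths.
 */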
359 static void
360 test_error_paths(void)
361 {
362 	/* Single element block size write, just to test error paths
363 	 * in vbdev_crypto_submit_request().
364 	 */
365 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
366 	g_bdev_io->u.bdev.iovcnt = 1;
367 	g_bdev_io->u.bdev.num_blocks = 1;
368 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
369 	g_crypto_bdev.crypto_bdev.blocklen = 512;
370 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
371 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
372 
373 	/* test failure of spdk_mempool_get_bulk() */
374 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
375 	MOCK_SET(spdk_mempool_get, NULL);
376 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
377 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
378 
379 	/* same thing but switch to reads to test error path in _crypto_complete_io() */
380 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
381 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
382 	TAILQ_INSERT_TAIL(&g_crypto_ch->pending_cry_ios, g_bdev_io, module_link);
383 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
384 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
385 	/* Now with spdk_bdev_readv_blocks() failing */
386 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
387 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
388 	MOCK_SET(spdk_bdev_readv_blocks, -1);
389 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
390 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
391 	MOCK_SET(spdk_bdev_readv_blocks, 0);
392 	MOCK_CLEAR(spdk_mempool_get);
393 
394 	/* test failure of rte_crypto_op_bulk_alloc() */
395 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
396 	ut_rte_crypto_op_bulk_alloc = 0;
397 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
398 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
399 	ut_rte_crypto_op_bulk_alloc = 1;
400 
401 	/* test failure of rte_cryptodev_sym_session_create() */
402 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
403 	MOCK_SET(rte_cryptodev_sym_session_create, NULL);
404 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
405 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
406 	MOCK_SET(rte_cryptodev_sym_session_create, (struct rte_cryptodev_sym_session *)1);
407 
408 	/* test failure of rte_cryptodev_sym_session_init() */
409 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
410 	MOCK_SET(rte_cryptodev_sym_session_init, -1);
411 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
412 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
413 	MOCK_SET(rte_cryptodev_sym_session_init, 0);
414 
415 	/* test failure of rte_crypto_op_attach_sym_session() */
416 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
417 	ut_rte_crypto_op_attach_sym_session = -1;
418 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
419 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
420 	ut_rte_crypto_op_attach_sym_session = 0;
421 }
422 
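/* Submit a single 512 byte block write and verify the resulting crypto op,
 * source/destination mbufs, and the write metadata saved in the IO context.
 */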
423 static void
424 test_simple_write(void)
425 {
426 	/* Single element block size write */
427 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
428 	g_bdev_io->u.bdev.iovcnt = 1;
429 	g_bdev_io->u.bdev.num_blocks = 1;
430 	g_bdev_io->u.bdev.offset_blocks = 0;
431 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
432 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
433 	g_crypto_bdev.crypto_bdev.blocklen = 512;
434 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
435 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
436 
437 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
438 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
439 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
440 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
441 	CU_ASSERT(g_io_ctx->cry_iov.iov_len == 512);
442 	CU_ASSERT(g_io_ctx->cry_iov.iov_base != NULL);
443 	CU_ASSERT(g_io_ctx->cry_offset_blocks == 0);
444 	CU_ASSERT(g_io_ctx->cry_num_blocks == 1);
445 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
446 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
447 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
448 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
449 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
450 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
451 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
452 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);
453 
454 	spdk_dma_free(g_io_ctx->cry_iov.iov_base);
455 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
456 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
457 }
458 
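/* Submit a single 512 byte block read and verify the resulting crypto op;
 * no separate destination mbuf is expected for reads.
 */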
459 static void
460 test_simple_read(void)
461 {
462 	/* Single element block size read */
463 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
464 	g_bdev_io->u.bdev.iovcnt = 1;
465 	g_bdev_io->u.bdev.num_blocks = 1;
466 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
467 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
468 	g_crypto_bdev.crypto_bdev.blocklen = 512;
469 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
470 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
471 
472 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
473 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
474 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
475 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
476 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
477 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
478 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
479 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
480 	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
481 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
482 	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
483 
484 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
485 }
486 
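/* Submit a CRYPTO_MAX_IO sized read and then a write and verify that one
 * crypto op is built per block.
 */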
487 static void
488 test_large_rw(void)
489 {
490 	unsigned block_len = 512;
491 	unsigned num_blocks = CRYPTO_MAX_IO / block_len;
492 	unsigned io_len = block_len * num_blocks;
493 	unsigned i;
494 
495 	/* Multi block size read, multi-element */
496 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
497 	g_bdev_io->u.bdev.iovcnt = 1;
498 	g_bdev_io->u.bdev.num_blocks = num_blocks;
499 	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
500 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
501 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
502 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
503 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
504 
505 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
506 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
507 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
508 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
509 
510 	for (i = 0; i < num_blocks; i++) {
511 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
512 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
513 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
514 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
515 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
516 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
517 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
518 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
519 	}
520 
521 	/* Multi block size write, multi-element */
522 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
523 	g_bdev_io->u.bdev.iovcnt = 1;
524 	g_bdev_io->u.bdev.num_blocks = num_blocks;
525 	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
526 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
527 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
528 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
529 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
530 
531 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
532 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
533 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
534 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
535 
536 	for (i = 0; i < num_blocks; i++) {
537 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
538 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
539 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
540 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
541 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
542 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
543 		CU_ASSERT(g_io_ctx->cry_iov.iov_len == io_len);
544 		CU_ASSERT(g_io_ctx->cry_iov.iov_base != NULL);
545 		CU_ASSERT(g_io_ctx->cry_offset_blocks == 0);
546 		CU_ASSERT(g_io_ctx->cry_num_blocks == num_blocks);
547 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
548 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
549 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
550 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
551 	}
552 	spdk_dma_free(g_io_ctx->cry_iov.iov_base);
553 }
554 
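/* Exercise the device full condition: only one of the two crypto ops is accepted
 * on enqueue, so the driver has to drain the device via the poller before
 * submitting the rest.
 */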
555 static void
556 test_dev_full(void)
557 {
558 	unsigned block_len = 512;
559 	unsigned num_blocks = 2;
560 	unsigned io_len = block_len * num_blocks;
561 	unsigned i;
562 
563 	g_test_overflow = 1;
564 
565 	/* Multi block size read, multi-element */
566 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
567 	g_bdev_io->u.bdev.iovcnt = 1;
568 	g_bdev_io->u.bdev.num_blocks = num_blocks;
569 	g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
570 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_dev_full;
571 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
572 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
573 	g_enqueue_mock = g_dequeue_mock = 1;
574 	ut_rte_crypto_op_bulk_alloc = num_blocks;
575 
576 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
577 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
578 
579 	/* This test only completes one of the 2 IOs (in the drain path) */
580 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
581 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
582 
583 	for (i = 0; i < num_blocks; i++) {
584 		/* One of the src_mbufs was freed because of the device full condition so
585 		 * we can't assert its value here.
586 		 */
587 		CU_ASSERT(g_test_dev_full_ops[i]->sym->cipher.data.length == block_len);
588 		CU_ASSERT(g_test_dev_full_ops[i]->sym->cipher.data.offset == 0);
589 		CU_ASSERT(g_test_dev_full_ops[i]->sym->m_src == g_test_dev_full_ops[i]->sym->m_src);
590 		CU_ASSERT(g_test_dev_full_ops[i]->sym->m_dst == NULL);
591 	}
592 
593 	/* Only one of the 2 blocks in the test was freed on completion by design, so
594 	 * we need to free the other one here.
595 	 */
596 	spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
597 	g_test_overflow = 0;
598 }
599 
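/* Reads and writes described by multiple IOV elements whose sizes don't match
 * the block size.
 */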
600 static void
601 test_crazy_rw(void)
602 {
603 	unsigned block_len = 512;
604 	int num_blocks = 4;
605 	int i;
606 
607 	/* Multi block size read, multi-element, strange IOV makeup */
608 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
609 	g_bdev_io->u.bdev.iovcnt = 3;
610 	g_bdev_io->u.bdev.num_blocks = num_blocks;
611 	g_bdev_io->u.bdev.iovs[0].iov_len = 512;
612 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
613 	g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
614 	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
615 	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
616 	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;
617 
618 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
619 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
620 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
621 
622 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
623 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
624 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
625 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
626 
627 	for (i = 0; i < num_blocks; i++) {
628 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
629 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
630 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
631 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
632 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
633 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
634 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
635 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
636 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
637 	}
638 
639 	/* Multi block size write, multi-element, strange IOV makeup */
640 	num_blocks = 8;
641 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
642 	g_bdev_io->u.bdev.iovcnt = 4;
643 	g_bdev_io->u.bdev.num_blocks = num_blocks;
644 	g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
645 	g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
646 	g_bdev_io->u.bdev.iovs[1].iov_len = 512;
647 	g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
648 	g_bdev_io->u.bdev.iovs[2].iov_len = 512;
649 	g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
650 	g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
651 	g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;
652 
653 	g_crypto_bdev.crypto_bdev.blocklen = block_len;
654 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
655 	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
656 
657 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
658 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
659 	CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
660 	CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
661 
662 	for (i = 0; i < num_blocks; i++) {
663 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
664 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
665 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
666 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
667 		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
668 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
669 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
670 		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
671 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
672 		spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
673 	}
674 	spdk_dma_free(g_io_ctx->cry_iov.iov_base);
675 }
676 
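/* Verify that unmap and flush are passed through to the base bdev and that
 * write_zeroes fails as unsupported.
 */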
677 static void
678 test_passthru(void)
679 {
680 	/* Make sure these follow our completion callback; test both success and failure. */
681 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
682 	MOCK_SET(spdk_bdev_unmap_blocks, 0);
683 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
684 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
685 	MOCK_SET(spdk_bdev_unmap_blocks, -1);
686 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
687 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
688 	MOCK_CLEAR(spdk_bdev_unmap_blocks);
689 
690 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
691 	MOCK_SET(spdk_bdev_flush_blocks, 0);
692 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
693 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
694 	MOCK_SET(spdk_bdev_flush_blocks, -1);
695 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
696 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
697 	MOCK_CLEAR(spdk_bdev_flush_blocks);
698 
699 	/* We should never get a WRITE_ZEROES command because we report that we don't support it. */
700 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
701 	vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
702 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
703 }
704 
705 static void
706 test_reset(void)
707 {
708 	/* TODO: There are a few different ways to do this given that
709 	 * the code uses spdk_for_each_channel() to implement reset
710 	 * handling. Submitting w/o a UT for this function for now and
711 	 * will follow up with something shortly.
712 	 */
713 }
714 
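/* Walk vbdev_crypto_init_crypto_drivers() through its error paths and then the
 * happy path.
 */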
715 static void
716 test_initdrivers(void)
717 {
718 	int rc;
719 	static struct spdk_mempool *orig_mbuf_mp;
720 	static struct spdk_mempool *orig_session_mp;
721 
722 	/* No drivers available; not an error though */
723 	MOCK_SET(rte_eal_get_configuration, g_test_config);
724 	MOCK_SET(rte_cryptodev_count, 0);
725 	rc = vbdev_crypto_init_crypto_drivers();
726 	CU_ASSERT(rc == 0);
727 
728 	/* Test failure of DPDK dev init. */
729 	MOCK_SET(rte_cryptodev_count, 2);
730 	MOCK_SET(rte_vdev_init, -1);
731 	rc = vbdev_crypto_init_crypto_drivers();
732 	CU_ASSERT(rc == -EINVAL);
733 	MOCK_SET(rte_vdev_init, 0);
734 
735 	/* Can't create session pool. */
736 	MOCK_SET(spdk_mempool_create, NULL);
737 	orig_mbuf_mp = g_mbuf_mp;
738 	orig_session_mp = g_session_mp;
739 	rc = vbdev_crypto_init_crypto_drivers();
740 	g_mbuf_mp = orig_mbuf_mp;
741 	g_session_mp = orig_session_mp;
742 	CU_ASSERT(rc == -ENOMEM);
743 	MOCK_CLEAR(spdk_mempool_create);
744 
745 	/* Can't create op pool. These tests will alloc and free our g_mbuf_mp
746 	 * so save that off here and restore it after each test is over.
747 	 */
748 	orig_mbuf_mp = g_mbuf_mp;
749 	orig_session_mp = g_session_mp;
750 	MOCK_SET(rte_crypto_op_pool_create, NULL);
751 	rc = vbdev_crypto_init_crypto_drivers();
752 	g_mbuf_mp = orig_mbuf_mp;
753 	g_session_mp = orig_session_mp;
754 	CU_ASSERT(rc == -ENOMEM);
755 	MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
756 
757 	/* Test failure of the check that resources are sufficient. */
758 	orig_mbuf_mp = g_mbuf_mp;
759 	orig_session_mp = g_session_mp;
760 	rc = vbdev_crypto_init_crypto_drivers();
761 	g_mbuf_mp = orig_mbuf_mp;
762 	g_session_mp = orig_session_mp;
763 	CU_ASSERT(rc == -EINVAL);
764 
765 	/* Test crypto dev configure failure. */
766 	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
767 	MOCK_SET(rte_cryptodev_info_get, 1);
768 	MOCK_SET(rte_cryptodev_configure, -1);
769 	orig_mbuf_mp = g_mbuf_mp;
770 	orig_session_mp = g_session_mp;
771 	rc = vbdev_crypto_init_crypto_drivers();
772 	g_mbuf_mp = orig_mbuf_mp;
773 	g_session_mp = orig_session_mp;
774 	MOCK_SET(rte_cryptodev_configure, 0);
775 	CU_ASSERT(rc == -EINVAL);
776 
777 	/* Test failure of qp setup. */
778 	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
779 	orig_mbuf_mp = g_mbuf_mp;
780 	orig_session_mp = g_session_mp;
781 	rc = vbdev_crypto_init_crypto_drivers();
782 	g_mbuf_mp = orig_mbuf_mp;
783 	g_session_mp = orig_session_mp;
784 	CU_ASSERT(rc == -EINVAL);
785 	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
786 
787 	/* Test failure of dev start. */
788 	MOCK_SET(rte_cryptodev_start, -1);
789 	orig_mbuf_mp = g_mbuf_mp;
790 	orig_session_mp = g_session_mp;
791 	rc = vbdev_crypto_init_crypto_drivers();
792 	g_mbuf_mp = orig_mbuf_mp;
793 	g_session_mp = orig_session_mp;
794 	CU_ASSERT(rc == -EINVAL);
795 	MOCK_SET(rte_cryptodev_start, 0);
796 
797 	/* Test happy path. */
798 	rc = vbdev_crypto_init_crypto_drivers();
799 	CU_ASSERT(rc == 0);
800 }
801 
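/* Exercise _crypto_operation_complete() for a failed IO, reads, writes (success
 * and failure), and an unexpected IO type.
 */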
802 static void
803 test_crypto_op_complete(void)
804 {
805 	/* Make sure completion code respects failure. */
806 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
807 	g_completion_called = false;
808 	_crypto_operation_complete(g_bdev_io);
809 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
810 	CU_ASSERT(g_completion_called == true);
811 
812 	/* Test read completion. */
813 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
814 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
815 	g_completion_called = false;
816 	_crypto_operation_complete(g_bdev_io);
817 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
818 	CU_ASSERT(g_completion_called == true);
819 
820 	/* Test write completion success. */
821 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
822 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
823 	g_completion_called = false;
824 	MOCK_SET(spdk_bdev_writev_blocks, 0);
825 	/* Code under test will free this; if it doesn't, ASAN will complain. */
826 	g_io_ctx->cry_iov.iov_base = spdk_dma_malloc(16, 0x10, NULL);
827 	_crypto_operation_complete(g_bdev_io);
828 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
829 	CU_ASSERT(g_completion_called == true);
830 
831 	/* Test write completion failed. */
832 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
833 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
834 	g_completion_called = false;
835 	MOCK_SET(spdk_bdev_writev_blocks, -1);
836 	/* Code under test will free this; if it doesn't, ASAN will complain. */
837 	g_io_ctx->cry_iov.iov_base = spdk_dma_malloc(16, 0x10, NULL);
838 	_crypto_operation_complete(g_bdev_io);
839 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
840 	CU_ASSERT(g_completion_called == true);
841 
842 	/* Test bogus type for this completion. */
843 	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
844 	g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
845 	g_completion_called = false;
846 	_crypto_operation_complete(g_bdev_io);
847 	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
848 	CU_ASSERT(g_completion_called == true);
849 }
850 
851 static void
852 test_supported_io(void)
853 {
854 	void *ctx = NULL;
855 	bool rc = true;
856 
857 	/* Make sure we always report false for WRITE_ZEROES; we need the bdev layer to
858 	 * send real zeroes so we can encrypt/decrypt them.
859 	 */
860 	rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
861 	CU_ASSERT(rc == false);
862 }
863 
864 int
865 main(int argc, char **argv)
866 {
867 	CU_pSuite	suite = NULL;
868 	unsigned int	num_failures;
869 
870 	if (CU_initialize_registry() != CUE_SUCCESS) {
871 		return CU_get_error();
872 	}
873 
874 	suite = CU_add_suite("crypto", test_setup, test_cleanup);
875 	if (suite == NULL) {
876 		CU_cleanup_registry();
877 		return CU_get_error();
878 	}
879 
880 	if (CU_add_test(suite, "test_error_paths",
881 			test_error_paths) == NULL ||
882 	    CU_add_test(suite, "test_simple_write",
883 			test_simple_write) == NULL ||
884 	    CU_add_test(suite, "test_simple_read",
885 			test_simple_read) == NULL ||
886 	    CU_add_test(suite, "test_large_rw",
887 			test_large_rw) == NULL ||
888 	    CU_add_test(suite, "test_dev_full",
889 			test_dev_full) == NULL ||
890 	    CU_add_test(suite, "test_crazy_rw",
891 			test_crazy_rw) == NULL ||
892 	    CU_add_test(suite, "test_passthru",
893 			test_passthru) == NULL ||
894 	    CU_add_test(suite, "test_initdrivers",
895 			test_initdrivers) == NULL ||
896 	    CU_add_test(suite, "test_crypto_op_complete",
897 			test_crypto_op_complete) == NULL ||
898 	    CU_add_test(suite, "test_supported_io",
899 			test_supported_io) == NULL ||
900 	    CU_add_test(suite, "test_reset",
901 			test_reset) == NULL
902 	   ) {
903 		CU_cleanup_registry();
904 		return CU_get_error();
905 	}
906 
907 	CU_basic_set_mode(CU_BRM_VERBOSE);
908 	CU_basic_run_tests();
909 	num_failures = CU_get_number_of_failures();
910 	CU_cleanup_registry();
911 	return num_failures;
912 }
913