/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

#include "spdk_cunit.h"

#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"
#include "common/lib/ut_multithread.c"

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_version.h>

#define MAX_TEST_BLOCKS 8192
struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];

uint16_t g_dequeue_mock;
uint16_t g_enqueue_mock;
unsigned ut_rte_crypto_op_bulk_alloc;
int ut_rte_crypto_op_attach_sym_session = 0;
#define MOCK_INFO_GET_1QP_AESNI 0
#define MOCK_INFO_GET_1QP_QAT 1
#define MOCK_INFO_GET_1QP_MLX5 2
#define MOCK_INFO_GET_1QP_BOGUS_PMD 3
int ut_rte_cryptodev_info_get = 0;
bool ut_rte_cryptodev_info_get_mocked = false;

void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void
mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
	spdk_mempool_put_bulk((struct spdk_mempool *)m[0]->pool, (void **)m, cnt);
}

void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void
mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
	spdk_mempool_put((struct spdk_mempool *)m->pool, (void *)m);
}

void
rte_mempool_free(struct rte_mempool *mp)
{
	spdk_mempool_free((struct spdk_mempool *)mp);
}

int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
				unsigned count);
#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
int
mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
			    unsigned count)
{
	int rc;

	rc = spdk_mempool_get_bulk((struct spdk_mempool *)pool, (void **)mbufs, count);
	if (rc) {
		return rc;
	}
	for (unsigned i = 0; i < count; i++) {
		rte_pktmbuf_reset(mbufs[i]);
		mbufs[i]->pool = pool;
	}
	return rc;
}

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
				      uint32_t elt_size, uint32_t cache_size,
				      uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, nb_elts, elt_size + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
			uint16_t priv_size, uint16_t data_room_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, sizeof(struct rte_mbuf) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create(name, n, elt_size + private_data_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

DEFINE_RETURN_MOCK(rte_crypto_op_pool_create, struct rte_mempool *);
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
			  unsigned nb_elts, unsigned cache_size,
			  uint16_t priv_size, int socket_id)
{
	struct spdk_mempool *tmp;

	HANDLE_RETURN_MOCK(rte_crypto_op_pool_create);

	tmp = spdk_mempool_create(name, nb_elts,
				  sizeof(struct rte_crypto_op) + priv_size,
				  cache_size, socket_id);

	return (struct rte_mempool *)tmp;
}

/* These functions are defined as static inline in DPDK, so we can't
 * mock them straight away. We use defines to redirect them into
 * our custom functions.
 */
static bool g_resubmit_test = false;
#define rte_cryptodev_enqueue_burst mock_rte_cryptodev_enqueue_burst
static inline uint16_t
mock_rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < nb_ops; i++) {
		/* Use this so-far empty array of pointers to store
		 * enqueued operations for assertion in the dev_full test.
		 */
		g_test_dev_full_ops[i] = *ops++;
		if (g_resubmit_test == true) {
			CU_ASSERT(g_test_dev_full_ops[i] == (void *)0xDEADBEEF);
		}
	}

	return g_enqueue_mock;
}

#define rte_cryptodev_dequeue_burst mock_rte_cryptodev_dequeue_burst
static inline uint16_t
mock_rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
				 struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	CU_ASSERT(nb_ops > 0);

	for (i = 0; i < g_dequeue_mock; i++) {
		*ops++ = g_test_crypto_ops[i];
	}

	return g_dequeue_mock;
}

/* Instead of allocating real memory, assign the allocations to our
 * test array for assertion in tests.
 */
#define rte_crypto_op_bulk_alloc mock_rte_crypto_op_bulk_alloc
static inline unsigned
mock_rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
			      enum rte_crypto_op_type type,
			      struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i;

	for (i = 0; i < nb_ops; i++) {
		*ops++ = g_test_crypto_ops[i];
	}
	return ut_rte_crypto_op_bulk_alloc;
}

#define rte_mempool_put_bulk mock_rte_mempool_put_bulk
static __rte_always_inline void
mock_rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
			  unsigned int n)
{
	return;
}

#define rte_crypto_op_attach_sym_session mock_rte_crypto_op_attach_sym_session
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op, void *sess)
#else
static inline int
mock_rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
				      struct rte_cryptodev_sym_session *sess)
#endif
{
	return ut_rte_crypto_op_attach_sym_session;
}

#define rte_lcore_count mock_rte_lcore_count
static inline unsigned
mock_rte_lcore_count(void)
{
	return 1;
}
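
/* Pull in the file under test. All the #define redirections and mocks above
 * must be in place before this include, so that calls made inside
 * accel_dpdk_cryptodev.c resolve to our test doubles rather than the real
 * (often static inline) DPDK implementations.
 */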
#include "accel/dpdk_cryptodev/accel_dpdk_cryptodev.c"

/* accel stubs */
DEFINE_STUB_V(spdk_accel_task_complete, (struct spdk_accel_task *task, int status));
DEFINE_STUB_V(spdk_accel_module_finish, (void));
DEFINE_STUB_V(spdk_accel_module_list_add, (struct spdk_accel_module_if *accel_module));

/* DPDK stubs */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id), 0);
DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_cryptodev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
DEFINE_STUB(rte_cryptodev_sym_session_create, void *,
	    (uint8_t dev_id, struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), (void *)1);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (uint8_t dev_id, void *sess), 0);
#else
DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
	    (struct rte_mempool *mempool), (void *)1);
DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
#endif

struct rte_cryptodev *rte_cryptodevs;

/* global vars and setup/cleanup functions used for all test functions */
struct spdk_io_channel *g_io_ch;
struct accel_dpdk_cryptodev_io_channel *g_crypto_ch;
struct accel_dpdk_cryptodev_device g_aesni_crypto_dev;
struct accel_dpdk_cryptodev_qp g_aesni_qp;
struct accel_dpdk_cryptodev_key_handle g_key_handle;
struct accel_dpdk_cryptodev_key_priv g_key_priv;
struct spdk_accel_crypto_key g_key;

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	dev_info->max_nb_queue_pairs = 1;
	if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_AESNI) {
		dev_info->driver_name = g_driver_names[0];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_QAT) {
		dev_info->driver_name = g_driver_names[1];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_MLX5) {
		dev_info->driver_name = g_driver_names[2];
	} else if (ut_rte_cryptodev_info_get == MOCK_INFO_GET_1QP_BOGUS_PMD) {
		dev_info->driver_name = "junk";
	}
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	return (unsigned int)dev_id;
}

/* Global setup for all tests that share a bunch of preparation... */
static int
test_setup(void)
{
	int i, rc;

	/* Prepare essential variables for test routines */
	g_io_ch = calloc(1, sizeof(*g_io_ch) + sizeof(struct accel_dpdk_cryptodev_io_channel));
	g_crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	TAILQ_INIT(&g_crypto_ch->queued_cry_ops);
	TAILQ_INIT(&g_crypto_ch->queued_tasks);

	g_aesni_crypto_dev.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	g_aesni_crypto_dev.qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
	TAILQ_INIT(&g_aesni_crypto_dev.qpairs);

	g_aesni_qp.device = &g_aesni_crypto_dev;
	g_crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = &g_aesni_qp;

	g_key_handle.device = &g_aesni_crypto_dev;
	g_key_priv.driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	g_key_priv.cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC;
	TAILQ_INIT(&g_key_priv.dev_keys);
	TAILQ_INSERT_TAIL(&g_key_priv.dev_keys, &g_key_handle, link);
	g_key.priv = &g_key_priv;
	g_key.module_if = &g_accel_dpdk_cryptodev_module;

	/* Allocate a real mbuf pool so we can test error paths */
	g_mbuf_mp = rte_pktmbuf_pool_create("mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS,
					    (unsigned)SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
	/* Instead of allocating real rte mempools for these, it's easier and provides the
	 * same coverage to just calloc them here.
	 */
	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		size_t size = ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_IV_LENGTH +
			      ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH;
		rc = posix_memalign((void **)&g_test_crypto_ops[i], 64, size);
		if (rc != 0) {
			assert(false);
		}
		memset(g_test_crypto_ops[i], 0,
		       ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH);
	}
	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	int i;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	for (i = 0; i < MAX_TEST_BLOCKS; i++) {
		free(g_test_crypto_ops[i]);
	}
	free(g_io_ch);
	return 0;
}

static void
test_error_paths(void)
{
	/* Single element block size encrypt, just to test error paths
	 * in accel_dpdk_cryptodev_submit_tasks() */
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 };
	struct iovec dst_iov = src_iov;
	struct accel_dpdk_cryptodev_task task = {};
	struct accel_dpdk_cryptodev_key_priv key_priv = {};
	struct spdk_accel_crypto_key key = {};
	int rc;

	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* case 1 - no crypto key */
	task.base.crypto_key = NULL;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
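	/* Restore the valid key before exercising the remaining failure cases. */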
	task.base.crypto_key = &g_key;

	/* case 2 - crypto key with wrong module_if */
	key_priv.driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
	key_priv.cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC;
	TAILQ_INIT(&key_priv.dev_keys);
	key.priv = &key_priv;
	key.module_if = (struct spdk_accel_module_if *) 0x1;
	task.base.crypto_key = &key;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	key.module_if = &g_accel_dpdk_cryptodev_module;

	/* case 3 - no key handle in the channel */
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.crypto_key = &g_key;

	/* case 4 - invalid op */
	task.base.op_code = ACCEL_OPC_COMPARE;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);
	task.base.op_code = ACCEL_OPC_ENCRYPT;

	/* case 5 - no entries in g_mbuf_mp */
	MOCK_SET(spdk_mempool_get, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == true);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == false);
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);
	MOCK_CLEAR(spdk_mempool_get);
	TAILQ_INIT(&g_crypto_ch->queued_tasks);

	/* case 6 - vtophys error in accel_dpdk_cryptodev_mbuf_attach_buf */
	MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EFAULT);
	MOCK_CLEAR(spdk_vtophys);
}

static void
test_simple_encrypt(void)
{
	struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }};
	struct iovec dst_iov = src_iov[0];
	struct accel_dpdk_cryptodev_task task = {};
	struct rte_mbuf *mbuf, *next;
	int rc, i;

	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* Inplace encryption */
	g_aesni_qp.num_enqueued_ops = 0;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* out-of-place encryption */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_submitted = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* out-of-place encryption, fragmented payload */
	g_aesni_qp.num_enqueued_ops = 0;
	task.base.s.iovcnt = 4;
	for (i = 0; i < 4; i++) {
		src_iov[i].iov_base = (void *)0xDEADBEEF + i * 128;
		src_iov[i].iov_len = 128;
	}
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == src_iov[0].iov_len);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base);
		CU_ASSERT(mbuf->data_len == src_iov[i].iov_len);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* Big logical block size, inplace encryption */
	src_iov[0].iov_len = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	dst_iov = src_iov[0];
	task.base.block_size = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	task.base.s.iovcnt = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == (char *)src_iov[0].iov_base + i * ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}

static void
test_simple_decrypt(void)
{
	struct iovec src_iov[4] = {[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 }};
	struct iovec dst_iov = src_iov[0];
	struct accel_dpdk_cryptodev_task task = {};
	struct rte_mbuf *mbuf, *next;
	int rc, i;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	/* Inplace decryption */
	g_aesni_qp.num_enqueued_ops = 0;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* out-of-place decryption */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_submitted = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == src_iov[0].iov_len);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);

	/* out-of-place decryption, fragmented payload */
	g_aesni_qp.num_enqueued_ops = 0;
	task.base.s.iovcnt = 4;
	for (i = 0; i < 4; i++) {
		src_iov[i].iov_base = (void *)0xDEADBEEF + i * 128;
		src_iov[i].iov_len = 128;
	}
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == src_iov[0].iov_len);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == src_iov[i].iov_base);
		CU_ASSERT(mbuf->data_len == src_iov[i].iov_len);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr == dst_iov.iov_base);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == dst_iov.iov_len);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_dst);
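
	/* A logical block larger than ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN cannot be
	 * described by a single mbuf, so the 4x max-length block below is expected
	 * to be split into a chain of four max-length mbufs.
	 */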
	/* Big logical block size, inplace decryption */
	src_iov[0].iov_len = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	dst_iov = src_iov[0];
	task.base.block_size = ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4;
	task.base.s.iovcnt = 1;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 1);
	mbuf = g_test_crypto_ops[0]->sym->m_src;
	SPDK_CU_ASSERT_FATAL(mbuf != NULL);
	CU_ASSERT(mbuf->buf_addr == src_iov[0].iov_base);
	CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
	mbuf = mbuf->next;
	for (i = 1; i < 4; i++) {
		SPDK_CU_ASSERT_FATAL(mbuf != NULL);
		CU_ASSERT(mbuf->buf_addr == (char *)src_iov[0].iov_base + i * ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		CU_ASSERT(mbuf->data_len == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
		next = mbuf->next;
		rte_pktmbuf_free(mbuf);
		mbuf = next;
	}
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN * 4);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);

	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}

static void
test_large_enc_dec(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	uint32_t block_len = 512;
	uint32_t num_blocks = ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * 2;
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = num_blocks * block_len };
	struct iovec dst_iov = src_iov;
	uint32_t iov_offset = ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE * block_len;
	uint32_t i;
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Test 1. Multi block size decryption, multi-element, inplace */
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == true);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Call accel_dpdk_cryptodev_process_task as if it were called by the completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == num_blocks);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + iov_offset +
			  (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Test 2. Multi block size decryption, multi-element, out-of-place */
	g_aesni_qp.num_enqueued_ops = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == false);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}

	/* Call accel_dpdk_cryptodev_process_task as if it were called by the completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == num_blocks);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + iov_offset +
			  (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov.iov_base + iov_offset +
			  (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}

	/* Test 3. Multi block size encryption, multi-element, inplace */
	g_aesni_qp.num_enqueued_ops = 0;
	dst_iov = src_iov;
	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.cryop_submitted = 0;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.inplace == true);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Call accel_dpdk_cryptodev_process_task as if it were called by the completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == num_blocks);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + iov_offset +
			  (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Test 4. Multi block size encryption, multi-element, out-of-place */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_submitted = 0;
	dst_iov.iov_base = (void *)0xFEEDBEEF;
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(task.inplace == false);
	CU_ASSERT(task.cryop_submitted == ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
	CU_ASSERT(task.cryop_total == num_blocks);
	CU_ASSERT(task.cryop_completed == 0);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov.iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}

	/* Call accel_dpdk_cryptodev_process_task as if it were called by the completion poller */
	g_aesni_qp.num_enqueued_ops = 0;
	task.cryop_completed = task.cryop_submitted;
	rc = accel_dpdk_cryptodev_process_task(g_crypto_ch, &task);

	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == num_blocks);
	CU_ASSERT(task.cryop_total == task.cryop_submitted);

	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov.iov_base + iov_offset +
			  (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr == dst_iov.iov_base + iov_offset +
			  (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->next == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_dst);
	}
}

static void
test_dev_full(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct accel_dpdk_cryptodev_queued_op *queued_op;
	struct rte_crypto_sym_op *sym_op;
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
	struct iovec dst_iov = src_iov;
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Two element block size decryption */
	g_aesni_qp.num_enqueued_ops = 0;
	g_enqueue_mock = g_dequeue_mock = 1;
	ut_rte_crypto_op_bulk_alloc = 2;

	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == 2);
	sym_op = g_test_crypto_ops[0]->sym;
	CU_ASSERT(sym_op->m_src->buf_addr == src_iov.iov_base);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task);
	CU_ASSERT(sym_op->m_dst == NULL);

	/* make sure one got queued and confirm its values */
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	queued_op = TAILQ_FIRST(&g_crypto_ch->queued_cry_ops);
	sym_op = queued_op->crypto_op->sym;
	TAILQ_REMOVE(&g_crypto_ch->queued_cry_ops, queued_op, link);
	CU_ASSERT(queued_op->task == &task);
	CU_ASSERT(queued_op->crypto_op == g_test_crypto_ops[1]);
	CU_ASSERT(sym_op->m_src->buf_addr == (void *)0xDEADBEEF + 512);
	CU_ASSERT(sym_op->m_src->data_len == 512);
	CU_ASSERT(sym_op->m_src->next == NULL);
	CU_ASSERT(sym_op->cipher.data.length == 512);
	CU_ASSERT(sym_op->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(sym_op->m_src, g_mbuf_offset, uint64_t *) == (uint64_t)&task);
	CU_ASSERT(sym_op->m_dst == NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
	rte_pktmbuf_free(g_test_crypto_ops[1]->sym->m_src);

	/* Non-busy reason for enqueue failure, all were rejected. */
	g_enqueue_mock = 0;
	g_aesni_qp.num_enqueued_ops = 0;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_ERROR;
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == -EINVAL);

	/* QP is full, task should be queued */
	g_aesni_qp.num_enqueued_ops = g_aesni_crypto_dev.qp_desc_nr;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks) == true);
	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);

	TAILQ_INIT(&g_crypto_ch->queued_tasks);
}

static void
test_crazy_rw(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct iovec src_iov[4] = {
		[0] = {.iov_base = (void *)0xDEADBEEF, .iov_len = 512 },
		[1] = {.iov_base = (void *)0xDEADBEEF + 512, .iov_len = 1024 },
		[2] = {.iov_base = (void *)0xDEADBEEF + 512 + 1024, .iov_len = 512 }
	};
	struct iovec *dst_iov = src_iov;
	uint32_t block_len = 512, num_blocks = 4, i;
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 3;
	task.base.s.iovs = src_iov;
	task.base.d.iovcnt = 3;
	task.base.d.iovs = dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;

	/* Multi block size read, single element, strange IOV makeup */
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
	g_aesni_qp.num_enqueued_ops = 0;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[0].iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}

	/* Multi block size write, single element, strange IOV makeup */
	num_blocks = 8;
	task.base.op_code = ACCEL_OPC_ENCRYPT;
	task.cryop_submitted = 0;
	task.base.s.iovcnt = 4;
	task.base.d.iovcnt = 4;
	task.base.s.iovs[0].iov_len = 2048;
	task.base.s.iovs[0].iov_base = (void *)0xDEADBEEF;
	task.base.s.iovs[1].iov_len = 512;
	task.base.s.iovs[1].iov_base = (void *)0xDEADBEEF + 2048;
	task.base.s.iovs[2].iov_len = 512;
	task.base.s.iovs[2].iov_base = (void *)0xDEADBEEF + 2048 + 512;
	task.base.s.iovs[3].iov_len = 1024;
	task.base.s.iovs[3].iov_base = (void *)0xDEADBEEF + 2048 + 512 + 512;
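
	/* 2048 + 512 + 512 + 1024 = 4096 bytes in total, i.e. 8 blocks of 512 bytes,
	 * so 8 crypto ops are expected from this 4-element iov.
	 */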
	g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
	g_aesni_qp.num_enqueued_ops = 0;

	rc = accel_dpdk_cryptodev_submit_tasks(g_io_ch, &task.base);
	CU_ASSERT(rc == 0);
	CU_ASSERT(task.cryop_submitted == num_blocks);

	for (i = 0; i < num_blocks; i++) {
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
		CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[i]->sym->m_src, g_mbuf_offset,
					     uint64_t *) == (uint64_t)&task);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == src_iov[0].iov_base + (i * block_len));
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
		CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
		rte_pktmbuf_free(g_test_crypto_ops[i]->sym->m_src);
	}
}

static void
init_cleanup(void)
{
	struct accel_dpdk_cryptodev_device *dev, *tmp;

	if (g_crypto_op_mp) {
		rte_mempool_free(g_crypto_op_mp);
		g_crypto_op_mp = NULL;
	}
	if (g_mbuf_mp) {
		rte_mempool_free(g_mbuf_mp);
		g_mbuf_mp = NULL;
	}
	if (g_session_mp) {
		rte_mempool_free(g_session_mp);
		g_session_mp = NULL;
	}
	if (g_session_mp_priv != NULL) {
		/* g_session_mp_priv may or may not be set depending on the DPDK version */
		rte_mempool_free(g_session_mp_priv);
		g_session_mp_priv = NULL;
	}

	TAILQ_FOREACH_SAFE(dev, &g_crypto_devices, link, tmp) {
		TAILQ_REMOVE(&g_crypto_devices, dev, link);
		accel_dpdk_cryptodev_release(dev);
	}

	spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, NULL);
}

static void
test_initdrivers(void)
{
	int rc;
	static struct rte_mempool *orig_mbuf_mp;
	static struct rte_mempool *orig_session_mp;
	static struct rte_mempool *orig_session_mp_priv;

	/* accel_dpdk_cryptodev_init calls spdk_io_device_register, so we need to have a thread */
	allocate_threads(1);
	set_thread(0);

	/* These tests will alloc and free our g_mbuf_mp,
	 * so save that off here and restore it after each test is over.
	 */
	orig_mbuf_mp = g_mbuf_mp;
	orig_session_mp = g_session_mp;
	orig_session_mp_priv = g_session_mp_priv;

	g_session_mp_priv = NULL;
	g_session_mp = NULL;
	g_mbuf_mp = NULL;

	/* No drivers available, not an error though */
	MOCK_SET(rte_cryptodev_count, 0);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);

	/* Can't create session pool. */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(spdk_mempool_create, NULL);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(spdk_mempool_create);
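
	/* rte_crypto_op_pool_create is wrapped with DEFINE_RETURN_MOCK/HANDLE_RETURN_MOCK
	 * near the top of this file, so MOCK_SET can force it to return NULL here.
	 */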
	/* Can't create op pool. */
	MOCK_SET(rte_crypto_op_pool_create, NULL);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -ENOMEM);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_CLEAR(rte_crypto_op_pool_create);

	/* Check that resources are not sufficient */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);

	/* Test crypto dev configure failure. */
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	MOCK_SET(rte_cryptodev_configure, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	MOCK_SET(rte_cryptodev_configure, 0);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test failure of qp setup. */
	MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_queue_pair_setup, 0);

	/* Test failure of dev start. */
	MOCK_SET(rte_cryptodev_start, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(g_session_mp_priv == NULL);
	MOCK_SET(rte_cryptodev_start, 0);

	/* Test bogus PMD */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_BOGUS_PMD);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_session_mp == NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test happy path QAT. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path AESNI. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_AESNI);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test happy path MLX5. */
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_MLX5);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	init_cleanup();
	CU_ASSERT(rc == 0);

	/* Test failure of DPDK dev init. By now it is no longer an error
	 * situation for the entire crypto framework.
	 */
	MOCK_SET(rte_cryptodev_count, 2);
	MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
	MOCK_SET(rte_vdev_init, -1);
	MOCK_CLEARED_ASSERT(spdk_mempool_create);
	MOCK_SET(rte_cryptodev_info_get, MOCK_INFO_GET_1QP_QAT);
	rc = accel_dpdk_cryptodev_init();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_mp != NULL);
	CU_ASSERT(g_session_mp != NULL);
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
	CU_ASSERT(g_session_mp_priv != NULL);
#endif
	init_cleanup();
	MOCK_SET(rte_vdev_init, 0);
	MOCK_CLEAR(rte_cryptodev_device_count_by_driver);

	/* restore our initial values. */
	g_mbuf_mp = orig_mbuf_mp;
	g_session_mp = orig_session_mp;
	g_session_mp_priv = orig_session_mp_priv;
	free_threads();
}

static void
test_supported_opcodes(void)
{
	bool rc = true;
	enum accel_opcode opc;

	for (opc = 0; opc < ACCEL_OPC_LAST; opc++) {
		rc = accel_dpdk_cryptodev_supports_opcode(opc);
		switch (opc) {
		case ACCEL_OPC_ENCRYPT:
		case ACCEL_OPC_DECRYPT:
			CU_ASSERT(rc == true);
			break;
		default:
			CU_ASSERT(rc == false);
		}
	}
}

static void
test_poller(void)
{
	struct accel_dpdk_cryptodev_task task = {};
	struct iovec src_iov = {.iov_base = (void *)0xDEADBEEF, .iov_len = 1024 };
	struct iovec dst_iov = src_iov;
	struct rte_mbuf *src_mbufs[2];
	struct accel_dpdk_cryptodev_queued_op *op_to_resubmit;
	int rc;

	task.base.op_code = ACCEL_OPC_DECRYPT;
	task.base.s.iovcnt = 1;
	task.base.s.iovs = &src_iov;
	task.base.d.iovcnt = 1;
	task.base.d.iovs = &dst_iov;
	task.base.block_size = 512;
	task.base.crypto_key = &g_key;
	task.base.iv = 1;
	task.inplace = true;

	/* test regular 1 op to dequeue and complete */
	g_dequeue_mock = g_enqueue_mock = 1;
	g_aesni_qp.num_enqueued_ops = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	task.cryop_submitted = 1;
	task.cryop_total = 1;
	task.cryop_completed = 0;
	task.base.op_code = ACCEL_OPC_DECRYPT;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);
	CU_ASSERT(task.cryop_completed == task.cryop_submitted);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 0);

	/* We have nothing dequeued but have some to resubmit */
	g_dequeue_mock = 0;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
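
	/* The queued op is carved out of the trailing space that test_setup reserved
	 * in each test crypto op allocation; ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET
	 * points at that region below.
	 */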
	/* add an op to the queued list. */
	task.cryop_submitted = 1;
	task.cryop_total = 1;
	task.cryop_completed = 0;
	g_resubmit_test = true;
	op_to_resubmit = (struct accel_dpdk_cryptodev_queued_op *)((uint8_t *)g_test_crypto_ops[0] +
			 ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET);
	op_to_resubmit->crypto_op = (void *)0xDEADBEEF;
	op_to_resubmit->task = &task;
	op_to_resubmit->qp = &g_aesni_qp;
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_cry_ops,
			  op_to_resubmit,
			  link);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == false);
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	g_resubmit_test = false;
	CU_ASSERT(rc == 1);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_cry_ops) == true);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 1);

	/* 2 to dequeue but 2nd one failed */
	g_dequeue_mock = g_enqueue_mock = 2;
	g_aesni_qp.num_enqueued_ops = 2;
	task.cryop_submitted = 2;
	task.cryop_total = 2;
	task.cryop_completed = 0;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 2);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	g_test_crypto_ops[0]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	g_test_crypto_ops[1]->sym->m_src = src_mbufs[1];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[1]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uint64_t)&task;
	g_test_crypto_ops[1]->sym->m_dst = NULL;
	g_test_crypto_ops[1]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(task.is_failed == true);
	CU_ASSERT(rc == 1);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 0);

	/* Dequeue a task which needs to be submitted again */
	g_dequeue_mock = g_enqueue_mock = ut_rte_crypto_op_bulk_alloc = 1;
	task.cryop_submitted = 1;
	task.cryop_total = 2;
	task.cryop_completed = 0;
	g_aesni_qp.num_enqueued_ops = 1;
	rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, 1);
	SPDK_CU_ASSERT_FATAL(src_mbufs[0] != NULL);
	g_test_crypto_ops[0]->sym->m_src = src_mbufs[0];
	*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
			   uint64_t *) = (uintptr_t)&task;
	g_test_crypto_ops[0]->sym->m_dst = NULL;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);
	CU_ASSERT(task.cryop_submitted == 2);
	CU_ASSERT(task.cryop_total == 2);
	CU_ASSERT(task.cryop_completed == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov.iov_base + task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 1);
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);

	/* Process queued tasks, qp is full */
	g_dequeue_mock = g_enqueue_mock = 0;
	g_aesni_qp.num_enqueued_ops = g_aesni_crypto_dev.qp_desc_nr;
	task.cryop_submitted = 1;
	task.cryop_total = 2;
	task.cryop_completed = 1;
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
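	/* Queue the half-completed task; with the qp full, the poller should leave
	 * it on queued_tasks without submitting anything.
	 */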
	TAILQ_INSERT_TAIL(&g_crypto_ch->queued_tasks, &task, link);

	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_FIRST(&g_crypto_ch->queued_tasks) == &task);

	/* Try again when the queue is empty, the task should be submitted */
	g_enqueue_mock = 1;
	g_aesni_qp.num_enqueued_ops = 0;
	rc = accel_dpdk_cryptodev_poller(g_crypto_ch);
	CU_ASSERT(rc == 1);
	CU_ASSERT(task.cryop_submitted == 2);
	CU_ASSERT(task.cryop_total == 2);
	CU_ASSERT(task.cryop_completed == 1);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == src_iov.iov_base + task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == task.base.block_size);
	CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
	CU_ASSERT(*RTE_MBUF_DYNFIELD(g_test_crypto_ops[0]->sym->m_src, g_mbuf_offset,
				     uint64_t *) == (uint64_t)&task);
	CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
	CU_ASSERT(g_aesni_qp.num_enqueued_ops == 1);
	CU_ASSERT(TAILQ_EMPTY(&g_crypto_ch->queued_tasks));
	rte_pktmbuf_free(g_test_crypto_ops[0]->sym->m_src);
}

/* Helper function for accel_dpdk_cryptodev_assign_device_qps() */
static void
_check_expected_values(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
		       uint8_t expected_qat_index,
		       uint8_t next_qat_index)
{
	uint32_t num_qpairs;

	memset(crypto_ch->device_qp, 0, sizeof(crypto_ch->device_qp));

	num_qpairs = accel_dpdk_cryptodev_assign_device_qps(crypto_ch);
	CU_ASSERT(num_qpairs == 3);

	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]->index == expected_qat_index);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]->in_use == true);
	CU_ASSERT(g_next_qat_index == next_qat_index);
	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB]->in_use == true);
	SPDK_CU_ASSERT_FATAL(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] != NULL);
	CU_ASSERT(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->in_use == true);
}

static void
test_assign_device_qp(void)
{
	struct accel_dpdk_cryptodev_device qat_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT,
		.qpairs = TAILQ_HEAD_INITIALIZER(qat_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_device aesni_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB,
		.qpairs = TAILQ_HEAD_INITIALIZER(aesni_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_device mlx5_dev = {
		.type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI,
		.qpairs = TAILQ_HEAD_INITIALIZER(mlx5_dev.qpairs)
	};
	struct accel_dpdk_cryptodev_qp *qat_qps;
	struct accel_dpdk_cryptodev_qp aesni_qps[4] = {};
	struct accel_dpdk_cryptodev_qp mlx5_qps[4] = {};
	struct accel_dpdk_cryptodev_io_channel io_ch = {};
	TAILQ_HEAD(, accel_dpdk_cryptodev_device) devs_tmp = TAILQ_HEAD_INITIALIZER(devs_tmp);
	int i;

	g_qat_total_qp = 96;
	qat_qps = calloc(g_qat_total_qp, sizeof(*qat_qps));
	SPDK_CU_ASSERT_FATAL(qat_qps != NULL);
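
	/* Wire up 4 AESNI qps, 4 mlx5 qps and 96 QAT qps. With a spread of
	 * ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD (32), QAT assignments walk indexes
	 * 0, 32, 64 and then wrap around to 1, which the checks below verify.
	 */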
	for (i = 0; i < 4; i++) {
		aesni_qps[i].index = i;
		aesni_qps[i].device = &aesni_dev;
		TAILQ_INSERT_TAIL(&aesni_dev.qpairs, &aesni_qps[i], link);

		mlx5_qps[i].index = i;
		mlx5_qps[i].device = &mlx5_dev;
		TAILQ_INSERT_TAIL(&mlx5_dev.qpairs, &mlx5_qps[i], link);
	}
	for (i = 0; i < g_qat_total_qp; i++) {
		qat_qps[i].index = i;
		qat_qps[i].device = &qat_dev;
		TAILQ_INSERT_TAIL(&qat_dev.qpairs, &qat_qps[i], link);
	}

	/* Swap g_crypto_devices so that other tests are not affected */
	TAILQ_SWAP(&g_crypto_devices, &devs_tmp, accel_dpdk_cryptodev_device, link);

	TAILQ_INSERT_TAIL(&g_crypto_devices, &qat_dev, link);
	TAILQ_INSERT_TAIL(&g_crypto_devices, &aesni_dev, link);
	TAILQ_INSERT_TAIL(&g_crypto_devices, &mlx5_dev, link);

	/* QAT testing is more complex as the code under test load balances by
	 * assigning each subsequent device/qp to every QAT_VF_SPREAD modulo
	 * g_qat_total_qp. For the current latest QAT we'll have 48 virtual functions,
	 * each with 2 qps, so the "spread" between assignments is 32. */

	/* First assignment will assign to 0 and next at 32. */
	_check_expected_values(&io_ch, 0, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD);

	/* Second assignment will assign to 32 and next at 64. */
	_check_expected_values(&io_ch, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD,
			       ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD * 2);

	/* Third assignment will assign to 64 and next at 0. */
	_check_expected_values(&io_ch, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD * 2, 0);

	/* Fourth assignment will assign to 1 and next at 33. */
	_check_expected_values(&io_ch, 1, ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD + 1);

	TAILQ_SWAP(&devs_tmp, &g_crypto_devices, accel_dpdk_cryptodev_device, link);

	free(qat_qps);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("dpdk_cryptodev", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_error_paths);
	CU_ADD_TEST(suite, test_simple_encrypt);
	CU_ADD_TEST(suite, test_simple_decrypt);
	CU_ADD_TEST(suite, test_large_enc_dec);
	CU_ADD_TEST(suite, test_dev_full);
	CU_ADD_TEST(suite, test_crazy_rw);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_supported_opcodes);
	CU_ADD_TEST(suite, test_poller);
	CU_ADD_TEST(suite, test_assign_device_qp);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}