/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include "test.h"
#include <string.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>

#ifdef RTE_EXEC_ENV_WINDOWS
static int
test_event_crypto_adapter(void)
{
	printf("event_crypto_adapter not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}

#else

#include <rte_eventdev.h>
#include <rte_bus_vdev.h>
#include <rte_service.h>
#include <rte_event_crypto_adapter.h>

#define PKT_TRACE			0
#define NUM				1
#define DEFAULT_NUM_XFORMS		(2)
#define NUM_MBUFS			(8191)
#define MBUF_CACHE_SIZE			(256)
#define MAXIMUM_IV_LENGTH		(16)
#define DEFAULT_NUM_OPS_INFLIGHT	(128)
#define MAX_NB_SESSIONS			4
#define TEST_APP_PORT_ID		0
#define TEST_APP_EV_QUEUE_ID		0
#define TEST_APP_EV_PRIORITY		0
#define TEST_APP_EV_FLOWID		0xAABB
#define TEST_CRYPTO_EV_QUEUE_ID		1
#define TEST_ADAPTER_ID			0
#define TEST_CDEV_ID			0
#define TEST_CDEV_QP_ID			0
#define PACKET_LENGTH			64
#define NB_TEST_PORTS			1
#define NB_TEST_QUEUES			2
#define NUM_CORES			1
#define CRYPTODEV_NAME_NULL_PMD		crypto_null

#define MBUF_SIZE	(sizeof(struct rte_mbuf) + \
			RTE_PKTMBUF_HEADROOM + PACKET_LENGTH)
#define IV_OFFSET	(sizeof(struct rte_crypto_op) + \
			sizeof(struct rte_crypto_sym_op) + \
			DEFAULT_NUM_XFORMS * \
			sizeof(struct rte_crypto_sym_xform))

/* Handle log statements in same manner as test macros */
#define LOG_DBG(...)	RTE_LOG(DEBUG, EAL, __VA_ARGS__)

static const uint8_t text_64B[] = {
	0x05, 0x15, 0x77, 0x32, 0xc9, 0x66, 0x91, 0x50,
	0x93, 0x9f, 0xbb, 0x4e, 0x2e, 0x5a, 0x02, 0xd0,
	0x2d, 0x9d, 0x31, 0x5d, 0xc8, 0x9e, 0x86, 0x36,
	0x54, 0x5c, 0x50, 0xe8, 0x75, 0x54, 0x74, 0x5e,
	0xd5, 0xa2, 0x84, 0x21, 0x2d, 0xc5, 0xf8, 0x1c,
	0x55, 0x1a, 0xba, 0x91, 0xce, 0xb5, 0xa3, 0x1e,
	0x31, 0xbf, 0xe9, 0xa1, 0x97, 0x5c, 0x2b, 0xd6,
	0x57, 0xa5, 0x9f, 0xab, 0xbd, 0xb0, 0x9b, 0x9c
};

struct event_crypto_adapter_test_params {
	struct rte_mempool *mbuf_pool;
	struct rte_mempool *op_mpool;
	struct rte_mempool *session_mpool;
	struct rte_mempool *session_priv_mpool;
	struct rte_cryptodev_config *config;
	uint8_t crypto_event_port_id;
	uint8_t internal_port_op_fwd;
};

struct rte_event response_info = {
	.queue_id = TEST_APP_EV_QUEUE_ID,
	.sched_type = RTE_SCHED_TYPE_ATOMIC,
	.flow_id = TEST_APP_EV_FLOWID,
	.priority = TEST_APP_EV_PRIORITY
};

struct rte_event_crypto_request request_info = {
	.cdev_id = TEST_CDEV_ID,
	.queue_pair_id = TEST_CDEV_QP_ID
};

static struct event_crypto_adapter_test_params params;
static uint8_t crypto_adapter_setup_done;
static uint32_t slcore_id;
static int evdev;

static struct rte_mbuf *
alloc_fill_mbuf(struct rte_mempool *mpool, const uint8_t *data,
		size_t len, uint8_t blocksize)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
	size_t t_len = len - (blocksize ?
			(len % blocksize) : 0);

	if (m) {
		char *dst = rte_pktmbuf_append(m, t_len);

		if (!dst) {
			rte_pktmbuf_free(m);
			return NULL;
		}

		rte_memcpy(dst, (const void *)data, t_len);
	}
	return m;
}

static int
send_recv_ev(struct rte_event *ev)
{
	struct rte_crypto_op *op;
	struct rte_event recv_ev;
	int ret;

	if (params.internal_port_op_fwd)
		ret = rte_event_crypto_adapter_enqueue(evdev, TEST_APP_PORT_ID,
				ev, NUM);
	else
		ret = rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID, ev, NUM);
	TEST_ASSERT_EQUAL(ret, NUM, "Failed to send event to crypto adapter\n");

	while (rte_event_dequeue_burst(evdev,
			TEST_APP_PORT_ID, &recv_ev, NUM, 0) == 0)
		rte_pause();

	op = recv_ev.event_ptr;
#if PKT_TRACE
	struct rte_mbuf *m = op->sym->m_src;
	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
#endif
	rte_pktmbuf_free(op->sym->m_src);
	rte_crypto_op_free(op);

	return TEST_SUCCESS;
}

static int
test_crypto_adapter_stats(void)
{
	struct rte_event_crypto_adapter_stats stats;

	rte_event_crypto_adapter_stats_get(TEST_ADAPTER_ID, &stats);
	printf(" +------------------------------------------------------+\n");
	printf(" + Crypto adapter stats for instance %u:\n", TEST_ADAPTER_ID);
	printf(" + Event port poll count %" PRIx64 "\n",
		stats.event_poll_count);
	printf(" + Event dequeue count %" PRIx64 "\n",
		stats.event_deq_count);
	printf(" + Cryptodev enqueue count %" PRIx64 "\n",
		stats.crypto_enq_count);
	printf(" + Cryptodev enqueue failed count %" PRIx64 "\n",
		stats.crypto_enq_fail);
	printf(" + Cryptodev dequeue count %" PRIx64 "\n",
		stats.crypto_deq_count);
	printf(" + Event enqueue count %" PRIx64 "\n",
		stats.event_enq_count);
	printf(" + Event enqueue retry count %" PRIx64 "\n",
		stats.event_enq_retry_count);
	printf(" + Event enqueue fail count %" PRIx64 "\n",
		stats.event_enq_fail_count);
	printf(" +------------------------------------------------------+\n");

	rte_event_crypto_adapter_stats_reset(TEST_ADAPTER_ID);
	return TEST_SUCCESS;
}

static int
test_op_forward_mode(uint8_t session_less)
{
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_cryptodev_sym_session *sess;
	union rte_event_crypto_metadata m_data;
	struct rte_crypto_sym_op *sym_op;
	struct rte_crypto_op *op;
	struct rte_mbuf *m;
	struct rte_event ev;
	uint32_t cap;
	int ret;

	memset(&m_data, 0, sizeof(m_data));

	m = alloc_fill_mbuf(params.mbuf_pool, text_64B, PACKET_LENGTH, 0);
	TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf!\n");
#if PKT_TRACE
	rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
#endif
	/* Setup Cipher Parameters */
	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.next = NULL;
	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;

	op = rte_crypto_op_alloc(params.op_mpool,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	TEST_ASSERT_NOT_NULL(op,
		"Failed to allocate symmetric crypto operation struct\n");

	sym_op = op->sym;

	if (!session_less) {
		sess = rte_cryptodev_sym_session_create(
				params.session_mpool);
		TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");

		/* Create Crypto session */
		ret = rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess,
				&cipher_xform, params.session_priv_mpool);
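		/*
		 * rte_cryptodev_sym_session_init() binds the NULL-cipher
		 * transform to TEST_CDEV_ID and draws the device-private
		 * session data from session_priv_mpool; the crypto op itself
		 * is only tied to the session by
		 * rte_crypto_op_attach_sym_session() below.
		 */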
TEST_ASSERT_SUCCESS(ret, "Failed to init session\n"); 219 220 ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, 221 &cap); 222 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); 223 224 if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) { 225 /* Fill in private user data information */ 226 m_data.request_info.cdev_id = request_info.cdev_id; 227 m_data.request_info.queue_pair_id = 228 request_info.queue_pair_id; 229 m_data.response_info.event = response_info.event; 230 rte_cryptodev_sym_session_set_user_data(sess, 231 &m_data, sizeof(m_data)); 232 } 233 234 rte_crypto_op_attach_sym_session(op, sess); 235 } else { 236 struct rte_crypto_sym_xform *first_xform; 237 238 rte_crypto_op_sym_xforms_alloc(op, NUM); 239 op->sess_type = RTE_CRYPTO_OP_SESSIONLESS; 240 first_xform = &cipher_xform; 241 sym_op->xform = first_xform; 242 uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH; 243 op->private_data_offset = len; 244 /* Fill in private data information */ 245 m_data.request_info.cdev_id = request_info.cdev_id; 246 m_data.request_info.queue_pair_id = request_info.queue_pair_id; 247 m_data.response_info.event = response_info.event; 248 rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data)); 249 } 250 251 sym_op->m_src = m; 252 sym_op->cipher.data.offset = 0; 253 sym_op->cipher.data.length = PACKET_LENGTH; 254 255 /* Fill in event info and update event_ptr with rte_crypto_op */ 256 memset(&ev, 0, sizeof(ev)); 257 ev.queue_id = TEST_CRYPTO_EV_QUEUE_ID; 258 ev.sched_type = RTE_SCHED_TYPE_ATOMIC; 259 ev.flow_id = 0xAABB; 260 ev.event_ptr = op; 261 262 ret = send_recv_ev(&ev); 263 TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to " 264 "crypto adapter\n"); 265 266 test_crypto_adapter_stats(); 267 268 return TEST_SUCCESS; 269 } 270 271 static int 272 map_adapter_service_core(void) 273 { 274 uint32_t adapter_service_id; 275 int ret; 276 277 if (rte_event_crypto_adapter_service_id_get(TEST_ADAPTER_ID, 278 &adapter_service_id) == 0) { 279 uint32_t core_list[NUM_CORES]; 280 281 ret = rte_service_lcore_list(core_list, NUM_CORES); 282 TEST_ASSERT(ret >= 0, "Failed to get service core list!"); 283 284 if (core_list[0] != slcore_id) { 285 TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id), 286 "Failed to add service core"); 287 TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id), 288 "Failed to start service core"); 289 } 290 291 TEST_ASSERT_SUCCESS(rte_service_map_lcore_set( 292 adapter_service_id, slcore_id, 1), 293 "Failed to map adapter service"); 294 } 295 296 return TEST_SUCCESS; 297 } 298 299 static int 300 test_sessionless_with_op_forward_mode(void) 301 { 302 uint32_t cap; 303 int ret; 304 305 ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap); 306 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); 307 308 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) && 309 !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) 310 map_adapter_service_core(); 311 else { 312 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) 313 return TEST_SKIPPED; 314 } 315 316 TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID), 317 "Failed to start event crypto adapter"); 318 319 ret = test_op_forward_mode(1); 320 TEST_ASSERT_SUCCESS(ret, "Sessionless - FORWARD mode test failed\n"); 321 return TEST_SUCCESS; 322 } 323 324 static int 325 test_session_with_op_forward_mode(void) 326 { 327 uint32_t cap; 328 int ret; 329 330 ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap); 331 
TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); 332 333 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) && 334 !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) 335 map_adapter_service_core(); 336 else { 337 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) 338 return TEST_SKIPPED; 339 } 340 341 TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID 342 ), "Failed to start event crypto adapter"); 343 344 ret = test_op_forward_mode(0); 345 TEST_ASSERT_SUCCESS(ret, "Session based - FORWARD mode test failed\n"); 346 return TEST_SUCCESS; 347 } 348 349 static int 350 send_op_recv_ev(struct rte_crypto_op *op) 351 { 352 struct rte_crypto_op *recv_op; 353 struct rte_event ev; 354 int ret; 355 356 ret = rte_cryptodev_enqueue_burst(TEST_CDEV_ID, TEST_CDEV_QP_ID, 357 &op, NUM); 358 TEST_ASSERT_EQUAL(ret, NUM, "Failed to enqueue to cryptodev\n"); 359 memset(&ev, 0, sizeof(ev)); 360 361 while (rte_event_dequeue_burst(evdev, 362 TEST_APP_PORT_ID, &ev, NUM, 0) == 0) 363 rte_pause(); 364 365 recv_op = ev.event_ptr; 366 #if PKT_TRACE 367 struct rte_mbuf *m = recv_op->sym->m_src; 368 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m)); 369 #endif 370 rte_pktmbuf_free(recv_op->sym->m_src); 371 rte_crypto_op_free(recv_op); 372 373 return TEST_SUCCESS; 374 } 375 376 static int 377 test_op_new_mode(uint8_t session_less) 378 { 379 struct rte_crypto_sym_xform cipher_xform; 380 struct rte_cryptodev_sym_session *sess; 381 union rte_event_crypto_metadata m_data; 382 struct rte_crypto_sym_op *sym_op; 383 struct rte_crypto_op *op; 384 struct rte_mbuf *m; 385 uint32_t cap; 386 int ret; 387 388 memset(&m_data, 0, sizeof(m_data)); 389 390 m = alloc_fill_mbuf(params.mbuf_pool, text_64B, PACKET_LENGTH, 0); 391 TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf!\n"); 392 #if PKT_TRACE 393 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m)); 394 #endif 395 /* Setup Cipher Parameters */ 396 cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; 397 cipher_xform.next = NULL; 398 cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL; 399 cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; 400 401 op = rte_crypto_op_alloc(params.op_mpool, 402 RTE_CRYPTO_OP_TYPE_SYMMETRIC); 403 TEST_ASSERT_NOT_NULL(op, "Failed to allocate crypto_op!\n"); 404 405 sym_op = op->sym; 406 407 if (!session_less) { 408 sess = rte_cryptodev_sym_session_create( 409 params.session_mpool); 410 TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n"); 411 412 ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, 413 &cap); 414 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); 415 416 if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) { 417 /* Fill in private user data information */ 418 m_data.response_info.event = response_info.event; 419 rte_cryptodev_sym_session_set_user_data(sess, 420 &m_data, sizeof(m_data)); 421 } 422 ret = rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess, 423 &cipher_xform, params.session_priv_mpool); 424 TEST_ASSERT_SUCCESS(ret, "Failed to init session\n"); 425 426 rte_crypto_op_attach_sym_session(op, sess); 427 } else { 428 struct rte_crypto_sym_xform *first_xform; 429 430 rte_crypto_op_sym_xforms_alloc(op, NUM); 431 op->sess_type = RTE_CRYPTO_OP_SESSIONLESS; 432 first_xform = &cipher_xform; 433 sym_op->xform = first_xform; 434 uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH; 435 op->private_data_offset = len; 436 /* Fill in private data information */ 437 m_data.response_info.event = response_info.event; 438 rte_memcpy((uint8_t *)op + len, 
&m_data, sizeof(m_data)); 439 } 440 441 sym_op->m_src = m; 442 sym_op->cipher.data.offset = 0; 443 sym_op->cipher.data.length = PACKET_LENGTH; 444 445 ret = send_op_recv_ev(op); 446 TEST_ASSERT_SUCCESS(ret, "Failed to enqueue op to cryptodev\n"); 447 448 test_crypto_adapter_stats(); 449 450 return TEST_SUCCESS; 451 } 452 453 static int 454 test_sessionless_with_op_new_mode(void) 455 { 456 uint32_t cap; 457 int ret; 458 459 ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap); 460 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); 461 462 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) && 463 !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) 464 map_adapter_service_core(); 465 else { 466 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) 467 return TEST_SKIPPED; 468 } 469 470 /* start the event crypto adapter */ 471 TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID), 472 "Failed to start event crypto adapter"); 473 474 ret = test_op_new_mode(1); 475 TEST_ASSERT_SUCCESS(ret, "Sessionless - NEW mode test failed\n"); 476 return TEST_SUCCESS; 477 } 478 479 static int 480 test_session_with_op_new_mode(void) 481 { 482 uint32_t cap; 483 int ret; 484 485 ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap); 486 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); 487 488 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) && 489 !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) 490 map_adapter_service_core(); 491 else { 492 if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) 493 return TEST_SKIPPED; 494 } 495 496 TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID), 497 "Failed to start event crypto adapter"); 498 499 ret = test_op_new_mode(0); 500 TEST_ASSERT_SUCCESS(ret, "Session based - NEW mode test failed\n"); 501 return TEST_SUCCESS; 502 } 503 504 static int 505 configure_cryptodev(void) 506 { 507 struct rte_cryptodev_qp_conf qp_conf; 508 struct rte_cryptodev_config conf; 509 struct rte_cryptodev_info info; 510 unsigned int session_size; 511 uint8_t nb_devs; 512 int ret; 513 514 params.mbuf_pool = rte_pktmbuf_pool_create( 515 "CRYPTO_ADAPTER_MBUFPOOL", 516 NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE, 517 rte_socket_id()); 518 if (params.mbuf_pool == NULL) { 519 RTE_LOG(ERR, USER1, "Can't create CRYPTO_MBUFPOOL\n"); 520 return TEST_FAILED; 521 } 522 523 params.op_mpool = rte_crypto_op_pool_create( 524 "EVENT_CRYPTO_SYM_OP_POOL", 525 RTE_CRYPTO_OP_TYPE_SYMMETRIC, 526 NUM_MBUFS, MBUF_CACHE_SIZE, 527 DEFAULT_NUM_XFORMS * 528 sizeof(struct rte_crypto_sym_xform) + 529 MAXIMUM_IV_LENGTH + 530 sizeof(union rte_event_crypto_metadata), 531 rte_socket_id()); 532 if (params.op_mpool == NULL) { 533 RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n"); 534 return TEST_FAILED; 535 } 536 537 /* Create a NULL crypto device */ 538 nb_devs = rte_cryptodev_device_count_by_driver( 539 rte_cryptodev_driver_id_get( 540 RTE_STR(CRYPTODEV_NAME_NULL_PMD))); 541 if (!nb_devs) { 542 ret = rte_vdev_init( 543 RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL); 544 545 TEST_ASSERT(ret == 0, "Failed to create pmd:%s instance\n", 546 RTE_STR(CRYPTODEV_NAME_NULL_PMD)); 547 } 548 549 nb_devs = rte_cryptodev_count(); 550 if (!nb_devs) { 551 RTE_LOG(ERR, USER1, "No crypto devices found!\n"); 552 return TEST_FAILED; 553 } 554 555 /* 556 * Create mempool with maximum number of sessions * 2, 557 * to include the session headers & private data 558 */ 559 session_size = 
		rte_cryptodev_sym_get_private_session_size(TEST_CDEV_ID);
	session_size += sizeof(union rte_event_crypto_metadata);

	params.session_mpool = rte_cryptodev_sym_session_pool_create(
			"CRYPTO_ADAPTER_SESSION_MP",
			MAX_NB_SESSIONS, 0, 0,
			sizeof(union rte_event_crypto_metadata),
			SOCKET_ID_ANY);
	TEST_ASSERT_NOT_NULL(params.session_mpool,
			"session mempool allocation failed\n");

	params.session_priv_mpool = rte_mempool_create(
			"CRYPTO_AD_SESS_MP_PRIV",
			MAX_NB_SESSIONS,
			session_size,
			0, 0, NULL, NULL, NULL,
			NULL, SOCKET_ID_ANY,
			0);
	TEST_ASSERT_NOT_NULL(params.session_priv_mpool,
			"session private data mempool allocation failed\n");

	rte_cryptodev_info_get(TEST_CDEV_ID, &info);
	conf.nb_queue_pairs = info.max_nb_queue_pairs;
	conf.socket_id = SOCKET_ID_ANY;
	conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;

	TEST_ASSERT_SUCCESS(rte_cryptodev_configure(TEST_CDEV_ID, &conf),
			"Failed to configure cryptodev %u with %u qps\n",
			TEST_CDEV_ID, conf.nb_queue_pairs);

	qp_conf.nb_descriptors = DEFAULT_NUM_OPS_INFLIGHT;
	qp_conf.mp_session = params.session_mpool;
	qp_conf.mp_session_private = params.session_priv_mpool;

	TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
			TEST_CDEV_ID, TEST_CDEV_QP_ID, &qp_conf,
			rte_cryptodev_socket_id(TEST_CDEV_ID)),
			"Failed to setup queue pair %u on cryptodev %u\n",
			TEST_CDEV_QP_ID, TEST_CDEV_ID);

	return TEST_SUCCESS;
}

static inline void
evdev_set_conf_values(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = NB_TEST_PORTS;
	dev_conf->nb_event_queues = NB_TEST_QUEUES;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

static int
configure_eventdev(void)
{
	struct rte_event_queue_conf queue_conf;
	struct rte_event_dev_config devconf;
	struct rte_event_dev_info info;
	uint32_t queue_count;
	uint32_t port_count;
	int ret;
	uint8_t qid;

	if (!rte_event_dev_count()) {
		/* If there is no hardware eventdev, or no software vdev was
		 * specified on the command line, create an instance of
		 * event_sw.
		 */
		LOG_DBG("Failed to find a valid event device... "
" 638 "testing with event_sw device\n"); 639 TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL), 640 "Error creating eventdev"); 641 evdev = rte_event_dev_get_dev_id("event_sw0"); 642 } 643 644 ret = rte_event_dev_info_get(evdev, &info); 645 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n"); 646 647 evdev_set_conf_values(&devconf, &info); 648 649 ret = rte_event_dev_configure(evdev, &devconf); 650 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n"); 651 652 /* Set up event queue */ 653 ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT, 654 &queue_count); 655 TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n"); 656 TEST_ASSERT_EQUAL(queue_count, 2, "Unexpected queue count\n"); 657 658 qid = TEST_APP_EV_QUEUE_ID; 659 ret = rte_event_queue_setup(evdev, qid, NULL); 660 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid); 661 662 queue_conf.nb_atomic_flows = info.max_event_queue_flows; 663 queue_conf.nb_atomic_order_sequences = 32; 664 queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC; 665 queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST; 666 queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK; 667 668 qid = TEST_CRYPTO_EV_QUEUE_ID; 669 ret = rte_event_queue_setup(evdev, qid, &queue_conf); 670 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid); 671 672 /* Set up event port */ 673 ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT, 674 &port_count); 675 TEST_ASSERT_SUCCESS(ret, "Port count get failed\n"); 676 TEST_ASSERT_EQUAL(port_count, 1, "Unexpected port count\n"); 677 678 ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL); 679 TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n", 680 TEST_APP_PORT_ID); 681 682 qid = TEST_APP_EV_QUEUE_ID; 683 ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1); 684 TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n", 685 TEST_APP_PORT_ID); 686 687 return TEST_SUCCESS; 688 } 689 690 static void 691 test_crypto_adapter_free(void) 692 { 693 rte_event_crypto_adapter_free(TEST_ADAPTER_ID); 694 } 695 696 static int 697 test_crypto_adapter_create(void) 698 { 699 struct rte_event_port_conf conf = { 700 .dequeue_depth = 8, 701 .enqueue_depth = 8, 702 .new_event_threshold = 1200, 703 }; 704 int ret; 705 706 /* Create adapter with default port creation callback */ 707 ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID, 708 evdev, 709 &conf, 0); 710 TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n"); 711 712 return TEST_SUCCESS; 713 } 714 715 static int 716 test_crypto_adapter_qp_add_del(void) 717 { 718 uint32_t cap; 719 int ret; 720 721 ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap); 722 TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n"); 723 724 if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) { 725 ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID, 726 TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info); 727 } else 728 ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID, 729 TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL); 730 731 TEST_ASSERT_SUCCESS(ret, "Failed to create add queue pair\n"); 732 733 ret = rte_event_crypto_adapter_queue_pair_del(TEST_ADAPTER_ID, 734 TEST_CDEV_ID, TEST_CDEV_QP_ID); 735 TEST_ASSERT_SUCCESS(ret, "Failed to delete add queue pair\n"); 736 737 return TEST_SUCCESS; 738 } 739 740 static int 741 configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode) 742 { 743 struct rte_event_port_conf conf = { 744 .dequeue_depth = 8, 745 .enqueue_depth = 8, 746 
		.new_event_threshold = 1200,
	};

	uint32_t cap;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	/* Skip mode and capability mismatch check for SW eventdev */
	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	    !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	    !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND))
		goto adapter_create;

	if (mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) {
		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)
			params.internal_port_op_fwd = 1;
		else
			return -ENOTSUP;
	}

	if ((mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
	    !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
		return -ENOTSUP;

adapter_create:
	/* Create adapter with default port creation callback */
	ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
			evdev,
			&conf, mode);
	TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");

	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
				TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
	} else
		ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
				TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);

	TEST_ASSERT_SUCCESS(ret, "Failed to add queue pair\n");

	if (!params.internal_port_op_fwd) {
		ret = rte_event_crypto_adapter_event_port_get(TEST_ADAPTER_ID,
				&params.crypto_event_port_id);
		TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n");
	}

	return TEST_SUCCESS;
}

static void
test_crypto_adapter_stop(void)
{
	uint32_t evdev_service_id, adapter_service_id;

	/* retrieve service ids & stop services */
	if (rte_event_crypto_adapter_service_id_get(TEST_ADAPTER_ID,
			&adapter_service_id) == 0) {
		rte_service_runstate_set(adapter_service_id, 0);
		rte_service_lcore_stop(slcore_id);
		rte_service_lcore_del(slcore_id);
		rte_event_crypto_adapter_stop(TEST_ADAPTER_ID);
	}

	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
		rte_service_runstate_set(evdev_service_id, 0);
		rte_service_lcore_stop(slcore_id);
		rte_service_lcore_del(slcore_id);
		rte_cryptodev_stop(TEST_CDEV_ID);
		rte_event_dev_stop(evdev);
	} else {
		rte_cryptodev_stop(TEST_CDEV_ID);
		rte_event_dev_stop(evdev);
	}
}

static int
test_crypto_adapter_conf(enum rte_event_crypto_adapter_mode mode)
{
	uint32_t evdev_service_id;
	uint8_t qid;
	int ret;

	if (!crypto_adapter_setup_done) {
		ret = configure_event_crypto_adapter(mode);
		if (ret)
			return ret;
		if (!params.internal_port_op_fwd) {
			qid = TEST_CRYPTO_EV_QUEUE_ID;
			ret = rte_event_port_link(evdev,
					params.crypto_event_port_id, &qid,
					NULL, 1);
			TEST_ASSERT(ret >= 0, "Failed to link queue %d "
					"port=%u\n", qid,
					params.crypto_event_port_id);
		}
		crypto_adapter_setup_done = 1;
	}

	/* retrieve service ids */
	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
		/* add a service core and start it */
		TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
				"Failed to add service core");
		TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
				"Failed to start service core");

		/* map services to it */
		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,
				slcore_id, 1), "Failed to map evdev service");

		/* set services to running */
		TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id,
				1), "Failed to start evdev service");
	}

	/* start the eventdev */
	TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),
			"Failed to start event device");

	/* start the cryptodev */
	TEST_ASSERT_SUCCESS(rte_cryptodev_start(TEST_CDEV_ID),
			"Failed to start crypto device");

	return TEST_SUCCESS;
}

static int
test_crypto_adapter_conf_op_forward_mode(void)
{
	enum rte_event_crypto_adapter_mode mode;

	mode = RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD;

	return test_crypto_adapter_conf(mode);
}

static int
test_crypto_adapter_conf_op_new_mode(void)
{
	enum rte_event_crypto_adapter_mode mode;

	mode = RTE_EVENT_CRYPTO_ADAPTER_OP_NEW;

	return test_crypto_adapter_conf(mode);
}

static int
testsuite_setup(void)
{
	int ret;

	slcore_id = rte_get_next_lcore(-1, 1, 0);
	TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores "
			"are required to run this autotest\n");

	/* Setup and start event device. */
	ret = configure_eventdev();
	TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n");

	/* Setup and start crypto device. */
	ret = configure_cryptodev();
	TEST_ASSERT_SUCCESS(ret, "cryptodev initialization failed\n");

	return TEST_SUCCESS;
}

static void
crypto_adapter_teardown(void)
{
	int ret;

	ret = rte_event_crypto_adapter_stop(TEST_ADAPTER_ID);
	if (ret < 0)
		RTE_LOG(ERR, USER1, "Failed to stop adapter!");

	ret = rte_event_crypto_adapter_queue_pair_del(TEST_ADAPTER_ID,
			TEST_CDEV_ID, TEST_CDEV_QP_ID);
	if (ret < 0)
		RTE_LOG(ERR, USER1, "Failed to delete queue pair!");

	ret = rte_event_crypto_adapter_free(TEST_ADAPTER_ID);
	if (ret < 0)
		RTE_LOG(ERR, USER1, "Failed to free adapter!");

	crypto_adapter_setup_done = 0;
}

static void
crypto_teardown(void)
{
	/* Free mbuf mempool */
	if (params.mbuf_pool != NULL) {
		RTE_LOG(DEBUG, USER1, "CRYPTO_ADAPTER_MBUFPOOL count %u\n",
			rte_mempool_avail_count(params.mbuf_pool));
		rte_mempool_free(params.mbuf_pool);
		params.mbuf_pool = NULL;
	}

	/* Free session mempool */
	if (params.session_mpool != NULL) {
		RTE_LOG(DEBUG, USER1, "CRYPTO_ADAPTER_SESSION_MP count %u\n",
			rte_mempool_avail_count(params.session_mpool));
		rte_mempool_free(params.session_mpool);
		params.session_mpool = NULL;
	}

	/* Free session private data mempool */
	if (params.session_priv_mpool != NULL) {
		RTE_LOG(DEBUG, USER1, "CRYPTO_AD_SESS_MP_PRIV count %u\n",
			rte_mempool_avail_count(params.session_priv_mpool));
		rte_mempool_free(params.session_priv_mpool);
		params.session_priv_mpool = NULL;
	}

	/* Free ops mempool */
	if (params.op_mpool != NULL) {
		RTE_LOG(DEBUG, USER1, "EVENT_CRYPTO_SYM_OP_POOL count %u\n",
			rte_mempool_avail_count(params.op_mpool));
		rte_mempool_free(params.op_mpool);
		params.op_mpool = NULL;
	}
}

static void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
}

static void
testsuite_teardown(void)
{
	crypto_adapter_teardown();
	crypto_teardown();
	eventdev_teardown();
}

static struct unit_test_suite functional_testsuite = {
	.suite_name = "Event crypto adapter test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {

		TEST_CASE_ST(NULL, test_crypto_adapter_free,
				test_crypto_adapter_create),

		TEST_CASE_ST(test_crypto_adapter_create,
				test_crypto_adapter_free,
				test_crypto_adapter_qp_add_del),

		TEST_CASE_ST(test_crypto_adapter_create,
				test_crypto_adapter_free,
				test_crypto_adapter_stats),

		TEST_CASE_ST(test_crypto_adapter_conf_op_forward_mode,
				test_crypto_adapter_stop,
				test_session_with_op_forward_mode),

		TEST_CASE_ST(test_crypto_adapter_conf_op_forward_mode,
				test_crypto_adapter_stop,
				test_sessionless_with_op_forward_mode),

		TEST_CASE_ST(test_crypto_adapter_conf_op_new_mode,
				test_crypto_adapter_stop,
				test_session_with_op_new_mode),

		TEST_CASE_ST(test_crypto_adapter_conf_op_new_mode,
				test_crypto_adapter_stop,
				test_sessionless_with_op_new_mode),

		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_event_crypto_adapter(void)
{
	return unit_test_suite_runner(&functional_testsuite);
}

#endif /* !RTE_EXEC_ENV_WINDOWS */

REGISTER_TEST_COMMAND(event_crypto_adapter_autotest,
		test_event_crypto_adapter);