1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(C) 2022 Marvell. 3 */ 4 5 6 #include <stdio.h> 7 #include <inttypes.h> 8 9 #include <rte_ethdev.h> 10 #include <rte_malloc.h> 11 #include <rte_security.h> 12 13 #include "test.h" 14 #include "test_security_inline_proto_vectors.h" 15 16 #ifdef RTE_EXEC_ENV_WINDOWS 17 static int 18 test_inline_ipsec(void) 19 { 20 printf("Inline ipsec not supported on Windows, skipping test\n"); 21 return TEST_SKIPPED; 22 } 23 24 static int 25 test_event_inline_ipsec(void) 26 { 27 printf("Event inline ipsec not supported on Windows, skipping test\n"); 28 return TEST_SKIPPED; 29 } 30 31 #else 32 33 #include <rte_eventdev.h> 34 #include <rte_event_eth_rx_adapter.h> 35 #include <rte_event_eth_tx_adapter.h> 36 37 #define NB_ETHPORTS_USED 1 38 #define MEMPOOL_CACHE_SIZE 32 39 #define MAX_PKT_BURST 32 40 #define RTE_TEST_RX_DESC_DEFAULT 1024 41 #define RTE_TEST_TX_DESC_DEFAULT 1024 42 #define RTE_PORT_ALL (~(uint16_t)0x0) 43 44 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ 45 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ 46 #define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */ 47 48 #define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */ 49 #define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ 50 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */ 51 52 #define MAX_TRAFFIC_BURST 2048 53 #define NB_MBUF 10240 54 55 #define ENCAP_DECAP_BURST_SZ 33 56 #define APP_REASS_TIMEOUT 10 57 58 extern struct ipsec_test_data pkt_aes_128_gcm; 59 extern struct ipsec_test_data pkt_aes_192_gcm; 60 extern struct ipsec_test_data pkt_aes_256_gcm; 61 extern struct ipsec_test_data pkt_aes_128_gcm_frag; 62 extern struct ipsec_test_data pkt_aes_128_cbc_null; 63 extern struct ipsec_test_data pkt_null_aes_xcbc; 64 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384; 65 extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512; 66 67 static struct rte_mempool *mbufpool; 68 static struct rte_mempool *sess_pool; 69 /* ethernet addresses of ports */ 70 static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS]; 71 72 static struct rte_eth_conf port_conf = { 73 .rxmode = { 74 .mq_mode = RTE_ETH_MQ_RX_NONE, 75 .split_hdr_size = 0, 76 .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM | 77 RTE_ETH_RX_OFFLOAD_SECURITY, 78 }, 79 .txmode = { 80 .mq_mode = RTE_ETH_MQ_TX_NONE, 81 .offloads = RTE_ETH_TX_OFFLOAD_SECURITY | 82 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, 83 }, 84 .lpbk_mode = 1, /* enable loopback */ 85 }; 86 87 static struct rte_eth_rxconf rx_conf = { 88 .rx_thresh = { 89 .pthresh = RX_PTHRESH, 90 .hthresh = RX_HTHRESH, 91 .wthresh = RX_WTHRESH, 92 }, 93 .rx_free_thresh = 32, 94 }; 95 96 static struct rte_eth_txconf tx_conf = { 97 .tx_thresh = { 98 .pthresh = TX_PTHRESH, 99 .hthresh = TX_HTHRESH, 100 .wthresh = TX_WTHRESH, 101 }, 102 .tx_free_thresh = 32, /* Use PMD default values */ 103 .tx_rs_thresh = 32, /* Use PMD default values */ 104 }; 105 106 static uint16_t port_id; 107 static uint8_t eventdev_id; 108 static uint8_t rx_adapter_id; 109 static uint8_t tx_adapter_id; 110 111 static bool event_mode_enabled; 112 113 static uint64_t link_mbps; 114 115 static int ip_reassembly_dynfield_offset = -1; 116 117 static struct rte_flow *default_flow[RTE_MAX_ETHPORTS]; 118 119 /* Create Inline IPsec session */ 120 static int 121 create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid, 122 void **sess, struct rte_security_ctx **ctx, 123 uint32_t *ol_flags, const struct ipsec_test_flags *flags, 124 struct rte_security_session_conf *sess_conf) 125 { 126 uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000, 127 0x0000, 0x001a}; 128 uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174, 129 0xe82c, 0x4887}; 130 uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2)); 131 uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1)); 132 struct rte_security_capability_idx sec_cap_idx; 133 const struct rte_security_capability *sec_cap; 134 enum rte_security_ipsec_sa_direction dir; 135 struct rte_security_ctx *sec_ctx; 136 uint32_t verify; 137 138 sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL; 139 sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC; 140 sess_conf->ipsec = sa->ipsec_xform; 141 142 dir = sa->ipsec_xform.direction; 143 verify = flags->tunnel_hdr_verify; 144 145 if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) { 146 if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR) 147 src_v4 += 1; 148 else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR) 149 dst_v4 += 1; 150 } 151 152 if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) { 153 if (sa->ipsec_xform.tunnel.type == 154 RTE_SECURITY_IPSEC_TUNNEL_IPV4) { 155 memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4, 156 sizeof(src_v4)); 157 memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4, 158 sizeof(dst_v4)); 159 160 if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1) 161 sess_conf->ipsec.tunnel.ipv4.df = 0; 162 163 if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0) 164 sess_conf->ipsec.tunnel.ipv4.df = 1; 165 166 if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1) 167 sess_conf->ipsec.tunnel.ipv4.dscp = 0; 168 169 if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) 170 sess_conf->ipsec.tunnel.ipv4.dscp = 171 TEST_IPSEC_DSCP_VAL; 172 } else { 173 if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1) 174 sess_conf->ipsec.tunnel.ipv6.dscp = 0; 175 176 if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) 177 sess_conf->ipsec.tunnel.ipv6.dscp = 178 TEST_IPSEC_DSCP_VAL; 179 180 if (flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1) 181 sess_conf->ipsec.tunnel.ipv6.flabel = 0; 182 183 if (flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0) 184 sess_conf->ipsec.tunnel.ipv6.flabel = 185 TEST_IPSEC_FLABEL_VAL; 186 187 memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6, 188 sizeof(src_v6)); 189 memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6, 190 sizeof(dst_v6)); 191 } 192 } 193 194 /* Save SA as userdata for the security session. When 195 * the packet is received, this userdata will be 196 * retrieved using the metadata from the packet. 197 * 198 * The PMD is expected to set similar metadata for other 199 * operations, like rte_eth_event, which are tied to 200 * security session. In such cases, the userdata could 201 * be obtained to uniquely identify the security 202 * parameters denoted. 203 */ 204 205 sess_conf->userdata = (void *) sa; 206 207 sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid); 208 if (sec_ctx == NULL) { 209 printf("Ethernet device doesn't support security features.\n"); 210 return TEST_SKIPPED; 211 } 212 213 sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL; 214 sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC; 215 sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto; 216 sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode; 217 sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction; 218 sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx); 219 if (sec_cap == NULL) { 220 printf("No capabilities registered\n"); 221 return TEST_SKIPPED; 222 } 223 224 if (sa->aead || sa->aes_gmac) 225 memcpy(&sess_conf->ipsec.salt, sa->salt.data, 226 RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len)); 227 228 /* Copy cipher session parameters */ 229 if (sa->aead) { 230 rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead, 231 sizeof(struct rte_crypto_sym_xform)); 232 sess_conf->crypto_xform->aead.key.data = sa->key.data; 233 /* Verify crypto capabilities */ 234 if (test_ipsec_crypto_caps_aead_verify(sec_cap, 235 sess_conf->crypto_xform) != 0) { 236 RTE_LOG(INFO, USER1, 237 "Crypto capabilities not supported\n"); 238 return TEST_SKIPPED; 239 } 240 } else { 241 if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 242 rte_memcpy(&sess_conf->crypto_xform->cipher, 243 &sa->xform.chain.cipher.cipher, 244 sizeof(struct rte_crypto_cipher_xform)); 245 246 rte_memcpy(&sess_conf->crypto_xform->next->auth, 247 &sa->xform.chain.auth.auth, 248 sizeof(struct rte_crypto_auth_xform)); 249 sess_conf->crypto_xform->cipher.key.data = 250 sa->key.data; 251 sess_conf->crypto_xform->next->auth.key.data = 252 sa->auth_key.data; 253 /* Verify crypto capabilities */ 254 if (test_ipsec_crypto_caps_cipher_verify(sec_cap, 255 sess_conf->crypto_xform) != 0) { 256 RTE_LOG(INFO, USER1, 257 "Cipher crypto capabilities not supported\n"); 258 return TEST_SKIPPED; 259 } 260 261 if (test_ipsec_crypto_caps_auth_verify(sec_cap, 262 sess_conf->crypto_xform->next) != 0) { 263 RTE_LOG(INFO, USER1, 264 "Auth crypto capabilities not supported\n"); 265 return TEST_SKIPPED; 266 } 267 } else { 268 rte_memcpy(&sess_conf->crypto_xform->next->cipher, 269 &sa->xform.chain.cipher.cipher, 270 sizeof(struct rte_crypto_cipher_xform)); 271 rte_memcpy(&sess_conf->crypto_xform->auth, 272 &sa->xform.chain.auth.auth, 273 sizeof(struct rte_crypto_auth_xform)); 274 sess_conf->crypto_xform->auth.key.data = 275 sa->auth_key.data; 276 sess_conf->crypto_xform->next->cipher.key.data = 277 sa->key.data; 278 279 /* Verify crypto capabilities */ 280 if (test_ipsec_crypto_caps_cipher_verify(sec_cap, 281 sess_conf->crypto_xform->next) != 0) { 282 RTE_LOG(INFO, USER1, 283 "Cipher crypto capabilities not supported\n"); 284 return TEST_SKIPPED; 285 } 286 287 if (test_ipsec_crypto_caps_auth_verify(sec_cap, 288 sess_conf->crypto_xform) != 0) { 289 RTE_LOG(INFO, USER1, 290 "Auth crypto capabilities not supported\n"); 291 return TEST_SKIPPED; 292 } 293 } 294 } 295 296 if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0) 297 return TEST_SKIPPED; 298 299 if ((sa->ipsec_xform.direction == 300 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) && 301 (sa->ipsec_xform.options.iv_gen_disable == 1)) { 302 /* Set env variable when IV generation is disabled */ 303 char arr[128]; 304 int len = 0, j = 0; 305 int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16; 306 307 for (; j < iv_len; j++) 308 len += snprintf(arr+len, sizeof(arr) - len, 309 "0x%x, ", sa->iv.data[j]); 310 setenv("ETH_SEC_IV_OVR", arr, 1); 311 } 312 313 *sess = rte_security_session_create(sec_ctx, sess_conf, sess_pool); 314 if (*sess == NULL) { 315 printf("SEC Session init failed.\n"); 316 return TEST_FAILED; 317 } 318 319 *ol_flags = sec_cap->ol_flags; 320 *ctx = sec_ctx; 321 322 return 0; 323 } 324 325 /* Check the link status of all ports in up to 3s, and print them finally */ 326 static void 327 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask) 328 { 329 #define CHECK_INTERVAL 100 /* 100ms */ 330 #define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */ 331 uint16_t portid; 332 uint8_t count, all_ports_up, print_flag = 0; 333 struct rte_eth_link link; 334 int ret; 335 char link_status[RTE_ETH_LINK_MAX_STR_LEN]; 336 337 printf("Checking link statuses...\n"); 338 fflush(stdout); 339 for (count = 0; count <= MAX_CHECK_TIME; count++) { 340 all_ports_up = 1; 341 for (portid = 0; portid < port_num; portid++) { 342 if ((port_mask & (1 << portid)) == 0) 343 continue; 344 memset(&link, 0, sizeof(link)); 345 ret = rte_eth_link_get_nowait(portid, &link); 346 if (ret < 0) { 347 all_ports_up = 0; 348 if (print_flag == 1) 349 printf("Port %u link get failed: %s\n", 350 portid, rte_strerror(-ret)); 351 continue; 352 } 353 354 /* print link status if flag set */ 355 if (print_flag == 1) { 356 if (link.link_status && link_mbps == 0) 357 link_mbps = link.link_speed; 358 359 rte_eth_link_to_str(link_status, 360 sizeof(link_status), &link); 361 printf("Port %d %s\n", portid, link_status); 362 continue; 363 } 364 /* clear all_ports_up flag if any link down */ 365 if (link.link_status == RTE_ETH_LINK_DOWN) { 366 all_ports_up = 0; 367 break; 368 } 369 } 370 /* after finally printing all link status, get out */ 371 if (print_flag == 1) 372 break; 373 374 if (all_ports_up == 0) { 375 fflush(stdout); 376 rte_delay_ms(CHECK_INTERVAL); 377 } 378 379 /* set the print_flag if all ports up or timeout */ 380 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) 381 print_flag = 1; 382 } 383 } 384 385 static void 386 print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr) 387 { 388 char buf[RTE_ETHER_ADDR_FMT_SIZE]; 389 rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); 390 printf("%s%s", name, buf); 391 } 392 393 static void 394 copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len, 395 struct rte_mbuf *pkt, unsigned int offset) 396 { 397 unsigned int copied = 0; 398 unsigned int copy_len; 399 struct rte_mbuf *seg; 400 void *seg_buf; 401 402 seg = pkt; 403 while (offset >= seg->data_len) { 404 offset -= seg->data_len; 405 seg = seg->next; 406 } 407 copy_len = seg->data_len - offset; 408 seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset); 409 while (len > copy_len) { 410 rte_memcpy(seg_buf, buf + copied, (size_t) copy_len); 411 len -= copy_len; 412 copied += copy_len; 413 seg = seg->next; 414 seg_buf = rte_pktmbuf_mtod(seg, void *); 415 } 416 rte_memcpy(seg_buf, buf + copied, (size_t) len); 417 } 418 419 static bool 420 is_outer_ipv4(struct ipsec_test_data *td) 421 { 422 bool outer_ipv4; 423 424 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS || 425 td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) 426 outer_ipv4 = (((td->input_text.data[0] & 0xF0) >> 4) == IPVERSION); 427 else 428 outer_ipv4 = (td->ipsec_xform.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4); 429 return outer_ipv4; 430 } 431 432 static inline struct rte_mbuf * 433 init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len, bool outer_ipv4) 434 { 435 struct rte_mbuf *pkt; 436 437 pkt = rte_pktmbuf_alloc(mp); 438 if (pkt == NULL) 439 return NULL; 440 441 if (outer_ipv4) { 442 rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN), 443 &dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN); 444 pkt->l3_len = sizeof(struct rte_ipv4_hdr); 445 } else { 446 rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN), 447 &dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN); 448 pkt->l3_len = sizeof(struct rte_ipv6_hdr); 449 } 450 pkt->l2_len = RTE_ETHER_HDR_LEN; 451 452 if (pkt->buf_len > (len + RTE_ETHER_HDR_LEN)) 453 rte_memcpy(rte_pktmbuf_append(pkt, len), data, len); 454 else 455 copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN); 456 return pkt; 457 } 458 459 static int 460 init_mempools(unsigned int nb_mbuf) 461 { 462 struct rte_security_ctx *sec_ctx; 463 uint16_t nb_sess = 512; 464 uint32_t sess_sz; 465 char s[64]; 466 467 if (mbufpool == NULL) { 468 snprintf(s, sizeof(s), "mbuf_pool"); 469 mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf, 470 MEMPOOL_CACHE_SIZE, 0, 471 RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY); 472 if (mbufpool == NULL) { 473 printf("Cannot init mbuf pool\n"); 474 return TEST_FAILED; 475 } 476 printf("Allocated mbuf pool\n"); 477 } 478 479 sec_ctx = rte_eth_dev_get_sec_ctx(port_id); 480 if (sec_ctx == NULL) { 481 printf("Device does not support Security ctx\n"); 482 return TEST_SKIPPED; 483 } 484 sess_sz = rte_security_session_get_size(sec_ctx); 485 if (sess_pool == NULL) { 486 snprintf(s, sizeof(s), "sess_pool"); 487 sess_pool = rte_mempool_create(s, nb_sess, sess_sz, 488 MEMPOOL_CACHE_SIZE, 0, 489 NULL, NULL, NULL, NULL, 490 SOCKET_ID_ANY, 0); 491 if (sess_pool == NULL) { 492 printf("Cannot init sess pool\n"); 493 return TEST_FAILED; 494 } 495 printf("Allocated sess pool\n"); 496 } 497 498 return 0; 499 } 500 501 static int 502 create_default_flow(uint16_t portid) 503 { 504 struct rte_flow_action action[2]; 505 struct rte_flow_item pattern[2]; 506 struct rte_flow_attr attr = {0}; 507 struct rte_flow_error err; 508 struct rte_flow *flow; 509 int ret; 510 511 /* Add the default rte_flow to enable SECURITY for all ESP packets */ 512 513 pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP; 514 pattern[0].spec = NULL; 515 pattern[0].mask = NULL; 516 pattern[0].last = NULL; 517 pattern[1].type = RTE_FLOW_ITEM_TYPE_END; 518 519 action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY; 520 action[0].conf = NULL; 521 action[1].type = RTE_FLOW_ACTION_TYPE_END; 522 action[1].conf = NULL; 523 524 attr.ingress = 1; 525 526 ret = rte_flow_validate(portid, &attr, pattern, action, &err); 527 if (ret) { 528 printf("\nValidate flow failed, ret = %d\n", ret); 529 return -1; 530 } 531 flow = rte_flow_create(portid, &attr, pattern, action, &err); 532 if (flow == NULL) { 533 printf("\nDefault flow rule create failed\n"); 534 return -1; 535 } 536 537 default_flow[portid] = flow; 538 539 return 0; 540 } 541 542 static void 543 destroy_default_flow(uint16_t portid) 544 { 545 struct rte_flow_error err; 546 int ret; 547 548 if (!default_flow[portid]) 549 return; 550 ret = rte_flow_destroy(portid, default_flow[portid], &err); 551 if (ret) { 552 printf("\nDefault flow rule destroy failed\n"); 553 return; 554 } 555 default_flow[portid] = NULL; 556 } 557 558 struct rte_mbuf **tx_pkts_burst; 559 struct rte_mbuf **rx_pkts_burst; 560 561 static int 562 compare_pkt_data(struct rte_mbuf *m, uint8_t *ref, unsigned int tot_len) 563 { 564 unsigned int len; 565 unsigned int nb_segs = m->nb_segs; 566 unsigned int matched = 0; 567 struct rte_mbuf *save = m; 568 569 while (m) { 570 len = tot_len; 571 if (len > m->data_len) 572 len = m->data_len; 573 if (len != 0) { 574 if (memcmp(rte_pktmbuf_mtod(m, char *), 575 ref + matched, len)) { 576 printf("\n====Reassembly case failed: Data Mismatch"); 577 rte_hexdump(stdout, "Reassembled", 578 rte_pktmbuf_mtod(m, char *), 579 len); 580 rte_hexdump(stdout, "reference", 581 ref + matched, 582 len); 583 return TEST_FAILED; 584 } 585 } 586 tot_len -= len; 587 matched += len; 588 m = m->next; 589 } 590 591 if (tot_len) { 592 printf("\n====Reassembly case failed: Data Missing %u", 593 tot_len); 594 printf("\n====nb_segs %u, tot_len %u", nb_segs, tot_len); 595 rte_pktmbuf_dump(stderr, save, -1); 596 return TEST_FAILED; 597 } 598 return TEST_SUCCESS; 599 } 600 601 static inline bool 602 is_ip_reassembly_incomplete(struct rte_mbuf *mbuf) 603 { 604 static uint64_t ip_reassembly_dynflag; 605 int ip_reassembly_dynflag_offset; 606 607 if (ip_reassembly_dynflag == 0) { 608 ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup( 609 RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL); 610 if (ip_reassembly_dynflag_offset < 0) 611 return false; 612 ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset); 613 } 614 615 return (mbuf->ol_flags & ip_reassembly_dynflag) != 0; 616 } 617 618 static void 619 free_mbuf(struct rte_mbuf *mbuf) 620 { 621 rte_eth_ip_reassembly_dynfield_t dynfield; 622 623 if (!mbuf) 624 return; 625 626 if (!is_ip_reassembly_incomplete(mbuf)) { 627 rte_pktmbuf_free(mbuf); 628 } else { 629 if (ip_reassembly_dynfield_offset < 0) 630 return; 631 632 while (mbuf) { 633 dynfield = *RTE_MBUF_DYNFIELD(mbuf, 634 ip_reassembly_dynfield_offset, 635 rte_eth_ip_reassembly_dynfield_t *); 636 rte_pktmbuf_free(mbuf); 637 mbuf = dynfield.next_frag; 638 } 639 } 640 } 641 642 643 static int 644 get_and_verify_incomplete_frags(struct rte_mbuf *mbuf, 645 struct reassembly_vector *vector) 646 { 647 rte_eth_ip_reassembly_dynfield_t *dynfield[MAX_PKT_BURST]; 648 int j = 0, ret; 649 /** 650 * IP reassembly offload is incomplete, and fragments are listed in 651 * dynfield which can be reassembled in SW. 652 */ 653 printf("\nHW IP Reassembly is not complete; attempt SW IP Reassembly," 654 "\nMatching with original frags."); 655 656 if (ip_reassembly_dynfield_offset < 0) 657 return -1; 658 659 printf("\ncomparing frag: %d", j); 660 /* Skip Ethernet header comparison */ 661 rte_pktmbuf_adj(mbuf, RTE_ETHER_HDR_LEN); 662 ret = compare_pkt_data(mbuf, vector->frags[j]->data, 663 vector->frags[j]->len); 664 if (ret) 665 return ret; 666 j++; 667 dynfield[j] = RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset, 668 rte_eth_ip_reassembly_dynfield_t *); 669 printf("\ncomparing frag: %d", j); 670 /* Skip Ethernet header comparison */ 671 rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN); 672 ret = compare_pkt_data(dynfield[j]->next_frag, vector->frags[j]->data, 673 vector->frags[j]->len); 674 if (ret) 675 return ret; 676 677 while ((dynfield[j]->nb_frags > 1) && 678 is_ip_reassembly_incomplete(dynfield[j]->next_frag)) { 679 j++; 680 dynfield[j] = RTE_MBUF_DYNFIELD(dynfield[j-1]->next_frag, 681 ip_reassembly_dynfield_offset, 682 rte_eth_ip_reassembly_dynfield_t *); 683 printf("\ncomparing frag: %d", j); 684 /* Skip Ethernet header comparison */ 685 rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN); 686 ret = compare_pkt_data(dynfield[j]->next_frag, 687 vector->frags[j]->data, vector->frags[j]->len); 688 if (ret) 689 return ret; 690 } 691 return ret; 692 } 693 694 static int 695 test_ipsec_with_reassembly(struct reassembly_vector *vector, 696 const struct ipsec_test_flags *flags) 697 { 698 void *out_ses[ENCAP_DECAP_BURST_SZ] = {0}; 699 void *in_ses[ENCAP_DECAP_BURST_SZ] = {0}; 700 struct rte_eth_ip_reassembly_params reass_capa = {0}; 701 struct rte_security_session_conf sess_conf_out = {0}; 702 struct rte_security_session_conf sess_conf_in = {0}; 703 unsigned int nb_tx, burst_sz, nb_sent = 0; 704 struct rte_crypto_sym_xform cipher_out = {0}; 705 struct rte_crypto_sym_xform auth_out = {0}; 706 struct rte_crypto_sym_xform aead_out = {0}; 707 struct rte_crypto_sym_xform cipher_in = {0}; 708 struct rte_crypto_sym_xform auth_in = {0}; 709 struct rte_crypto_sym_xform aead_in = {0}; 710 struct ipsec_test_data sa_data; 711 struct rte_security_ctx *ctx; 712 unsigned int i, nb_rx = 0, j; 713 uint32_t ol_flags; 714 bool outer_ipv4; 715 int ret = 0; 716 717 burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1; 718 nb_tx = vector->nb_frags * burst_sz; 719 720 rte_eth_dev_stop(port_id); 721 if (ret != 0) { 722 printf("rte_eth_dev_stop: err=%s, port=%u\n", 723 rte_strerror(-ret), port_id); 724 return ret; 725 } 726 rte_eth_ip_reassembly_capability_get(port_id, &reass_capa); 727 if (reass_capa.max_frags < vector->nb_frags) 728 return TEST_SKIPPED; 729 if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) { 730 reass_capa.timeout_ms = APP_REASS_TIMEOUT; 731 rte_eth_ip_reassembly_conf_set(port_id, &reass_capa); 732 } 733 734 ret = rte_eth_dev_start(port_id); 735 if (ret < 0) { 736 printf("rte_eth_dev_start: err=%d, port=%d\n", 737 ret, port_id); 738 return ret; 739 } 740 741 memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx); 742 memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx); 743 744 memcpy(&sa_data, vector->sa_data, sizeof(struct ipsec_test_data)); 745 sa_data.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS; 746 outer_ipv4 = is_outer_ipv4(&sa_data); 747 748 for (i = 0; i < nb_tx; i += vector->nb_frags) { 749 for (j = 0; j < vector->nb_frags; j++) { 750 tx_pkts_burst[i+j] = init_packet(mbufpool, 751 vector->frags[j]->data, 752 vector->frags[j]->len, outer_ipv4); 753 if (tx_pkts_burst[i+j] == NULL) { 754 ret = -1; 755 printf("\n packed init failed\n"); 756 goto out; 757 } 758 } 759 } 760 761 for (i = 0; i < burst_sz; i++) { 762 memcpy(&sa_data, vector->sa_data, 763 sizeof(struct ipsec_test_data)); 764 /* Update SPI for every new SA */ 765 sa_data.ipsec_xform.spi += i; 766 sa_data.ipsec_xform.direction = 767 RTE_SECURITY_IPSEC_SA_DIR_EGRESS; 768 if (sa_data.aead) { 769 sess_conf_out.crypto_xform = &aead_out; 770 } else { 771 sess_conf_out.crypto_xform = &cipher_out; 772 sess_conf_out.crypto_xform->next = &auth_out; 773 } 774 775 /* Create Inline IPsec outbound session. */ 776 ret = create_inline_ipsec_session(&sa_data, port_id, 777 &out_ses[i], &ctx, &ol_flags, flags, 778 &sess_conf_out); 779 if (ret) { 780 printf("\nInline outbound session create failed\n"); 781 goto out; 782 } 783 } 784 785 j = 0; 786 for (i = 0; i < nb_tx; i++) { 787 if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA) 788 rte_security_set_pkt_metadata(ctx, 789 out_ses[j], tx_pkts_burst[i], NULL); 790 tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD; 791 792 /* Move to next SA after nb_frags */ 793 if ((i + 1) % vector->nb_frags == 0) 794 j++; 795 } 796 797 for (i = 0; i < burst_sz; i++) { 798 memcpy(&sa_data, vector->sa_data, 799 sizeof(struct ipsec_test_data)); 800 /* Update SPI for every new SA */ 801 sa_data.ipsec_xform.spi += i; 802 sa_data.ipsec_xform.direction = 803 RTE_SECURITY_IPSEC_SA_DIR_INGRESS; 804 805 if (sa_data.aead) { 806 sess_conf_in.crypto_xform = &aead_in; 807 } else { 808 sess_conf_in.crypto_xform = &auth_in; 809 sess_conf_in.crypto_xform->next = &cipher_in; 810 } 811 /* Create Inline IPsec inbound session. */ 812 ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i], 813 &ctx, &ol_flags, flags, &sess_conf_in); 814 if (ret) { 815 printf("\nInline inbound session create failed\n"); 816 goto out; 817 } 818 } 819 820 /* Retrieve reassembly dynfield offset if available */ 821 if (ip_reassembly_dynfield_offset < 0 && vector->nb_frags > 1) 822 ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup( 823 RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL); 824 825 826 ret = create_default_flow(port_id); 827 if (ret) 828 goto out; 829 830 nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx); 831 if (nb_sent != nb_tx) { 832 ret = -1; 833 printf("\nFailed to tx %u pkts", nb_tx); 834 goto out; 835 } 836 837 rte_delay_ms(1); 838 839 /* Retry few times before giving up */ 840 nb_rx = 0; 841 j = 0; 842 do { 843 nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx], 844 nb_tx - nb_rx); 845 j++; 846 if (nb_rx >= nb_tx) 847 break; 848 rte_delay_ms(1); 849 } while (j < 5 || !nb_rx); 850 851 /* Check for minimum number of Rx packets expected */ 852 if ((vector->nb_frags == 1 && nb_rx != nb_tx) || 853 (vector->nb_frags > 1 && nb_rx < burst_sz)) { 854 printf("\nreceived less Rx pkts(%u) pkts\n", nb_rx); 855 ret = TEST_FAILED; 856 goto out; 857 } 858 859 for (i = 0; i < nb_rx; i++) { 860 if (vector->nb_frags > 1 && 861 is_ip_reassembly_incomplete(rx_pkts_burst[i])) { 862 ret = get_and_verify_incomplete_frags(rx_pkts_burst[i], 863 vector); 864 if (ret != TEST_SUCCESS) 865 break; 866 continue; 867 } 868 869 if (rx_pkts_burst[i]->ol_flags & 870 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED || 871 !(rx_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) { 872 printf("\nsecurity offload failed\n"); 873 ret = TEST_FAILED; 874 break; 875 } 876 877 if (vector->full_pkt->len + RTE_ETHER_HDR_LEN != 878 rx_pkts_burst[i]->pkt_len) { 879 printf("\nreassembled/decrypted packet length mismatch\n"); 880 ret = TEST_FAILED; 881 break; 882 } 883 rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN); 884 ret = compare_pkt_data(rx_pkts_burst[i], 885 vector->full_pkt->data, 886 vector->full_pkt->len); 887 if (ret != TEST_SUCCESS) 888 break; 889 } 890 891 out: 892 destroy_default_flow(port_id); 893 894 /* Clear session data. */ 895 for (i = 0; i < burst_sz; i++) { 896 if (out_ses[i]) 897 rte_security_session_destroy(ctx, out_ses[i]); 898 if (in_ses[i]) 899 rte_security_session_destroy(ctx, in_ses[i]); 900 } 901 902 for (i = nb_sent; i < nb_tx; i++) 903 free_mbuf(tx_pkts_burst[i]); 904 for (i = 0; i < nb_rx; i++) 905 free_mbuf(rx_pkts_burst[i]); 906 return ret; 907 } 908 909 static int 910 event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 911 { 912 struct rte_event ev; 913 int i, nb_sent = 0; 914 915 /* Convert packets to events */ 916 memset(&ev, 0, sizeof(ev)); 917 ev.sched_type = RTE_SCHED_TYPE_PARALLEL; 918 for (i = 0; i < nb_pkts; i++) { 919 ev.mbuf = tx_pkts[i]; 920 nb_sent += rte_event_eth_tx_adapter_enqueue( 921 eventdev_id, port_id, &ev, 1, 0); 922 } 923 924 return nb_sent; 925 } 926 927 static int 928 event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx) 929 { 930 int nb_ev, nb_rx = 0, j = 0; 931 const int ms_per_pkt = 3; 932 struct rte_event ev; 933 934 do { 935 nb_ev = rte_event_dequeue_burst(eventdev_id, port_id, 936 &ev, 1, 0); 937 938 if (nb_ev == 0) { 939 rte_delay_ms(1); 940 continue; 941 } 942 943 /* Get packet from event */ 944 if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) { 945 printf("Unsupported event type: %i\n", 946 ev.event_type); 947 continue; 948 } 949 rx_pkts[nb_rx++] = ev.mbuf; 950 } while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx); 951 952 return nb_rx; 953 } 954 955 static int 956 test_ipsec_inline_sa_exp_event_callback(uint16_t port_id, 957 enum rte_eth_event_type type, void *param, void *ret_param) 958 { 959 struct sa_expiry_vector *vector = (struct sa_expiry_vector *)param; 960 struct rte_eth_event_ipsec_desc *event_desc = NULL; 961 962 RTE_SET_USED(port_id); 963 964 if (type != RTE_ETH_EVENT_IPSEC) 965 return -1; 966 967 event_desc = ret_param; 968 if (event_desc == NULL) { 969 printf("Event descriptor not set\n"); 970 return -1; 971 } 972 vector->notify_event = true; 973 if (event_desc->metadata != (uint64_t)vector->sa_data) { 974 printf("Mismatch in event specific metadata\n"); 975 return -1; 976 } 977 switch (event_desc->subtype) { 978 case RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY: 979 vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY; 980 break; 981 case RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY: 982 vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY; 983 break; 984 case RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY: 985 vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY; 986 break; 987 case RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY: 988 vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY; 989 break; 990 default: 991 printf("Invalid IPsec event reported\n"); 992 return -1; 993 } 994 995 return 0; 996 } 997 998 static enum rte_eth_event_ipsec_subtype 999 test_ipsec_inline_setup_expiry_vector(struct sa_expiry_vector *vector, 1000 const struct ipsec_test_flags *flags, 1001 struct ipsec_test_data *tdata) 1002 { 1003 enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN; 1004 1005 vector->event = RTE_ETH_EVENT_IPSEC_UNKNOWN; 1006 vector->notify_event = false; 1007 vector->sa_data = (void *)tdata; 1008 if (flags->sa_expiry_pkts_soft) 1009 event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY; 1010 else if (flags->sa_expiry_bytes_soft) 1011 event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY; 1012 else if (flags->sa_expiry_pkts_hard) 1013 event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY; 1014 else 1015 event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY; 1016 rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_IPSEC, 1017 test_ipsec_inline_sa_exp_event_callback, vector); 1018 1019 return event; 1020 } 1021 1022 static int 1023 test_ipsec_inline_proto_process(struct ipsec_test_data *td, 1024 struct ipsec_test_data *res_d, 1025 int nb_pkts, 1026 bool silent, 1027 const struct ipsec_test_flags *flags) 1028 { 1029 enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN; 1030 struct rte_security_session_conf sess_conf = {0}; 1031 struct rte_crypto_sym_xform cipher = {0}; 1032 struct rte_crypto_sym_xform auth = {0}; 1033 struct rte_crypto_sym_xform aead = {0}; 1034 struct sa_expiry_vector vector = {0}; 1035 struct rte_security_ctx *ctx; 1036 int nb_rx = 0, nb_sent; 1037 uint32_t ol_flags; 1038 int i, j = 0, ret; 1039 bool outer_ipv4; 1040 void *ses; 1041 1042 memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts); 1043 1044 if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft || 1045 flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) { 1046 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) 1047 return TEST_SUCCESS; 1048 event = test_ipsec_inline_setup_expiry_vector(&vector, flags, td); 1049 } 1050 1051 if (td->aead) { 1052 sess_conf.crypto_xform = &aead; 1053 } else { 1054 if (td->ipsec_xform.direction == 1055 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 1056 sess_conf.crypto_xform = &cipher; 1057 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; 1058 sess_conf.crypto_xform->next = &auth; 1059 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH; 1060 } else { 1061 sess_conf.crypto_xform = &auth; 1062 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH; 1063 sess_conf.crypto_xform->next = &cipher; 1064 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER; 1065 } 1066 } 1067 1068 /* Create Inline IPsec session. */ 1069 ret = create_inline_ipsec_session(td, port_id, &ses, &ctx, 1070 &ol_flags, flags, &sess_conf); 1071 if (ret) 1072 return ret; 1073 1074 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 1075 ret = create_default_flow(port_id); 1076 if (ret) 1077 goto out; 1078 } 1079 outer_ipv4 = is_outer_ipv4(td); 1080 1081 for (i = 0; i < nb_pkts; i++) { 1082 tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data, 1083 td->input_text.len, outer_ipv4); 1084 if (tx_pkts_burst[i] == NULL) { 1085 while (i--) 1086 rte_pktmbuf_free(tx_pkts_burst[i]); 1087 ret = TEST_FAILED; 1088 goto out; 1089 } 1090 1091 if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i], 1092 uint8_t *, RTE_ETHER_HDR_LEN), flags)) { 1093 while (i--) 1094 rte_pktmbuf_free(tx_pkts_burst[i]); 1095 ret = TEST_FAILED; 1096 goto out; 1097 } 1098 1099 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 1100 if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA) 1101 rte_security_set_pkt_metadata(ctx, ses, 1102 tx_pkts_burst[i], NULL); 1103 tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD; 1104 } 1105 } 1106 /* Send packet to ethdev for inline IPsec processing. */ 1107 if (event_mode_enabled) 1108 nb_sent = event_tx_burst(tx_pkts_burst, nb_pkts); 1109 else 1110 nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts); 1111 1112 if (nb_sent != nb_pkts) { 1113 printf("\nUnable to TX %d packets, sent: %i", nb_pkts, nb_sent); 1114 for ( ; nb_sent < nb_pkts; nb_sent++) 1115 rte_pktmbuf_free(tx_pkts_burst[nb_sent]); 1116 ret = TEST_FAILED; 1117 goto out; 1118 } 1119 1120 rte_pause(); 1121 1122 /* Receive back packet on loopback interface. */ 1123 if (event_mode_enabled) 1124 nb_rx = event_rx_burst(rx_pkts_burst, nb_sent); 1125 else 1126 do { 1127 rte_delay_ms(1); 1128 nb_rx += rte_eth_rx_burst(port_id, 0, 1129 &rx_pkts_burst[nb_rx], 1130 nb_sent - nb_rx); 1131 if (nb_rx >= nb_sent) 1132 break; 1133 } while (j++ < 5 || nb_rx == 0); 1134 1135 if (!flags->sa_expiry_pkts_hard && 1136 !flags->sa_expiry_bytes_hard && 1137 (nb_rx != nb_sent)) { 1138 printf("\nUnable to RX all %d packets, received(%i)", 1139 nb_sent, nb_rx); 1140 while (--nb_rx >= 0) 1141 rte_pktmbuf_free(rx_pkts_burst[nb_rx]); 1142 ret = TEST_FAILED; 1143 goto out; 1144 } 1145 1146 for (i = 0; i < nb_rx; i++) { 1147 rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN); 1148 1149 ret = test_ipsec_post_process(rx_pkts_burst[i], td, 1150 res_d, silent, flags); 1151 if (ret != TEST_SUCCESS) { 1152 for ( ; i < nb_rx; i++) 1153 rte_pktmbuf_free(rx_pkts_burst[i]); 1154 goto out; 1155 } 1156 1157 ret = test_ipsec_stats_verify(ctx, ses, flags, 1158 td->ipsec_xform.direction); 1159 if (ret != TEST_SUCCESS) { 1160 for ( ; i < nb_rx; i++) 1161 rte_pktmbuf_free(rx_pkts_burst[i]); 1162 goto out; 1163 } 1164 1165 rte_pktmbuf_free(rx_pkts_burst[i]); 1166 rx_pkts_burst[i] = NULL; 1167 } 1168 1169 out: 1170 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) 1171 destroy_default_flow(port_id); 1172 if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft || 1173 flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) { 1174 if (vector.notify_event && (vector.event == event)) 1175 ret = TEST_SUCCESS; 1176 else 1177 ret = TEST_FAILED; 1178 1179 rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_IPSEC, 1180 test_ipsec_inline_sa_exp_event_callback, &vector); 1181 } 1182 1183 /* Destroy session so that other cases can create the session again */ 1184 rte_security_session_destroy(ctx, ses); 1185 ses = NULL; 1186 1187 return ret; 1188 } 1189 1190 static int 1191 test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags) 1192 { 1193 struct ipsec_test_data td_outb; 1194 struct ipsec_test_data td_inb; 1195 unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0; 1196 int ret; 1197 1198 if (flags->iv_gen || flags->sa_expiry_pkts_soft || 1199 flags->sa_expiry_bytes_soft || 1200 flags->sa_expiry_bytes_hard || 1201 flags->sa_expiry_pkts_hard) 1202 nb_pkts = IPSEC_TEST_PACKETS_MAX; 1203 1204 for (i = 0; i < RTE_DIM(alg_list); i++) { 1205 test_ipsec_td_prepare(alg_list[i].param1, 1206 alg_list[i].param2, 1207 flags, &td_outb, 1); 1208 1209 if (!td_outb.aead) { 1210 enum rte_crypto_cipher_algorithm cipher_alg; 1211 enum rte_crypto_auth_algorithm auth_alg; 1212 1213 cipher_alg = td_outb.xform.chain.cipher.cipher.algo; 1214 auth_alg = td_outb.xform.chain.auth.auth.algo; 1215 1216 if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL) 1217 continue; 1218 1219 /* ICV is not applicable for NULL auth */ 1220 if (flags->icv_corrupt && 1221 auth_alg == RTE_CRYPTO_AUTH_NULL) 1222 continue; 1223 1224 /* IV is not applicable for NULL cipher */ 1225 if (flags->iv_gen && 1226 cipher_alg == RTE_CRYPTO_CIPHER_NULL) 1227 continue; 1228 } 1229 1230 if (flags->udp_encap) 1231 td_outb.ipsec_xform.options.udp_encap = 1; 1232 1233 if (flags->sa_expiry_bytes_soft) 1234 td_outb.ipsec_xform.life.bytes_soft_limit = 1235 (((td_outb.output_text.len + RTE_ETHER_HDR_LEN) 1236 * nb_pkts) >> 3) - 1; 1237 if (flags->sa_expiry_pkts_hard) 1238 td_outb.ipsec_xform.life.packets_hard_limit = 1239 IPSEC_TEST_PACKETS_MAX - 1; 1240 if (flags->sa_expiry_bytes_hard) 1241 td_outb.ipsec_xform.life.bytes_hard_limit = 1242 (((td_outb.output_text.len + RTE_ETHER_HDR_LEN) 1243 * nb_pkts) >> 3) - 1; 1244 1245 ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts, 1246 false, flags); 1247 if (ret == TEST_SKIPPED) 1248 continue; 1249 1250 if (ret == TEST_FAILED) { 1251 printf("\n TEST FAILED"); 1252 test_ipsec_display_alg(alg_list[i].param1, 1253 alg_list[i].param2); 1254 fail_cnt++; 1255 continue; 1256 } 1257 1258 test_ipsec_td_update(&td_inb, &td_outb, 1, flags); 1259 1260 ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts, 1261 false, flags); 1262 if (ret == TEST_SKIPPED) 1263 continue; 1264 1265 if (ret == TEST_FAILED) { 1266 printf("\n TEST FAILED"); 1267 test_ipsec_display_alg(alg_list[i].param1, 1268 alg_list[i].param2); 1269 fail_cnt++; 1270 continue; 1271 } 1272 1273 if (flags->display_alg) 1274 test_ipsec_display_alg(alg_list[i].param1, 1275 alg_list[i].param2); 1276 1277 pass_cnt++; 1278 } 1279 1280 printf("Tests passed: %d, failed: %d", pass_cnt, fail_cnt); 1281 if (fail_cnt > 0) 1282 return TEST_FAILED; 1283 if (pass_cnt > 0) 1284 return TEST_SUCCESS; 1285 else 1286 return TEST_SKIPPED; 1287 } 1288 1289 static int 1290 test_ipsec_inline_proto_process_with_esn(struct ipsec_test_data td[], 1291 struct ipsec_test_data res_d[], 1292 int nb_pkts, 1293 bool silent, 1294 const struct ipsec_test_flags *flags) 1295 { 1296 struct rte_security_session_conf sess_conf = {0}; 1297 struct ipsec_test_data *res_d_tmp = NULL; 1298 struct rte_crypto_sym_xform cipher = {0}; 1299 struct rte_crypto_sym_xform auth = {0}; 1300 struct rte_crypto_sym_xform aead = {0}; 1301 struct rte_mbuf *rx_pkt = NULL; 1302 struct rte_mbuf *tx_pkt = NULL; 1303 int nb_rx, nb_sent; 1304 void *ses; 1305 struct rte_security_ctx *ctx; 1306 uint32_t ol_flags; 1307 bool outer_ipv4; 1308 int i, ret; 1309 1310 if (td[0].aead) { 1311 sess_conf.crypto_xform = &aead; 1312 } else { 1313 if (td[0].ipsec_xform.direction == 1314 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 1315 sess_conf.crypto_xform = &cipher; 1316 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; 1317 sess_conf.crypto_xform->next = &auth; 1318 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH; 1319 } else { 1320 sess_conf.crypto_xform = &auth; 1321 sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH; 1322 sess_conf.crypto_xform->next = &cipher; 1323 sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER; 1324 } 1325 } 1326 1327 /* Create Inline IPsec session. */ 1328 ret = create_inline_ipsec_session(&td[0], port_id, &ses, &ctx, 1329 &ol_flags, flags, &sess_conf); 1330 if (ret) 1331 return ret; 1332 1333 if (td[0].ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { 1334 ret = create_default_flow(port_id); 1335 if (ret) 1336 goto out; 1337 } 1338 outer_ipv4 = is_outer_ipv4(td); 1339 1340 for (i = 0; i < nb_pkts; i++) { 1341 tx_pkt = init_packet(mbufpool, td[i].input_text.data, 1342 td[i].input_text.len, outer_ipv4); 1343 if (tx_pkt == NULL) { 1344 ret = TEST_FAILED; 1345 goto out; 1346 } 1347 1348 if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkt, 1349 uint8_t *, RTE_ETHER_HDR_LEN), flags)) { 1350 ret = TEST_FAILED; 1351 goto out; 1352 } 1353 1354 if (td[i].ipsec_xform.direction == 1355 RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { 1356 if (flags->antireplay) { 1357 sess_conf.ipsec.esn.value = 1358 td[i].ipsec_xform.esn.value; 1359 ret = rte_security_session_update(ctx, ses, 1360 &sess_conf); 1361 if (ret) { 1362 printf("Could not update ESN in session\n"); 1363 rte_pktmbuf_free(tx_pkt); 1364 ret = TEST_SKIPPED; 1365 goto out; 1366 } 1367 } 1368 if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA) 1369 rte_security_set_pkt_metadata(ctx, ses, 1370 tx_pkt, NULL); 1371 tx_pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD; 1372 } 1373 /* Send packet to ethdev for inline IPsec processing. */ 1374 nb_sent = rte_eth_tx_burst(port_id, 0, &tx_pkt, 1); 1375 if (nb_sent != 1) { 1376 printf("\nUnable to TX packets"); 1377 rte_pktmbuf_free(tx_pkt); 1378 ret = TEST_FAILED; 1379 goto out; 1380 } 1381 1382 rte_pause(); 1383 1384 /* Receive back packet on loopback interface. */ 1385 do { 1386 rte_delay_ms(1); 1387 nb_rx = rte_eth_rx_burst(port_id, 0, &rx_pkt, 1); 1388 } while (nb_rx == 0); 1389 1390 rte_pktmbuf_adj(rx_pkt, RTE_ETHER_HDR_LEN); 1391 1392 if (res_d != NULL) 1393 res_d_tmp = &res_d[i]; 1394 1395 ret = test_ipsec_post_process(rx_pkt, &td[i], 1396 res_d_tmp, silent, flags); 1397 if (ret != TEST_SUCCESS) { 1398 rte_pktmbuf_free(rx_pkt); 1399 goto out; 1400 } 1401 1402 ret = test_ipsec_stats_verify(ctx, ses, flags, 1403 td->ipsec_xform.direction); 1404 if (ret != TEST_SUCCESS) { 1405 rte_pktmbuf_free(rx_pkt); 1406 goto out; 1407 } 1408 1409 rte_pktmbuf_free(rx_pkt); 1410 rx_pkt = NULL; 1411 tx_pkt = NULL; 1412 } 1413 1414 out: 1415 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) 1416 destroy_default_flow(port_id); 1417 1418 /* Destroy session so that other cases can create the session again */ 1419 rte_security_session_destroy(ctx, ses); 1420 ses = NULL; 1421 1422 return ret; 1423 } 1424 1425 static int 1426 ut_setup_inline_ipsec(void) 1427 { 1428 int ret; 1429 1430 /* Start device */ 1431 ret = rte_eth_dev_start(port_id); 1432 if (ret < 0) { 1433 printf("rte_eth_dev_start: err=%d, port=%d\n", 1434 ret, port_id); 1435 return ret; 1436 } 1437 /* always enable promiscuous */ 1438 ret = rte_eth_promiscuous_enable(port_id); 1439 if (ret != 0) { 1440 printf("rte_eth_promiscuous_enable: err=%s, port=%d\n", 1441 rte_strerror(-ret), port_id); 1442 return ret; 1443 } 1444 1445 check_all_ports_link_status(1, RTE_PORT_ALL); 1446 1447 return 0; 1448 } 1449 1450 static void 1451 ut_teardown_inline_ipsec(void) 1452 { 1453 struct rte_eth_ip_reassembly_params reass_conf = {0}; 1454 uint16_t portid; 1455 int ret; 1456 1457 /* port tear down */ 1458 RTE_ETH_FOREACH_DEV(portid) { 1459 ret = rte_eth_dev_stop(portid); 1460 if (ret != 0) 1461 printf("rte_eth_dev_stop: err=%s, port=%u\n", 1462 rte_strerror(-ret), portid); 1463 1464 /* Clear reassembly configuration */ 1465 rte_eth_ip_reassembly_conf_set(portid, &reass_conf); 1466 } 1467 } 1468 1469 static int 1470 inline_ipsec_testsuite_setup(void) 1471 { 1472 uint16_t nb_rxd; 1473 uint16_t nb_txd; 1474 uint16_t nb_ports; 1475 int ret; 1476 uint16_t nb_rx_queue = 1, nb_tx_queue = 1; 1477 1478 printf("Start inline IPsec test.\n"); 1479 1480 nb_ports = rte_eth_dev_count_avail(); 1481 if (nb_ports < NB_ETHPORTS_USED) { 1482 printf("At least %u port(s) used for test\n", 1483 NB_ETHPORTS_USED); 1484 return TEST_SKIPPED; 1485 } 1486 1487 ret = init_mempools(NB_MBUF); 1488 if (ret) 1489 return ret; 1490 1491 if (tx_pkts_burst == NULL) { 1492 tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff", 1493 MAX_TRAFFIC_BURST, 1494 sizeof(void *), 1495 RTE_CACHE_LINE_SIZE); 1496 if (!tx_pkts_burst) 1497 return TEST_FAILED; 1498 1499 rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff", 1500 MAX_TRAFFIC_BURST, 1501 sizeof(void *), 1502 RTE_CACHE_LINE_SIZE); 1503 if (!rx_pkts_burst) 1504 return TEST_FAILED; 1505 } 1506 1507 printf("Generate %d packets\n", MAX_TRAFFIC_BURST); 1508 1509 nb_rxd = RTE_TEST_RX_DESC_DEFAULT; 1510 nb_txd = RTE_TEST_TX_DESC_DEFAULT; 1511 1512 /* configuring port 0 for the test is enough */ 1513 port_id = 0; 1514 /* port configure */ 1515 ret = rte_eth_dev_configure(port_id, nb_rx_queue, 1516 nb_tx_queue, &port_conf); 1517 if (ret < 0) { 1518 printf("Cannot configure device: err=%d, port=%d\n", 1519 ret, port_id); 1520 return ret; 1521 } 1522 ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]); 1523 if (ret < 0) { 1524 printf("Cannot get mac address: err=%d, port=%d\n", 1525 ret, port_id); 1526 return ret; 1527 } 1528 printf("Port %u ", port_id); 1529 print_ethaddr("Address:", &ports_eth_addr[port_id]); 1530 printf("\n"); 1531 1532 /* tx queue setup */ 1533 ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, 1534 SOCKET_ID_ANY, &tx_conf); 1535 if (ret < 0) { 1536 printf("rte_eth_tx_queue_setup: err=%d, port=%d\n", 1537 ret, port_id); 1538 return ret; 1539 } 1540 /* rx queue steup */ 1541 ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY, 1542 &rx_conf, mbufpool); 1543 if (ret < 0) { 1544 printf("rte_eth_rx_queue_setup: err=%d, port=%d\n", 1545 ret, port_id); 1546 return ret; 1547 } 1548 test_ipsec_alg_list_populate(); 1549 1550 return 0; 1551 } 1552 1553 static void 1554 inline_ipsec_testsuite_teardown(void) 1555 { 1556 uint16_t portid; 1557 int ret; 1558 1559 /* port tear down */ 1560 RTE_ETH_FOREACH_DEV(portid) { 1561 ret = rte_eth_dev_reset(portid); 1562 if (ret != 0) 1563 printf("rte_eth_dev_reset: err=%s, port=%u\n", 1564 rte_strerror(-ret), port_id); 1565 } 1566 rte_free(tx_pkts_burst); 1567 rte_free(rx_pkts_burst); 1568 } 1569 1570 static int 1571 event_inline_ipsec_testsuite_setup(void) 1572 { 1573 struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0}; 1574 struct rte_event_dev_info evdev_default_conf = {0}; 1575 struct rte_event_dev_config eventdev_conf = {0}; 1576 struct rte_event_queue_conf eventq_conf = {0}; 1577 struct rte_event_port_conf ev_port_conf = {0}; 1578 const uint16_t nb_txd = 1024, nb_rxd = 1024; 1579 uint16_t nb_rx_queue = 1, nb_tx_queue = 1; 1580 uint8_t ev_queue_id = 0, tx_queue_id = 0; 1581 int nb_eventqueue = 1, nb_eventport = 1; 1582 const int all_queues = -1; 1583 uint32_t caps = 0; 1584 uint16_t nb_ports; 1585 int ret; 1586 1587 printf("Start event inline IPsec test.\n"); 1588 1589 nb_ports = rte_eth_dev_count_avail(); 1590 if (nb_ports == 0) { 1591 printf("Test require: 1 port, available: 0\n"); 1592 return TEST_SKIPPED; 1593 } 1594 1595 init_mempools(NB_MBUF); 1596 1597 if (tx_pkts_burst == NULL) { 1598 tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff", 1599 MAX_TRAFFIC_BURST, 1600 sizeof(void *), 1601 RTE_CACHE_LINE_SIZE); 1602 if (!tx_pkts_burst) 1603 return -1; 1604 1605 rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff", 1606 MAX_TRAFFIC_BURST, 1607 sizeof(void *), 1608 RTE_CACHE_LINE_SIZE); 1609 if (!rx_pkts_burst) 1610 return -1; 1611 1612 } 1613 1614 printf("Generate %d packets\n", MAX_TRAFFIC_BURST); 1615 1616 /* configuring port 0 for the test is enough */ 1617 port_id = 0; 1618 /* port configure */ 1619 ret = rte_eth_dev_configure(port_id, nb_rx_queue, 1620 nb_tx_queue, &port_conf); 1621 if (ret < 0) { 1622 printf("Cannot configure device: err=%d, port=%d\n", 1623 ret, port_id); 1624 return ret; 1625 } 1626 1627 /* Tx queue setup */ 1628 ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, 1629 SOCKET_ID_ANY, &tx_conf); 1630 if (ret < 0) { 1631 printf("rte_eth_tx_queue_setup: err=%d, port=%d\n", 1632 ret, port_id); 1633 return ret; 1634 } 1635 1636 /* rx queue steup */ 1637 ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY, 1638 &rx_conf, mbufpool); 1639 if (ret < 0) { 1640 printf("rte_eth_rx_queue_setup: err=%d, port=%d\n", 1641 ret, port_id); 1642 return ret; 1643 } 1644 1645 /* Setup eventdev */ 1646 eventdev_id = 0; 1647 rx_adapter_id = 0; 1648 tx_adapter_id = 0; 1649 1650 /* Get default conf of eventdev */ 1651 ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf); 1652 if (ret < 0) { 1653 printf("Error in getting event device info[devID:%d]\n", 1654 eventdev_id); 1655 return ret; 1656 } 1657 1658 /* Get Tx adapter capabilities */ 1659 ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, tx_adapter_id, &caps); 1660 if (ret < 0) { 1661 printf("Failed to get event device %d eth tx adapter" 1662 " capabilities for port %d\n", 1663 eventdev_id, port_id); 1664 return ret; 1665 } 1666 if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) 1667 tx_queue_id = nb_eventqueue++; 1668 1669 eventdev_conf.nb_events_limit = 1670 evdev_default_conf.max_num_events; 1671 eventdev_conf.nb_event_queue_flows = 1672 evdev_default_conf.max_event_queue_flows; 1673 eventdev_conf.nb_event_port_dequeue_depth = 1674 evdev_default_conf.max_event_port_dequeue_depth; 1675 eventdev_conf.nb_event_port_enqueue_depth = 1676 evdev_default_conf.max_event_port_enqueue_depth; 1677 1678 eventdev_conf.nb_event_queues = nb_eventqueue; 1679 eventdev_conf.nb_event_ports = nb_eventport; 1680 1681 /* Configure event device */ 1682 1683 ret = rte_event_dev_configure(eventdev_id, &eventdev_conf); 1684 if (ret < 0) { 1685 printf("Error in configuring event device\n"); 1686 return ret; 1687 } 1688 1689 /* Configure event queue */ 1690 eventq_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL; 1691 eventq_conf.nb_atomic_flows = 1024; 1692 eventq_conf.nb_atomic_order_sequences = 1024; 1693 1694 /* Setup the queue */ 1695 ret = rte_event_queue_setup(eventdev_id, ev_queue_id, &eventq_conf); 1696 if (ret < 0) { 1697 printf("Failed to setup event queue %d\n", ret); 1698 return ret; 1699 } 1700 1701 /* Configure event port */ 1702 ret = rte_event_port_setup(eventdev_id, port_id, NULL); 1703 if (ret < 0) { 1704 printf("Failed to setup event port %d\n", ret); 1705 return ret; 1706 } 1707 1708 /* Make event queue - event port link */ 1709 ret = rte_event_port_link(eventdev_id, port_id, NULL, NULL, 1); 1710 if (ret < 0) { 1711 printf("Failed to link event port %d\n", ret); 1712 return ret; 1713 } 1714 1715 /* Setup port conf */ 1716 ev_port_conf.new_event_threshold = 1200; 1717 ev_port_conf.dequeue_depth = 1718 evdev_default_conf.max_event_port_dequeue_depth; 1719 ev_port_conf.enqueue_depth = 1720 evdev_default_conf.max_event_port_enqueue_depth; 1721 1722 /* Create Rx adapter */ 1723 ret = rte_event_eth_rx_adapter_create(rx_adapter_id, eventdev_id, 1724 &ev_port_conf); 1725 if (ret < 0) { 1726 printf("Failed to create rx adapter %d\n", ret); 1727 return ret; 1728 } 1729 1730 /* Setup queue conf */ 1731 queue_conf.ev.queue_id = ev_queue_id; 1732 queue_conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL; 1733 queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV; 1734 1735 /* Add queue to the adapter */ 1736 ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id, 1737 all_queues, &queue_conf); 1738 if (ret < 0) { 1739 printf("Failed to add eth queue to rx adapter %d\n", ret); 1740 return ret; 1741 } 1742 1743 /* Start rx adapter */ 1744 ret = rte_event_eth_rx_adapter_start(rx_adapter_id); 1745 if (ret < 0) { 1746 printf("Failed to start rx adapter %d\n", ret); 1747 return ret; 1748 } 1749 1750 /* Create tx adapter */ 1751 ret = rte_event_eth_tx_adapter_create(tx_adapter_id, eventdev_id, 1752 &ev_port_conf); 1753 if (ret < 0) { 1754 printf("Failed to create tx adapter %d\n", ret); 1755 return ret; 1756 } 1757 1758 /* Add queue to the adapter */ 1759 ret = rte_event_eth_tx_adapter_queue_add(tx_adapter_id, port_id, 1760 all_queues); 1761 if (ret < 0) { 1762 printf("Failed to add eth queue to tx adapter %d\n", ret); 1763 return ret; 1764 } 1765 /* Setup Tx queue & port */ 1766 if (tx_queue_id) { 1767 /* Setup the queue */ 1768 ret = rte_event_queue_setup(eventdev_id, tx_queue_id, 1769 &eventq_conf); 1770 if (ret < 0) { 1771 printf("Failed to setup tx event queue %d\n", ret); 1772 return ret; 1773 } 1774 /* Link Tx event queue to Tx port */ 1775 ret = rte_event_port_link(eventdev_id, port_id, 1776 &tx_queue_id, NULL, 1); 1777 if (ret != 1) { 1778 printf("Failed to link event queue to port\n"); 1779 return ret; 1780 } 1781 } 1782 1783 /* Start tx adapter */ 1784 ret = rte_event_eth_tx_adapter_start(tx_adapter_id); 1785 if (ret < 0) { 1786 printf("Failed to start tx adapter %d\n", ret); 1787 return ret; 1788 } 1789 1790 /* Start eventdev */ 1791 ret = rte_event_dev_start(eventdev_id); 1792 if (ret < 0) { 1793 printf("Failed to start event device %d\n", ret); 1794 return ret; 1795 } 1796 1797 event_mode_enabled = true; 1798 test_ipsec_alg_list_populate(); 1799 1800 return 0; 1801 } 1802 1803 static void 1804 event_inline_ipsec_testsuite_teardown(void) 1805 { 1806 uint16_t portid; 1807 int ret; 1808 1809 event_mode_enabled = false; 1810 1811 /* Stop and release rx adapter */ 1812 ret = rte_event_eth_rx_adapter_stop(rx_adapter_id); 1813 if (ret < 0) 1814 printf("Failed to stop rx adapter %d\n", ret); 1815 ret = rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1); 1816 if (ret < 0) 1817 printf("Failed to remove rx adapter queues %d\n", ret); 1818 ret = rte_event_eth_rx_adapter_free(rx_adapter_id); 1819 if (ret < 0) 1820 printf("Failed to free rx adapter %d\n", ret); 1821 1822 /* Stop and release tx adapter */ 1823 ret = rte_event_eth_tx_adapter_stop(tx_adapter_id); 1824 if (ret < 0) 1825 printf("Failed to stop tx adapter %d\n", ret); 1826 ret = rte_event_eth_tx_adapter_queue_del(tx_adapter_id, port_id, -1); 1827 if (ret < 0) 1828 printf("Failed to remove tx adapter queues %d\n", ret); 1829 ret = rte_event_eth_tx_adapter_free(tx_adapter_id); 1830 if (ret < 0) 1831 printf("Failed to free tx adapter %d\n", ret); 1832 1833 /* Stop and release event devices */ 1834 rte_event_dev_stop(eventdev_id); 1835 ret = rte_event_dev_close(eventdev_id); 1836 if (ret < 0) 1837 printf("Failed to close event dev %d, %d\n", eventdev_id, ret); 1838 1839 /* port tear down */ 1840 RTE_ETH_FOREACH_DEV(portid) { 1841 ret = rte_eth_dev_reset(portid); 1842 if (ret != 0) 1843 printf("rte_eth_dev_reset: err=%s, port=%u\n", 1844 rte_strerror(-ret), port_id); 1845 } 1846 1847 rte_free(tx_pkts_burst); 1848 rte_free(rx_pkts_burst); 1849 } 1850 1851 static int 1852 test_inline_ip_reassembly(const void *testdata) 1853 { 1854 struct reassembly_vector reassembly_td = {0}; 1855 const struct reassembly_vector *td = testdata; 1856 struct ip_reassembly_test_packet full_pkt; 1857 struct ip_reassembly_test_packet frags[MAX_FRAGS]; 1858 struct ipsec_test_flags flags = {0}; 1859 int i = 0; 1860 1861 reassembly_td.sa_data = td->sa_data; 1862 reassembly_td.nb_frags = td->nb_frags; 1863 reassembly_td.burst = td->burst; 1864 1865 memcpy(&full_pkt, td->full_pkt, 1866 sizeof(struct ip_reassembly_test_packet)); 1867 reassembly_td.full_pkt = &full_pkt; 1868 1869 test_vector_payload_populate(reassembly_td.full_pkt, true); 1870 for (; i < reassembly_td.nb_frags; i++) { 1871 memcpy(&frags[i], td->frags[i], 1872 sizeof(struct ip_reassembly_test_packet)); 1873 reassembly_td.frags[i] = &frags[i]; 1874 test_vector_payload_populate(reassembly_td.frags[i], 1875 (i == 0) ? true : false); 1876 } 1877 1878 return test_ipsec_with_reassembly(&reassembly_td, &flags); 1879 } 1880 1881 static int 1882 test_ipsec_inline_proto_known_vec(const void *test_data) 1883 { 1884 struct ipsec_test_data td_outb; 1885 struct ipsec_test_flags flags; 1886 1887 memset(&flags, 0, sizeof(flags)); 1888 1889 memcpy(&td_outb, test_data, sizeof(td_outb)); 1890 1891 if (td_outb.aead || 1892 td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) { 1893 /* Disable IV gen to be able to test with known vectors */ 1894 td_outb.ipsec_xform.options.iv_gen_disable = 1; 1895 } 1896 1897 return test_ipsec_inline_proto_process(&td_outb, NULL, 1, 1898 false, &flags); 1899 } 1900 1901 static int 1902 test_ipsec_inline_proto_known_vec_inb(const void *test_data) 1903 { 1904 const struct ipsec_test_data *td = test_data; 1905 struct ipsec_test_flags flags; 1906 struct ipsec_test_data td_inb; 1907 1908 memset(&flags, 0, sizeof(flags)); 1909 1910 if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) 1911 test_ipsec_td_in_from_out(td, &td_inb); 1912 else 1913 memcpy(&td_inb, td, sizeof(td_inb)); 1914 1915 return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags); 1916 } 1917 1918 static int 1919 test_ipsec_inline_proto_display_list(const void *data __rte_unused) 1920 { 1921 struct ipsec_test_flags flags; 1922 1923 memset(&flags, 0, sizeof(flags)); 1924 1925 flags.display_alg = true; 1926 1927 return test_ipsec_inline_proto_all(&flags); 1928 } 1929 1930 static int 1931 test_ipsec_inline_proto_udp_encap(const void *data __rte_unused) 1932 { 1933 struct ipsec_test_flags flags; 1934 1935 memset(&flags, 0, sizeof(flags)); 1936 1937 flags.udp_encap = true; 1938 1939 return test_ipsec_inline_proto_all(&flags); 1940 } 1941 1942 static int 1943 test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused) 1944 { 1945 struct ipsec_test_flags flags; 1946 1947 memset(&flags, 0, sizeof(flags)); 1948 1949 flags.udp_encap = true; 1950 flags.udp_ports_verify = true; 1951 1952 return test_ipsec_inline_proto_all(&flags); 1953 } 1954 1955 static int 1956 test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused) 1957 { 1958 struct ipsec_test_flags flags; 1959 1960 memset(&flags, 0, sizeof(flags)); 1961 1962 flags.icv_corrupt = true; 1963 1964 return test_ipsec_inline_proto_all(&flags); 1965 } 1966 1967 static int 1968 test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused) 1969 { 1970 struct ipsec_test_flags flags; 1971 1972 memset(&flags, 0, sizeof(flags)); 1973 1974 flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR; 1975 1976 return test_ipsec_inline_proto_all(&flags); 1977 } 1978 1979 static int 1980 test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused) 1981 { 1982 struct ipsec_test_flags flags; 1983 1984 memset(&flags, 0, sizeof(flags)); 1985 1986 flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR; 1987 1988 return test_ipsec_inline_proto_all(&flags); 1989 } 1990 1991 static int 1992 test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused) 1993 { 1994 struct ipsec_test_flags flags; 1995 1996 memset(&flags, 0, sizeof(flags)); 1997 1998 flags.ip_csum = true; 1999 2000 return test_ipsec_inline_proto_all(&flags); 2001 } 2002 2003 static int 2004 test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused) 2005 { 2006 struct ipsec_test_flags flags; 2007 2008 memset(&flags, 0, sizeof(flags)); 2009 2010 flags.l4_csum = true; 2011 2012 return test_ipsec_inline_proto_all(&flags); 2013 } 2014 2015 static int 2016 test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused) 2017 { 2018 struct ipsec_test_flags flags; 2019 2020 memset(&flags, 0, sizeof(flags)); 2021 2022 flags.ipv6 = false; 2023 flags.tunnel_ipv6 = false; 2024 2025 return test_ipsec_inline_proto_all(&flags); 2026 } 2027 2028 static int 2029 test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused) 2030 { 2031 struct ipsec_test_flags flags; 2032 2033 memset(&flags, 0, sizeof(flags)); 2034 2035 flags.ipv6 = true; 2036 flags.tunnel_ipv6 = true; 2037 2038 return test_ipsec_inline_proto_all(&flags); 2039 } 2040 2041 static int 2042 test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused) 2043 { 2044 struct ipsec_test_flags flags; 2045 2046 memset(&flags, 0, sizeof(flags)); 2047 2048 flags.ipv6 = false; 2049 flags.tunnel_ipv6 = true; 2050 2051 return test_ipsec_inline_proto_all(&flags); 2052 } 2053 2054 static int 2055 test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused) 2056 { 2057 struct ipsec_test_flags flags; 2058 2059 memset(&flags, 0, sizeof(flags)); 2060 2061 flags.ipv6 = true; 2062 flags.tunnel_ipv6 = false; 2063 2064 return test_ipsec_inline_proto_all(&flags); 2065 } 2066 2067 static int 2068 test_ipsec_inline_proto_transport_v4(const void *data __rte_unused) 2069 { 2070 struct ipsec_test_flags flags; 2071 2072 memset(&flags, 0, sizeof(flags)); 2073 2074 flags.ipv6 = false; 2075 flags.transport = true; 2076 2077 return test_ipsec_inline_proto_all(&flags); 2078 } 2079 2080 static int 2081 test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused) 2082 { 2083 struct ipsec_test_flags flags = { 2084 .l4_csum = true, 2085 .transport = true, 2086 }; 2087 2088 return test_ipsec_inline_proto_all(&flags); 2089 } 2090 2091 static int 2092 test_ipsec_inline_proto_stats(const void *data __rte_unused) 2093 { 2094 struct ipsec_test_flags flags; 2095 2096 memset(&flags, 0, sizeof(flags)); 2097 2098 flags.stats_success = true; 2099 2100 return test_ipsec_inline_proto_all(&flags); 2101 } 2102 2103 static int 2104 test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused) 2105 { 2106 struct ipsec_test_flags flags; 2107 2108 memset(&flags, 0, sizeof(flags)); 2109 2110 flags.fragment = true; 2111 2112 return test_ipsec_inline_proto_all(&flags); 2113 2114 } 2115 2116 static int 2117 test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused) 2118 { 2119 struct ipsec_test_flags flags; 2120 2121 memset(&flags, 0, sizeof(flags)); 2122 2123 flags.df = TEST_IPSEC_COPY_DF_INNER_0; 2124 2125 return test_ipsec_inline_proto_all(&flags); 2126 } 2127 2128 static int 2129 test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused) 2130 { 2131 struct ipsec_test_flags flags; 2132 2133 memset(&flags, 0, sizeof(flags)); 2134 2135 flags.df = TEST_IPSEC_COPY_DF_INNER_1; 2136 2137 return test_ipsec_inline_proto_all(&flags); 2138 } 2139 2140 static int 2141 test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused) 2142 { 2143 struct ipsec_test_flags flags; 2144 2145 memset(&flags, 0, sizeof(flags)); 2146 2147 flags.df = TEST_IPSEC_SET_DF_0_INNER_1; 2148 2149 return test_ipsec_inline_proto_all(&flags); 2150 } 2151 2152 static int 2153 test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused) 2154 { 2155 struct ipsec_test_flags flags; 2156 2157 memset(&flags, 0, sizeof(flags)); 2158 2159 flags.df = TEST_IPSEC_SET_DF_1_INNER_0; 2160 2161 return test_ipsec_inline_proto_all(&flags); 2162 } 2163 2164 static int 2165 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused) 2166 { 2167 struct ipsec_test_flags flags; 2168 2169 memset(&flags, 0, sizeof(flags)); 2170 2171 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0; 2172 2173 return test_ipsec_inline_proto_all(&flags); 2174 } 2175 2176 static int 2177 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) 2178 { 2179 struct ipsec_test_flags flags; 2180 2181 memset(&flags, 0, sizeof(flags)); 2182 2183 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1; 2184 2185 return test_ipsec_inline_proto_all(&flags); 2186 } 2187 2188 static int 2189 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) 2190 { 2191 struct ipsec_test_flags flags; 2192 2193 memset(&flags, 0, sizeof(flags)); 2194 2195 flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1; 2196 2197 return test_ipsec_inline_proto_all(&flags); 2198 } 2199 2200 static int 2201 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) 2202 { 2203 struct ipsec_test_flags flags; 2204 2205 memset(&flags, 0, sizeof(flags)); 2206 2207 flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0; 2208 2209 return test_ipsec_inline_proto_all(&flags); 2210 } 2211 2212 static int 2213 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) 2214 { 2215 struct ipsec_test_flags flags; 2216 2217 memset(&flags, 0, sizeof(flags)); 2218 2219 flags.ipv6 = true; 2220 flags.tunnel_ipv6 = true; 2221 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0; 2222 2223 return test_ipsec_inline_proto_all(&flags); 2224 } 2225 2226 static int 2227 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) 2228 { 2229 struct ipsec_test_flags flags; 2230 2231 memset(&flags, 0, sizeof(flags)); 2232 2233 flags.ipv6 = true; 2234 flags.tunnel_ipv6 = true; 2235 flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1; 2236 2237 return test_ipsec_inline_proto_all(&flags); 2238 } 2239 2240 static int 2241 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) 2242 { 2243 struct ipsec_test_flags flags; 2244 2245 memset(&flags, 0, sizeof(flags)); 2246 2247 flags.ipv6 = true; 2248 flags.tunnel_ipv6 = true; 2249 flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1; 2250 2251 return test_ipsec_inline_proto_all(&flags); 2252 } 2253 2254 static int 2255 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused) 2256 { 2257 struct ipsec_test_flags flags; 2258 2259 memset(&flags, 0, sizeof(flags)); 2260 2261 flags.ipv6 = true; 2262 flags.tunnel_ipv6 = true; 2263 flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0; 2264 2265 return test_ipsec_inline_proto_all(&flags); 2266 } 2267 2268 static int 2269 test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused) 2270 { 2271 struct ipsec_test_flags flags; 2272 2273 memset(&flags, 0, sizeof(flags)); 2274 2275 flags.ipv6 = true; 2276 flags.tunnel_ipv6 = true; 2277 flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_0; 2278 2279 return test_ipsec_inline_proto_all(&flags); 2280 } 2281 2282 static int 2283 test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused) 2284 { 2285 struct ipsec_test_flags flags; 2286 2287 memset(&flags, 0, sizeof(flags)); 2288 2289 flags.ipv6 = true; 2290 flags.tunnel_ipv6 = true; 2291 flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_1; 2292 2293 return test_ipsec_inline_proto_all(&flags); 2294 } 2295 2296 static int 2297 test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused) 2298 { 2299 struct ipsec_test_flags flags; 2300 2301 memset(&flags, 0, sizeof(flags)); 2302 2303 flags.ipv6 = true; 2304 flags.tunnel_ipv6 = true; 2305 flags.flabel = TEST_IPSEC_SET_FLABEL_0_INNER_1; 2306 2307 return test_ipsec_inline_proto_all(&flags); 2308 } 2309 2310 static int 2311 test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused) 2312 { 2313 struct ipsec_test_flags flags; 2314 2315 memset(&flags, 0, sizeof(flags)); 2316 2317 flags.ipv6 = true; 2318 flags.tunnel_ipv6 = true; 2319 flags.flabel = TEST_IPSEC_SET_FLABEL_1_INNER_0; 2320 2321 return test_ipsec_inline_proto_all(&flags); 2322 } 2323 2324 static int 2325 test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused) 2326 { 2327 struct ipsec_test_flags flags = { 2328 .dec_ttl_or_hop_limit = true 2329 }; 2330 2331 return test_ipsec_inline_proto_all(&flags); 2332 } 2333 2334 static int 2335 test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused) 2336 { 2337 struct ipsec_test_flags flags = { 2338 .ipv6 = true, 2339 .dec_ttl_or_hop_limit = true 2340 }; 2341 2342 return test_ipsec_inline_proto_all(&flags); 2343 } 2344 2345 static int 2346 test_ipsec_inline_proto_iv_gen(const void *data __rte_unused) 2347 { 2348 struct ipsec_test_flags flags; 2349 2350 memset(&flags, 0, sizeof(flags)); 2351 2352 flags.iv_gen = true; 2353 2354 return test_ipsec_inline_proto_all(&flags); 2355 } 2356 2357 static int 2358 test_ipsec_inline_proto_sa_pkt_soft_expiry(const void *data __rte_unused) 2359 { 2360 struct ipsec_test_flags flags = { 2361 .sa_expiry_pkts_soft = true 2362 }; 2363 return test_ipsec_inline_proto_all(&flags); 2364 } 2365 static int 2366 test_ipsec_inline_proto_sa_byte_soft_expiry(const void *data __rte_unused) 2367 { 2368 struct ipsec_test_flags flags = { 2369 .sa_expiry_bytes_soft = true 2370 }; 2371 return test_ipsec_inline_proto_all(&flags); 2372 } 2373 2374 static int 2375 test_ipsec_inline_proto_sa_pkt_hard_expiry(const void *data __rte_unused) 2376 { 2377 struct ipsec_test_flags flags = { 2378 .sa_expiry_pkts_hard = true 2379 }; 2380 2381 return test_ipsec_inline_proto_all(&flags); 2382 } 2383 2384 static int 2385 test_ipsec_inline_proto_sa_byte_hard_expiry(const void *data __rte_unused) 2386 { 2387 struct ipsec_test_flags flags = { 2388 .sa_expiry_bytes_hard = true 2389 }; 2390 2391 return test_ipsec_inline_proto_all(&flags); 2392 } 2393 2394 static int 2395 test_ipsec_inline_proto_known_vec_fragmented(const void *test_data) 2396 { 2397 struct ipsec_test_data td_outb; 2398 struct ipsec_test_flags flags; 2399 2400 memset(&flags, 0, sizeof(flags)); 2401 flags.fragment = true; 2402 2403 memcpy(&td_outb, test_data, sizeof(td_outb)); 2404 2405 /* Disable IV gen to be able to test with known vectors */ 2406 td_outb.ipsec_xform.options.iv_gen_disable = 1; 2407 2408 return test_ipsec_inline_proto_process(&td_outb, NULL, 1, false, 2409 &flags); 2410 } 2411 2412 static int 2413 test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[], 2414 bool replayed_pkt[], uint32_t nb_pkts, bool esn_en, 2415 uint64_t winsz) 2416 { 2417 struct ipsec_test_data td_outb[IPSEC_TEST_PACKETS_MAX]; 2418 struct ipsec_test_data td_inb[IPSEC_TEST_PACKETS_MAX]; 2419 struct ipsec_test_flags flags; 2420 uint32_t i, ret = 0; 2421 2422 memset(&flags, 0, sizeof(flags)); 2423 flags.antireplay = true; 2424 2425 for (i = 0; i < nb_pkts; i++) { 2426 memcpy(&td_outb[i], test_data, sizeof(td_outb[0])); 2427 td_outb[i].ipsec_xform.options.iv_gen_disable = 1; 2428 td_outb[i].ipsec_xform.replay_win_sz = winsz; 2429 td_outb[i].ipsec_xform.options.esn = esn_en; 2430 } 2431 2432 for (i = 0; i < nb_pkts; i++) 2433 td_outb[i].ipsec_xform.esn.value = esn[i]; 2434 2435 ret = test_ipsec_inline_proto_process_with_esn(td_outb, td_inb, 2436 nb_pkts, true, &flags); 2437 if (ret != TEST_SUCCESS) 2438 return ret; 2439 2440 test_ipsec_td_update(td_inb, td_outb, nb_pkts, &flags); 2441 2442 for (i = 0; i < nb_pkts; i++) { 2443 td_inb[i].ipsec_xform.options.esn = esn_en; 2444 /* Set antireplay flag for packets to be dropped */ 2445 td_inb[i].ar_packet = replayed_pkt[i]; 2446 } 2447 2448 ret = test_ipsec_inline_proto_process_with_esn(td_inb, NULL, nb_pkts, 2449 true, &flags); 2450 2451 return ret; 2452 } 2453 2454 static int 2455 test_ipsec_inline_proto_pkt_antireplay(const void *test_data, uint64_t winsz) 2456 { 2457 2458 uint32_t nb_pkts = 5; 2459 bool replayed_pkt[5]; 2460 uint64_t esn[5]; 2461 2462 /* 1. Advance the TOP of the window to WS * 2 */ 2463 esn[0] = winsz * 2; 2464 /* 2. Test sequence number within the new window(WS + 1) */ 2465 esn[1] = winsz + 1; 2466 /* 3. Test sequence number less than the window BOTTOM */ 2467 esn[2] = winsz; 2468 /* 4. Test sequence number in the middle of the window */ 2469 esn[3] = winsz + (winsz / 2); 2470 /* 5. Test replay of the packet in the middle of the window */ 2471 esn[4] = winsz + (winsz / 2); 2472 2473 replayed_pkt[0] = false; 2474 replayed_pkt[1] = false; 2475 replayed_pkt[2] = true; 2476 replayed_pkt[3] = false; 2477 replayed_pkt[4] = true; 2478 2479 return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt, 2480 nb_pkts, false, winsz); 2481 } 2482 2483 static int 2484 test_ipsec_inline_proto_pkt_antireplay1024(const void *test_data) 2485 { 2486 return test_ipsec_inline_proto_pkt_antireplay(test_data, 1024); 2487 } 2488 2489 static int 2490 test_ipsec_inline_proto_pkt_antireplay2048(const void *test_data) 2491 { 2492 return test_ipsec_inline_proto_pkt_antireplay(test_data, 2048); 2493 } 2494 2495 static int 2496 test_ipsec_inline_proto_pkt_antireplay4096(const void *test_data) 2497 { 2498 return test_ipsec_inline_proto_pkt_antireplay(test_data, 4096); 2499 } 2500 2501 static int 2502 test_ipsec_inline_proto_pkt_esn_antireplay(const void *test_data, uint64_t winsz) 2503 { 2504 2505 uint32_t nb_pkts = 7; 2506 bool replayed_pkt[7]; 2507 uint64_t esn[7]; 2508 2509 /* Set the initial sequence number */ 2510 esn[0] = (uint64_t)(0xFFFFFFFF - winsz); 2511 /* 1. Advance the TOP of the window to (1<<32 + WS/2) */ 2512 esn[1] = (uint64_t)((1ULL << 32) + (winsz / 2)); 2513 /* 2. Test sequence number within new window (1<<32 + WS/2 + 1) */ 2514 esn[2] = (uint64_t)((1ULL << 32) - (winsz / 2) + 1); 2515 /* 3. Test with sequence number within window (1<<32 - 1) */ 2516 esn[3] = (uint64_t)((1ULL << 32) - 1); 2517 /* 4. Test with sequence number within window (1<<32 - 1) */ 2518 esn[4] = (uint64_t)(1ULL << 32); 2519 /* 5. Test with duplicate sequence number within 2520 * new window (1<<32 - 1) 2521 */ 2522 esn[5] = (uint64_t)((1ULL << 32) - 1); 2523 /* 6. Test with duplicate sequence number within new window (1<<32) */ 2524 esn[6] = (uint64_t)(1ULL << 32); 2525 2526 replayed_pkt[0] = false; 2527 replayed_pkt[1] = false; 2528 replayed_pkt[2] = false; 2529 replayed_pkt[3] = false; 2530 replayed_pkt[4] = false; 2531 replayed_pkt[5] = true; 2532 replayed_pkt[6] = true; 2533 2534 return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt, nb_pkts, 2535 true, winsz); 2536 } 2537 2538 static int 2539 test_ipsec_inline_proto_pkt_esn_antireplay1024(const void *test_data) 2540 { 2541 return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 1024); 2542 } 2543 2544 static int 2545 test_ipsec_inline_proto_pkt_esn_antireplay2048(const void *test_data) 2546 { 2547 return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 2048); 2548 } 2549 2550 static int 2551 test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data) 2552 { 2553 return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 4096); 2554 } 2555 2556 2557 2558 static struct unit_test_suite inline_ipsec_testsuite = { 2559 .suite_name = "Inline IPsec Ethernet Device Unit Test Suite", 2560 .unit_test_cases = { 2561 TEST_CASE_NAMED_WITH_DATA( 2562 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)", 2563 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2564 test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm), 2565 TEST_CASE_NAMED_WITH_DATA( 2566 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)", 2567 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2568 test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm), 2569 TEST_CASE_NAMED_WITH_DATA( 2570 "Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)", 2571 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2572 test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm), 2573 TEST_CASE_NAMED_WITH_DATA( 2574 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])", 2575 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2576 test_ipsec_inline_proto_known_vec, 2577 &pkt_aes_128_cbc_hmac_sha256), 2578 TEST_CASE_NAMED_WITH_DATA( 2579 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])", 2580 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2581 test_ipsec_inline_proto_known_vec, 2582 &pkt_aes_128_cbc_hmac_sha384), 2583 TEST_CASE_NAMED_WITH_DATA( 2584 "Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])", 2585 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2586 test_ipsec_inline_proto_known_vec, 2587 &pkt_aes_128_cbc_hmac_sha512), 2588 TEST_CASE_NAMED_WITH_DATA( 2589 "Outbound known vector (ESP tunnel mode IPv6 AES-GCM 128)", 2590 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2591 test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6), 2592 TEST_CASE_NAMED_WITH_DATA( 2593 "Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])", 2594 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2595 test_ipsec_inline_proto_known_vec, 2596 &pkt_aes_128_cbc_hmac_sha256_v6), 2597 TEST_CASE_NAMED_WITH_DATA( 2598 "Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])", 2599 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2600 test_ipsec_inline_proto_known_vec, 2601 &pkt_null_aes_xcbc), 2602 2603 TEST_CASE_NAMED_WITH_DATA( 2604 "Outbound fragmented packet", 2605 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2606 test_ipsec_inline_proto_known_vec_fragmented, 2607 &pkt_aes_128_gcm_frag), 2608 2609 TEST_CASE_NAMED_WITH_DATA( 2610 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)", 2611 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2612 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm), 2613 TEST_CASE_NAMED_WITH_DATA( 2614 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)", 2615 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2616 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm), 2617 TEST_CASE_NAMED_WITH_DATA( 2618 "Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)", 2619 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2620 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm), 2621 TEST_CASE_NAMED_WITH_DATA( 2622 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)", 2623 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2624 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null), 2625 TEST_CASE_NAMED_WITH_DATA( 2626 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])", 2627 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2628 test_ipsec_inline_proto_known_vec_inb, 2629 &pkt_aes_128_cbc_hmac_sha256), 2630 TEST_CASE_NAMED_WITH_DATA( 2631 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])", 2632 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2633 test_ipsec_inline_proto_known_vec_inb, 2634 &pkt_aes_128_cbc_hmac_sha384), 2635 TEST_CASE_NAMED_WITH_DATA( 2636 "Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])", 2637 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2638 test_ipsec_inline_proto_known_vec_inb, 2639 &pkt_aes_128_cbc_hmac_sha512), 2640 TEST_CASE_NAMED_WITH_DATA( 2641 "Inbound known vector (ESP tunnel mode IPv6 AES-GCM 128)", 2642 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2643 test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6), 2644 TEST_CASE_NAMED_WITH_DATA( 2645 "Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])", 2646 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2647 test_ipsec_inline_proto_known_vec_inb, 2648 &pkt_aes_128_cbc_hmac_sha256_v6), 2649 TEST_CASE_NAMED_WITH_DATA( 2650 "Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])", 2651 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2652 test_ipsec_inline_proto_known_vec_inb, 2653 &pkt_null_aes_xcbc), 2654 2655 TEST_CASE_NAMED_ST( 2656 "Combined test alg list", 2657 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2658 test_ipsec_inline_proto_display_list), 2659 2660 TEST_CASE_NAMED_ST( 2661 "UDP encapsulation", 2662 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2663 test_ipsec_inline_proto_udp_encap), 2664 TEST_CASE_NAMED_ST( 2665 "UDP encapsulation ports verification test", 2666 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2667 test_ipsec_inline_proto_udp_ports_verify), 2668 TEST_CASE_NAMED_ST( 2669 "Negative test: ICV corruption", 2670 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2671 test_ipsec_inline_proto_err_icv_corrupt), 2672 TEST_CASE_NAMED_ST( 2673 "Tunnel dst addr verification", 2674 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2675 test_ipsec_inline_proto_tunnel_dst_addr_verify), 2676 TEST_CASE_NAMED_ST( 2677 "Tunnel src and dst addr verification", 2678 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2679 test_ipsec_inline_proto_tunnel_src_dst_addr_verify), 2680 TEST_CASE_NAMED_ST( 2681 "Inner IP checksum", 2682 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2683 test_ipsec_inline_proto_inner_ip_csum), 2684 TEST_CASE_NAMED_ST( 2685 "Inner L4 checksum", 2686 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2687 test_ipsec_inline_proto_inner_l4_csum), 2688 TEST_CASE_NAMED_ST( 2689 "Tunnel IPv4 in IPv4", 2690 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2691 test_ipsec_inline_proto_tunnel_v4_in_v4), 2692 TEST_CASE_NAMED_ST( 2693 "Tunnel IPv6 in IPv6", 2694 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2695 test_ipsec_inline_proto_tunnel_v6_in_v6), 2696 TEST_CASE_NAMED_ST( 2697 "Tunnel IPv4 in IPv6", 2698 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2699 test_ipsec_inline_proto_tunnel_v4_in_v6), 2700 TEST_CASE_NAMED_ST( 2701 "Tunnel IPv6 in IPv4", 2702 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2703 test_ipsec_inline_proto_tunnel_v6_in_v4), 2704 TEST_CASE_NAMED_ST( 2705 "Transport IPv4", 2706 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2707 test_ipsec_inline_proto_transport_v4), 2708 TEST_CASE_NAMED_ST( 2709 "Transport l4 checksum", 2710 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2711 test_ipsec_inline_proto_transport_l4_csum), 2712 TEST_CASE_NAMED_ST( 2713 "Statistics: success", 2714 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2715 test_ipsec_inline_proto_stats), 2716 TEST_CASE_NAMED_ST( 2717 "Fragmented packet", 2718 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2719 test_ipsec_inline_proto_pkt_fragment), 2720 TEST_CASE_NAMED_ST( 2721 "Tunnel header copy DF (inner 0)", 2722 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2723 test_ipsec_inline_proto_copy_df_inner_0), 2724 TEST_CASE_NAMED_ST( 2725 "Tunnel header copy DF (inner 1)", 2726 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2727 test_ipsec_inline_proto_copy_df_inner_1), 2728 TEST_CASE_NAMED_ST( 2729 "Tunnel header set DF 0 (inner 1)", 2730 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2731 test_ipsec_inline_proto_set_df_0_inner_1), 2732 TEST_CASE_NAMED_ST( 2733 "Tunnel header set DF 1 (inner 0)", 2734 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2735 test_ipsec_inline_proto_set_df_1_inner_0), 2736 TEST_CASE_NAMED_ST( 2737 "Tunnel header IPv4 copy DSCP (inner 0)", 2738 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2739 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0), 2740 TEST_CASE_NAMED_ST( 2741 "Tunnel header IPv4 copy DSCP (inner 1)", 2742 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2743 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1), 2744 TEST_CASE_NAMED_ST( 2745 "Tunnel header IPv4 set DSCP 0 (inner 1)", 2746 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2747 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1), 2748 TEST_CASE_NAMED_ST( 2749 "Tunnel header IPv4 set DSCP 1 (inner 0)", 2750 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2751 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0), 2752 TEST_CASE_NAMED_ST( 2753 "Tunnel header IPv6 copy DSCP (inner 0)", 2754 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2755 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0), 2756 TEST_CASE_NAMED_ST( 2757 "Tunnel header IPv6 copy DSCP (inner 1)", 2758 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2759 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1), 2760 TEST_CASE_NAMED_ST( 2761 "Tunnel header IPv6 set DSCP 0 (inner 1)", 2762 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2763 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1), 2764 TEST_CASE_NAMED_ST( 2765 "Tunnel header IPv6 set DSCP 1 (inner 0)", 2766 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2767 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0), 2768 TEST_CASE_NAMED_ST( 2769 "Tunnel header IPv6 copy FLABEL (inner 0)", 2770 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2771 test_ipsec_inline_proto_ipv6_copy_flabel_inner_0), 2772 TEST_CASE_NAMED_ST( 2773 "Tunnel header IPv6 copy FLABEL (inner 1)", 2774 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2775 test_ipsec_inline_proto_ipv6_copy_flabel_inner_1), 2776 TEST_CASE_NAMED_ST( 2777 "Tunnel header IPv6 set FLABEL 0 (inner 1)", 2778 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2779 test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1), 2780 TEST_CASE_NAMED_ST( 2781 "Tunnel header IPv6 set FLABEL 1 (inner 0)", 2782 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2783 test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0), 2784 TEST_CASE_NAMED_ST( 2785 "Tunnel header IPv4 decrement inner TTL", 2786 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2787 test_ipsec_inline_proto_ipv4_ttl_decrement), 2788 TEST_CASE_NAMED_ST( 2789 "Tunnel header IPv6 decrement inner hop limit", 2790 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2791 test_ipsec_inline_proto_ipv6_hop_limit_decrement), 2792 TEST_CASE_NAMED_ST( 2793 "IV generation", 2794 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2795 test_ipsec_inline_proto_iv_gen), 2796 TEST_CASE_NAMED_ST( 2797 "SA soft expiry with packet limit", 2798 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2799 test_ipsec_inline_proto_sa_pkt_soft_expiry), 2800 TEST_CASE_NAMED_ST( 2801 "SA soft expiry with byte limit", 2802 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2803 test_ipsec_inline_proto_sa_byte_soft_expiry), 2804 TEST_CASE_NAMED_ST( 2805 "SA hard expiry with packet limit", 2806 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2807 test_ipsec_inline_proto_sa_pkt_hard_expiry), 2808 TEST_CASE_NAMED_ST( 2809 "SA hard expiry with byte limit", 2810 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2811 test_ipsec_inline_proto_sa_byte_hard_expiry), 2812 2813 TEST_CASE_NAMED_WITH_DATA( 2814 "Antireplay with window size 1024", 2815 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2816 test_ipsec_inline_proto_pkt_antireplay1024, 2817 &pkt_aes_128_gcm), 2818 TEST_CASE_NAMED_WITH_DATA( 2819 "Antireplay with window size 2048", 2820 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2821 test_ipsec_inline_proto_pkt_antireplay2048, 2822 &pkt_aes_128_gcm), 2823 TEST_CASE_NAMED_WITH_DATA( 2824 "Antireplay with window size 4096", 2825 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2826 test_ipsec_inline_proto_pkt_antireplay4096, 2827 &pkt_aes_128_gcm), 2828 TEST_CASE_NAMED_WITH_DATA( 2829 "ESN and Antireplay with window size 1024", 2830 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2831 test_ipsec_inline_proto_pkt_esn_antireplay1024, 2832 &pkt_aes_128_gcm), 2833 TEST_CASE_NAMED_WITH_DATA( 2834 "ESN and Antireplay with window size 2048", 2835 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2836 test_ipsec_inline_proto_pkt_esn_antireplay2048, 2837 &pkt_aes_128_gcm), 2838 TEST_CASE_NAMED_WITH_DATA( 2839 "ESN and Antireplay with window size 4096", 2840 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2841 test_ipsec_inline_proto_pkt_esn_antireplay4096, 2842 &pkt_aes_128_gcm), 2843 2844 TEST_CASE_NAMED_WITH_DATA( 2845 "IPv4 Reassembly with 2 fragments", 2846 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2847 test_inline_ip_reassembly, &ipv4_2frag_vector), 2848 TEST_CASE_NAMED_WITH_DATA( 2849 "IPv6 Reassembly with 2 fragments", 2850 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2851 test_inline_ip_reassembly, &ipv6_2frag_vector), 2852 TEST_CASE_NAMED_WITH_DATA( 2853 "IPv4 Reassembly with 4 fragments", 2854 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2855 test_inline_ip_reassembly, &ipv4_4frag_vector), 2856 TEST_CASE_NAMED_WITH_DATA( 2857 "IPv6 Reassembly with 4 fragments", 2858 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2859 test_inline_ip_reassembly, &ipv6_4frag_vector), 2860 TEST_CASE_NAMED_WITH_DATA( 2861 "IPv4 Reassembly with 5 fragments", 2862 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2863 test_inline_ip_reassembly, &ipv4_5frag_vector), 2864 TEST_CASE_NAMED_WITH_DATA( 2865 "IPv6 Reassembly with 5 fragments", 2866 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2867 test_inline_ip_reassembly, &ipv6_5frag_vector), 2868 TEST_CASE_NAMED_WITH_DATA( 2869 "IPv4 Reassembly with incomplete fragments", 2870 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2871 test_inline_ip_reassembly, &ipv4_incomplete_vector), 2872 TEST_CASE_NAMED_WITH_DATA( 2873 "IPv4 Reassembly with overlapping fragments", 2874 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2875 test_inline_ip_reassembly, &ipv4_overlap_vector), 2876 TEST_CASE_NAMED_WITH_DATA( 2877 "IPv4 Reassembly with out of order fragments", 2878 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2879 test_inline_ip_reassembly, &ipv4_out_of_order_vector), 2880 TEST_CASE_NAMED_WITH_DATA( 2881 "IPv4 Reassembly with burst of 4 fragments", 2882 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2883 test_inline_ip_reassembly, &ipv4_4frag_burst_vector), 2884 2885 TEST_CASES_END() /**< NULL terminate unit test array */ 2886 }, 2887 }; 2888 2889 2890 static int 2891 test_inline_ipsec(void) 2892 { 2893 inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup; 2894 inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown; 2895 return unit_test_suite_runner(&inline_ipsec_testsuite); 2896 } 2897 2898 static int 2899 test_event_inline_ipsec(void) 2900 { 2901 inline_ipsec_testsuite.setup = event_inline_ipsec_testsuite_setup; 2902 inline_ipsec_testsuite.teardown = event_inline_ipsec_testsuite_teardown; 2903 return unit_test_suite_runner(&inline_ipsec_testsuite); 2904 } 2905 2906 #endif /* !RTE_EXEC_ENV_WINDOWS */ 2907 2908 REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec); 2909 REGISTER_TEST_COMMAND(event_inline_ipsec_autotest, test_event_inline_ipsec); 2910