/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2022 Marvell.
 */

#include <stdio.h>
#include <inttypes.h>

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_security.h>

#include "test.h"
#include "test_security_inline_proto_vectors.h"

#ifdef RTE_EXEC_ENV_WINDOWS
static int
test_inline_ipsec(void)
{
	printf("Inline ipsec not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}

static int
test_event_inline_ipsec(void)
{
	printf("Event inline ipsec not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}

#else

#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

#define NB_ETHPORTS_USED	1
#define MEMPOOL_CACHE_SIZE	32
#define MAX_PKT_BURST		32
#define RX_DESC_DEFAULT		1024
#define TX_DESC_DEFAULT		1024
#define RTE_PORT_ALL		(~(uint16_t)0x0)

#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */

#define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

#define MAX_TRAFFIC_BURST	2048
#define NB_MBUF			10240

#define ENCAP_DECAP_BURST_SZ	33
#define APP_REASS_TIMEOUT	10

extern struct ipsec_test_data pkt_aes_128_gcm;
extern struct ipsec_test_data pkt_aes_192_gcm;
extern struct ipsec_test_data pkt_aes_256_gcm;
extern struct ipsec_test_data pkt_aes_128_gcm_frag;
extern struct ipsec_test_data pkt_aes_128_cbc_null;
extern struct ipsec_test_data pkt_null_aes_xcbc;
extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha384;
extern struct ipsec_test_data pkt_aes_128_cbc_hmac_sha512;

static struct rte_mempool *mbufpool;
static struct rte_mempool *sess_pool;
/* Ethernet addresses of ports */
static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_NONE,
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
			    RTE_ETH_RX_OFFLOAD_SECURITY,
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
		.offloads = RTE_ETH_TX_OFFLOAD_SECURITY |
			    RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
	},
	.lpbk_mode = 1,  /* enable loopback */
};

static struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
	.rx_free_thresh = 32,
};

static struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 32, /* Use PMD default values */
	.tx_rs_thresh = 32, /* Use PMD default values */
};

static uint16_t port_id;
static uint8_t eventdev_id;
static uint8_t rx_adapter_id;
static uint8_t tx_adapter_id;

static bool event_mode_enabled;

static uint64_t link_mbps;

static int ip_reassembly_dynfield_offset = -1;

static struct rte_flow *default_flow[RTE_MAX_ETHPORTS];
/* Create Inline IPsec session */
static int
create_inline_ipsec_session(struct ipsec_test_data *sa, uint16_t portid,
		void **sess, struct rte_security_ctx **ctx,
		uint32_t *ol_flags, const struct ipsec_test_flags *flags,
		struct rte_security_session_conf *sess_conf)
{
	uint16_t src_v6[8] = {0x2607, 0xf8b0, 0x400c, 0x0c03, 0x0000, 0x0000,
			      0x0000, 0x001a};
	uint16_t dst_v6[8] = {0x2001, 0x0470, 0xe5bf, 0xdead, 0x4957, 0x2174,
			      0xe82c, 0x4887};
	uint32_t src_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 2));
	uint32_t dst_v4 = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1));
	struct rte_security_capability_idx sec_cap_idx;
	const struct rte_security_capability *sec_cap;
	enum rte_security_ipsec_sa_direction dir;
	struct rte_security_ctx *sec_ctx;
	uint32_t verify;

	sess_conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
	sess_conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	sess_conf->ipsec = sa->ipsec_xform;

	dir = sa->ipsec_xform.direction;
	verify = flags->tunnel_hdr_verify;

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
		if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
			src_v4 += 1;
		else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
			dst_v4 += 1;
	}

	if (sa->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (sa->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			memcpy(&sess_conf->ipsec.tunnel.ipv4.src_ip, &src_v4,
					sizeof(src_v4));
			memcpy(&sess_conf->ipsec.tunnel.ipv4.dst_ip, &dst_v4,
					sizeof(dst_v4));

			if (flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv4.df = 0;

			if (flags->df == TEST_IPSEC_SET_DF_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv4.df = 1;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv4.dscp = 0;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv4.dscp =
						TEST_IPSEC_DSCP_VAL;
		} else {
			if (flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv6.dscp = 0;

			if (flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv6.dscp =
						TEST_IPSEC_DSCP_VAL;

			if (flags->flabel == TEST_IPSEC_SET_FLABEL_0_INNER_1)
				sess_conf->ipsec.tunnel.ipv6.flabel = 0;

			if (flags->flabel == TEST_IPSEC_SET_FLABEL_1_INNER_0)
				sess_conf->ipsec.tunnel.ipv6.flabel =
						TEST_IPSEC_FLABEL_VAL;

			memcpy(&sess_conf->ipsec.tunnel.ipv6.src_addr, &src_v6,
					sizeof(src_v6));
			memcpy(&sess_conf->ipsec.tunnel.ipv6.dst_addr, &dst_v6,
					sizeof(dst_v6));
		}
	}

	/* Save SA as userdata for the security session. When
	 * the packet is received, this userdata will be
	 * retrieved using the metadata from the packet.
	 *
	 * The PMD is expected to set similar metadata for other
	 * operations, like rte_eth_event, which are tied to
	 * security session. In such cases, the userdata could
	 * be obtained to uniquely identify the security
	 * parameters denoted.
	 */

	sess_conf->userdata = (void *) sa;
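
	/*
	 * Illustrative sketch only (not exercised by this test): assuming
	 * the PMD fills the security dynfield on Rx, the SA saved above
	 * could be recovered from a received mbuf (hypothetical "mbuf")
	 * roughly as:
	 *
	 *	if (rte_security_dynfield_is_registered()) {
	 *		uint64_t md = *rte_security_dynfield(mbuf);
	 *		struct ipsec_test_data *sa_ud =
	 *			rte_security_get_userdata(sec_ctx, md);
	 *	}
	 */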

	sec_ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(portid);
	if (sec_ctx == NULL) {
		printf("Ethernet device doesn't support security features.\n");
		return TEST_SKIPPED;
	}

	sec_cap_idx.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
	sec_cap_idx.protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;
	sec_cap = rte_security_capability_get(sec_ctx, &sec_cap_idx);
	if (sec_cap == NULL) {
		printf("No capabilities registered\n");
		return TEST_SKIPPED;
	}

	if (sa->aead || sa->aes_gmac)
		memcpy(&sess_conf->ipsec.salt, sa->salt.data,
			RTE_MIN(sizeof(sess_conf->ipsec.salt), sa->salt.len));

	/* Copy cipher session parameters */
	if (sa->aead) {
		rte_memcpy(sess_conf->crypto_xform, &sa->xform.aead,
				sizeof(struct rte_crypto_sym_xform));
		sess_conf->crypto_xform->aead.key.data = sa->key.data;
		/* Verify crypto capabilities */
		if (test_ipsec_crypto_caps_aead_verify(sec_cap,
				sess_conf->crypto_xform) != 0) {
			RTE_LOG(INFO, USER1,
				"Crypto capabilities not supported\n");
			return TEST_SKIPPED;
		}
	} else {
		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			rte_memcpy(&sess_conf->crypto_xform->cipher,
					&sa->xform.chain.cipher.cipher,
					sizeof(struct rte_crypto_cipher_xform));

			rte_memcpy(&sess_conf->crypto_xform->next->auth,
					&sa->xform.chain.auth.auth,
					sizeof(struct rte_crypto_auth_xform));
			sess_conf->crypto_xform->cipher.key.data =
					sa->key.data;
			sess_conf->crypto_xform->next->auth.key.data =
					sa->auth_key.data;
			/* Verify crypto capabilities */
			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
				RTE_LOG(INFO, USER1,
					"Cipher crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}

			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
					sess_conf->crypto_xform->next) != 0) {
				RTE_LOG(INFO, USER1,
					"Auth crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}
		} else {
			rte_memcpy(&sess_conf->crypto_xform->next->cipher,
					&sa->xform.chain.cipher.cipher,
					sizeof(struct rte_crypto_cipher_xform));
			rte_memcpy(&sess_conf->crypto_xform->auth,
					&sa->xform.chain.auth.auth,
					sizeof(struct rte_crypto_auth_xform));
			sess_conf->crypto_xform->auth.key.data =
					sa->auth_key.data;
			sess_conf->crypto_xform->next->cipher.key.data =
					sa->key.data;

			/* Verify crypto capabilities */
			if (test_ipsec_crypto_caps_cipher_verify(sec_cap,
					sess_conf->crypto_xform->next) != 0) {
				RTE_LOG(INFO, USER1,
					"Cipher crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}

			if (test_ipsec_crypto_caps_auth_verify(sec_cap,
					sess_conf->crypto_xform) != 0) {
				RTE_LOG(INFO, USER1,
					"Auth crypto capabilities not supported\n");
				return TEST_SKIPPED;
			}
		}
	}

	if (test_ipsec_sec_caps_verify(&sess_conf->ipsec, sec_cap, false) != 0)
		return TEST_SKIPPED;

	if ((sa->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
			(sa->ipsec_xform.options.iv_gen_disable == 1)) {
		/* Set env variable when IV generation is disabled */
		char arr[128];
		int len = 0, j = 0;
		int iv_len = (sa->aead || sa->aes_gmac) ? 8 : 16;

		for (; j < iv_len; j++)
			len += snprintf(arr+len, sizeof(arr) - len,
					"0x%x, ", sa->iv.data[j]);
		setenv("ETH_SEC_IV_OVR", arr, 1);
	}
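
	/*
	 * Assumption about the driver under test: the PMD reads the
	 * "ETH_SEC_IV_OVR" environment variable set above and uses the
	 * listed bytes as the IV, keeping known-vector tests
	 * deterministic when IV generation is disabled.
	 */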

	*sess = rte_security_session_create(sec_ctx, sess_conf, sess_pool);
	if (*sess == NULL) {
		printf("SEC Session init failed.\n");
		return TEST_FAILED;
	}

	*ol_flags = sec_cap->ol_flags;
	*ctx = sec_ctx;

	return 0;
}

/* Check link status of all ports for up to 3s, and print the final status */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	int ret;
	char link_status[RTE_ETH_LINK_MAX_STR_LEN];

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			ret = rte_eth_link_get_nowait(portid, &link);
			if (ret < 0) {
				all_ports_up = 0;
				if (print_flag == 1)
					printf("Port %u link get failed: %s\n",
						portid, rte_strerror(-ret));
				continue;
			}

			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status && link_mbps == 0)
					link_mbps = link.link_speed;

				rte_eth_link_to_str(link_status,
					sizeof(link_status), &link);
				printf("Port %d %s\n", portid, link_status);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == RTE_ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

static void
copy_buf_to_pkt_segs(const uint8_t *buf, unsigned int len,
		     struct rte_mbuf *pkt, unsigned int offset)
{
	unsigned int copied = 0;
	unsigned int copy_len;
	struct rte_mbuf *seg;
	void *seg_buf;

	seg = pkt;
	while (offset >= seg->data_len) {
		offset -= seg->data_len;
		seg = seg->next;
	}
	copy_len = seg->data_len - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf + copied, (size_t) copy_len);
		len -= copy_len;
		copied += copy_len;
		seg = seg->next;
		seg_buf = rte_pktmbuf_mtod(seg, void *);
		copy_len = seg->data_len;
	}
	rte_memcpy(seg_buf, buf + copied, (size_t) len);
}

static bool
is_outer_ipv4(struct ipsec_test_data *td)
{
	bool outer_ipv4;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ||
	    td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT)
		outer_ipv4 = (((td->input_text.data[0] & 0xF0) >> 4) == IPVERSION);
	else
		outer_ipv4 = (td->ipsec_xform.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4);
	return outer_ipv4;
}
static inline struct rte_mbuf *
init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len, bool outer_ipv4)
{
	struct rte_mbuf *pkt;

	pkt = rte_pktmbuf_alloc(mp);
	if (pkt == NULL)
		return NULL;

	if (outer_ipv4) {
		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
				&dummy_ipv4_eth_hdr, RTE_ETHER_HDR_LEN);
		pkt->l3_len = sizeof(struct rte_ipv4_hdr);
	} else {
		rte_memcpy(rte_pktmbuf_append(pkt, RTE_ETHER_HDR_LEN),
				&dummy_ipv6_eth_hdr, RTE_ETHER_HDR_LEN);
		pkt->l3_len = sizeof(struct rte_ipv6_hdr);
	}
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	if (pkt->buf_len > (len + RTE_ETHER_HDR_LEN))
		rte_memcpy(rte_pktmbuf_append(pkt, len), data, len);
	else
		copy_buf_to_pkt_segs(data, len, pkt, RTE_ETHER_HDR_LEN);
	return pkt;
}

static int
init_mempools(unsigned int nb_mbuf)
{
	struct rte_security_ctx *sec_ctx;
	uint16_t nb_sess = 512;
	uint32_t sess_sz;
	char s[64];

	if (mbufpool == NULL) {
		snprintf(s, sizeof(s), "mbuf_pool");
		mbufpool = rte_pktmbuf_pool_create(s, nb_mbuf,
				MEMPOOL_CACHE_SIZE, 0,
				RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
		if (mbufpool == NULL) {
			printf("Cannot init mbuf pool\n");
			return TEST_FAILED;
		}
		printf("Allocated mbuf pool\n");
	}

	sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
	if (sec_ctx == NULL) {
		printf("Device does not support Security ctx\n");
		return TEST_SKIPPED;
	}
	sess_sz = rte_security_session_get_size(sec_ctx);
	if (sess_pool == NULL) {
		snprintf(s, sizeof(s), "sess_pool");
		sess_pool = rte_mempool_create(s, nb_sess, sess_sz,
				MEMPOOL_CACHE_SIZE, 0,
				NULL, NULL, NULL, NULL,
				SOCKET_ID_ANY, 0);
		if (sess_pool == NULL) {
			printf("Cannot init sess pool\n");
			return TEST_FAILED;
		}
		printf("Allocated sess pool\n");
	}

	return 0;
}

static int
create_default_flow(uint16_t portid)
{
	struct rte_flow_action action[2];
	struct rte_flow_item pattern[2];
	struct rte_flow_attr attr = {0};
	struct rte_flow_error err;
	struct rte_flow *flow;
	int ret;

	/* Add the default rte_flow to enable SECURITY for all ESP packets */

	pattern[0].type = RTE_FLOW_ITEM_TYPE_ESP;
	pattern[0].spec = NULL;
	pattern[0].mask = NULL;
	pattern[0].last = NULL;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
	action[0].conf = NULL;
	action[1].type = RTE_FLOW_ACTION_TYPE_END;
	action[1].conf = NULL;

	attr.ingress = 1;

	ret = rte_flow_validate(portid, &attr, pattern, action, &err);
	if (ret) {
		printf("\nValidate flow failed, ret = %d\n", ret);
		return -1;
	}
	flow = rte_flow_create(portid, &attr, pattern, action, &err);
	if (flow == NULL) {
		printf("\nDefault flow rule create failed\n");
		return -1;
	}

	default_flow[portid] = flow;

	return 0;
}
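
/*
 * For reference, the default rule above is roughly equivalent to the
 * following testpmd command (assuming testpmd's flow syntax):
 *
 *   flow create 0 ingress pattern esp / end actions security / end
 */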

static void
destroy_default_flow(uint16_t portid)
{
	struct rte_flow_error err;
	int ret;

	if (!default_flow[portid])
		return;
	ret = rte_flow_destroy(portid, default_flow[portid], &err);
	if (ret) {
		printf("\nDefault flow rule destroy failed\n");
		return;
	}
	default_flow[portid] = NULL;
}

struct rte_mbuf **tx_pkts_burst;
struct rte_mbuf **rx_pkts_burst;

static int
compare_pkt_data(struct rte_mbuf *m, uint8_t *ref, unsigned int tot_len)
{
	unsigned int len;
	unsigned int nb_segs = m->nb_segs;
	unsigned int matched = 0;
	struct rte_mbuf *save = m;

	while (m) {
		len = tot_len;
		if (len > m->data_len)
			len = m->data_len;
		if (len != 0) {
			if (memcmp(rte_pktmbuf_mtod(m, char *),
					ref + matched, len)) {
				printf("\n====Reassembly case failed: Data Mismatch");
				rte_hexdump(stdout, "Reassembled",
					rte_pktmbuf_mtod(m, char *),
					len);
				rte_hexdump(stdout, "Reference",
					ref + matched,
					len);
				return TEST_FAILED;
			}
		}
		tot_len -= len;
		matched += len;
		m = m->next;
	}

	if (tot_len) {
		printf("\n====Reassembly case failed: Data Missing %u",
		       tot_len);
		printf("\n====nb_segs %u, tot_len %u", nb_segs, tot_len);
		rte_pktmbuf_dump(stderr, save, -1);
		return TEST_FAILED;
	}
	return TEST_SUCCESS;
}

static inline bool
is_ip_reassembly_incomplete(struct rte_mbuf *mbuf)
{
	static uint64_t ip_reassembly_dynflag;
	int ip_reassembly_dynflag_offset;

	if (ip_reassembly_dynflag == 0) {
		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
			RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
		if (ip_reassembly_dynflag_offset < 0)
			return false;
		ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
	}

	return (mbuf->ol_flags & ip_reassembly_dynflag) != 0;
}

static void
free_mbuf(struct rte_mbuf *mbuf)
{
	rte_eth_ip_reassembly_dynfield_t dynfield;

	if (!mbuf)
		return;

	if (!is_ip_reassembly_incomplete(mbuf)) {
		rte_pktmbuf_free(mbuf);
	} else {
		if (ip_reassembly_dynfield_offset < 0)
			return;

		while (mbuf) {
			dynfield = *RTE_MBUF_DYNFIELD(mbuf,
					ip_reassembly_dynfield_offset,
					rte_eth_ip_reassembly_dynfield_t *);
			rte_pktmbuf_free(mbuf);
			mbuf = dynfield.next_frag;
		}
	}
}

static int
get_and_verify_incomplete_frags(struct rte_mbuf *mbuf,
				struct reassembly_vector *vector)
{
	rte_eth_ip_reassembly_dynfield_t *dynfield[MAX_PKT_BURST];
	int j = 0, ret;
	/**
	 * IP reassembly offload is incomplete, and fragments are listed in
	 * dynfield which can be reassembled in SW.
	 */
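	/*
	 * Chaining model assumed below (sketch): every mbuf flagged as
	 * reassembly-incomplete carries a dynfield holding nb_frags and
	 * a next_frag pointer, forming a list:
	 *
	 *	frag0 --next_frag--> frag1 --next_frag--> frag2 ...
	 *
	 * Each fragment is compared against the corresponding original
	 * fragment from the test vector.
	 */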
	printf("\nHW IP Reassembly is not complete; attempt SW IP Reassembly,"
	       "\nMatching with original frags.");

	if (ip_reassembly_dynfield_offset < 0)
		return -1;

	printf("\ncomparing frag: %d", j);
	/* Skip Ethernet header comparison */
	rte_pktmbuf_adj(mbuf, RTE_ETHER_HDR_LEN);
	ret = compare_pkt_data(mbuf, vector->frags[j]->data,
			       vector->frags[j]->len);
	if (ret)
		return ret;
	j++;
	dynfield[j] = RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
					rte_eth_ip_reassembly_dynfield_t *);
	printf("\ncomparing frag: %d", j);
	/* Skip Ethernet header comparison */
	rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
	ret = compare_pkt_data(dynfield[j]->next_frag, vector->frags[j]->data,
			       vector->frags[j]->len);
	if (ret)
		return ret;

	while ((dynfield[j]->nb_frags > 1) &&
			is_ip_reassembly_incomplete(dynfield[j]->next_frag)) {
		j++;
		dynfield[j] = RTE_MBUF_DYNFIELD(dynfield[j-1]->next_frag,
						ip_reassembly_dynfield_offset,
						rte_eth_ip_reassembly_dynfield_t *);
		printf("\ncomparing frag: %d", j);
		/* Skip Ethernet header comparison */
		rte_pktmbuf_adj(dynfield[j]->next_frag, RTE_ETHER_HDR_LEN);
		ret = compare_pkt_data(dynfield[j]->next_frag,
				vector->frags[j]->data, vector->frags[j]->len);
		if (ret)
			return ret;
	}
	return ret;
}
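
/*
 * IP reassembly test flow. Note the stop/configure/start sequence
 * below: the reassembly configuration (max frags, timeout) is applied
 * while the port is stopped, on the assumption that it cannot be
 * changed on a running port.
 */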
static int
test_ipsec_with_reassembly(struct reassembly_vector *vector,
		const struct ipsec_test_flags *flags)
{
	void *out_ses[ENCAP_DECAP_BURST_SZ] = {0};
	void *in_ses[ENCAP_DECAP_BURST_SZ] = {0};
	struct rte_eth_ip_reassembly_params reass_capa = {0};
	struct rte_security_session_conf sess_conf_out = {0};
	struct rte_security_session_conf sess_conf_in = {0};
	unsigned int nb_tx, burst_sz, nb_sent = 0;
	struct rte_crypto_sym_xform cipher_out = {0};
	struct rte_crypto_sym_xform auth_out = {0};
	struct rte_crypto_sym_xform aead_out = {0};
	struct rte_crypto_sym_xform cipher_in = {0};
	struct rte_crypto_sym_xform auth_in = {0};
	struct rte_crypto_sym_xform aead_in = {0};
	struct ipsec_test_data sa_data;
	struct rte_security_ctx *ctx;
	unsigned int i, nb_rx = 0, j;
	uint32_t ol_flags;
	bool outer_ipv4;
	int ret = 0;

	burst_sz = vector->burst ? ENCAP_DECAP_BURST_SZ : 1;
	nb_tx = vector->nb_frags * burst_sz;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		printf("rte_eth_dev_stop: err=%s, port=%u\n",
			rte_strerror(-ret), port_id);
		return ret;
	}
	rte_eth_ip_reassembly_capability_get(port_id, &reass_capa);
	if (reass_capa.max_frags < vector->nb_frags)
		return TEST_SKIPPED;
	if (reass_capa.timeout_ms > APP_REASS_TIMEOUT) {
		reass_capa.timeout_ms = APP_REASS_TIMEOUT;
		rte_eth_ip_reassembly_conf_set(port_id, &reass_capa);
	}

	ret = rte_eth_dev_start(port_id);
	if (ret < 0) {
		printf("rte_eth_dev_start: err=%d, port=%d\n",
			ret, port_id);
		return ret;
	}

	memset(tx_pkts_burst, 0, sizeof(tx_pkts_burst[0]) * nb_tx);
	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_tx);

	memcpy(&sa_data, vector->sa_data, sizeof(struct ipsec_test_data));
	sa_data.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	outer_ipv4 = is_outer_ipv4(&sa_data);

	for (i = 0; i < nb_tx; i += vector->nb_frags) {
		for (j = 0; j < vector->nb_frags; j++) {
			tx_pkts_burst[i+j] = init_packet(mbufpool,
					vector->frags[j]->data,
					vector->frags[j]->len, outer_ipv4);
			if (tx_pkts_burst[i+j] == NULL) {
				ret = -1;
				printf("\npacket init failed\n");
				goto out;
			}
		}
	}

	for (i = 0; i < burst_sz; i++) {
		memcpy(&sa_data, vector->sa_data,
				sizeof(struct ipsec_test_data));
		/* Update SPI for every new SA */
		sa_data.ipsec_xform.spi += i;
		sa_data.ipsec_xform.direction =
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
		if (sa_data.aead) {
			sess_conf_out.crypto_xform = &aead_out;
		} else {
			sess_conf_out.crypto_xform = &cipher_out;
			sess_conf_out.crypto_xform->next = &auth_out;
		}

		/* Create Inline IPsec outbound session. */
		ret = create_inline_ipsec_session(&sa_data, port_id,
				&out_ses[i], &ctx, &ol_flags, flags,
				&sess_conf_out);
		if (ret) {
			printf("\nInline outbound session create failed\n");
			goto out;
		}
	}

	j = 0;
	for (i = 0; i < nb_tx; i++) {
		if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
			rte_security_set_pkt_metadata(ctx,
				out_ses[j], tx_pkts_burst[i], NULL);
		tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

		/* Move to next SA after nb_frags */
		if ((i + 1) % vector->nb_frags == 0)
			j++;
	}

	for (i = 0; i < burst_sz; i++) {
		memcpy(&sa_data, vector->sa_data,
				sizeof(struct ipsec_test_data));
		/* Update SPI for every new SA */
		sa_data.ipsec_xform.spi += i;
		sa_data.ipsec_xform.direction =
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

		if (sa_data.aead) {
			sess_conf_in.crypto_xform = &aead_in;
		} else {
			sess_conf_in.crypto_xform = &auth_in;
			sess_conf_in.crypto_xform->next = &cipher_in;
		}
		/* Create Inline IPsec inbound session. */
		ret = create_inline_ipsec_session(&sa_data, port_id, &in_ses[i],
				&ctx, &ol_flags, flags, &sess_conf_in);
		if (ret) {
			printf("\nInline inbound session create failed\n");
			goto out;
		}
	}

	/* Retrieve reassembly dynfield offset if available */
	if (ip_reassembly_dynfield_offset < 0 && vector->nb_frags > 1)
		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);

	ret = create_default_flow(port_id);
	if (ret)
		goto out;

	nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_tx);
	if (nb_sent != nb_tx) {
		ret = -1;
		printf("\nFailed to tx %u pkts", nb_tx);
		goto out;
	}

	rte_delay_ms(1);

	/* Retry a few times before giving up */
	nb_rx = 0;
	j = 0;
	do {
		nb_rx += rte_eth_rx_burst(port_id, 0, &rx_pkts_burst[nb_rx],
					  nb_tx - nb_rx);
		j++;
		if (nb_rx >= nb_tx)
			break;
		rte_delay_ms(1);
	} while (j < 5 || !nb_rx);

	/* Check for minimum number of Rx packets expected */
	if ((vector->nb_frags == 1 && nb_rx != nb_tx) ||
			(vector->nb_frags > 1 && nb_rx < burst_sz)) {
		printf("\nReceived fewer Rx pkts (%u) than expected\n", nb_rx);
		ret = TEST_FAILED;
		goto out;
	}

	for (i = 0; i < nb_rx; i++) {
		if (vector->nb_frags > 1 &&
		    is_ip_reassembly_incomplete(rx_pkts_burst[i])) {
			ret = get_and_verify_incomplete_frags(rx_pkts_burst[i],
							      vector);
			if (ret != TEST_SUCCESS)
				break;
			continue;
		}

		if (rx_pkts_burst[i]->ol_flags &
		    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED ||
		    !(rx_pkts_burst[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD)) {
			printf("\nsecurity offload failed\n");
			ret = TEST_FAILED;
			break;
		}

		if (vector->full_pkt->len + RTE_ETHER_HDR_LEN !=
				rx_pkts_burst[i]->pkt_len) {
			printf("\nreassembled/decrypted packet length mismatch\n");
			ret = TEST_FAILED;
			break;
		}
		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);
		ret = compare_pkt_data(rx_pkts_burst[i],
				       vector->full_pkt->data,
				       vector->full_pkt->len);
		if (ret != TEST_SUCCESS)
			break;
	}

out:
	destroy_default_flow(port_id);

	/* Clear session data. */
	for (i = 0; i < burst_sz; i++) {
		if (out_ses[i])
			rte_security_session_destroy(ctx, out_ses[i]);
		if (in_ses[i])
			rte_security_session_destroy(ctx, in_ses[i]);
	}

	for (i = nb_sent; i < nb_tx; i++)
		free_mbuf(tx_pkts_burst[i]);
	for (i = 0; i < nb_rx; i++)
		free_mbuf(rx_pkts_burst[i]);
	return ret;
}

static int
event_tx_burst(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_event ev;
	int i, nb_sent = 0;

	/* Convert packets to events */
	memset(&ev, 0, sizeof(ev));
	ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
	for (i = 0; i < nb_pkts; i++) {
		ev.mbuf = tx_pkts[i];
		nb_sent += rte_event_eth_tx_adapter_enqueue(
				eventdev_id, port_id, &ev, 1, 0);
	}

	return nb_sent;
}

static int
event_rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts_to_rx)
{
	int nb_ev, nb_rx = 0, j = 0;
	const int ms_per_pkt = 3;
	struct rte_event ev;

	do {
		nb_ev = rte_event_dequeue_burst(eventdev_id, port_id,
				&ev, 1, 0);

		if (nb_ev == 0) {
			rte_delay_ms(1);
			continue;
		}

		/* Get packet from event */
		if (ev.event_type != RTE_EVENT_TYPE_ETHDEV) {
			printf("Unsupported event type: %i\n",
				ev.event_type);
			continue;
		}
		rx_pkts[nb_rx++] = ev.mbuf;
	} while (j++ < (nb_pkts_to_rx * ms_per_pkt) && nb_rx < nb_pkts_to_rx);

	return nb_rx;
}

static int
test_ipsec_inline_sa_exp_event_callback(uint16_t port_id,
		enum rte_eth_event_type type, void *param, void *ret_param)
{
	struct sa_expiry_vector *vector = (struct sa_expiry_vector *)param;
	struct rte_eth_event_ipsec_desc *event_desc = NULL;

	RTE_SET_USED(port_id);

	if (type != RTE_ETH_EVENT_IPSEC)
		return -1;

	event_desc = ret_param;
	if (event_desc == NULL) {
		printf("Event descriptor not set\n");
		return -1;
	}
	vector->notify_event = true;
	if (event_desc->metadata != (uint64_t)vector->sa_data) {
		printf("Mismatch in event specific metadata\n");
		return -1;
	}
	switch (event_desc->subtype) {
	case RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY:
		vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
		break;
	case RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY:
		vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
		break;
	case RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY:
		vector->event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
		break;
	case RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY:
		vector->event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
		break;
	default:
		printf("Invalid IPsec event reported\n");
		return -1;
	}

	return 0;
}

static enum rte_eth_event_ipsec_subtype
test_ipsec_inline_setup_expiry_vector(struct sa_expiry_vector *vector,
		const struct ipsec_test_flags *flags,
		struct ipsec_test_data *tdata)
{
	enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN;

	vector->event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
	vector->notify_event = false;
	vector->sa_data = (void *)tdata;
	if (flags->sa_expiry_pkts_soft)
		event = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
	else if (flags->sa_expiry_bytes_soft)
		event = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
	else if (flags->sa_expiry_pkts_hard)
		event = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
	else
		event = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_IPSEC,
			test_ipsec_inline_sa_exp_event_callback, vector);

	return event;
}

static int
test_ipsec_inline_proto_process(struct ipsec_test_data *td,
		struct ipsec_test_data *res_d,
		int nb_pkts,
		bool silent,
		const struct ipsec_test_flags *flags)
{
	enum rte_eth_event_ipsec_subtype event = RTE_ETH_EVENT_IPSEC_UNKNOWN;
	struct rte_security_session_conf sess_conf = {0};
	struct rte_crypto_sym_xform cipher = {0};
	struct rte_crypto_sym_xform auth = {0};
	struct rte_crypto_sym_xform aead = {0};
	struct sa_expiry_vector vector = {0};
	struct rte_security_ctx *ctx;
	int nb_rx = 0, nb_sent;
	uint32_t ol_flags;
	int i, j = 0, ret;
	bool outer_ipv4;
	void *ses;

	memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * nb_pkts);

	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
			return TEST_SUCCESS;
		event = test_ipsec_inline_setup_expiry_vector(&vector, flags, td);
	}

	if (td->aead) {
		sess_conf.crypto_xform = &aead;
	} else {
		if (td->ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			sess_conf.crypto_xform = &cipher;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			sess_conf.crypto_xform->next = &auth;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
		} else {
			sess_conf.crypto_xform = &auth;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
			sess_conf.crypto_xform->next = &cipher;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		}
	}

	/* Create Inline IPsec session. */
	ret = create_inline_ipsec_session(td, port_id, &ses, &ctx,
					  &ol_flags, flags, &sess_conf);
	if (ret)
		return ret;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		ret = create_default_flow(port_id);
		if (ret)
			goto out;
	}
	outer_ipv4 = is_outer_ipv4(td);

	for (i = 0; i < nb_pkts; i++) {
		tx_pkts_burst[i] = init_packet(mbufpool, td->input_text.data,
					       td->input_text.len, outer_ipv4);
		if (tx_pkts_burst[i] == NULL) {
			while (i--)
				rte_pktmbuf_free(tx_pkts_burst[i]);
			ret = TEST_FAILED;
			goto out;
		}

		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkts_burst[i],
					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
			while (i--)
				rte_pktmbuf_free(tx_pkts_burst[i]);
			ret = TEST_FAILED;
			goto out;
		}

		if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(ctx, ses,
						tx_pkts_burst[i], NULL);
			tx_pkts_burst[i]->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		}
	}
	/* Send packet to ethdev for inline IPsec processing. */
	if (event_mode_enabled)
		nb_sent = event_tx_burst(tx_pkts_burst, nb_pkts);
	else
		nb_sent = rte_eth_tx_burst(port_id, 0, tx_pkts_burst, nb_pkts);

	if (nb_sent != nb_pkts) {
		printf("\nUnable to TX %d packets, sent: %i", nb_pkts, nb_sent);
		for ( ; nb_sent < nb_pkts; nb_sent++)
			rte_pktmbuf_free(tx_pkts_burst[nb_sent]);
		ret = TEST_FAILED;
		goto out;
	}

	rte_pause();

	/* Receive back packet on loopback interface. */
	if (event_mode_enabled)
		nb_rx = event_rx_burst(rx_pkts_burst, nb_sent);
	else
		do {
			rte_delay_ms(1);
			nb_rx += rte_eth_rx_burst(port_id, 0,
					&rx_pkts_burst[nb_rx],
					nb_sent - nb_rx);
			if (nb_rx >= nb_sent)
				break;
		} while (j++ < 5 || nb_rx == 0);

	if (!flags->sa_expiry_pkts_hard &&
			!flags->sa_expiry_bytes_hard &&
			(nb_rx != nb_sent)) {
		printf("\nUnable to RX all %d packets, received (%i)",
		       nb_sent, nb_rx);
		while (--nb_rx >= 0)
			rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
		ret = TEST_FAILED;
		goto out;
	}

	for (i = 0; i < nb_rx; i++) {
		rte_pktmbuf_adj(rx_pkts_burst[i], RTE_ETHER_HDR_LEN);

		ret = test_ipsec_post_process(rx_pkts_burst[i], td,
					      res_d, silent, flags);
		if (ret != TEST_SUCCESS) {
			for ( ; i < nb_rx; i++)
				rte_pktmbuf_free(rx_pkts_burst[i]);
			goto out;
		}

		ret = test_ipsec_stats_verify(ctx, ses, flags,
					      td->ipsec_xform.direction);
		if (ret != TEST_SUCCESS) {
			for ( ; i < nb_rx; i++)
				rte_pktmbuf_free(rx_pkts_burst[i]);
			goto out;
		}

		rte_pktmbuf_free(rx_pkts_burst[i]);
		rx_pkts_burst[i] = NULL;
	}

out:
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		destroy_default_flow(port_id);
	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
		if (vector.notify_event && (vector.event == event))
			ret = TEST_SUCCESS;
		else
			ret = TEST_FAILED;

		rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_IPSEC,
			test_ipsec_inline_sa_exp_event_callback, &vector);
	}

	/* Destroy session so that other cases can create the session again */
	rte_security_session_destroy(ctx, ses);
	ses = NULL;

	return ret;
}

static int
test_ipsec_inline_proto_all(const struct ipsec_test_flags *flags)
{
	struct ipsec_test_data td_outb;
	struct ipsec_test_data td_inb;
	unsigned int i, nb_pkts = 1, pass_cnt = 0, fail_cnt = 0;
	int ret;

	if (flags->iv_gen || flags->sa_expiry_pkts_soft ||
			flags->sa_expiry_bytes_soft ||
			flags->sa_expiry_bytes_hard ||
			flags->sa_expiry_pkts_hard)
		nb_pkts = IPSEC_TEST_PACKETS_MAX;

	for (i = 0; i < RTE_DIM(alg_list); i++) {
		test_ipsec_td_prepare(alg_list[i].param1,
				      alg_list[i].param2,
				      flags, &td_outb, 1);

		if (!td_outb.aead) {
			enum rte_crypto_cipher_algorithm cipher_alg;
			enum rte_crypto_auth_algorithm auth_alg;

			cipher_alg = td_outb.xform.chain.cipher.cipher.algo;
			auth_alg = td_outb.xform.chain.auth.auth.algo;

			if (td_outb.aes_gmac && cipher_alg != RTE_CRYPTO_CIPHER_NULL)
				continue;

			/* ICV is not applicable for NULL auth */
			if (flags->icv_corrupt &&
			    auth_alg == RTE_CRYPTO_AUTH_NULL)
				continue;

			/* IV is not applicable for NULL cipher */
			if (flags->iv_gen &&
			    cipher_alg == RTE_CRYPTO_CIPHER_NULL)
				continue;
		}

		if (flags->udp_encap)
			td_outb.ipsec_xform.options.udp_encap = 1;
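
		/*
		 * The limits below are set just under the total traffic
		 * this test will generate, so the SA is expected to expire
		 * (and raise the corresponding event) while the burst is
		 * still being processed.
		 */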
		if (flags->sa_expiry_bytes_soft)
			td_outb.ipsec_xform.life.bytes_soft_limit =
				(((td_outb.output_text.len + RTE_ETHER_HDR_LEN)
				  * nb_pkts) >> 3) - 1;
		if (flags->sa_expiry_pkts_hard)
			td_outb.ipsec_xform.life.packets_hard_limit =
					IPSEC_TEST_PACKETS_MAX - 1;
		if (flags->sa_expiry_bytes_hard)
			td_outb.ipsec_xform.life.bytes_hard_limit =
				(((td_outb.output_text.len + RTE_ETHER_HDR_LEN)
				  * nb_pkts) >> 3) - 1;

		ret = test_ipsec_inline_proto_process(&td_outb, &td_inb, nb_pkts,
						      false, flags);
		if (ret == TEST_SKIPPED)
			continue;

		if (ret == TEST_FAILED) {
			printf("\n TEST FAILED");
			test_ipsec_display_alg(alg_list[i].param1,
					       alg_list[i].param2);
			fail_cnt++;
			continue;
		}

		test_ipsec_td_update(&td_inb, &td_outb, 1, flags);

		ret = test_ipsec_inline_proto_process(&td_inb, NULL, nb_pkts,
						      false, flags);
		if (ret == TEST_SKIPPED)
			continue;

		if (ret == TEST_FAILED) {
			printf("\n TEST FAILED");
			test_ipsec_display_alg(alg_list[i].param1,
					       alg_list[i].param2);
			fail_cnt++;
			continue;
		}

		if (flags->display_alg)
			test_ipsec_display_alg(alg_list[i].param1,
					       alg_list[i].param2);

		pass_cnt++;
	}

	printf("Tests passed: %d, failed: %d", pass_cnt, fail_cnt);
	if (fail_cnt > 0)
		return TEST_FAILED;
	if (pass_cnt > 0)
		return TEST_SUCCESS;
	else
		return TEST_SKIPPED;
}

static int
test_ipsec_inline_proto_process_with_esn(struct ipsec_test_data td[],
		struct ipsec_test_data res_d[],
		int nb_pkts,
		bool silent,
		const struct ipsec_test_flags *flags)
{
	struct rte_security_session_conf sess_conf = {0};
	struct ipsec_test_data *res_d_tmp = NULL;
	struct rte_crypto_sym_xform cipher = {0};
	struct rte_crypto_sym_xform auth = {0};
	struct rte_crypto_sym_xform aead = {0};
	struct rte_mbuf *rx_pkt = NULL;
	struct rte_mbuf *tx_pkt = NULL;
	int nb_rx, nb_sent;
	void *ses;
	struct rte_security_ctx *ctx;
	uint32_t ol_flags;
	bool outer_ipv4;
	int i, ret;

	if (td[0].aead) {
		sess_conf.crypto_xform = &aead;
	} else {
		if (td[0].ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			sess_conf.crypto_xform = &cipher;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			sess_conf.crypto_xform->next = &auth;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
		} else {
			sess_conf.crypto_xform = &auth;
			sess_conf.crypto_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
			sess_conf.crypto_xform->next = &cipher;
			sess_conf.crypto_xform->next->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
		}
	}

	/* Create Inline IPsec session. */
	ret = create_inline_ipsec_session(&td[0], port_id, &ses, &ctx,
					  &ol_flags, flags, &sess_conf);
	if (ret)
		return ret;

	if (td[0].ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		ret = create_default_flow(port_id);
		if (ret)
			goto out;
	}
	outer_ipv4 = is_outer_ipv4(td);

	for (i = 0; i < nb_pkts; i++) {
		tx_pkt = init_packet(mbufpool, td[i].input_text.data,
				     td[i].input_text.len, outer_ipv4);
		if (tx_pkt == NULL) {
			ret = TEST_FAILED;
			goto out;
		}

		if (test_ipsec_pkt_update(rte_pktmbuf_mtod_offset(tx_pkt,
					uint8_t *, RTE_ETHER_HDR_LEN), flags)) {
			ret = TEST_FAILED;
			goto out;
		}

		if (td[i].ipsec_xform.direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			if (flags->antireplay) {
				sess_conf.ipsec.esn.value =
						td[i].ipsec_xform.esn.value;
				ret = rte_security_session_update(ctx, ses,
						&sess_conf);
				if (ret) {
					printf("Could not update ESN in session\n");
					rte_pktmbuf_free(tx_pkt);
					ret = TEST_SKIPPED;
					goto out;
				}
			}
			if (ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(ctx, ses,
						tx_pkt, NULL);
			tx_pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
		}
		/* Send packet to ethdev for inline IPsec processing. */
		nb_sent = rte_eth_tx_burst(port_id, 0, &tx_pkt, 1);
		if (nb_sent != 1) {
			printf("\nUnable to TX packets");
			rte_pktmbuf_free(tx_pkt);
			ret = TEST_FAILED;
			goto out;
		}

		rte_pause();

		/* Receive back packet on loopback interface. */
		do {
			rte_delay_ms(1);
			nb_rx = rte_eth_rx_burst(port_id, 0, &rx_pkt, 1);
		} while (nb_rx == 0);

		rte_pktmbuf_adj(rx_pkt, RTE_ETHER_HDR_LEN);

		if (res_d != NULL)
			res_d_tmp = &res_d[i];

		ret = test_ipsec_post_process(rx_pkt, &td[i],
					      res_d_tmp, silent, flags);
		if (ret != TEST_SUCCESS) {
			rte_pktmbuf_free(rx_pkt);
			goto out;
		}

		ret = test_ipsec_stats_verify(ctx, ses, flags,
					      td->ipsec_xform.direction);
		if (ret != TEST_SUCCESS) {
			rte_pktmbuf_free(rx_pkt);
			goto out;
		}

		rte_pktmbuf_free(rx_pkt);
		rx_pkt = NULL;
		tx_pkt = NULL;
	}

out:
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		destroy_default_flow(port_id);

	/* Destroy session so that other cases can create the session again */
	rte_security_session_destroy(ctx, ses);
	ses = NULL;

	return ret;
}

static int
ut_setup_inline_ipsec(void)
{
	int ret;

	/* Start device */
	ret = rte_eth_dev_start(port_id);
	if (ret < 0) {
		printf("rte_eth_dev_start: err=%d, port=%d\n",
			ret, port_id);
		return ret;
	}
	/* always enable promiscuous */
	ret = rte_eth_promiscuous_enable(port_id);
	if (ret != 0) {
		printf("rte_eth_promiscuous_enable: err=%s, port=%d\n",
			rte_strerror(-ret), port_id);
		return ret;
	}

	check_all_ports_link_status(1, RTE_PORT_ALL);

	return 0;
}

static void
ut_teardown_inline_ipsec(void)
{
	struct rte_eth_ip_reassembly_params reass_conf = {0};
	uint16_t portid;
	int ret;

	/* port tear down */
	RTE_ETH_FOREACH_DEV(portid) {
		ret = rte_eth_dev_stop(portid);
		if (ret != 0)
			printf("rte_eth_dev_stop: err=%s, port=%u\n",
			       rte_strerror(-ret), portid);

		/* Clear reassembly configuration */
		rte_eth_ip_reassembly_conf_set(portid, &reass_conf);
	}
}

static int
inline_ipsec_testsuite_setup(void)
{
	uint16_t nb_rxd;
	uint16_t nb_txd;
	uint16_t nb_ports;
	int ret;
	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;

	printf("Start inline IPsec test.\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports < NB_ETHPORTS_USED) {
		printf("Test requires at least %u port(s)\n",
		       NB_ETHPORTS_USED);
		return TEST_SKIPPED;
	}

	ret = init_mempools(NB_MBUF);
	if (ret)
		return ret;

	if (tx_pkts_burst == NULL) {
		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
				MAX_TRAFFIC_BURST,
				sizeof(void *),
				RTE_CACHE_LINE_SIZE);
		if (!tx_pkts_burst)
			return TEST_FAILED;

		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
				MAX_TRAFFIC_BURST,
				sizeof(void *),
				RTE_CACHE_LINE_SIZE);
		if (!rx_pkts_burst)
			return TEST_FAILED;
	}

	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);

	nb_rxd = RX_DESC_DEFAULT;
	nb_txd = TX_DESC_DEFAULT;

	/* configuring port 0 for the test is enough */
	port_id = 0;
	/* port configure */
	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
				    nb_tx_queue, &port_conf);
	if (ret < 0) {
		printf("Cannot configure device: err=%d, port=%d\n",
			ret, port_id);
		return ret;
	}
	ret = rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
	if (ret < 0) {
		printf("Cannot get mac address: err=%d, port=%d\n",
			ret, port_id);
		return ret;
	}
	printf("Port %u ", port_id);
	print_ethaddr("Address:", &ports_eth_addr[port_id]);
	printf("\n");

	/* tx queue setup */
	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
				     SOCKET_ID_ANY, &tx_conf);
	if (ret < 0) {
		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
			ret, port_id);
		return ret;
	}
	/* rx queue setup */
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
				     &rx_conf, mbufpool);
	if (ret < 0) {
		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
			ret, port_id);
		return ret;
	}
	test_ipsec_alg_list_populate();

	return 0;
}

static void
inline_ipsec_testsuite_teardown(void)
{
	uint16_t portid;
	int ret;

	/* port tear down */
	RTE_ETH_FOREACH_DEV(portid) {
		ret = rte_eth_dev_reset(portid);
		if (ret != 0)
			printf("rte_eth_dev_reset: err=%s, port=%u\n",
			       rte_strerror(-ret), portid);
	}
	rte_free(tx_pkts_burst);
	rte_free(rx_pkts_burst);
}

static int
event_inline_ipsec_testsuite_setup(void)
{
	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
	struct rte_event_dev_info evdev_default_conf = {0};
	struct rte_event_dev_config eventdev_conf = {0};
	struct rte_event_queue_conf eventq_conf = {0};
	struct rte_event_port_conf ev_port_conf = {0};
	const uint16_t nb_txd = 1024, nb_rxd = 1024;
	uint16_t nb_rx_queue = 1, nb_tx_queue = 1;
	uint8_t ev_queue_id = 0, tx_queue_id = 0;
	int nb_eventqueue = 1, nb_eventport = 1;
	const int all_queues = -1;
	uint32_t caps = 0;
	uint16_t nb_ports;
	int ret;

	printf("Start event inline IPsec test.\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0) {
		printf("Test requires: 1 port, available: 0\n");
		return TEST_SKIPPED;
	}

	ret = init_mempools(NB_MBUF);
	if (ret)
		return ret;

	if (tx_pkts_burst == NULL) {
		tx_pkts_burst = (struct rte_mbuf **)rte_calloc("tx_buff",
				MAX_TRAFFIC_BURST,
				sizeof(void *),
				RTE_CACHE_LINE_SIZE);
		if (!tx_pkts_burst)
			return -1;

		rx_pkts_burst = (struct rte_mbuf **)rte_calloc("rx_buff",
				MAX_TRAFFIC_BURST,
				sizeof(void *),
				RTE_CACHE_LINE_SIZE);
		if (!rx_pkts_burst)
			return -1;
	}

	printf("Generate %d packets\n", MAX_TRAFFIC_BURST);

	/* configuring port 0 for the test is enough */
	port_id = 0;
	/* port configure */
	ret = rte_eth_dev_configure(port_id, nb_rx_queue,
				    nb_tx_queue, &port_conf);
	if (ret < 0) {
		printf("Cannot configure device: err=%d, port=%d\n",
			ret, port_id);
		return ret;
	}

	/* Tx queue setup */
	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
				     SOCKET_ID_ANY, &tx_conf);
	if (ret < 0) {
		printf("rte_eth_tx_queue_setup: err=%d, port=%d\n",
			ret, port_id);
		return ret;
	}

	/* rx queue setup */
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, SOCKET_ID_ANY,
				     &rx_conf, mbufpool);
	if (ret < 0) {
		printf("rte_eth_rx_queue_setup: err=%d, port=%d\n",
			ret, port_id);
		return ret;
	}

	/* Setup eventdev */
	eventdev_id = 0;
	rx_adapter_id = 0;
	tx_adapter_id = 0;

	/* Get default conf of eventdev */
	ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
	if (ret < 0) {
		printf("Error in getting event device info[devID:%d]\n",
			eventdev_id);
		return ret;
	}

	/* Get Tx adapter capabilities */
	ret = rte_event_eth_tx_adapter_caps_get(eventdev_id, tx_adapter_id, &caps);
	if (ret < 0) {
		printf("Failed to get event device %d eth tx adapter"
		       " capabilities for port %d\n",
		       eventdev_id, port_id);
		return ret;
	}
	if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
		tx_queue_id = nb_eventqueue++;

	eventdev_conf.nb_events_limit =
			evdev_default_conf.max_num_events;
	eventdev_conf.nb_event_queue_flows =
			evdev_default_conf.max_event_queue_flows;
	eventdev_conf.nb_event_port_dequeue_depth =
			evdev_default_conf.max_event_port_dequeue_depth;
	eventdev_conf.nb_event_port_enqueue_depth =
			evdev_default_conf.max_event_port_enqueue_depth;

	eventdev_conf.nb_event_queues = nb_eventqueue;
	eventdev_conf.nb_event_ports = nb_eventport;

	/* Configure event device */
	ret = rte_event_dev_configure(eventdev_id, &eventdev_conf);
	if (ret < 0) {
		printf("Error in configuring event device\n");
		return ret;
	}

	/* Configure event queue */
	eventq_conf.schedule_type = RTE_SCHED_TYPE_PARALLEL;
	eventq_conf.nb_atomic_flows = 1024;
	eventq_conf.nb_atomic_order_sequences = 1024;

	/* Setup the queue */
	ret = rte_event_queue_setup(eventdev_id, ev_queue_id, &eventq_conf);
	if (ret < 0) {
		printf("Failed to setup event queue %d\n", ret);
		return ret;
	}

	/* Configure event port */
	ret = rte_event_port_setup(eventdev_id, port_id, NULL);
	if (ret < 0) {
		printf("Failed to setup event port %d\n", ret);
		return ret;
	}

	/* Make event queue - event port link */
	ret = rte_event_port_link(eventdev_id, port_id, NULL, NULL, 1);
	if (ret < 0) {
		printf("Failed to link event port %d\n", ret);
		return ret;
	}

	/* Setup port conf */
	ev_port_conf.new_event_threshold = 1200;
	ev_port_conf.dequeue_depth =
			evdev_default_conf.max_event_port_dequeue_depth;
	ev_port_conf.enqueue_depth =
			evdev_default_conf.max_event_port_enqueue_depth;

	/* Create Rx adapter */
	ret = rte_event_eth_rx_adapter_create(rx_adapter_id, eventdev_id,
					      &ev_port_conf);
	if (ret < 0) {
		printf("Failed to create rx adapter %d\n", ret);
		return ret;
	}

	/* Setup queue conf */
	queue_conf.ev.queue_id = ev_queue_id;
	queue_conf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;

	/* Add queue to the adapter */
	ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
						 all_queues, &queue_conf);
	if (ret < 0) {
		printf("Failed to add eth queue to rx adapter %d\n", ret);
		return ret;
	}

	/* Start rx adapter */
	ret = rte_event_eth_rx_adapter_start(rx_adapter_id);
	if (ret < 0) {
		printf("Failed to start rx adapter %d\n", ret);
		return ret;
	}

	/* Create tx adapter */
	ret = rte_event_eth_tx_adapter_create(tx_adapter_id, eventdev_id,
					      &ev_port_conf);
	if (ret < 0) {
		printf("Failed to create tx adapter %d\n", ret);
		return ret;
	}

	/* Add queue to the adapter */
	ret = rte_event_eth_tx_adapter_queue_add(tx_adapter_id, port_id,
						 all_queues);
	if (ret < 0) {
		printf("Failed to add eth queue to tx adapter %d\n", ret);
		return ret;
	}
	/* Setup Tx queue & port */
	if (tx_queue_id) {
		/* Setup the queue */
		ret = rte_event_queue_setup(eventdev_id, tx_queue_id,
					    &eventq_conf);
		if (ret < 0) {
			printf("Failed to setup tx event queue %d\n", ret);
			return ret;
		}
		/* Link Tx event queue to Tx port */
		ret = rte_event_port_link(eventdev_id, port_id,
					  &tx_queue_id, NULL, 1);
		if (ret != 1) {
			printf("Failed to link event queue to port\n");
			return ret;
		}
	}

	/* Start tx adapter */
	ret = rte_event_eth_tx_adapter_start(tx_adapter_id);
	if (ret < 0) {
		printf("Failed to start tx adapter %d\n", ret);
		return ret;
	}

	/* Start eventdev */
	ret = rte_event_dev_start(eventdev_id);
	if (ret < 0) {
		printf("Failed to start event device %d\n", ret);
		return ret;
	}

	event_mode_enabled = true;
	test_ipsec_alg_list_populate();

	return 0;
}

static void
event_inline_ipsec_testsuite_teardown(void)
{
	uint16_t portid;
	int ret;

	event_mode_enabled = false;

	/* Stop and release rx adapter */
	ret = rte_event_eth_rx_adapter_stop(rx_adapter_id);
	if (ret < 0)
		printf("Failed to stop rx adapter %d\n", ret);
	ret = rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1);
	if (ret < 0)
		printf("Failed to remove rx adapter queues %d\n", ret);
	ret = rte_event_eth_rx_adapter_free(rx_adapter_id);
	if (ret < 0)
		printf("Failed to free rx adapter %d\n", ret);

	/* Stop and release tx adapter */
	ret = rte_event_eth_tx_adapter_stop(tx_adapter_id);
	if (ret < 0)
		printf("Failed to stop tx adapter %d\n", ret);
	ret = rte_event_eth_tx_adapter_queue_del(tx_adapter_id, port_id, -1);
	if (ret < 0)
		printf("Failed to remove tx adapter queues %d\n", ret);
	ret = rte_event_eth_tx_adapter_free(tx_adapter_id);
	if (ret < 0)
		printf("Failed to free tx adapter %d\n", ret);

	/* Stop and release event devices */
	rte_event_dev_stop(eventdev_id);
	ret = rte_event_dev_close(eventdev_id);
	if (ret < 0)
		printf("Failed to close event dev %d, %d\n", eventdev_id, ret);

	/* port tear down */
	RTE_ETH_FOREACH_DEV(portid) {
		ret = rte_eth_dev_reset(portid);
		if (ret != 0)
			printf("rte_eth_dev_reset: err=%s, port=%u\n",
			       rte_strerror(-ret), portid);
	}

	rte_free(tx_pkts_burst);
	rte_free(rx_pkts_burst);
}

static int
test_inline_ip_reassembly(const void *testdata)
{
	struct reassembly_vector reassembly_td = {0};
	const struct reassembly_vector *td = testdata;
	struct ip_reassembly_test_packet full_pkt;
	struct ip_reassembly_test_packet frags[MAX_FRAGS];
	struct ipsec_test_flags flags = {0};
	int i = 0;

	reassembly_td.sa_data = td->sa_data;
	reassembly_td.nb_frags = td->nb_frags;
	reassembly_td.burst = td->burst;

	memcpy(&full_pkt, td->full_pkt,
	       sizeof(struct ip_reassembly_test_packet));
	reassembly_td.full_pkt = &full_pkt;

	test_vector_payload_populate(reassembly_td.full_pkt, true);
	for (; i < reassembly_td.nb_frags; i++) {
		memcpy(&frags[i], td->frags[i],
		       sizeof(struct ip_reassembly_test_packet));
		reassembly_td.frags[i] = &frags[i];
		test_vector_payload_populate(reassembly_td.frags[i],
					     (i == 0) ? true : false);
	}

	return test_ipsec_with_reassembly(&reassembly_td, &flags);
}

static int
test_ipsec_inline_proto_known_vec(const void *test_data)
{
	struct ipsec_test_data td_outb;
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	memcpy(&td_outb, test_data, sizeof(td_outb));

	if (td_outb.aead ||
	    td_outb.xform.chain.cipher.cipher.algo != RTE_CRYPTO_CIPHER_NULL) {
		/* Disable IV gen to be able to test with known vectors */
		td_outb.ipsec_xform.options.iv_gen_disable = 1;
	}

	return test_ipsec_inline_proto_process(&td_outb, NULL, 1,
					       false, &flags);
}

static int
test_ipsec_inline_proto_known_vec_inb(const void *test_data)
{
	const struct ipsec_test_data *td = test_data;
	struct ipsec_test_flags flags;
	struct ipsec_test_data td_inb;

	memset(&flags, 0, sizeof(flags));

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		test_ipsec_td_in_from_out(td, &td_inb);
	else
		memcpy(&td_inb, td, sizeof(td_inb));

	return test_ipsec_inline_proto_process(&td_inb, NULL, 1, false, &flags);
}

static int
test_ipsec_inline_proto_display_list(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.display_alg = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_udp_encap(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.udp_encap = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_udp_ports_verify(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.udp_encap = true;
	flags.udp_ports_verify = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
static int
test_ipsec_inline_proto_err_icv_corrupt(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.icv_corrupt = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_tunnel_dst_addr_verify(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_inner_ip_csum(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ip_csum = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_inner_l4_csum(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.l4_csum = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_tunnel_v4_in_v4(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = false;
	flags.tunnel_ipv6 = false;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_tunnel_v6_in_v6(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = true;
	flags.tunnel_ipv6 = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_tunnel_v4_in_v6(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = false;
	flags.tunnel_ipv6 = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_tunnel_v6_in_v4(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = true;
	flags.tunnel_ipv6 = false;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_transport_v4(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = false;
	flags.transport = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_transport_l4_csum(const void *data __rte_unused)
{
	struct ipsec_test_flags flags = {
		.l4_csum = true,
		.transport = true,
	};

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_stats(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.stats_success = true;

	return test_ipsec_inline_proto_all(&flags);
}

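/* Each wrapper below flips one knob in ipsec_test_flags and reruns the
 * combined algorithm list populated at suite setup, so a single property
 * (fragmentation, DF/DSCP/flow-label policy, TTL decrement, ...) is
 * verified across all supported cipher suites.
 */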
static int
test_ipsec_inline_proto_pkt_fragment(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.fragment = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_copy_df_inner_0(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.df = TEST_IPSEC_COPY_DF_INNER_0;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_copy_df_inner_1(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.df = TEST_IPSEC_COPY_DF_INNER_1;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_set_df_0_inner_1(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.df = TEST_IPSEC_SET_DF_0_INNER_1;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_set_df_1_inner_0(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.df = TEST_IPSEC_SET_DF_1_INNER_0;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = true;
	flags.tunnel_ipv6 = true;
	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_0;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = true;
	flags.tunnel_ipv6 = true;
	flags.dscp = TEST_IPSEC_COPY_DSCP_INNER_1;

	return test_ipsec_inline_proto_all(&flags);
}

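/* A note on the DF/DSCP/FLABEL policies exercised above and below: the
 * COPY_* policies expect the outer (tunnel) header field to mirror the
 * inner packet, while the SET_* policies expect the value programmed in
 * the SA to be used regardless of the inner field, matching the usual
 * RFC 4301 outer-header construction choices.
 */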
static int
test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = true;
	flags.tunnel_ipv6 = true;
	flags.dscp = TEST_IPSEC_SET_DSCP_0_INNER_1;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = true;
	flags.tunnel_ipv6 = true;
	flags.dscp = TEST_IPSEC_SET_DSCP_1_INNER_0;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv6_copy_flabel_inner_0(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = true;
	flags.tunnel_ipv6 = true;
	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_0;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv6_copy_flabel_inner_1(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = true;
	flags.tunnel_ipv6 = true;
	flags.flabel = TEST_IPSEC_COPY_FLABEL_INNER_1;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = true;
	flags.tunnel_ipv6 = true;
	flags.flabel = TEST_IPSEC_SET_FLABEL_0_INNER_1;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.ipv6 = true;
	flags.tunnel_ipv6 = true;
	flags.flabel = TEST_IPSEC_SET_FLABEL_1_INNER_0;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv4_ttl_decrement(const void *data __rte_unused)
{
	struct ipsec_test_flags flags = {
		.dec_ttl_or_hop_limit = true
	};

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_ipv6_hop_limit_decrement(const void *data __rte_unused)
{
	struct ipsec_test_flags flags = {
		.ipv6 = true,
		.dec_ttl_or_hop_limit = true
	};

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_iv_gen(const void *data __rte_unused)
{
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.iv_gen = true;

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_sa_pkt_soft_expiry(const void *data __rte_unused)
{
	struct ipsec_test_flags flags = {
		.sa_expiry_pkts_soft = true
	};

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_sa_byte_soft_expiry(const void *data __rte_unused)
{
	struct ipsec_test_flags flags = {
		.sa_expiry_bytes_soft = true
	};

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_sa_pkt_hard_expiry(const void *data __rte_unused)
{
	struct ipsec_test_flags flags = {
		.sa_expiry_pkts_hard = true
	};

	return test_ipsec_inline_proto_all(&flags);
}

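/* The soft expiry cases above expect only an expiry notification while
 * traffic keeps flowing; the hard expiry cases expect the SA to stop
 * processing packets once the configured packet/byte lifetime is crossed.
 */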
static int
test_ipsec_inline_proto_sa_byte_hard_expiry(const void *data __rte_unused)
{
	struct ipsec_test_flags flags = {
		.sa_expiry_bytes_hard = true
	};

	return test_ipsec_inline_proto_all(&flags);
}

static int
test_ipsec_inline_proto_known_vec_fragmented(const void *test_data)
{
	struct ipsec_test_data td_outb;
	struct ipsec_test_flags flags;

	memset(&flags, 0, sizeof(flags));
	flags.fragment = true;

	memcpy(&td_outb, test_data, sizeof(td_outb));

	/* Disable IV gen to be able to test with known vectors */
	td_outb.ipsec_xform.options.iv_gen_disable = 1;

	return test_ipsec_inline_proto_process(&td_outb, NULL, 1, false,
			&flags);
}

static int
test_ipsec_inline_pkt_replay(const void *test_data, const uint64_t esn[],
			     bool replayed_pkt[], uint32_t nb_pkts, bool esn_en,
			     uint64_t winsz)
{
	struct ipsec_test_data td_outb[IPSEC_TEST_PACKETS_MAX];
	struct ipsec_test_data td_inb[IPSEC_TEST_PACKETS_MAX];
	struct ipsec_test_flags flags;
	uint32_t i;
	int ret;

	memset(&flags, 0, sizeof(flags));
	flags.antireplay = true;

	for (i = 0; i < nb_pkts; i++) {
		memcpy(&td_outb[i], test_data, sizeof(td_outb[0]));
		td_outb[i].ipsec_xform.options.iv_gen_disable = 1;
		td_outb[i].ipsec_xform.replay_win_sz = winsz;
		td_outb[i].ipsec_xform.options.esn = esn_en;
	}

	for (i = 0; i < nb_pkts; i++)
		td_outb[i].ipsec_xform.esn.value = esn[i];

	ret = test_ipsec_inline_proto_process_with_esn(td_outb, td_inb,
			nb_pkts, true, &flags);
	if (ret != TEST_SUCCESS)
		return ret;

	test_ipsec_td_update(td_inb, td_outb, nb_pkts, &flags);

	for (i = 0; i < nb_pkts; i++) {
		td_inb[i].ipsec_xform.options.esn = esn_en;
		/* Set antireplay flag for packets to be dropped */
		td_inb[i].ar_packet = replayed_pkt[i];
	}

	ret = test_ipsec_inline_proto_process_with_esn(td_inb, NULL, nb_pkts,
			true, &flags);

	return ret;
}

static int
test_ipsec_inline_proto_pkt_antireplay(const void *test_data, uint64_t winsz)
{
	uint32_t nb_pkts = 5;
	bool replayed_pkt[5];
	uint64_t esn[5];

	/* 1. Advance the TOP of the window to WS * 2 */
	esn[0] = winsz * 2;
	/* 2. Test sequence number within the new window (WS + 1) */
	esn[1] = winsz + 1;
	/* 3. Test sequence number less than the window BOTTOM */
	esn[2] = winsz;
	/* 4. Test sequence number in the middle of the window */
	esn[3] = winsz + (winsz / 2);
	/* 5. Test replay of the packet in the middle of the window */
	esn[4] = winsz + (winsz / 2);

	replayed_pkt[0] = false;
	replayed_pkt[1] = false;
	replayed_pkt[2] = true;
	replayed_pkt[3] = false;
	replayed_pkt[4] = true;

	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt,
			nb_pkts, false, winsz);
}

static int
test_ipsec_inline_proto_pkt_antireplay1024(const void *test_data)
{
	return test_ipsec_inline_proto_pkt_antireplay(test_data, 1024);
}

static int
test_ipsec_inline_proto_pkt_antireplay2048(const void *test_data)
{
	return test_ipsec_inline_proto_pkt_antireplay(test_data, 2048);
}

static int
test_ipsec_inline_proto_pkt_antireplay4096(const void *test_data)
{
	return test_ipsec_inline_proto_pkt_antireplay(test_data, 4096);
}

static int
test_ipsec_inline_proto_pkt_esn_antireplay(const void *test_data, uint64_t winsz)
{
	uint32_t nb_pkts = 7;
	bool replayed_pkt[7];
	uint64_t esn[7];

	/* Set the initial sequence number */
	esn[0] = (uint64_t)(0xFFFFFFFF - winsz);
	/* 1. Advance the TOP of the window to (1<<32 + WS/2) */
	esn[1] = (uint64_t)((1ULL << 32) + (winsz / 2));
	/* 2. Test sequence number within the new window (1<<32 - WS/2 + 1) */
	esn[2] = (uint64_t)((1ULL << 32) - (winsz / 2) + 1);
	/* 3. Test with sequence number within window (1<<32 - 1) */
	esn[3] = (uint64_t)((1ULL << 32) - 1);
	/* 4. Test with sequence number within window (1<<32) */
	esn[4] = (uint64_t)(1ULL << 32);
	/* 5. Test with duplicate sequence number within
	 * new window (1<<32 - 1)
	 */
	esn[5] = (uint64_t)((1ULL << 32) - 1);
	/* 6. Test with duplicate sequence number within new window (1<<32) */
	esn[6] = (uint64_t)(1ULL << 32);

	replayed_pkt[0] = false;
	replayed_pkt[1] = false;
	replayed_pkt[2] = false;
	replayed_pkt[3] = false;
	replayed_pkt[4] = false;
	replayed_pkt[5] = true;
	replayed_pkt[6] = true;

	return test_ipsec_inline_pkt_replay(test_data, esn, replayed_pkt, nb_pkts,
			true, winsz);
}

static int
test_ipsec_inline_proto_pkt_esn_antireplay1024(const void *test_data)
{
	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 1024);
}

static int
test_ipsec_inline_proto_pkt_esn_antireplay2048(const void *test_data)
{
	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 2048);
}

static int
test_ipsec_inline_proto_pkt_esn_antireplay4096(const void *test_data)
{
	return test_ipsec_inline_proto_pkt_esn_antireplay(test_data, 4096);
}

static struct unit_test_suite inline_ipsec_testsuite = {
	.suite_name = "Inline IPsec Ethernet Device Unit Test Suite",
	.unit_test_cases = {
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_192_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha256),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha384),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha512),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv6 AES-GCM 256)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec, &pkt_aes_256_gcm_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_aes_128_cbc_hmac_sha256_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Outbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec,
			&pkt_null_aes_xcbc),

		TEST_CASE_NAMED_WITH_DATA(
			"Outbound fragmented packet",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_fragmented,
			&pkt_aes_128_gcm_frag),

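		/* Inbound counterparts of the outbound known-vector cases
		 * above; test_ipsec_inline_proto_known_vec_inb derives the
		 * inbound ESP packet from the outbound vector when the
		 * vector is specified in the egress direction.
		 */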
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 192)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_192_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-GCM 256)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_128_cbc_null),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha256),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA384 [24B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha384),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 AES-CBC 128 HMAC-SHA512 [32B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha512),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv6 AES-GCM 256)",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb, &pkt_aes_256_gcm_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv6 AES-CBC 128 HMAC-SHA256 [16B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_aes_128_cbc_hmac_sha256_v6),
		TEST_CASE_NAMED_WITH_DATA(
			"Inbound known vector (ESP tunnel mode IPv4 NULL AES-XCBC-MAC [12B ICV])",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_known_vec_inb,
			&pkt_null_aes_xcbc),

		TEST_CASE_NAMED_ST(
			"Combined test alg list",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_display_list),

		TEST_CASE_NAMED_ST(
			"UDP encapsulation",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_udp_encap),
		TEST_CASE_NAMED_ST(
			"UDP encapsulation ports verification test",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_udp_ports_verify),
		TEST_CASE_NAMED_ST(
			"Negative test: ICV corruption",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_err_icv_corrupt),
		TEST_CASE_NAMED_ST(
			"Tunnel dst addr verification",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_dst_addr_verify),
		TEST_CASE_NAMED_ST(
			"Tunnel src and dst addr verification",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_tunnel_src_dst_addr_verify),
		TEST_CASE_NAMED_ST(
			"Inner IP checksum",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_inner_ip_csum),
		TEST_CASE_NAMED_ST(
			"Inner L4 checksum",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_ipsec_inline_proto_inner_l4_csum),
"Tunnel IPv4 in IPv4", 2689 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2690 test_ipsec_inline_proto_tunnel_v4_in_v4), 2691 TEST_CASE_NAMED_ST( 2692 "Tunnel IPv6 in IPv6", 2693 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2694 test_ipsec_inline_proto_tunnel_v6_in_v6), 2695 TEST_CASE_NAMED_ST( 2696 "Tunnel IPv4 in IPv6", 2697 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2698 test_ipsec_inline_proto_tunnel_v4_in_v6), 2699 TEST_CASE_NAMED_ST( 2700 "Tunnel IPv6 in IPv4", 2701 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2702 test_ipsec_inline_proto_tunnel_v6_in_v4), 2703 TEST_CASE_NAMED_ST( 2704 "Transport IPv4", 2705 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2706 test_ipsec_inline_proto_transport_v4), 2707 TEST_CASE_NAMED_ST( 2708 "Transport l4 checksum", 2709 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2710 test_ipsec_inline_proto_transport_l4_csum), 2711 TEST_CASE_NAMED_ST( 2712 "Statistics: success", 2713 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2714 test_ipsec_inline_proto_stats), 2715 TEST_CASE_NAMED_ST( 2716 "Fragmented packet", 2717 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2718 test_ipsec_inline_proto_pkt_fragment), 2719 TEST_CASE_NAMED_ST( 2720 "Tunnel header copy DF (inner 0)", 2721 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2722 test_ipsec_inline_proto_copy_df_inner_0), 2723 TEST_CASE_NAMED_ST( 2724 "Tunnel header copy DF (inner 1)", 2725 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2726 test_ipsec_inline_proto_copy_df_inner_1), 2727 TEST_CASE_NAMED_ST( 2728 "Tunnel header set DF 0 (inner 1)", 2729 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2730 test_ipsec_inline_proto_set_df_0_inner_1), 2731 TEST_CASE_NAMED_ST( 2732 "Tunnel header set DF 1 (inner 0)", 2733 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2734 test_ipsec_inline_proto_set_df_1_inner_0), 2735 TEST_CASE_NAMED_ST( 2736 "Tunnel header IPv4 copy DSCP (inner 0)", 2737 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2738 test_ipsec_inline_proto_ipv4_copy_dscp_inner_0), 2739 TEST_CASE_NAMED_ST( 2740 "Tunnel header IPv4 copy DSCP (inner 1)", 2741 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2742 test_ipsec_inline_proto_ipv4_copy_dscp_inner_1), 2743 TEST_CASE_NAMED_ST( 2744 "Tunnel header IPv4 set DSCP 0 (inner 1)", 2745 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2746 test_ipsec_inline_proto_ipv4_set_dscp_0_inner_1), 2747 TEST_CASE_NAMED_ST( 2748 "Tunnel header IPv4 set DSCP 1 (inner 0)", 2749 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2750 test_ipsec_inline_proto_ipv4_set_dscp_1_inner_0), 2751 TEST_CASE_NAMED_ST( 2752 "Tunnel header IPv6 copy DSCP (inner 0)", 2753 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2754 test_ipsec_inline_proto_ipv6_copy_dscp_inner_0), 2755 TEST_CASE_NAMED_ST( 2756 "Tunnel header IPv6 copy DSCP (inner 1)", 2757 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2758 test_ipsec_inline_proto_ipv6_copy_dscp_inner_1), 2759 TEST_CASE_NAMED_ST( 2760 "Tunnel header IPv6 set DSCP 0 (inner 1)", 2761 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2762 test_ipsec_inline_proto_ipv6_set_dscp_0_inner_1), 2763 TEST_CASE_NAMED_ST( 2764 "Tunnel header IPv6 set DSCP 1 (inner 0)", 2765 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2766 test_ipsec_inline_proto_ipv6_set_dscp_1_inner_0), 2767 TEST_CASE_NAMED_ST( 2768 "Tunnel header IPv6 copy FLABEL (inner 0)", 2769 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2770 test_ipsec_inline_proto_ipv6_copy_flabel_inner_0), 2771 TEST_CASE_NAMED_ST( 2772 "Tunnel header IPv6 copy FLABEL (inner 
1)", 2773 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2774 test_ipsec_inline_proto_ipv6_copy_flabel_inner_1), 2775 TEST_CASE_NAMED_ST( 2776 "Tunnel header IPv6 set FLABEL 0 (inner 1)", 2777 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2778 test_ipsec_inline_proto_ipv6_set_flabel_0_inner_1), 2779 TEST_CASE_NAMED_ST( 2780 "Tunnel header IPv6 set FLABEL 1 (inner 0)", 2781 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2782 test_ipsec_inline_proto_ipv6_set_flabel_1_inner_0), 2783 TEST_CASE_NAMED_ST( 2784 "Tunnel header IPv4 decrement inner TTL", 2785 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2786 test_ipsec_inline_proto_ipv4_ttl_decrement), 2787 TEST_CASE_NAMED_ST( 2788 "Tunnel header IPv6 decrement inner hop limit", 2789 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2790 test_ipsec_inline_proto_ipv6_hop_limit_decrement), 2791 TEST_CASE_NAMED_ST( 2792 "IV generation", 2793 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2794 test_ipsec_inline_proto_iv_gen), 2795 TEST_CASE_NAMED_ST( 2796 "SA soft expiry with packet limit", 2797 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2798 test_ipsec_inline_proto_sa_pkt_soft_expiry), 2799 TEST_CASE_NAMED_ST( 2800 "SA soft expiry with byte limit", 2801 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2802 test_ipsec_inline_proto_sa_byte_soft_expiry), 2803 TEST_CASE_NAMED_ST( 2804 "SA hard expiry with packet limit", 2805 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2806 test_ipsec_inline_proto_sa_pkt_hard_expiry), 2807 TEST_CASE_NAMED_ST( 2808 "SA hard expiry with byte limit", 2809 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2810 test_ipsec_inline_proto_sa_byte_hard_expiry), 2811 2812 TEST_CASE_NAMED_WITH_DATA( 2813 "Antireplay with window size 1024", 2814 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2815 test_ipsec_inline_proto_pkt_antireplay1024, 2816 &pkt_aes_128_gcm), 2817 TEST_CASE_NAMED_WITH_DATA( 2818 "Antireplay with window size 2048", 2819 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2820 test_ipsec_inline_proto_pkt_antireplay2048, 2821 &pkt_aes_128_gcm), 2822 TEST_CASE_NAMED_WITH_DATA( 2823 "Antireplay with window size 4096", 2824 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2825 test_ipsec_inline_proto_pkt_antireplay4096, 2826 &pkt_aes_128_gcm), 2827 TEST_CASE_NAMED_WITH_DATA( 2828 "ESN and Antireplay with window size 1024", 2829 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2830 test_ipsec_inline_proto_pkt_esn_antireplay1024, 2831 &pkt_aes_128_gcm), 2832 TEST_CASE_NAMED_WITH_DATA( 2833 "ESN and Antireplay with window size 2048", 2834 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2835 test_ipsec_inline_proto_pkt_esn_antireplay2048, 2836 &pkt_aes_128_gcm), 2837 TEST_CASE_NAMED_WITH_DATA( 2838 "ESN and Antireplay with window size 4096", 2839 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2840 test_ipsec_inline_proto_pkt_esn_antireplay4096, 2841 &pkt_aes_128_gcm), 2842 2843 TEST_CASE_NAMED_WITH_DATA( 2844 "IPv4 Reassembly with 2 fragments", 2845 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2846 test_inline_ip_reassembly, &ipv4_2frag_vector), 2847 TEST_CASE_NAMED_WITH_DATA( 2848 "IPv6 Reassembly with 2 fragments", 2849 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2850 test_inline_ip_reassembly, &ipv6_2frag_vector), 2851 TEST_CASE_NAMED_WITH_DATA( 2852 "IPv4 Reassembly with 4 fragments", 2853 ut_setup_inline_ipsec, ut_teardown_inline_ipsec, 2854 test_inline_ip_reassembly, &ipv4_4frag_vector), 2855 TEST_CASE_NAMED_WITH_DATA( 2856 "IPv6 Reassembly with 4 fragments", 2857 ut_setup_inline_ipsec, 
		TEST_CASE_NAMED_WITH_DATA(
			"IPv6 Reassembly with 4 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv6_4frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with 5 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_5frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv6 Reassembly with 5 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv6_5frag_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with incomplete fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_incomplete_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with overlapping fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_overlap_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with out of order fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_out_of_order_vector),
		TEST_CASE_NAMED_WITH_DATA(
			"IPv4 Reassembly with burst of 4 fragments",
			ut_setup_inline_ipsec, ut_teardown_inline_ipsec,
			test_inline_ip_reassembly, &ipv4_4frag_burst_vector),

		TEST_CASES_END() /**< NULL terminate unit test array */
	},
};

static int
test_inline_ipsec(void)
{
	inline_ipsec_testsuite.setup = inline_ipsec_testsuite_setup;
	inline_ipsec_testsuite.teardown = inline_ipsec_testsuite_teardown;
	return unit_test_suite_runner(&inline_ipsec_testsuite);
}

static int
test_event_inline_ipsec(void)
{
	inline_ipsec_testsuite.setup = event_inline_ipsec_testsuite_setup;
	inline_ipsec_testsuite.teardown = event_inline_ipsec_testsuite_teardown;
	return unit_test_suite_runner(&inline_ipsec_testsuite);
}

#endif /* !RTE_EXEC_ENV_WINDOWS */

REGISTER_TEST_COMMAND(inline_ipsec_autotest, test_inline_ipsec);
REGISTER_TEST_COMMAND(event_inline_ipsec_autotest, test_event_inline_ipsec);
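/*
 * Usage sketch (not exercised by the code above): when built into the
 * dpdk-test application, these suites are run via the command names
 * registered above, e.g. from the interactive prompt:
 *
 *   RTE>> inline_ipsec_autotest
 *   RTE>> event_inline_ipsec_autotest
 */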