/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_acl.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
#include <rte_security.h>

#include "ipsec.h"
#include "parser.h"

#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1

#define MAX_JUMBO_PKT_LEN 9600

#define MEMPOOL_CACHE_SIZE 256

#define NB_MBUF	(32000)

#define CDEV_QUEUE_DESC 2048
#define CDEV_MAP_ENTRIES 16384
#define CDEV_MP_NB_OBJS 1024
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define NB_SOCKETS 4

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET	3

#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024

#define UNPROTECTED_PORT(portid) (unprotected_port_mask & (1 << (portid)))

/*
 * Configurable number of RX/TX ring descriptors
 */
#define IPSEC_SECGW_RX_DESC_DEFAULT 1024
#define IPSEC_SECGW_TX_DESC_DEFAULT 1024
static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;

#if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((a) & 0xff) << 56) | \
	((uint64_t)((b) & 0xff) << 48) | \
	((uint64_t)((c) & 0xff) << 40) | \
	((uint64_t)((d) & 0xff) << 32) | \
	((uint64_t)((e) & 0xff) << 24) | \
	((uint64_t)((f) & 0xff) << 16) | \
	((uint64_t)((g) & 0xff) << 8) | \
	((uint64_t)(h) & 0xff))
#else
#define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
	(((uint64_t)((h) & 0xff) << 56) | \
	((uint64_t)((g) & 0xff) << 48) | \
	((uint64_t)((f) & 0xff) << 40) | \
	((uint64_t)((e) & 0xff) << 32) | \
	((uint64_t)((d) & 0xff) << 24) | \
	((uint64_t)((c) & 0xff) << 16) | \
	((uint64_t)((b) & 0xff) << 8) | \
	((uint64_t)(a) & 0xff))
#endif
#define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))

#define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
		(addr)->addr_bytes[2], (addr)->addr_bytes[3], \
		(addr)->addr_bytes[4], (addr)->addr_bytes[5], \
		0, 0)

/* port/source ethernet addr and destination ethernet addr */
struct ethaddr_info {
	uint64_t src, dst;
};

struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};
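/*
 * Note: the destination MAC addresses above are example/default values.
 * They can be overridden per port at runtime via add_dst_ethaddr(),
 * e.g. when the configuration file supplies a neighbour MAC entry.
 */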
#define CMD_LINE_OPT_CONFIG		"config"
#define CMD_LINE_OPT_SINGLE_SA		"single-sa"
#define CMD_LINE_OPT_CRYPTODEV_MASK	"cryptodev_mask"
#define CMD_LINE_OPT_RX_OFFLOAD		"rxoffload"
#define CMD_LINE_OPT_TX_OFFLOAD		"txoffload"

enum {
	/* long options mapped to a short option */

	/* first long only option value must be >= 256, so that we won't
	 * conflict with short options
	 */
	CMD_LINE_OPT_MIN_NUM = 256,
	CMD_LINE_OPT_CONFIG_NUM,
	CMD_LINE_OPT_SINGLE_SA_NUM,
	CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
	CMD_LINE_OPT_RX_OFFLOAD_NUM,
	CMD_LINE_OPT_TX_OFFLOAD_NUM,
};

static const struct option lgopts[] = {
	{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
	{CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
	{CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
	{CMD_LINE_OPT_RX_OFFLOAD, 1, 0, CMD_LINE_OPT_RX_OFFLOAD_NUM},
	{CMD_LINE_OPT_TX_OFFLOAD, 1, 0, CMD_LINE_OPT_TX_OFFLOAD_NUM},
	{NULL, 0, 0, 0}
};

/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint64_t enabled_cryptodev_mask = UINT64_MAX;
static uint32_t unprotected_port_mask;
static int32_t promiscuous_on = 1;
static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
static uint32_t frame_size;

/*
 * RX/TX HW offload capabilities to enable/use on ethernet ports.
 * By default all capabilities are enabled.
 */
static uint64_t dev_rx_offload = UINT64_MAX;
static uint64_t dev_tx_offload = UINT64_MAX;

/* application wide librte_ipsec/SA parameters */
struct app_sa_prm app_sa_prm = {.enable = 0};

struct lcore_rx_queue {
	uint16_t port_id;
	uint8_t queue_id;
} __rte_cache_aligned;

struct lcore_params {
	uint16_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
} __rte_cache_aligned;

static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];

static struct lcore_params *lcore_params;
static uint16_t nb_lcore_params;

static struct rte_hash *cdev_map_in;
static struct rte_hash *cdev_map_out;

struct buffer {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
};

struct lcore_conf {
	uint16_t nb_rx_queue;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
	struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
	struct ipsec_ctx inbound;
	struct ipsec_ctx outbound;
	struct rt_ctx *rt4_ctx;
	struct rt_ctx *rt6_ctx;
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode	= ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
				ETH_RSS_TCP | ETH_RSS_SCTP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

static struct socket_ctx socket_ctx[NB_SOCKETS];
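/*
 * Classify one received packet: strip the Ethernet header, queue ESP
 * packets for IPsec processing and plain IPv4/IPv6 packets for
 * forwarding; anything else is dropped. For packets already handled by
 * inline security offload, recover the originating SA from the session
 * userdata and stash it in the mbuf private area.
 */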
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	uint8_t *nlp;
	struct ether_hdr *eth;

	eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
		if (*nlp == IPPROTO_ESP)
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		else {
			t->ip4.data[t->ip4.num] = nlp;
			t->ip4.pkts[(t->ip4.num)++] = pkt;
		}
		pkt->l2_len = 0;
		pkt->l3_len = sizeof(struct ip);
	} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
		if (*nlp == IPPROTO_ESP)
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		else {
			t->ip6.data[t->ip6.num] = nlp;
			t->ip6.pkts[(t->ip6.num)++] = pkt;
		}
		pkt->l2_len = 0;
		pkt->l3_len = sizeof(struct ip6_hdr);
	} else {
		/* Unknown/Unsupported type, drop the packet and stop here
		 * (the mbuf must not be touched after it has been freed).
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
			rte_be_to_cpu_16(eth->ether_type));
		rte_pktmbuf_free(pkt);
		return;
	}

	/* Check if the packet has been processed inline. For inline protocol
	 * processed packets, the metadata in the mbuf can be used to identify
	 * the security processing done on the packet. The metadata will be
	 * used to retrieve the application registered userdata associated
	 * with the security session.
	 */

	if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
		struct ipsec_sa *sa;
		struct ipsec_mbuf_metadata *priv;
		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
						rte_eth_dev_get_sec_ctx(
						pkt->port);

		/* Retrieve the userdata registered. Here, the userdata
		 * registered is the SA pointer.
		 */

		sa = (struct ipsec_sa *)
				rte_security_get_userdata(ctx, pkt->udata64);

		if (sa == NULL) {
			/* userdata could not be retrieved */
			return;
		}

		/* Save SA as priv member in mbuf. This will be used in the
		 * IPsec selector(SP-SA) check.
		 */

		priv = get_priv(pkt);
		priv->sa = sa;
	}
}
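/*
 * Classify a burst of received packets into the ipsec/ip4/ip6 lists of
 * the ipsec_traffic structure, prefetching PREFETCH_OFFSET packets ahead
 * so that the header of pkts[i + PREFETCH_OFFSET] is being pulled into
 * cache while pkts[i] is classified.
 */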
static inline void
prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
		uint16_t nb_pkts)
{
	int32_t i;

	t->ipsec.num = 0;
	t->ip4.num = 0;
	t->ip6.num = 0;

	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
					void *));
		prepare_one_packet(pkts[i], t);
	}
	/* Process left packets */
	for (; i < nb_pkts; i++)
		prepare_one_packet(pkts[i], t);
}

static inline void
prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
		const struct lcore_conf *qconf)
{
	struct ip *ip;
	struct ether_hdr *ethhdr;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);

	ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);

	if (ip->ip_v == IPVERSION) {
		pkt->ol_flags |= qconf->outbound.ipv4_offloads;
		pkt->l3_len = sizeof(struct ip);
		pkt->l2_len = ETHER_HDR_LEN;

		ip->ip_sum = 0;

		/* calculate IPv4 cksum in SW */
		if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
			ip->ip_sum = rte_ipv4_cksum((struct ipv4_hdr *)ip);

		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	} else {
		pkt->ol_flags |= qconf->outbound.ipv6_offloads;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
	}

	memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
			sizeof(struct ether_addr));
	memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
			sizeof(struct ether_addr));
}

static inline void
prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port,
		const struct lcore_conf *qconf)
{
	int32_t i;
	const int32_t prefetch_offset = 2;

	for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
		rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
		prepare_tx_pkt(pkts[i], port, qconf);
	}
	/* Process left packets */
	for (; i < nb_pkts; i++)
		prepare_tx_pkt(pkts[i], port, qconf);
}

/* Send burst of packets on an output interface */
static inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
	struct rte_mbuf **m_table;
	int32_t ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	prepare_tx_burst(m_table, n, port, qconf);

	ret = rte_eth_tx_burst(port, queueid, m_table, n);
	if (unlikely(ret < n)) {
		do {
			rte_pktmbuf_free(m_table[ret]);
		} while (++ret < n);
	}

	return 0;
}

/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
send_single_packet(struct rte_mbuf *m, uint16_t port)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = m;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}
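/*
 * Apply the inbound security policy (an ACL classification) to a list of
 * IP packets. BYPASS matches are kept as plain traffic and DISCARD
 * matches are freed. A PROTECT match is only accepted if the packet
 * really went through IPsec processing (entries at index >= lim were
 * appended after lookaside crypto, earlier entries must carry
 * PKT_RX_SEC_OFFLOAD from inline offload) and the SA referenced by the
 * policy matches the SA that processed the packet (inbound_sa_check()).
 */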
static inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
		uint16_t lim)
{
	struct rte_mbuf *m;
	uint32_t i, j, res, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (res & BYPASS) {
			ip->pkts[j++] = m;
			continue;
		}
		if (res & DISCARD) {
			rte_pktmbuf_free(m);
			continue;
		}

		/* Only check SPI match for processed IPSec packets */
		if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
			rte_pktmbuf_free(m);
			continue;
		}

		sa_idx = ip->res[i] & PROTECT_MASK;
		if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
				!inbound_sa_check(sa, m, sa_idx)) {
			rte_pktmbuf_free(m);
			continue;
		}
		ip->pkts[j++] = m;
	}
	ip->num = j;
}

static void
split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
{
	uint32_t i, n4, n6;
	struct ip *ip;
	struct rte_mbuf *m;

	n4 = trf->ip4.num;
	n6 = trf->ip6.num;

	for (i = 0; i < num; i++) {

		m = mb[i];
		ip = rte_pktmbuf_mtod(m, struct ip *);

		if (ip->ip_v == IPVERSION) {
			trf->ip4.pkts[n4] = m;
			trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
					uint8_t *, offsetof(struct ip, ip_p));
			n4++;
		} else if (ip->ip_v == IP6_VERSION) {
			trf->ip6.pkts[n6] = m;
			trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
					uint8_t *,
					offsetof(struct ip6_hdr, ip6_nxt));
			n6++;
		} else
			rte_pktmbuf_free(m);
	}

	trf->ip4.num = n4;
	trf->ip6.num = n6;
}


static inline void
process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	uint16_t nb_pkts_in, n_ip4, n_ip6;

	n_ip4 = traffic->ip4.num;
	n_ip6 = traffic->ip6.num;

	if (app_sa_prm.enable == 0) {
		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.num, MAX_PKT_BURST);
		split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
	} else {
		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}

	inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
			n_ip4);

	inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
			n_ip6);
}

static inline void
outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
		struct traffic_type *ipsec)
{
	struct rte_mbuf *m;
	uint32_t i, j, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		sa_idx = ip->res[i] & PROTECT_MASK;
		if (ip->res[i] & DISCARD)
			rte_pktmbuf_free(m);
		else if (ip->res[i] & BYPASS)
			ip->pkts[j++] = m;
		else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
			ipsec->res[ipsec->num] = sa_idx;
			ipsec->pkts[ipsec->num++] = m;
		} else /* invalid SA idx */
			rte_pktmbuf_free(m);
	}
	ip->num = j;
}
static inline void
process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint16_t idx, nb_pkts_out, i;

	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec.num; i++)
		rte_pktmbuf_free(traffic->ipsec.pkts[i]);

	traffic->ipsec.num = 0;

	outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);

	outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);

	if (app_sa_prm.enable == 0) {

		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.res, traffic->ipsec.num,
				MAX_PKT_BURST);

		for (i = 0; i < nb_pkts_out; i++) {
			m = traffic->ipsec.pkts[i];
			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
			if (ip->ip_v == IPVERSION) {
				idx = traffic->ip4.num++;
				traffic->ip4.pkts[idx] = m;
			} else {
				idx = traffic->ip6.num++;
				traffic->ip6.pkts[idx] = m;
			}
		}
	} else {
		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}
}

static inline void
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_in, i, idx;

	/* Drop any IPv4 traffic from unprotected ports */
	for (i = 0; i < traffic->ip4.num; i++)
		rte_pktmbuf_free(traffic->ip4.pkts[i]);

	traffic->ip4.num = 0;

	/* Drop any IPv6 traffic from unprotected ports */
	for (i = 0; i < traffic->ip6.num; i++)
		rte_pktmbuf_free(traffic->ip6.pkts[i]);

	traffic->ip6.num = 0;

	if (app_sa_prm.enable == 0) {

		nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.num, MAX_PKT_BURST);

		for (i = 0; i < nb_pkts_in; i++) {
			m = traffic->ipsec.pkts[i];
			struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
			if (ip->ip_v == IPVERSION) {
				idx = traffic->ip4.num++;
				traffic->ip4.pkts[idx] = m;
			} else {
				idx = traffic->ip6.num++;
				traffic->ip6.pkts[idx] = m;
			}
		}
	} else {
		inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}
}

static inline void
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
		struct ipsec_traffic *traffic)
{
	struct rte_mbuf *m;
	uint32_t nb_pkts_out, i, n;
	struct ip *ip;

	/* Drop any IPsec traffic from protected ports */
	for (i = 0; i < traffic->ipsec.num; i++)
		rte_pktmbuf_free(traffic->ipsec.pkts[i]);

	n = 0;

	for (i = 0; i < traffic->ip4.num; i++) {
		traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
		traffic->ipsec.res[n++] = single_sa_idx;
	}

	for (i = 0; i < traffic->ip6.num; i++) {
		traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
		traffic->ipsec.res[n++] = single_sa_idx;
	}

	traffic->ip4.num = 0;
	traffic->ip6.num = 0;
	traffic->ipsec.num = n;

	if (app_sa_prm.enable == 0) {

		nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
				traffic->ipsec.res, traffic->ipsec.num,
				MAX_PKT_BURST);

		/* They all use the same SA (ip4 or ip6 tunnel) */
		m = traffic->ipsec.pkts[0];
		ip = rte_pktmbuf_mtod(m, struct ip *);
		if (ip->ip_v == IPVERSION) {
			traffic->ip4.num = nb_pkts_out;
			for (i = 0; i < nb_pkts_out; i++)
				traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
		} else {
			traffic->ip6.num = nb_pkts_out;
			for (i = 0; i < nb_pkts_out; i++)
				traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
		}
	} else {
		outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
			traffic->ipsec.saptr, traffic->ipsec.num);
		ipsec_process(ipsec_ctx, traffic);
	}
}
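/*
 * For packets handled by inline security offload the output port is taken
 * from the SA stored in the mbuf private data instead of an LPM lookup.
 * The return value mimics the corresponding LPM result: for IPv4 the port
 * id is or'ed with RTE_LPM_LOOKUP_SUCCESS (0 on failure), for IPv6 it is
 * the plain port id (-1 on failure).
 */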
static inline int32_t
get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(pkt);

	sa = priv->sa;
	if (unlikely(sa == NULL)) {
		RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
		goto fail;
	}

	if (is_ipv6)
		return sa->portid;

	/* else */
	return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);

fail:
	if (is_ipv6)
		return -1;

	/* else */
	return 0;
}

static inline void
route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	uint32_t hop[MAX_PKT_BURST * 2];
	uint32_t dst_ip[MAX_PKT_BURST * 2];
	int32_t pkt_hop = 0;
	uint16_t i, offset;
	uint16_t lpm_pkts = 0;

	if (nb_pkts == 0)
		return;

	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	 */

	for (i = 0; i < nb_pkts; i++) {
		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			 */
			offset = offsetof(struct ip, ip_dst);
			dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i],
					uint32_t *, offset);
			dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
			lpm_pkts++;
		}
	}

	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);

	lpm_pkts = 0;

	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkts[i], 0);
		} else {
			/* Need to use hop returned by lookup */
			pkt_hop = hop[lpm_pkts++];
		}

		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		send_single_packet(pkts[i], pkt_hop & 0xff);
	}
}
static inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
	int32_t hop[MAX_PKT_BURST * 2];
	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
	uint8_t *ip6_dst;
	int32_t pkt_hop = 0;
	uint16_t i, offset;
	uint16_t lpm_pkts = 0;

	if (nb_pkts == 0)
		return;

	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	 */

	for (i = 0; i < nb_pkts; i++) {
		if (!(pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			 */
			offset = offsetof(struct ip6_hdr, ip6_dst);
			ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *,
					offset);
			memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16);
			lpm_pkts++;
		}
	}

	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
			lpm_pkts);

	lpm_pkts = 0;

	for (i = 0; i < nb_pkts; i++) {
		if (pkts[i]->ol_flags & PKT_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkts[i], 1);
		} else {
			/* Need to use hop returned by lookup */
			pkt_hop = hop[lpm_pkts++];
		}

		if (pkt_hop == -1) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		send_single_packet(pkts[i], pkt_hop & 0xff);
	}
}

static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
		uint8_t nb_pkts, uint16_t portid)
{
	struct ipsec_traffic traffic;

	prepare_traffic(pkts, &traffic, nb_pkts);

	if (unlikely(single_sa)) {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
		else
			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
	} else {
		if (UNPROTECTED_PORT(portid))
			process_pkts_inbound(&qconf->inbound, &traffic);
		else
			process_pkts_outbound(&qconf->outbound, &traffic);
	}

	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}

static inline void
drain_tx_buffers(struct lcore_conf *qconf)
{
	struct buffer *buf;
	uint32_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		buf = &qconf->tx_mbufs[portid];
		if (buf->len == 0)
			continue;
		send_burst(qconf, buf->len, portid);
		buf->len = 0;
	}
}

static inline void
drain_crypto_buffers(struct lcore_conf *qconf)
{
	uint32_t i;
	struct ipsec_ctx *ctx;

	/* drain inbound buffers */
	ctx = &qconf->inbound;
	for (i = 0; i != ctx->nb_qps; i++) {
		if (ctx->tbl[i].len != 0)
			enqueue_cop_burst(ctx->tbl + i);
	}

	/* drain outbound buffers */
	ctx = &qconf->outbound;
	for (i = 0; i != ctx->nb_qps; i++) {
		if (ctx->tbl[i].len != 0)
			enqueue_cop_burst(ctx->tbl + i);
	}
}

static void
drain_inbound_crypto_queues(const struct lcore_conf *qconf,
		struct ipsec_ctx *ctx)
{
	uint32_t n;
	struct ipsec_traffic trf;

	if (app_sa_prm.enable == 0) {

		/* dequeue packets from crypto-queue */
		n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
			RTE_DIM(trf.ipsec.pkts));

		trf.ip4.num = 0;
		trf.ip6.num = 0;

		/* split traffic by ipv4-ipv6 */
		split46_traffic(&trf, trf.ipsec.pkts, n);
	} else
		ipsec_cqp_process(ctx, &trf);

	/* process ipv4 packets */
	if (trf.ip4.num != 0) {
		inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
	}

	/* process ipv6 packets */
	if (trf.ip6.num != 0) {
		inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
	}
}
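/*
 * Dequeue completed crypto operations for outbound traffic and route the
 * resulting (now encrypted) packets. Unlike the inbound variant no SP
 * check is needed here: the outbound policy was already applied before
 * the packets were enqueued for crypto processing.
 */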
static void
drain_outbound_crypto_queues(const struct lcore_conf *qconf,
		struct ipsec_ctx *ctx)
{
	uint32_t n;
	struct ipsec_traffic trf;

	if (app_sa_prm.enable == 0) {

		/* dequeue packets from crypto-queue */
		n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
			RTE_DIM(trf.ipsec.pkts));

		trf.ip4.num = 0;
		trf.ip6.num = 0;

		/* split traffic by ipv4-ipv6 */
		split46_traffic(&trf, trf.ipsec.pkts, n);
	} else
		ipsec_cqp_process(ctx, &trf);

	/* process ipv4 packets */
	if (trf.ip4.num != 0)
		route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);

	/* process ipv6 packets */
	if (trf.ip6.num != 0)
		route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
}

/* main processing loop */
static int32_t
main_loop(__attribute__((unused)) void *dummy)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint32_t lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int32_t i, nb_rx;
	uint16_t portid;
	uint8_t queueid;
	struct lcore_conf *qconf;
	int32_t socket_id;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct lcore_rx_queue *rxql;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	qconf->inbound.cdev_map = cdev_map_in;
	qconf->inbound.session_pool = socket_ctx[socket_id].session_pool;
	qconf->inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	qconf->outbound.cdev_map = cdev_map_out;
	qconf->outbound.session_pool = socket_ctx[socket_id].session_pool;
	qconf->outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
			lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (1) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			drain_crypto_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		for (i = 0; i < qconf->nb_rx_queue; ++i) {

			/* Read packets from RX queues */
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);

			if (nb_rx > 0)
				process_pkts(qconf, pkts, nb_rx, portid);

			/* dequeue and process completed crypto-ops */
			if (UNPROTECTED_PORT(portid))
				drain_inbound_crypto_queues(qconf,
					&qconf->inbound);
			else
				drain_outbound_crypto_queues(qconf,
					&qconf->outbound);
		}
	}
}
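/*
 * Sanity-check the port/queue/lcore mappings supplied via --config:
 * every referenced lcore must be enabled in the EAL core mask and every
 * referenced port must be part of the enabled port mask and be present.
 */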
static int32_t
check_params(void)
{
	uint8_t lcore;
	uint16_t portid;
	uint16_t i;
	int32_t socket_id;

	if (lcore_params == NULL) {
		printf("Error: No port/queue/core mappings\n");
		return -1;
	}

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		if (!rte_lcore_is_enabled(lcore)) {
			printf("error: lcore %hhu is not enabled in "
				"lcore mask\n", lcore);
			return -1;
		}
		socket_id = rte_lcore_to_socket_id(lcore);
		if (socket_id != 0 && numa_on == 0) {
			printf("warning: lcore %hhu is on socket %d "
				"with numa off\n",
				lcore, socket_id);
		}
		portid = lcore_params[i].port_id;
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("port %u is not enabled in port mask\n", portid);
			return -1;
		}
		if (!rte_eth_dev_is_valid_port(portid)) {
			printf("port %u is not present on the board\n", portid);
			return -1;
		}
	}
	return 0;
}

static uint8_t
get_port_nb_rx_queues(const uint16_t port)
{
	int32_t queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port &&
				lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}

static int32_t
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
					nb_rx_queue + 1, lcore);
			return -1;
		}
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].nb_rx_queue++;
	}
	return 0;
}

/* display usage */
static void
print_usage(const char *prgname)
{
	fprintf(stderr, "%s [EAL options] --"
		" -p PORTMASK"
		" [-P]"
		" [-u PORTMASK]"
		" [-j FRAMESIZE]"
		" [-l]"
		" [-w REPLAY_WINDOW_SIZE]"
		" [-e]"
		" [-a]"
		" -f CONFIG_FILE"
		" --config (port,queue,lcore)[,(port,queue,lcore)]"
		" [--single-sa SAIDX]"
		" [--cryptodev_mask MASK]"
		" [--" CMD_LINE_OPT_RX_OFFLOAD " RX_OFFLOAD_MASK]"
		" [--" CMD_LINE_OPT_TX_OFFLOAD " TX_OFFLOAD_MASK]"
		"\n\n"
		" -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
		" -P : Enable promiscuous mode\n"
		" -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
		" -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\n"
		"   packet size\n"
		" -l enables code-path that uses librte_ipsec\n"
		" -w REPLAY_WINDOW_SIZE specifies IPsec SQN replay window\n"
		"   size for each SA\n"
		" -e enables ESN\n"
		" -a enables SA SQN atomic behaviour\n"
		" -f CONFIG_FILE: Configuration file\n"
		" --config (port,queue,lcore): Rx queue configuration\n"
		" --single-sa SAIDX: Use single SA index for outbound traffic,\n"
		"   bypassing the SP\n"
		" --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
		"   devices to configure\n"
		" --" CMD_LINE_OPT_RX_OFFLOAD
		": bitmask of the RX HW offload capabilities to enable/use\n"
		"   (DEV_RX_OFFLOAD_*)\n"
		" --" CMD_LINE_OPT_TX_OFFLOAD
		": bitmask of the TX HW offload capabilities to enable/use\n"
		"   (DEV_TX_OFFLOAD_*)\n"
		"\n",
		prgname);
}

static int
parse_mask(const char *str, uint64_t *val)
{
	char *end;
	unsigned long t;

	errno = 0;
	t = strtoul(str, &end, 0);
	if (errno != 0 || end[0] != 0)
		return -EINVAL;

	*val = t;
	return 0;
}

static int32_t
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	if ((pm == 0) && errno)
		return -1;

	return pm;
}

static int32_t
parse_decimal(const char *str)
{
	char *end = NULL;
	unsigned long num;

	num = strtoul(str, &end, 10);
	if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	return num;
}
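/*
 * Parse the --config option, a list of (port,queue,lcore) triples.
 * For example (illustrative values), --config="(0,0,1),(1,0,2)" assigns
 * RX queue 0 of port 0 to lcore 1 and RX queue 0 of port 1 to lcore 2.
 */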
static int32_t
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p0 = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int32_t i;
	uint32_t size;

	nb_lcore_params = 0;

	while ((p = strchr(p0, '(')) != NULL) {
		++p;
		p0 = strchr(p, ')');
		if (p0 == NULL)
			return -1;

		size = p0 - p;
		if (size >= sizeof(s))
			return -1;

		snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
				_NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		lcore_params_array[nb_lcore_params].port_id =
			(uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id =
			(uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id =
			(uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}

static void
print_app_sa_prm(const struct app_sa_prm *prm)
{
	printf("librte_ipsec usage: %s\n",
		(prm->enable == 0) ? "disabled" : "enabled");

	if (prm->enable == 0)
		return;

	printf("replay window size: %u\n", prm->window_size);
	printf("ESN: %s\n", (prm->enable_esn == 0) ? "disabled" : "enabled");
	printf("SA flags: %#" PRIx64 "\n", prm->flags);
}

static int32_t
parse_args(int32_t argc, char **argv)
{
	int32_t opt, ret;
	char **argvopt;
	int32_t option_index;
	char *prgname = argv[0];
	int32_t f_present = 0;

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'P':
			printf("Promiscuous mode selected\n");
			promiscuous_on = 1;
			break;
		case 'u':
			unprotected_port_mask = parse_portmask(optarg);
			if (unprotected_port_mask == 0) {
				printf("invalid unprotected portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case 'f':
			if (f_present == 1) {
				printf("\"-f\" option present more than "
					"once!\n");
				print_usage(prgname);
				return -1;
			}
			if (parse_cfg_file(optarg) < 0) {
				printf("parsing file \"%s\" failed\n",
					optarg);
				print_usage(prgname);
				return -1;
			}
			f_present = 1;
			break;
		case 'j':
			{
				int32_t size = parse_decimal(optarg);
				if (size <= 1518) {
					printf("Invalid jumbo frame size\n");
					if (size < 0) {
						print_usage(prgname);
						return -1;
					}
					printf("Using default value 9000\n");
					frame_size = 9000;
				} else {
					frame_size = size;
				}
			}
			printf("Enabled jumbo frames size %u\n", frame_size);
			break;
		case 'l':
			app_sa_prm.enable = 1;
			break;
		case 'w':
			app_sa_prm.enable = 1;
			app_sa_prm.window_size = parse_decimal(optarg);
			break;
		case 'e':
			app_sa_prm.enable = 1;
			app_sa_prm.enable_esn = 1;
			break;
		case 'a':
			app_sa_prm.enable = 1;
			app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
			break;
		case CMD_LINE_OPT_CONFIG_NUM:
			ret = parse_config(optarg);
			if (ret) {
				printf("Invalid config\n");
				print_usage(prgname);
				return -1;
			}
			break;
		case CMD_LINE_OPT_SINGLE_SA_NUM:
			ret = parse_decimal(optarg);
			if (ret == -1) {
				printf("Invalid argument[sa_idx]\n");
				print_usage(prgname);
				return -1;
			}

			/* else */
			single_sa = 1;
			single_sa_idx = ret;
			printf("Configured with single SA index %u\n",
					single_sa_idx);
			break;
		case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
			ret = parse_portmask(optarg);
			if (ret == -1) {
				printf("Invalid argument[portmask]\n");
				print_usage(prgname);
				return -1;
			}

			/* else */
			enabled_cryptodev_mask = ret;
			break;
		case CMD_LINE_OPT_RX_OFFLOAD_NUM:
			ret = parse_mask(optarg, &dev_rx_offload);
			if (ret != 0) {
				printf("Invalid argument for \'%s\': %s\n",
					CMD_LINE_OPT_RX_OFFLOAD, optarg);
				print_usage(prgname);
				return -1;
			}
			break;
		case CMD_LINE_OPT_TX_OFFLOAD_NUM:
			ret = parse_mask(optarg, &dev_tx_offload);
			if (ret != 0) {
				printf("Invalid argument for \'%s\': %s\n",
					CMD_LINE_OPT_TX_OFFLOAD, optarg);
				print_usage(prgname);
				return -1;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (f_present == 0) {
		printf("Mandatory option \"-f\" not present\n");
		return -1;
	}

	print_app_sa_prm(&app_sa_prm);

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}

static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

/*
 * Update destination ethaddr for the port.
 */
int
add_dst_ethaddr(uint16_t port, const struct ether_addr *addr)
{
	if (port >= RTE_DIM(ethaddr_tbl))
		return -EINVAL;

	ethaddr_tbl[port].dst = ETHADDR_TO_UINT64(addr);
	return 0;
}

/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up - speed %u Mbps -%s\n",
						portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
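/*
 * Record that (cdev_id, qp) can be used by the given lcore for the
 * cipher/auth (or AEAD) combination described by the capabilities, by
 * inserting an entry keyed on {lcore, algorithms} into the in/outbound
 * cdev map. Returns 1 if a new mapping was added, 0 otherwise (already
 * mapped, table full, or insert failure).
 */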
static int32_t
add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params,
		struct ipsec_ctx *ipsec_ctx,
		const struct rte_cryptodev_capabilities *cipher,
		const struct rte_cryptodev_capabilities *auth,
		const struct rte_cryptodev_capabilities *aead)
{
	int32_t ret = 0;
	unsigned long i;
	struct cdev_key key = { 0 };

	key.lcore_id = params->lcore_id;
	if (cipher)
		key.cipher_algo = cipher->sym.cipher.algo;
	if (auth)
		key.auth_algo = auth->sym.auth.algo;
	if (aead)
		key.aead_algo = aead->sym.aead.algo;

	ret = rte_hash_lookup(map, &key);
	if (ret != -ENOENT)
		return 0;

	for (i = 0; i < ipsec_ctx->nb_qps; i++)
		if (ipsec_ctx->tbl[i].id == cdev_id)
			break;

	if (i == ipsec_ctx->nb_qps) {
		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
			printf("Maximum number of crypto devices assigned to "
				"a core, increase MAX_QP_PER_LCORE value\n");
			return 0;
		}
		ipsec_ctx->tbl[i].id = cdev_id;
		ipsec_ctx->tbl[i].qp = qp;
		ipsec_ctx->nb_qps++;
		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
				"(cdev_id_qp %lu)\n", str, key.lcore_id,
				cdev_id, qp, i);
	}

	ret = rte_hash_add_key_data(map, &key, (void *)i);
	if (ret < 0) {
		printf("Failed to insert cdev mapping for (lcore %u, "
				"cdev %u, qp %u), errno %d\n",
				key.lcore_id, ipsec_ctx->tbl[i].id,
				ipsec_ctx->tbl[i].qp, ret);
		return 0;
	}

	return 1;
}

static int32_t
add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
		uint16_t qp, struct lcore_params *params)
{
	int32_t ret = 0;
	const struct rte_cryptodev_capabilities *i, *j;
	struct rte_hash *map;
	struct lcore_conf *qconf;
	struct ipsec_ctx *ipsec_ctx;
	const char *str;

	qconf = &lcore_conf[params->lcore_id];

	if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
		map = cdev_map_out;
		ipsec_ctx = &qconf->outbound;
		str = "Outbound";
	} else {
		map = cdev_map_in;
		ipsec_ctx = &qconf->inbound;
		str = "Inbound";
	}

	/* Required cryptodevs with operation chaining */
	if (!(dev_info->feature_flags &
				RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
		return ret;

	for (i = dev_info->capabilities;
			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret |= add_mapping(map, str, cdev_id, qp, params,
					ipsec_ctx, NULL, NULL, i);
			continue;
		}

		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		for (j = dev_info->capabilities;
				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
				continue;

			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
				continue;

			ret |= add_mapping(map, str, cdev_id, qp, params,
						ipsec_ctx, i, j, NULL);
		}
	}

	return ret;
}

/* Check if the device is enabled by cryptodev_mask */
static int
check_cryptodev_mask(uint8_t cdev_id)
{
	if (enabled_cryptodev_mask & (1 << cdev_id))
		return 0;

	return -1;
}

static int32_t
cryptodevs_init(void)
{
	struct rte_cryptodev_config dev_conf;
	struct rte_cryptodev_qp_conf qp_conf;
	uint16_t idx, max_nb_qps, qp, i;
	int16_t cdev_id, port_id;
	struct rte_hash_parameters params = { 0 };

	params.entries = CDEV_MAP_ENTRIES;
	params.key_len = sizeof(struct cdev_key);
	params.hash_func = rte_jhash;
	params.hash_func_init_val = 0;
	params.socket_id = rte_socket_id();

	params.name = "cdev_map_in";
	cdev_map_in = rte_hash_create(&params);
	if (cdev_map_in == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	params.name = "cdev_map_out";
	cdev_map_out = rte_hash_create(&params);
	if (cdev_map_out == NULL)
		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
				rte_errno);

	printf("lcore/cryptodev/qp mappings:\n");

	uint32_t max_sess_sz = 0, sess_sz;
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		void *sec_ctx;

		/* Get crypto priv session size */
		sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_sz > max_sess_sz)
			max_sess_sz = sess_sz;

		/*
		 * If crypto device is security capable, need to check the
		 * size of security session as well.
		 */

		/* Get security context of the crypto device */
		sec_ctx = rte_cryptodev_get_sec_ctx(cdev_id);
		if (sec_ctx == NULL)
			continue;

		/* Get size of security session */
		sess_sz = rte_security_session_get_size(sec_ctx);
		if (sess_sz > max_sess_sz)
			max_sess_sz = sess_sz;
	}
	RTE_ETH_FOREACH_DEV(port_id) {
		void *sec_ctx;

		if ((enabled_port_mask & (1 << port_id)) == 0)
			continue;

		sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
		if (sec_ctx == NULL)
			continue;

		sess_sz = rte_security_session_get_size(sec_ctx);
		if (sess_sz > max_sess_sz)
			max_sess_sz = sess_sz;
	}

	idx = 0;
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		struct rte_cryptodev_info cdev_info;

		if (check_cryptodev_mask((uint8_t)cdev_id))
			continue;

		rte_cryptodev_info_get(cdev_id, &cdev_info);

		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
			max_nb_qps = cdev_info.max_nb_queue_pairs;
		else
			max_nb_qps = nb_lcore_params;

		qp = 0;
		i = 0;
		while (qp < max_nb_qps && i < nb_lcore_params) {
			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
						&lcore_params[idx]))
				qp++;
			idx++;
			idx = idx % nb_lcore_params;
			i++;
		}

		if (qp == 0)
			continue;

		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
		dev_conf.nb_queue_pairs = qp;

		uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
		if (dev_max_sess != 0 && dev_max_sess < CDEV_MP_NB_OBJS)
			rte_exit(EXIT_FAILURE,
				"Device does not support at least %u "
				"sessions", CDEV_MP_NB_OBJS);

		if (!socket_ctx[dev_conf.socket_id].session_pool) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
					"sess_mp_%u", dev_conf.socket_id);
			sess_mp = rte_cryptodev_sym_session_pool_create(
					mp_name, CDEV_MP_NB_OBJS,
					0, CDEV_MP_CACHE_SZ, 0,
					dev_conf.socket_id);
			socket_ctx[dev_conf.socket_id].session_pool = sess_mp;
		}

		if (!socket_ctx[dev_conf.socket_id].session_priv_pool) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
					"sess_mp_priv_%u", dev_conf.socket_id);
			sess_mp = rte_mempool_create(mp_name,
					CDEV_MP_NB_OBJS,
					max_sess_sz,
					CDEV_MP_CACHE_SZ,
					0, NULL, NULL, NULL,
					NULL, dev_conf.socket_id,
					0);
			socket_ctx[dev_conf.socket_id].session_priv_pool =
					sess_mp;
		}

		if (!socket_ctx[dev_conf.socket_id].session_priv_pool ||
				!socket_ctx[dev_conf.socket_id].session_pool)
			rte_exit(EXIT_FAILURE,
				"Cannot create session pool on socket %d\n",
				dev_conf.socket_id);
		else
			printf("Allocated session pool on socket %d\n",
				dev_conf.socket_id);

		if (rte_cryptodev_configure(cdev_id, &dev_conf))
			rte_panic("Failed to initialize cryptodev %u\n",
					cdev_id);

		qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
		qp_conf.mp_session =
			socket_ctx[dev_conf.socket_id].session_pool;
		qp_conf.mp_session_private =
			socket_ctx[dev_conf.socket_id].session_priv_pool;
		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
					&qp_conf, dev_conf.socket_id))
				rte_panic("Failed to setup queue %u for "
						"cdev_id %u\n", qp, cdev_id);

		if (rte_cryptodev_start(cdev_id))
			rte_panic("Failed to start cryptodev %u\n",
					cdev_id);
	}

	/* create session pools for eth devices that implement security */
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((enabled_port_mask & (1 << port_id)) &&
				rte_eth_dev_get_sec_ctx(port_id)) {
			int socket_id = rte_eth_dev_socket_id(port_id);

			if (!socket_ctx[socket_id].session_pool) {
				char mp_name[RTE_MEMPOOL_NAMESIZE];
				struct rte_mempool *sess_mp;

				snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
						"sess_mp_%u", socket_id);
				sess_mp = rte_mempool_create(mp_name,
						(CDEV_MP_NB_OBJS * 2),
						max_sess_sz,
						CDEV_MP_CACHE_SZ,
						0, NULL, NULL, NULL,
						NULL, socket_id,
						0);
				if (sess_mp == NULL)
					rte_exit(EXIT_FAILURE,
						"Cannot create session pool "
						"on socket %d\n", socket_id);
				else
					printf("Allocated session pool "
						"on socket %d\n", socket_id);
				socket_ctx[socket_id].session_pool = sess_mp;
			}
		}
	}


	printf("\n");

	return 0;
}
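/*
 * Configure one ethernet port: apply the requested and supported RX/TX
 * offloads, set up RSS, and create one TX queue per lcore plus the RX
 * queues assigned to this port by the --config mapping.
 */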
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	uint16_t nb_tx_queue, nb_rx_queue;
	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
	int32_t ret, socket_id;
	struct lcore_conf *qconf;
	struct ether_addr ethaddr;
	struct rte_eth_conf local_port_conf = port_conf;

	rte_eth_dev_info_get(portid, &dev_info);

	/* limit allowed HW offloads, as user requested */
	dev_info.rx_offload_capa &= dev_rx_offload;
	dev_info.tx_offload_capa &= dev_tx_offload;

	printf("Configuring device port %u:\n", portid);

	rte_eth_macaddr_get(portid, &ethaddr);
	ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(&ethaddr);
	print_ethaddr("Address: ", &ethaddr);
	printf("\n");

	nb_rx_queue = get_port_nb_rx_queues(portid);
	nb_tx_queue = nb_lcores;

	if (nb_rx_queue > dev_info.max_rx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max rx queue is %u)\n",
				nb_rx_queue, dev_info.max_rx_queues);

	if (nb_tx_queue > dev_info.max_tx_queues)
		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
				"(max tx queue is %u)\n",
				nb_tx_queue, dev_info.max_tx_queues);

	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
			nb_rx_queue, nb_tx_queue);

	if (frame_size) {
		local_port_conf.rxmode.max_rx_pkt_len = frame_size;
		local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	}

	local_port_conf.rxmode.offloads |= req_rx_offloads;
	local_port_conf.txmode.offloads |= req_tx_offloads;

	/* Check that all required capabilities are supported */
	if ((local_port_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
			local_port_conf.rxmode.offloads)
		rte_exit(EXIT_FAILURE,
			"Error: port %u required RX offloads: 0x%" PRIx64
			", available RX offloads: 0x%" PRIx64 "\n",
			portid, local_port_conf.rxmode.offloads,
			dev_info.rx_offload_capa);

	if ((local_port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
			local_port_conf.txmode.offloads)
		rte_exit(EXIT_FAILURE,
			"Error: port %u required TX offloads: 0x%" PRIx64
			", available TX offloads: 0x%" PRIx64 "\n",
			portid, local_port_conf.txmode.offloads,
			dev_info.tx_offload_capa);

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
		local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;

	printf("port %u configuring rx_offloads=0x%" PRIx64
		", tx_offloads=0x%" PRIx64 "\n",
		portid, local_port_conf.rxmode.offloads,
		local_port_conf.txmode.offloads);

	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support,"
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			portid,
			port_conf.rx_adv_conf.rss_conf.rss_hf,
			local_port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
			&local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%d\n", ret, portid);

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: "
				"err=%d, port=%d\n", ret, portid);

	/* init one TX queue per lcore */
	tx_queueid = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		/* init TX queue */
		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);

		txconf = &dev_info.default_txconf;
		txconf->offloads = local_port_conf.txmode.offloads;

		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
				socket_id, txconf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
					"err=%d, port=%d\n", ret, portid);

		qconf = &lcore_conf[lcore_id];
		qconf->tx_queue_id[portid] = tx_queueid;

		/* Pre-populate pkt offloads based on capabilities */
		qconf->outbound.ipv4_offloads = PKT_TX_IPV4;
		qconf->outbound.ipv6_offloads = PKT_TX_IPV6;
		if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
			qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM;

		tx_queueid++;

		/* init RX queues */
		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
			struct rte_eth_rxconf rxq_conf;

			if (portid != qconf->rx_queue_list[queue].port_id)
				continue;

			rx_queueid = qconf->rx_queue_list[queue].queue_id;

			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
					socket_id);

			rxq_conf = dev_info.default_rxconf;
			rxq_conf.offloads = local_port_conf.rxmode.offloads;
			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
					nb_rxd, socket_id, &rxq_conf,
					socket_ctx[socket_id].mbuf_pool);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_rx_queue_setup: err=%d, "
					"port=%d\n", ret, portid);
		}
	}
	printf("\n");
}
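/*
 * Create the per-socket packet mbuf pool. The data room is sized from the
 * -j frame size when jumbo frames are enabled, and the private area
 * reserves room for the per-packet IPsec metadata (ipsec_metadata_size()).
 */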
static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
	char s[64];
	uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
			RTE_MBUF_DEFAULT_BUF_SIZE;


	snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
	ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
			MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
			buff_size,
			socket_id);
	if (ctx->mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
				socket_id);
	else
		printf("Allocated mbuf pool on socket %d\n", socket_id);
}

static inline int
inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
{
	struct ipsec_sa *sa;

	/* For inline protocol processing, the metadata in the event will
	 * uniquely identify the security session which raised the event.
	 * Application would then need the userdata it had registered with the
	 * security session to process the event.
	 */

	sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);

	if (sa == NULL) {
		/* userdata could not be retrieved */
		return -1;
	}

	/* Sequence number overflow. SA needs to be re-established */
	RTE_SET_USED(sa);
	return 0;
}

static int
inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
		void *param, void *ret_param)
{
	uint64_t md;
	struct rte_eth_event_ipsec_desc *event_desc = NULL;
	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_eth_dev_get_sec_ctx(port_id);

	RTE_SET_USED(param);

	if (type != RTE_ETH_EVENT_IPSEC)
		return -1;

	event_desc = ret_param;
	if (event_desc == NULL) {
		printf("Event descriptor not set\n");
		return -1;
	}

	md = event_desc->metadata;

	if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
		return inline_ipsec_event_esn_overflow(ctx, md);
	else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
		printf("Invalid IPsec event reported\n");
		return -1;
	}

	return -1;
}
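/*
 * Application entry point: initialize the EAL, parse the application
 * arguments, build the per-socket SP/SA/routing contexts and mbuf pools,
 * configure the ethernet ports and crypto devices, then start the ports
 * and launch main_loop() on every lcore.
 */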
int32_t
main(int32_t argc, char **argv)
{
	int32_t ret;
	uint32_t lcore_id;
	uint8_t socket_id;
	uint16_t portid;
	uint64_t req_rx_offloads, req_tx_offloads;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");

	if ((unprotected_port_mask & enabled_port_mask) !=
			unprotected_port_mask)
		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
				unprotected_port_mask);

	if (check_params() < 0)
		rte_exit(EXIT_FAILURE, "check_params failed\n");

	ret = init_lcore_rx_queues();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");

	nb_lcores = rte_lcore_count();

	/* Replicate each context per socket */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) == 0)
			continue;

		if (numa_on)
			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
		else
			socket_id = 0;

		if (socket_ctx[socket_id].mbuf_pool)
			continue;

		/* initialize SPD */
		sp4_init(&socket_ctx[socket_id], socket_id);

		sp6_init(&socket_ctx[socket_id], socket_id);

		/* initialize SAD */
		sa_init(&socket_ctx[socket_id], socket_id);

		rt_init(&socket_ctx[socket_id], socket_id);

		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
	}

	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		sa_check_offloads(portid, &req_rx_offloads, &req_tx_offloads);
		port_init(portid, req_rx_offloads, req_tx_offloads);
	}

	cryptodevs_init();

	/* start ports */
	RTE_ETH_FOREACH_DEV(portid) {
		if ((enabled_port_mask & (1 << portid)) == 0)
			continue;

		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
					"err=%d, port=%d\n", ret, portid);
		/*
		 * If enabled, put device in promiscuous mode.
		 * This allows IO forwarding mode to forward packets
		 * to itself through 2 cross-connected ports of the
		 * target machine.
		 */
		if (promiscuous_on)
			rte_eth_promiscuous_enable(portid);

		rte_eth_dev_callback_register(portid,
			RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
	}

	check_all_ports_link_status(enabled_port_mask);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}