/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"
#include "sad.h"

#if defined(__ARM_NEON)
#include "ipsec_lpm_neon.h"
#endif

struct port_drv_mode_data {
	void *sess;
	void *ctx;
};

typedef void (*ipsec_worker_fn_t)(void);

int ip_reassembly_dynfield_offset = -1;
uint64_t ip_reassembly_dynflag;

static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;
	uint32_t ptype = pkt->packet_type;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	rte_prefetch0(eth);

	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				   offsetof(struct ip, ip_p));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				   offsetof(struct ip6_hdr, ip6_nxt));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}

static inline void
update_mac_addrs(struct rte_ether_hdr *ethhdr, uint16_t portid)
{
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}

static inline void
ev_vector_attr_init(struct rte_event_vector *vec)
{
	vec->attr_valid = 1;
	vec->port = 0xFFFF;
	vec->queue = 0;
}

static inline void
ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
{
	if (vec->port == 0xFFFF) {
		vec->port = pkt->port;
		return;
	}
	if (vec->attr_valid && (vec->port != pkt->port))
		vec->attr_valid = 0;
}

static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
			 struct port_drv_mode_data *data,
			 uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];
		if (!sa)
			continue;

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
		    RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id %d >= table size %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only first inline session found for a given port */
		if (data[sa->portid].sess)
			continue;
		data[sa->portid].sess = pri_sess->security.ses;
		data[sa->portid].ctx = pri_sess->security.ctx;
	}
}

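/*
 * Inbound SP lookup for a single plain packet.
 * Returns 0 when the packet must be dropped (no SP context or ACL result
 * DISCARD). Returns 1 otherwise and sets *sa_idx: UINT32_MAX for BYPASS,
 * or the index of the SA that is expected to have protected the packet.
 */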
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			 DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}

static inline void
check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
	      struct traffic_type *ipsec)
{
	uint32_t i, j, res;
	struct rte_mbuf *m;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			ipsec->res[ipsec->num] = res - 1;
			ipsec->pkts[ipsec->num++] = m;
		}
	}
	ip->num = j;
}

static inline void
check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
		 struct traffic_type *ip)
{
	struct ipsec_sa *sa;
	uint32_t i, j, res;
	struct rte_mbuf *m;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			sa = *(struct ipsec_sa **)rte_security_dynfield(m);
			if (sa == NULL) {
				free_pkts(&m, 1);
				continue;
			}

			/* SPI on the packet should match with the one in SA */
			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
				free_pkts(&m, 1);
				continue;
			}

			ip->pkts[j++] = m;
		}
	}
	ip->num = j;
}

static inline void
ipv4_pkt_l3_len_set(struct rte_mbuf *pkt)
{
	struct rte_ipv4_hdr *ipv4;

	ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
	pkt->l3_len = ipv4->ihl * 4;
}

static inline int
ipv6_pkt_l3_len_set(struct rte_mbuf *pkt)
{
	struct rte_ipv6_hdr *ipv6;
	size_t l3_len, ext_len;
	uint32_t l3_type;
	int next_proto;
	uint8_t *p;

	ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
	l3_len = sizeof(struct rte_ipv6_hdr);
	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;

	if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
	    l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
		p = rte_pktmbuf_mtod(pkt, uint8_t *);
		next_proto = ipv6->proto;
		while (next_proto != IPPROTO_ESP &&
		       l3_len < pkt->data_len &&
		       (next_proto = rte_ipv6_get_next_ext(p + l3_len,
						next_proto, &ext_len)) >= 0)
			l3_len += ext_len;

		/* Drop pkt when IPv6 header exceeds first seg size */
		if (unlikely(l3_len > pkt->data_len))
			return -EINVAL;
	}
	pkt->l3_len = l3_len;

	return 0;
}

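/*
 * LPM lookup on the destination address of the packet. On a hit the
 * resolved egress port is returned; RTE_MAX_ETHPORTS indicates that no
 * route exists and the caller is expected to drop the packet.
 */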
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	struct rte_ipv6_hdr *ip;
	uint32_t hop;
	int ret;

	ip = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *, RTE_ETHER_HDR_LEN);
	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, &ip->dst_addr, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}

static inline void
crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
		struct rte_crypto_op *cop[], uint16_t num)
{
	struct rte_crypto_sym_op *sop;
	uint32_t i;

	const struct rte_crypto_op unproc_cop = {
		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
		.sess_type = RTE_CRYPTO_OP_SECURITY_SESSION,
	};

	for (i = 0; i != num; i++) {
		cop[i]->raw = unproc_cop.raw;
		sop = cop[i]->sym;
		sop->m_src = mb[i];
		sop->m_dst = NULL;
		__rte_security_attach_session(sop, ss->security.ses);
	}
}

static inline void
crypto_prepare_event(struct rte_mbuf *pkt, struct rte_ipsec_session *sess, struct rte_event *ev)
{
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cop;

	/* Get pkt private data */
	priv = get_priv(pkt);
	cop = &priv->cop;

	/* Reset crypto operation data */
	crypto_op_reset(sess, &pkt, &cop, 1);

	/* Update event_ptr with rte_crypto_op */
	ev->event = 0;
	ev->event_ptr = cop;
}

static inline void
free_pkts_from_events(struct rte_event events[], uint16_t count)
{
	struct rte_crypto_op *cop;
	int i;

	for (i = 0; i < count; i++) {
		cop = events[i].event_ptr;
		free_pkts(&cop->sym->m_src, 1);
	}
}

static inline int
event_crypto_enqueue(struct rte_mbuf *pkt,
		     struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
{
	struct rte_ipsec_session *sess;
	struct rte_event ev;
	int ret;

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	crypto_prepare_event(pkt, sess, &ev);

	/* Enqueue event to crypto adapter */
	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
					       ev_link->event_port_id, &ev, 1);
	if (unlikely(ret != 1)) {
		/* pkt will be freed by the caller */
		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
		return rte_errno;
	}

	return 0;
}

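/*
 * Handle one event received on an unprotected port. Plain packets are
 * matched against the inbound SP and, on BYPASS or a verified SA match,
 * routed towards the protected side. ESP packets are matched to an SA via
 * SAD lookup and submitted to the crypto adapter for lookaside processing.
 * Returns PKT_FORWARDED, PKT_POSTED or PKT_DROPPED.
 */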
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
			 const struct eh_event_link_info *ev_link, struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;
	if (is_ip_reassembly_incomplete(pkt) > 0) {
		free_reassembly_fail_pkt(pkt);
		return PKT_DROPPED;
	}

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_IPSEC_IPV4:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		ipv4_pkt_l3_len_set(pkt);
		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
		sa = ipsec_mask_saptr(sa);
		if (unlikely(sa == NULL)) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
			goto drop_pkt_and_exit;
		}

		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
			goto drop_pkt_and_exit;

		return PKT_POSTED;
	case PKT_TYPE_IPSEC_IPV6:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		if (unlikely(ipv6_pkt_l3_len_set(pkt) != 0))
			goto drop_pkt_and_exit;
		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
		sa = ipsec_mask_saptr(sa);
		if (unlikely(sa == NULL)) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
			goto drop_pkt_and_exit;
		}

		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
			goto drop_pkt_and_exit;

		return PKT_POSTED;
	default:
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
			   type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match with the one in SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *), port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	free_pkts(&pkt, 1);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

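/*
 * Handle one event received on a protected port. The plain packet is
 * classified against the outbound SP: BYPASS packets are routed as-is,
 * matched packets are protected with the selected SA, either inline
 * (security metadata attached, Tx offload flag set) or through the
 * lookaside crypto adapter. Returns PKT_FORWARDED, PKT_POSTED or
 * PKT_DROPPED.
 */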
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
			  const struct eh_event_link_info *ev_link, struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct rte_ether_hdr *ethhdr;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG_DP(DEBUG, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Determine protocol type */
	if (sess->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
		goto lookaside;

	rte_security_set_pkt_metadata(sess->security.ctx,
				      sess->security.ses, pkt, NULL);

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
	/* Update ether type */
	ethhdr->ether_type = (IS_IP4(sa->flags) ? rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
			      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Provide L2 len for Outbound processing */
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	/* Update mac addresses */
	update_mac_addrs(ethhdr, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

lookaside:
	/* prepare pkt - advance start to L3 */
	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);

	if (likely(event_crypto_enqueue(pkt, sa, ev_link) == 0))
		return PKT_POSTED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	free_pkts(&pkt, 1);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

static inline int
ipsec_ev_route_ip_pkts(struct rte_event_vector *vec, struct route_table *rt,
		       struct ipsec_traffic *t)
{
	struct rte_ether_hdr *ethhdr;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	uint32_t i, j = 0;

	/* Route IPv4 packets */
	for (i = 0; i < t->ip4.num; i++) {
		pkt = t->ip4.pkts[i];
		port_id = route4_pkt(pkt, rt->rt4_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			update_mac_addrs(ethhdr, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	/* Route IPv6 packets */
	for (i = 0; i < t->ip6.num; i++) {
		pkt = t->ip6.pkts[i];
		port_id = route6_pkt(pkt, rt->rt6_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			update_mac_addrs(ethhdr, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	return j;
}

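/*
 * Route the plain packets of an inbound vector and submit its ESP packets
 * to the crypto adapter for lookaside processing. Plain packets with a
 * valid route are compacted back into the event vector; the return value is
 * the number of packets kept in the vector for the Tx adapter, or -rte_errno
 * when the crypto adapter enqueue fails.
 */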
static inline int
ipsec_ev_inbound_route_pkts(struct rte_event_vector *vec,
			    struct route_table *rt,
			    struct ipsec_traffic *t,
			    const struct eh_event_link_info *ev_link)
{
	uint32_t ret, i, j, ev_len = 0;
	struct rte_event events[MAX_PKTS];
	struct rte_ipsec_session *sess;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	j = ipsec_ev_route_ip_pkts(vec, rt, t);

	/* Route ESP packets */
	for (i = 0; i < t->ipsec.num; i++) {
		pkt = t->ipsec.pkts[i];
		sa = ipsec_mask_saptr(t->ipsec.saptr[i]);
		if (unlikely(sa == NULL)) {
			free_pkts(&pkt, 1);
			continue;
		}
		sess = ipsec_get_primary_session(sa);
		crypto_prepare_event(pkt, sess, &events[ev_len]);
		ev_len++;
	}

	if (ev_len) {
		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
				ev_link->event_port_id, events, ev_len);
		if (ret < ev_len) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
				   ev_len, rte_errno);
			free_pkts_from_events(&events[ret], ev_len - ret);
			return -rte_errno;
		}
	}

	return j;
}

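/*
 * Handle the SP results of an outbound vector: plain (BYPASS) packets are
 * routed, SP-matched packets are protected with their SA. Lookaside SAs are
 * submitted to the crypto adapter, inline SAs get security metadata, ether
 * type and MAC addresses updated and stay in the vector. Returns the number
 * of packets left in the vector for the Tx adapter, or -rte_errno if the
 * crypto adapter enqueue fails.
 */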
static inline int
ipsec_ev_outbound_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
			     struct ipsec_traffic *t, struct sa_ctx *sa_ctx,
			     const struct eh_event_link_info *ev_link)
{
	uint32_t sa_idx, ret, i, j, ev_len = 0;
	struct rte_event events[MAX_PKTS];
	struct rte_ipsec_session *sess;
	struct rte_ether_hdr *ethhdr;
	uint16_t port_id = 0;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	j = ipsec_ev_route_ip_pkts(vec, rt, t);

	/* Handle IPsec packets.
	 * For lookaside IPsec packets, submit to cryptodev queue.
	 * For inline IPsec packets, route the packet.
	 */
	for (i = 0; i < t->ipsec.num; i++) {
		/* Validate sa_idx */
		sa_idx = t->ipsec.res[i];
		pkt = t->ipsec.pkts[i];
		if (unlikely(sa_idx >= sa_ctx->nb_sa)) {
			free_pkts(&pkt, 1);
			continue;
		}
		/* Else the packet has to be protected */
		sa = &(sa_ctx->sa[sa_idx]);
		/* Get IPsec session */
		sess = ipsec_get_primary_session(sa);
		switch (sess->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
			crypto_prepare_event(pkt, sess, &events[ev_len]);
			ev_len++;
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			rte_security_set_pkt_metadata(sess->security.ctx,
						      sess->security.ses, pkt, NULL);
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
			port_id = sa->portid;

			/* Fetch outer ip type and update */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			ethhdr->ether_type = (IS_IP4(sa->flags) ?
					      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
					      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));
			update_mac_addrs(ethhdr, port_id);

			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
			break;
		default:
			RTE_LOG(ERR, IPSEC, "SA type not supported\n");
			free_pkts(&pkt, 1);
			break;
		}
	}

	if (ev_len) {
		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
				ev_link->event_port_id, events, ev_len);
		if (ret < ev_len) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
				   ev_len, rte_errno);
			free_pkts_from_events(&events[ret], ev_len - ret);
			return -rte_errno;
		}
	}

	return j;
}

static inline void
classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	enum pkt_type type;
	uint8_t *nlp;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		t->ip4.data[t->ip4.num] = nlp;
		t->ip4.pkts[(t->ip4.num)++] = pkt;
		break;
	case PKT_TYPE_PLAIN_IPV6:
		t->ip6.data[t->ip6.num] = nlp;
		t->ip6.pkts[(t->ip6.num)++] = pkt;
		break;
	case PKT_TYPE_IPSEC_IPV4:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		ipv4_pkt_l3_len_set(pkt);
		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		break;
	case PKT_TYPE_IPSEC_IPV6:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		if (ipv6_pkt_l3_len_set(pkt) != 0) {
			free_pkts(&pkt, 1);
			return;
		}
		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		break;
	default:
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
			   type);
		free_pkts(&pkt, 1);
		break;
	}
}

static inline int
process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
				struct rte_event_vector *vec,
				const struct eh_event_link_info *ev_link)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	int i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];
		if (is_ip_reassembly_incomplete(pkt) > 0) {
			free_reassembly_fail_pkt(pkt);
			continue;
		}

		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				free_pkts(&pkt, 1);
				continue;
			}
		}

		classify_pkt(pkt, &t);
	}

	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);

	if (t.ipsec.num != 0)
		sad_lookup(&ctx->sa_ctx->sad, t.ipsec.pkts, t.ipsec.saptr, t.ipsec.num);

	return ipsec_ev_inbound_route_pkts(vec, rt, &t, ev_link);
}

static inline int
process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
				 struct rte_event_vector *vec,
				 const struct eh_event_link_info *ev_link)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	uint32_t i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];

		classify_pkt(pkt, &t);

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;
	}

	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);

	return ipsec_ev_outbound_route_pkts(vec, rt, &t, ctx->sa_ctx,
					    ev_link);
}

static inline int
process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
					  struct port_drv_mode_data *data)
{
	struct rte_mbuf *pkt;
	int16_t port_id;
	uint32_t i;
	int j = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		pkt = vec->mbufs[i];
		port_id = pkt->port;

		if (unlikely(!data[port_id].sess)) {
			free_pkts(&pkt, 1);
			continue;
		}
		ipsec_event_pre_forward(pkt, port_id);
		/* Save security session */
		rte_security_set_pkt_metadata(data[port_id].ctx,
					      data[port_id].sess, pkt,
					      NULL);

		/* Mark the packet for Tx security offload */
		pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		vec->mbufs[j++] = pkt;
	}

	return j;
}

static void
ipsec_event_vector_free(struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;

	if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV_VECTOR) {
		struct rte_crypto_op *cop;
		int i;

		for (i = 0; i < vec->nb_elem; i++) {
			cop = vec->ptrs[i];
			rte_pktmbuf_free(cop->sym->m_src);
		}
	} else {
		rte_pktmbuf_free_bulk(vec->mbufs + vec->elem_offset, vec->nb_elem);
	}
	rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

static inline void
ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			struct eh_event_link_info *links,
			struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	int ret;

	pkt = vec->mbufs[0];

	ev_vector_attr_init(vec);
	core_stats_update_rx(vec->nb_elem);

	if (is_unprotected_port(pkt->port))
		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
						      &lconf->rt, vec, links);
	else
		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
						       &lconf->rt, vec, links);

	if (likely(ret > 0)) {
		core_stats_update_tx(vec->nb_elem);
		vec->nb_elem = ret;
		ret = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						       links[0].event_port_id, ev, 1, 0);
		if (unlikely(ret == 0))
			ipsec_event_vector_free(ev);
	} else {
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	}
}

static inline void
ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
				 struct rte_event *ev,
				 struct port_drv_mode_data *data)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	uint16_t ret;

	pkt = vec->mbufs[0];
	vec->attr_valid = 1;
	vec->port = pkt->port;

	if (!is_unprotected_port(pkt->port))
		vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
									 data);
	if (likely(vec->nb_elem > 0)) {
		ret = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						       links[0].event_port_id, ev, 1, 0);
		if (unlikely(ret == 0))
			ipsec_event_vector_free(ev);
	} else
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

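/*
 * Post-process a packet returned by the crypto adapter. On success the mbuf
 * data starts at the IP header: prepend a fresh Ethernet header, select
 * checksum offloads and the route from the IP version, and rewrite the MAC
 * addresses for the egress port. Returns 0 when the packet is ready for Tx,
 * -1 when it had to be dropped.
 */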
static inline int
ipsec_ev_cryptodev_process_one_pkt(
		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
		const struct rte_crypto_op *cop, struct rte_mbuf *pkt)
{
	struct rte_ether_hdr *ethhdr;
	uint16_t port_id;
	struct ip *ip;

	/* If operation was not successful, free the packet */
	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
		free_pkts(&pkt, 1);
		return -1;
	}

	ip = rte_pktmbuf_mtod(pkt, struct ip *);

	/* Prepend Ether layer */
	ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);

	/* Route pkt and update required fields */
	if (ip->ip_v == IPVERSION) {
		pkt->ol_flags |= lconf->outbound.ipv4_offloads;
		pkt->l3_len = sizeof(struct ip);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

		port_id = route4_pkt(pkt, lconf->rt.rt4_ctx);
	} else {
		pkt->ol_flags |= lconf->outbound.ipv6_offloads;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);

		port_id = route6_pkt(pkt, lconf->rt.rt6_ctx);
	}

	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
		free_pkts(&pkt, 1);
		return -1;
	}

	/* Update Ether with port's MAC addresses */
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));

	ipsec_event_pre_forward(pkt, port_id);

	return 0;
}

static inline void
ipsec_ev_cryptodev_vector_process(
		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
		const struct eh_event_link_info *links,
		struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	const uint16_t nb_events = 1;
	struct rte_crypto_op *cop;
	struct rte_mbuf *pkt;
	uint16_t enqueued;
	int i, n = 0;

	ev_vector_attr_init(vec);
	/* Transform cop vec into pkt vec */
	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt data */
		cop = vec->ptrs[i];
		pkt = cop->sym->m_src;
		if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
			continue;

		vec->mbufs[n++] = pkt;
		ev_vector_attr_update(vec, pkt);
	}

	if (n == 0) {
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
		return;
	}

	vec->nb_elem = n;
	enqueued = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
			links[0].event_port_id, ev, nb_events, 0);
	if (enqueued != nb_events) {
		RTE_LOG_DP(DEBUG, IPSEC, "Failed to enqueue to tx, ret = %u,"
			   " errno = %i\n", enqueued, rte_errno);
		free_pkts(vec->mbufs, vec->nb_elem);
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	} else {
		core_stats_update_tx(n);
	}
}

static inline int
ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			   struct rte_event *ev)
{
	struct rte_crypto_op *cop;
	struct rte_mbuf *pkt;

	/* Get pkt data */
	cop = ev->event_ptr;
	pkt = cop->sym->m_src;

	if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
		return PKT_DROPPED;

	/* Update event */
	ev->mbuf = pkt;

	return PKT_FORWARDED;
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

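/* Flush callback used when quiescing an event port: free whatever the
 * flushed event still carries (a single mbuf, or a vector of mbufs /
 * crypto ops).
 */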
static void
ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
		       void *args __rte_unused)
{
	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
		ipsec_event_vector_free(&ev);
	else
		rte_pktmbuf_free(ev.mbuf);
}

/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

static void
ipsec_ip_reassembly_dyn_offset_get(void)
{
	/* Retrieve reassembly dynfield offset if available */
	if (ip_reassembly_dynfield_offset < 0)
		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);

	if (ip_reassembly_dynflag == 0) {
		int ip_reassembly_dynflag_offset;
		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
				RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
		if (ip_reassembly_dynflag_offset >= 0)
			ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
	}
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
				       uint8_t nb_links)
{
	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
	unsigned int nb_rx = 0, nb_tx;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	memset(data, 0, sizeof(data));

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use first session configured for a given port
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
				 RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* We have valid links */

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

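	/*
	 * Main loop: dequeue one event at a time. Packets received on
	 * protected ports get the pre-configured per-port inline session
	 * attached and are marked for Tx security offload; packets from
	 * unprotected ports are forwarded without any lookups. Everything
	 * is then handed to the Tx adapter through its internal port.
	 */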
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
						links[0].event_port_id,
						&ev,	/* events */
						1,	/* nb_events */
						0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_drv_mode_process(links, &ev, data);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			break;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
				ev.event_type);
			continue;
		}

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {

			if (unlikely(!data[port_id].sess)) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			rte_security_set_pkt_metadata(data[port_id].ctx,
						      data[port_id].sess, pkt,
						      NULL);

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

			/* Provide L2 len for Outbound processing */
			pkt->l2_len = RTE_ETHER_HDR_LEN;
		}

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
							 links[0].event_port_id,
							 &ev,	/* events */
							 1,	/* nb_events */
							 0	/* flags */);
		if (!nb_tx)
			rte_pktmbuf_free(ev.mbuf);
	}

	if (ev.u64) {
		ev.op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(links[0].eventdev_id,
					links[0].event_port_id, &ev, 1);
	}

	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
			       ipsec_event_port_flush, NULL);
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
				       uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0, nb_tx;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* We have valid links */

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save routing table */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.lcore_id = lcore_id;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.ipv4_offloads = tx_offloads.ipv4_offloads;
	lconf.outbound.ipv6_offloads = tx_offloads.ipv6_offloads;
	lconf.outbound.lcore_id = lcore_id;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	ret = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
	if (ret != 0) {
		RTE_LOG(ERR, IPSEC,
			"SAD cache init on lcore %u, failed with code: %d\n",
			lcore_id, ret);
		return;
	}

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	ipsec_ip_reassembly_dyn_offset_get();

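	/*
	 * Main loop: dequeue one event at a time. Vector and cryptodev-vector
	 * events are fully handled by their helpers (which enqueue to the Tx
	 * or crypto adapter themselves); single ETHDEV and CRYPTODEV events
	 * that survive processing fall through to the Tx adapter enqueue at
	 * the bottom of the loop.
	 */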
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
						links[0].event_port_id,
						&ev,	/* events */
						1,	/* nb_events */
						0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_process(&lconf, links, &ev);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			core_stats_update_rx(1);
			if (is_unprotected_port(ev.mbuf->port))
				ret = process_ipsec_ev_inbound(&lconf.inbound,
							       &lconf.rt, links, &ev);
			else
				ret = process_ipsec_ev_outbound(&lconf.outbound,
								&lconf.rt, links, &ev);
			if (ret != 1)
				/* The pkt has been dropped or posted */
				continue;
			break;
		case RTE_EVENT_TYPE_CRYPTODEV:
			ret = ipsec_ev_cryptodev_process(&lconf, &ev);
			if (unlikely(ret != PKT_FORWARDED))
				continue;
			break;
		case RTE_EVENT_TYPE_CRYPTODEV_VECTOR:
			ipsec_ev_cryptodev_vector_process(&lconf, links, &ev);
			continue;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
				ev.event_type);
			continue;
		}

		core_stats_update_tx(1);
		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
							 links[0].event_port_id,
							 &ev,	/* events */
							 1,	/* nb_events */
							 0	/* flags */);
		if (!nb_tx)
			rte_pktmbuf_free(ev.mbuf);
	}

	if (ev.u64) {
		ev.op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(links[0].eventdev_id,
					links[0].event_port_id, &ev, 1);
	}

	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
			       ipsec_event_port_flush, NULL);
}

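/*
 * Describe the worker variants implemented in this file so the event helper
 * can pick the one matching the event device capabilities. Returns the
 * number of entries filled in.
 */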
static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}

static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate ipsec_wrkr params */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch correct worker after checking
	 * the event device's capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

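/*
 * Outbound SPD processing for the inline protocol poll workers: classify a
 * traffic type with the ACL, drop DISCARD results, pass BYPASS packets
 * through, and group consecutive packets sharing an SA so that each group
 * is handed to rte_ipsec_pkt_process() in a single call. Protected packets
 * are appended to 'match' or 'mismatch' depending on whether the SA's outer
 * IP version agrees with match_flag.
 */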
static __rte_always_inline void
outb_inl_pro_spd_process(struct sp_ctx *sp,
			 struct sa_ctx *sa_ctx,
			 struct traffic_type *ip,
			 struct traffic_type *match,
			 struct traffic_type *mismatch,
			 bool match_flag,
			 struct ipsec_spd_stats *stats)
{
	uint32_t prev_sa_idx = UINT32_MAX;
	struct rte_mbuf *ipsec[MAX_PKT_BURST];
	struct rte_ipsec_session *ips;
	uint32_t i, j, j_mis, sa_idx;
	struct ipsec_sa *sa = NULL;
	uint32_t ipsec_num = 0;
	struct rte_mbuf *m;
	uint64_t satp;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			 ip->num, DEFAULT_MAX_CATEGORIES);

	j = match->num;
	j_mis = mismatch->num;

	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		sa_idx = ip->res[i] - 1;

		if (unlikely(ip->res[i] == DISCARD)) {
			free_pkts(&m, 1);

			stats->discard++;
		} else if (unlikely(ip->res[i] == BYPASS)) {
			match->pkts[j++] = m;

			stats->bypass++;
		} else {
			if (prev_sa_idx == UINT32_MAX) {
				prev_sa_idx = sa_idx;
				sa = &sa_ctx->sa[sa_idx];
				ips = ipsec_get_primary_session(sa);
				satp = rte_ipsec_sa_type(ips->sa);
			}

			if (sa_idx != prev_sa_idx) {
				prep_process_group(sa, ipsec, ipsec_num);

				/* Prepare packets for outbound */
				rte_ipsec_pkt_process(ips, ipsec, ipsec_num);

				/* Copy to current tr or a different tr */
				if (SATP_OUT_IPV4(satp) == match_flag) {
					memcpy(&match->pkts[j], ipsec,
					       ipsec_num * sizeof(void *));
					j += ipsec_num;
				} else {
					memcpy(&mismatch->pkts[j_mis], ipsec,
					       ipsec_num * sizeof(void *));
					j_mis += ipsec_num;
				}

				/* Update to new SA */
				sa = &sa_ctx->sa[sa_idx];
				ips = ipsec_get_primary_session(sa);
				satp = rte_ipsec_sa_type(ips->sa);
				ipsec_num = 0;
			}

			ipsec[ipsec_num++] = m;
			stats->protect++;
		}
	}

	if (ipsec_num) {
		prep_process_group(sa, ipsec, ipsec_num);

		/* Prepare packets for outbound */
		rte_ipsec_pkt_process(ips, ipsec, ipsec_num);

		/* Copy to current tr or a different tr */
		if (SATP_OUT_IPV4(satp) == match_flag) {
			memcpy(&match->pkts[j], ipsec,
			       ipsec_num * sizeof(void *));
			j += ipsec_num;
		} else {
			memcpy(&mismatch->pkts[j_mis], ipsec,
			       ipsec_num * sizeof(void *));
			j_mis += ipsec_num;
		}
	}
	match->num = j;
	mismatch->num = j_mis;
}

/* Poll mode worker when all SAs are of type inline protocol */
void
ipsec_poll_mode_wrkr_inl_pr(void)
{
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
				   / US_PER_S * BURST_TX_DRAIN_US;
	struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	struct ipsec_core_statistics *stats;
	struct rt_ctx *rt4_ctx, *rt6_ctx;
	struct sa_ctx *sa_in, *sa_out;
	struct traffic_type ip4, ip6;
	struct lcore_rx_queue *rxql;
	struct rte_mbuf **v4, **v6;
	struct ipsec_traffic trf;
	struct lcore_conf *qconf;
	uint16_t v4_num, v6_num;
	int32_t socket_id;
	uint32_t lcore_id;
	int32_t i, nb_rx;
	uint16_t portid, queueid;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);
	stats = &core_statistics[lcore_id];

	rt4_ctx = socket_ctx[socket_id].rt_ip4;
	rt6_ctx = socket_ctx[socket_id].rt_ip6;

	sp4_in = socket_ctx[socket_id].sp_ip4_in;
	sp6_in = socket_ctx[socket_id].sp_ip6_in;
	sa_in = socket_ctx[socket_id].sa_in;

	sp4_out = socket_ctx[socket_id].sp_ip4_out;
	sp6_out = socket_ctx[socket_id].sp_ip6_out;
	sa_out = socket_ctx[socket_id].sa_out;

	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
			lcore_id);
		return;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
			lcore_id, portid, queueid);
	}

	ipsec_ip_reassembly_dyn_offset_get();

	while (!force_quit) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			/* Read packets from RX queues */
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
						 pkts, MAX_PKT_BURST);

			if (nb_rx <= 0)
				continue;

			core_stats_update_rx(nb_rx);

			prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);

			/* Drop any IPsec traffic */
			free_pkts(trf.ipsec.pkts, trf.ipsec.num);

			if (is_unprotected_port(portid)) {
				inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
					      trf.ip4.num,
					      &stats->inbound.spd4);

				inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
					      trf.ip6.num,
					      &stats->inbound.spd6);

				v4 = trf.ip4.pkts;
				v4_num = trf.ip4.num;
				v6 = trf.ip6.pkts;
				v6_num = trf.ip6.num;
			} else {
				ip4.num = 0;
				ip6.num = 0;

				outb_inl_pro_spd_process(sp4_out, sa_out,
							 &trf.ip4, &ip4, &ip6,
							 true,
							 &stats->outbound.spd4);

				outb_inl_pro_spd_process(sp6_out, sa_out,
							 &trf.ip6, &ip6, &ip4,
							 false,
							 &stats->outbound.spd6);
				v4 = ip4.pkts;
				v4_num = ip4.num;
				v6 = ip6.pkts;
				v6_num = ip6.num;
			}

#if defined __ARM_NEON
			route4_pkts_neon(rt4_ctx, v4, v4_num, 0, false);
			route6_pkts_neon(rt6_ctx, v6, v6_num);
#else
			route4_pkts(rt4_ctx, v4, v4_num, 0, false);
			route6_pkts(rt6_ctx, v6, v6_num);
#endif
		}
	}
}

/* Poll mode worker when all SAs are of type inline protocol
 * and single SA mode is enabled.
 */
void
ipsec_poll_mode_wrkr_inl_pr_ss(void)
{
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
				   / US_PER_S * BURST_TX_DRAIN_US;
	uint16_t sa_out_portid = 0, sa_out_proto = 0;
	struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	struct rte_ipsec_session *ips = NULL;
	struct lcore_rx_queue *rxql;
	struct ipsec_sa *sa = NULL;
	struct lcore_conf *qconf;
	struct sa_ctx *sa_out;
	uint32_t i, nb_rx, j;
	int32_t socket_id;
	uint32_t lcore_id;
	uint16_t portid, queueid;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Get SA info */
	sa_out = socket_ctx[socket_id].sa_out;
	if (sa_out && single_sa_idx < sa_out->nb_sa) {
		sa = &sa_out->sa[single_sa_idx];
		ips = ipsec_get_primary_session(sa);
		sa_out_portid = sa->portid;
		if (sa->flags & IP6_TUNNEL)
			sa_out_proto = IPPROTO_IPV6;
		else
			sa_out_proto = IPPROTO_IP;
	}

	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
			lcore_id);
		return;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
			lcore_id, portid, queueid);
	}

	while (!force_quit) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			/* Read packets from RX queues */
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
						 pkts, MAX_PKT_BURST);

			if (nb_rx <= 0)
				continue;

			core_stats_update_rx(nb_rx);

			if (is_unprotected_port(portid)) {
				/* Nothing much to do for inbound inline
				 * decrypted traffic.
				 */
				for (j = 0; j < nb_rx; j++) {
					uint32_t ptype, proto;

					pkt = pkts[j];
					ptype = pkt->packet_type &
						RTE_PTYPE_L3_MASK;
					if (ptype == RTE_PTYPE_L3_IPV4)
						proto = IPPROTO_IP;
					else
						proto = IPPROTO_IPV6;

					send_single_packet(pkt, portid, proto);
				}

				continue;
			}

			/* Free packets if there are no outbound sessions */
			if (unlikely(!ips)) {
				rte_pktmbuf_free_bulk(pkts, nb_rx);
				continue;
			}

			rte_ipsec_pkt_process(ips, pkts, nb_rx);

			/* Send pkts out */
			for (j = 0; j < nb_rx; j++) {
				pkt = pkts[j];

				pkt->l2_len = RTE_ETHER_HDR_LEN;
				send_single_packet(pkt, sa_out_portid,
						   sa_out_proto);
			}
		}
	}
}

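/*
 * Select the poll mode worker. When librte_ipsec is enabled, specialised
 * workers indexed by wrkr_flags are used (currently inline protocol, with
 * or without single-SA mode); otherwise, or when no specialised worker
 * matches, fall back to the generic ipsec_poll_mode_worker.
 */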
static void
ipsec_poll_mode_wrkr_launch(void)
{
	static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
		[INL_PR_F]        = ipsec_poll_mode_wrkr_inl_pr,
		[INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
	};
	ipsec_worker_fn_t fn;

	if (!app_sa_prm.enable) {
		fn = ipsec_poll_mode_worker;
	} else {
		fn = poll_mode_wrkrs[wrkr_flags];

		/* Always default to all mode worker */
		if (!fn)
			fn = ipsec_poll_mode_worker;
	}

	/* Launch worker */
	(*fn)();
}

int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_wrkr_launch();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}