/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"
#include "sad.h"

#if defined(__ARM_NEON)
#include "ipsec_lpm_neon.h"
#endif

struct port_drv_mode_data {
	void *sess;
	struct rte_security_ctx *ctx;
};

typedef void (*ipsec_worker_fn_t)(void);

int ip_reassembly_dynfield_offset = -1;
uint64_t ip_reassembly_dynflag;

static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;
	uint32_t ptype = pkt->packet_type;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	rte_prefetch0(eth);

	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}

static inline void
update_mac_addrs(struct rte_ether_hdr *ethhdr, uint16_t portid)
{
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}

static inline void
ev_vector_attr_init(struct rte_event_vector *vec)
{
	vec->attr_valid = 1;
	vec->port = 0xFFFF;
	vec->queue = 0;
}

static inline void
ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
{
	if (vec->port == 0xFFFF) {
		vec->port = pkt->port;
		return;
	}
	if (vec->attr_valid && (vec->port != pkt->port))
		vec->attr_valid = 0;
}

static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
			 struct port_drv_mode_data *data,
			 uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];
		if (!sa)
			continue;

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id >= table size %d, %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only first inline session found for a given port */
		if (data[sa->portid].sess)
			continue;
		data[sa->portid].sess = pri_sess->security.ses;
		data[sa->portid].ctx = pri_sess->security.ctx;
	}
}
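/*
 * Match a single packet against the SPD ACL pointed to by @sp.
 * Returns 0 when the packet must be dropped (missing SP context or a
 * DISCARD result) and 1 otherwise; for PROTECT results *sa_idx carries
 * the SA index derived from the ACL userdata.
 */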
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}

static inline void
check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
	      struct traffic_type *ipsec)
{
	uint32_t i, j, res;
	struct rte_mbuf *m;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			ipsec->res[ipsec->num] = res - 1;
			ipsec->pkts[ipsec->num++] = m;
		}
	}
	ip->num = j;
}

static inline void
check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
		 struct traffic_type *ip)
{
	struct ipsec_sa *sa;
	uint32_t i, j, res;
	struct rte_mbuf *m;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			sa = *(struct ipsec_sa **)rte_security_dynfield(m);
			if (sa == NULL) {
				free_pkts(&m, 1);
				continue;
			}

			/* SPI on the packet should match with the one in SA */
			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
				free_pkts(&m, 1);
				continue;
			}

			ip->pkts[j++] = m;
		}
	}
	ip->num = j;
}

static inline void
ipv4_pkt_l3_len_set(struct rte_mbuf *pkt)
{
	struct rte_ipv4_hdr *ipv4;

	ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
	pkt->l3_len = ipv4->ihl * 4;
}

static inline int
ipv6_pkt_l3_len_set(struct rte_mbuf *pkt)
{
	struct rte_ipv6_hdr *ipv6;
	size_t l3_len, ext_len;
	uint32_t l3_type;
	int next_proto;
	uint8_t *p;

	ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
	l3_len = sizeof(struct rte_ipv6_hdr);
	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;

	if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
		l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
		p = rte_pktmbuf_mtod(pkt, uint8_t *);
		next_proto = ipv6->proto;
		while (next_proto != IPPROTO_ESP &&
				l3_len < pkt->data_len &&
				(next_proto = rte_ipv6_get_next_ext(p + l3_len,
						next_proto, &ext_len)) >= 0)
			l3_len += ext_len;

		/* Drop pkt when IPv6 header exceeds first seg size */
		if (unlikely(l3_len > pkt->data_len))
			return -EINVAL;
	}
	pkt->l3_len = l3_len;

	return 0;
}

static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

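/*
 * Look up the IPv6 destination address in the LPM6 routing table.
 * Returns the output port on a hit, RTE_MAX_ETHPORTS otherwise
 * (mirrors route4_pkt() above).
 */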
/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}

static inline void
crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
		struct rte_crypto_op *cop[], uint16_t num)
{
	struct rte_crypto_sym_op *sop;
	uint32_t i;

	const struct rte_crypto_op unproc_cop = {
		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
		.sess_type = RTE_CRYPTO_OP_SECURITY_SESSION,
	};

	for (i = 0; i != num; i++) {
		cop[i]->raw = unproc_cop.raw;
		sop = cop[i]->sym;
		sop->m_src = mb[i];
		sop->m_dst = NULL;
		__rte_security_attach_session(sop, ss->security.ses);
	}
}

static inline void
crypto_prepare_event(struct rte_mbuf *pkt, struct rte_ipsec_session *sess, struct rte_event *ev)
{
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cop;

	/* Get pkt private data */
	priv = get_priv(pkt);
	cop = &priv->cop;

	/* Reset crypto operation data */
	crypto_op_reset(sess, &pkt, &cop, 1);

	/* Update event_ptr with rte_crypto_op */
	ev->event = 0;
	ev->event_ptr = cop;
}

static inline void
free_pkts_from_events(struct rte_event events[], uint16_t count)
{
	struct rte_crypto_op *cop;
	int i;

	for (i = 0; i < count; i++) {
		cop = events[i].event_ptr;
		free_pkts(&cop->sym->m_src, 1);
	}
}

static inline int
event_crypto_enqueue(struct rte_mbuf *pkt,
		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
{
	struct rte_ipsec_session *sess;
	struct rte_event ev;
	int ret;

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	crypto_prepare_event(pkt, sess, &ev);

	/* Enqueue event to crypto adapter */
	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
			ev_link->event_port_id, &ev, 1);
	if (unlikely(ret != 1)) {
		/* pkt will be freed by the caller */
		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
		return rte_errno;
	}

	return 0;
}

static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		const struct eh_event_link_info *ev_link, struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;
	if (is_ip_reassembly_incomplete(pkt) > 0) {
		free_reassembly_fail_pkt(pkt);
		return PKT_DROPPED;
	}

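	/*
	 * Classify the packet and dispatch it: plain IPv4/IPv6 packets are
	 * matched against the inbound SPD below, while ESP packets are looked
	 * up in the SAD and posted to the crypto adapter for lookaside
	 * processing.
	 */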
	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_IPSEC_IPV4:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		ipv4_pkt_l3_len_set(pkt);
		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
		sa = ipsec_mask_saptr(sa);
		if (unlikely(sa == NULL)) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
			goto drop_pkt_and_exit;
		}

		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
			goto drop_pkt_and_exit;

		return PKT_POSTED;
	case PKT_TYPE_IPSEC_IPV6:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		if (unlikely(ipv6_pkt_l3_len_set(pkt) != 0))
			goto drop_pkt_and_exit;
		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
		sa = ipsec_mask_saptr(sa);
		if (unlikely(sa == NULL)) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
			goto drop_pkt_and_exit;
		}

		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
			goto drop_pkt_and_exit;

		return PKT_POSTED;
	default:
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
			   type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match with the one in SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *), port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	free_pkts(&pkt, 1);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		const struct eh_event_link_info *ev_link, struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct rte_ether_hdr *ethhdr;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;
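
	/*
	 * Outbound path: match the packet against the outbound SPD. BYPASS
	 * packets are routed unmodified; PROTECT packets are either tagged
	 * for inline crypto on the SA's egress port or handed to the crypto
	 * adapter when the SA uses lookaside protocol offload.
	 */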

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Determine protocol type */
	if (sess->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
		goto lookaside;

	rte_security_set_pkt_metadata(sess->security.ctx,
				      sess->security.ses, pkt, NULL);

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
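	/*
	 * The outer IP version after IPsec processing follows the SA
	 * (IS_IP4(sa->flags)), so the ether type is taken from the SA rather
	 * than from the original packet.
	 */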
	/* Update ether type */
	ethhdr->ether_type = (IS_IP4(sa->flags) ?
			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Provide L2 len for Outbound processing */
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	/* Update mac addresses */
	update_mac_addrs(ethhdr, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

lookaside:
	/* prepare pkt - advance start to L3 */
	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);

	if (likely(event_crypto_enqueue(pkt, sa, ev_link) == 0))
		return PKT_POSTED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	free_pkts(&pkt, 1);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

static inline int
ipsec_ev_route_ip_pkts(struct rte_event_vector *vec, struct route_table *rt,
		       struct ipsec_traffic *t)
{
	struct rte_ether_hdr *ethhdr;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	uint32_t i, j = 0;

	/* Route IPv4 packets */
	for (i = 0; i < t->ip4.num; i++) {
		pkt = t->ip4.pkts[i];
		port_id = route4_pkt(pkt, rt->rt4_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			update_mac_addrs(ethhdr, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	/* Route IPv6 packets */
	for (i = 0; i < t->ip6.num; i++) {
		pkt = t->ip6.pkts[i];
		port_id = route6_pkt(pkt, rt->rt6_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			update_mac_addrs(ethhdr, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	return j;
}

static inline int
ipsec_ev_inbound_route_pkts(struct rte_event_vector *vec,
			    struct route_table *rt,
			    struct ipsec_traffic *t,
			    const struct eh_event_link_info *ev_link)
{
	uint32_t ret, i, j, ev_len = 0;
	struct rte_event events[MAX_PKTS];
	struct rte_ipsec_session *sess;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	j = ipsec_ev_route_ip_pkts(vec, rt, t);

	/* Route ESP packets */
	for (i = 0; i < t->ipsec.num; i++) {
		pkt = t->ipsec.pkts[i];
		sa = ipsec_mask_saptr(t->ipsec.saptr[i]);
		if (unlikely(sa == NULL)) {
			free_pkts(&pkt, 1);
			continue;
		}
		sess = ipsec_get_primary_session(sa);
		crypto_prepare_event(pkt, sess, &events[ev_len]);
		ev_len++;
	}

	if (ev_len) {
		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
				ev_link->event_port_id, events, ev_len);
		if (ret < ev_len) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
				   ev_len, rte_errno);
			free_pkts_from_events(&events[ret], ev_len - ret);
			return -rte_errno;
		}
	}

	return j;
}

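/*
 * Route the plain IP packets of an outbound event vector and handle the
 * SPD PROTECT matches: lookaside SAs are batched into crypto adapter events,
 * while inline SAs get their security metadata, ether type and MAC addresses
 * set before being kept in the vector. Returns the number of mbufs left in
 * the vector for Tx.
 */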
static inline int
ipsec_ev_outbound_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
			     struct ipsec_traffic *t, struct sa_ctx *sa_ctx,
			     const struct eh_event_link_info *ev_link)
{
	uint32_t sa_idx, ret, i, j, ev_len = 0;
	struct rte_event events[MAX_PKTS];
	struct rte_ipsec_session *sess;
	struct rte_ether_hdr *ethhdr;
	uint16_t port_id = 0;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	j = ipsec_ev_route_ip_pkts(vec, rt, t);

	/* Handle IPsec packets.
	 * For lookaside IPsec packets, submit to cryptodev queue.
	 * For inline IPsec packets, route the packet.
	 */
	for (i = 0; i < t->ipsec.num; i++) {
		/* Validate sa_idx */
		sa_idx = t->ipsec.res[i];
		pkt = t->ipsec.pkts[i];
		if (unlikely(sa_idx >= sa_ctx->nb_sa)) {
			free_pkts(&pkt, 1);
			continue;
		}
		/* Else the packet has to be protected */
		sa = &(sa_ctx->sa[sa_idx]);
		/* Get IPsec session */
		sess = ipsec_get_primary_session(sa);
		switch (sess->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
			crypto_prepare_event(pkt, sess, &events[ev_len]);
			ev_len++;
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			rte_security_set_pkt_metadata(sess->security.ctx,
						sess->security.ses, pkt, NULL);
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
			port_id = sa->portid;

			/* Fetch outer ip type and update */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			ethhdr->ether_type = (IS_IP4(sa->flags) ?
					rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
					rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));
			update_mac_addrs(ethhdr, port_id);

			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
			break;
		default:
			RTE_LOG(ERR, IPSEC, "SA type not supported\n");
			free_pkts(&pkt, 1);
			break;
		}
	}

	if (ev_len) {
		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
				ev_link->event_port_id, events, ev_len);
		if (ret < ev_len) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
				   ev_len, rte_errno);
			free_pkts_from_events(&events[ret], ev_len - ret);
			return -rte_errno;
		}
	}

	return j;
}

static inline void
classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	enum pkt_type type;
	uint8_t *nlp;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		t->ip4.data[t->ip4.num] = nlp;
		t->ip4.pkts[(t->ip4.num)++] = pkt;
		break;
	case PKT_TYPE_PLAIN_IPV6:
		t->ip6.data[t->ip6.num] = nlp;
		t->ip6.pkts[(t->ip6.num)++] = pkt;
		break;
	case PKT_TYPE_IPSEC_IPV4:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		ipv4_pkt_l3_len_set(pkt);
		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		break;
	case PKT_TYPE_IPSEC_IPV6:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		if (ipv6_pkt_l3_len_set(pkt) != 0) {
			free_pkts(&pkt, 1);
			return;
		}
		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		break;
	default:
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
			   type);
		free_pkts(&pkt, 1);
		break;
	}
}

static inline int
process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
				struct rte_event_vector *vec,
				const struct eh_event_link_info *ev_link)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	int i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];
		if (is_ip_reassembly_incomplete(pkt) > 0) {
			free_reassembly_fail_pkt(pkt);
			continue;
		}

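		/*
		 * Drop packets for which the inline inbound IPsec operation
		 * was attempted by the hardware but failed.
		 */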
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				free_pkts(&pkt, 1);
				continue;
			}
		}

		classify_pkt(pkt, &t);
	}

	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);

	if (t.ipsec.num != 0)
		sad_lookup(&ctx->sa_ctx->sad, t.ipsec.pkts, t.ipsec.saptr, t.ipsec.num);

	return ipsec_ev_inbound_route_pkts(vec, rt, &t, ev_link);
}

static inline int
process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
				 struct rte_event_vector *vec,
				 const struct eh_event_link_info *ev_link)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	uint32_t i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];

		classify_pkt(pkt, &t);

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;
	}

	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);

	return ipsec_ev_outbound_route_pkts(vec, rt, &t, ctx->sa_ctx, ev_link);
}

static inline int
process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
					  struct port_drv_mode_data *data)
{
	struct rte_mbuf *pkt;
	int16_t port_id;
	uint32_t i;
	int j = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		pkt = vec->mbufs[i];
		port_id = pkt->port;

		if (unlikely(!data[port_id].sess)) {
			free_pkts(&pkt, 1);
			continue;
		}
		ipsec_event_pre_forward(pkt, port_id);
		/* Save security session */
		rte_security_set_pkt_metadata(data[port_id].ctx,
					      data[port_id].sess, pkt,
					      NULL);

		/* Mark the packet for Tx security offload */
		pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		vec->mbufs[j++] = pkt;
	}

	return j;
}

static void
ipsec_event_vector_free(struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;

	rte_pktmbuf_free_bulk(vec->mbufs + vec->elem_offset, vec->nb_elem);
	rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

static inline void
ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			struct eh_event_link_info *links,
			struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	int ret;

	pkt = vec->mbufs[0];

	ev_vector_attr_init(vec);
	core_stats_update_rx(vec->nb_elem);

	if (is_unprotected_port(pkt->port))
		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
						      &lconf->rt, vec, links);
	else
		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
						       &lconf->rt, vec, links);

	if (likely(ret > 0)) {
		core_stats_update_tx(vec->nb_elem);
		vec->nb_elem = ret;
		ret = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						       links[0].event_port_id,
						       ev, 1, 0);
		if (unlikely(ret == 0))
			ipsec_event_vector_free(ev);
	} else {
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	}
}

static inline void
ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
				 struct rte_event *ev,
				 struct port_drv_mode_data *data)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	uint16_t ret;

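	/*
	 * Driver mode assumes the whole vector comes from one port: take the
	 * vector attribute from the first mbuf and, for protected ports,
	 * attach that port's pre-configured inline session to every packet.
	 */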
	pkt = vec->mbufs[0];
	vec->attr_valid = 1;
	vec->port = pkt->port;

	if (!is_unprotected_port(pkt->port))
		vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
									 data);
	if (likely(vec->nb_elem > 0)) {
		ret = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						       links[0].event_port_id,
						       ev, 1, 0);
		if (unlikely(ret == 0))
			ipsec_event_vector_free(ev);
	} else
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

static inline int
ipsec_ev_cryptodev_process_one_pkt(
		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
		const struct rte_crypto_op *cop, struct rte_mbuf *pkt)
{
	struct rte_ether_hdr *ethhdr;
	uint16_t port_id;
	struct ip *ip;

	/* If operation was not successful, free the packet */
	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
		free_pkts(&pkt, 1);
		return -1;
	}

	ip = rte_pktmbuf_mtod(pkt, struct ip *);

	/* Prepend Ether layer */
	ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);

	/* Route pkt and update required fields */
	if (ip->ip_v == IPVERSION) {
		pkt->ol_flags |= lconf->outbound.ipv4_offloads;
		pkt->l3_len = sizeof(struct ip);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

		port_id = route4_pkt(pkt, lconf->rt.rt4_ctx);
	} else {
		pkt->ol_flags |= lconf->outbound.ipv6_offloads;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);

		port_id = route6_pkt(pkt, lconf->rt.rt6_ctx);
	}

	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
		free_pkts(&pkt, 1);
		return -1;
	}

	/* Update Ether with port's MAC addresses */
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));

	ipsec_event_pre_forward(pkt, port_id);

	return 0;
}

static inline void
ipsec_ev_cryptodev_vector_process(
		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
		const struct eh_event_link_info *links,
		struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	const uint16_t nb_events = 1;
	struct rte_crypto_op *cop;
	struct rte_mbuf *pkt;
	uint16_t enqueued;
	int i, n = 0;

	ev_vector_attr_init(vec);
	/* Transform cop vec into pkt vec */
	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt data */
		cop = vec->ptrs[i];
		pkt = cop->sym->m_src;
		if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
			continue;

		vec->mbufs[n++] = pkt;
		ev_vector_attr_update(vec, pkt);
	}

	if (n == 0) {
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
		return;
	}

	vec->nb_elem = n;
	enqueued = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
			links[0].event_port_id, ev, nb_events, 0);
	if (enqueued != nb_events) {
		RTE_LOG_DP(DEBUG, IPSEC, "Failed to enqueue to tx, ret = %u,"
			   " errno = %i\n", enqueued, rte_errno);
		free_pkts(vec->mbufs, vec->nb_elem);
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	} else {
		core_stats_update_tx(n);
	}
}

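/*
 * Handle a single completion event from the event crypto adapter:
 * post-process the packet and convert the event back into an mbuf event
 * that can be enqueued to the Tx adapter.
 */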
static inline int
ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			   struct rte_event *ev)
{
	struct rte_crypto_op *cop;
	struct rte_mbuf *pkt;

	/* Get pkt data */
	cop = ev->event_ptr;
	pkt = cop->sym->m_src;

	if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
		return PKT_DROPPED;

	/* Update event */
	ev->mbuf = pkt;

	return PKT_FORWARDED;
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

static void
ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
		       void *args __rte_unused)
{
	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
		ipsec_event_vector_free(&ev);
	else
		rte_pktmbuf_free(ev.mbuf);
}

/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

static void
ipsec_ip_reassembly_dyn_offset_get(void)
{
	/* Retrieve reassembly dynfield offset if available */
	if (ip_reassembly_dynfield_offset < 0)
		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);

	if (ip_reassembly_dynflag == 0) {
		int ip_reassembly_dynflag_offset;
		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
				RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
		if (ip_reassembly_dynflag_offset >= 0)
			ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
	}
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
				       uint8_t nb_links)
{
	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
	unsigned int nb_rx = 0, nb_tx;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	memset(data, 0, sizeof(data));

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use first session configured for a given port
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
				 RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* We have valid links */

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);
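
	/*
	 * Main loop: dequeue one event at a time. Vector events are handled
	 * in bulk; scalar ETHDEV events have the port's inline session
	 * attached (outbound) and are forwarded through the Tx adapter's
	 * internal port.
	 */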
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_drv_mode_process(links, &ev, data);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			break;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
				ev.event_type);
			continue;
		}

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {

			if (unlikely(!data[port_id].sess)) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			rte_security_set_pkt_metadata(data[port_id].ctx,
						      data[port_id].sess, pkt,
						      NULL);

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

			/* Provide L2 len for Outbound processing */
			pkt->l2_len = RTE_ETHER_HDR_LEN;
		}

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
							 links[0].event_port_id,
							 &ev,	/* events */
							 1,	/* nb_events */
							 0	/* flags */);
		if (!nb_tx)
			rte_pktmbuf_free(ev.mbuf);
	}

	if (ev.u64) {
		ev.op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(links[0].eventdev_id,
					links[0].event_port_id, &ev, 1);
	}

	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
			       ipsec_event_port_flush, NULL);
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
				       uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0, nb_tx;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* We have valid links */

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save routing table */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.lcore_id = lcore_id;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.lcore_id = lcore_id;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	ret = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
	if (ret != 0) {
		RTE_LOG(ERR, IPSEC,
			"SAD cache init on lcore %u, failed with code: %d\n",
			lcore_id, ret);
		return;
	}

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	ipsec_ip_reassembly_dyn_offset_get();

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_process(&lconf, links, &ev);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			core_stats_update_rx(1);
			if (is_unprotected_port(ev.mbuf->port))
				ret = process_ipsec_ev_inbound(&lconf.inbound,
						&lconf.rt, links, &ev);
			else
				ret = process_ipsec_ev_outbound(&lconf.outbound,
						&lconf.rt, links, &ev);
			if (ret != 1)
				/* The pkt has been dropped or posted */
				continue;
			break;
		case RTE_EVENT_TYPE_CRYPTODEV:
			ret = ipsec_ev_cryptodev_process(&lconf, &ev);
			if (unlikely(ret != PKT_FORWARDED))
				continue;
			break;
		case RTE_EVENT_TYPE_CRYPTODEV_VECTOR:
			ipsec_ev_cryptodev_vector_process(&lconf, links, &ev);
			continue;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
				ev.event_type);
			continue;
		}

		core_stats_update_tx(1);
		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
							 links[0].event_port_id,
							 &ev,	/* events */
							 1,	/* nb_events */
							 0	/* flags */);
		if (!nb_tx)
			rte_pktmbuf_free(ev.mbuf);
	}

	if (ev.u64) {
		ev.op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(links[0].eventdev_id,
					links[0].event_port_id, &ev, 1);
	}

	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
			       ipsec_event_port_flush, NULL);
}

static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}

static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate ipsec_wrkr params */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch correct worker after checking
	 * the event device's capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

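/*
 * Outbound SPD processing for inline-protocol SAs: classify the burst
 * against the SPD ACL, group consecutive packets that share an SA, run
 * rte_ipsec_pkt_process() on each group and append the results to the
 * 'match' or 'mismatch' traffic type depending on whether the outer IP
 * version agrees with @match_flag.
 */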
static __rte_always_inline void
outb_inl_pro_spd_process(struct sp_ctx *sp,
			 struct sa_ctx *sa_ctx,
			 struct traffic_type *ip,
			 struct traffic_type *match,
			 struct traffic_type *mismatch,
			 bool match_flag,
			 struct ipsec_spd_stats *stats)
{
	uint32_t prev_sa_idx = UINT32_MAX;
	struct rte_mbuf *ipsec[MAX_PKT_BURST];
	struct rte_ipsec_session *ips;
	uint32_t i, j, j_mis, sa_idx;
	struct ipsec_sa *sa = NULL;
	uint32_t ipsec_num = 0;
	struct rte_mbuf *m;
	uint64_t satp;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = match->num;
	j_mis = mismatch->num;

	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		sa_idx = ip->res[i] - 1;

		if (unlikely(ip->res[i] == DISCARD)) {
			free_pkts(&m, 1);

			stats->discard++;
		} else if (unlikely(ip->res[i] == BYPASS)) {
			match->pkts[j++] = m;

			stats->bypass++;
		} else {
			if (prev_sa_idx == UINT32_MAX) {
				prev_sa_idx = sa_idx;
				sa = &sa_ctx->sa[sa_idx];
				ips = ipsec_get_primary_session(sa);
				satp = rte_ipsec_sa_type(ips->sa);
			}

			if (sa_idx != prev_sa_idx) {
				prep_process_group(sa, ipsec, ipsec_num);

				/* Prepare packets for outbound */
				rte_ipsec_pkt_process(ips, ipsec, ipsec_num);

				/* Copy to current tr or a different tr */
				if (SATP_OUT_IPV4(satp) == match_flag) {
					memcpy(&match->pkts[j], ipsec,
					       ipsec_num * sizeof(void *));
					j += ipsec_num;
				} else {
					memcpy(&mismatch->pkts[j_mis], ipsec,
					       ipsec_num * sizeof(void *));
					j_mis += ipsec_num;
				}

				/* Update to new SA */
				sa = &sa_ctx->sa[sa_idx];
				ips = ipsec_get_primary_session(sa);
				satp = rte_ipsec_sa_type(ips->sa);
				ipsec_num = 0;
			}

			ipsec[ipsec_num++] = m;
			stats->protect++;
		}
	}

	if (ipsec_num) {
		prep_process_group(sa, ipsec, ipsec_num);

		/* Prepare packets for outbound */
		rte_ipsec_pkt_process(ips, ipsec, ipsec_num);

		/* Copy to current tr or a different tr */
		if (SATP_OUT_IPV4(satp) == match_flag) {
			memcpy(&match->pkts[j], ipsec,
			       ipsec_num * sizeof(void *));
			j += ipsec_num;
		} else {
			memcpy(&mismatch->pkts[j_mis], ipsec,
			       ipsec_num * sizeof(void *));
			j_mis += ipsec_num;
		}
	}
	match->num = j;
	mismatch->num = j_mis;
}

/* Poll mode worker when all SAs are of type inline protocol */
void
ipsec_poll_mode_wrkr_inl_pr(void)
{
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	struct ipsec_core_statistics *stats;
	struct rt_ctx *rt4_ctx, *rt6_ctx;
	struct sa_ctx *sa_in, *sa_out;
	struct traffic_type ip4, ip6;
	struct lcore_rx_queue *rxql;
	struct rte_mbuf **v4, **v6;
	struct ipsec_traffic trf;
	struct lcore_conf *qconf;
	uint16_t v4_num, v6_num;
	int32_t socket_id;
	uint32_t lcore_id;
	int32_t i, nb_rx;
	uint16_t portid;
	uint8_t queueid;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);
	stats = &core_statistics[lcore_id];

	rt4_ctx = socket_ctx[socket_id].rt_ip4;
	rt6_ctx = socket_ctx[socket_id].rt_ip6;

	sp4_in = socket_ctx[socket_id].sp_ip4_in;
	sp6_in = socket_ctx[socket_id].sp_ip6_in;
	sa_in = socket_ctx[socket_id].sa_in;

	sp4_out = socket_ctx[socket_id].sp_ip4_out;
	sp6_out = socket_ctx[socket_id].sp_ip6_out;
	sa_out = socket_ctx[socket_id].sa_out;

	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
			lcore_id);
		return;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	ipsec_ip_reassembly_dyn_offset_get();

	while (!force_quit) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			/* Read packets from RX queues */
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);

			if (nb_rx <= 0)
				continue;

			core_stats_update_rx(nb_rx);

			prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);

			/* Drop any IPsec traffic */
			free_pkts(trf.ipsec.pkts, trf.ipsec.num);

			if (is_unprotected_port(portid)) {
				inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
					      trf.ip4.num,
					      &stats->inbound.spd4);

				inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
					      trf.ip6.num,
					      &stats->inbound.spd6);

				v4 = trf.ip4.pkts;
				v4_num = trf.ip4.num;
				v6 = trf.ip6.pkts;
				v6_num = trf.ip6.num;
			} else {
				ip4.num = 0;
				ip6.num = 0;

				outb_inl_pro_spd_process(sp4_out, sa_out,
							 &trf.ip4, &ip4, &ip6,
							 true,
							 &stats->outbound.spd4);

				outb_inl_pro_spd_process(sp6_out, sa_out,
							 &trf.ip6, &ip6, &ip4,
							 false,
							 &stats->outbound.spd6);

				v4 = ip4.pkts;
				v4_num = ip4.num;
				v6 = ip6.pkts;
				v6_num = ip6.num;
			}

#if defined __ARM_NEON
			route4_pkts_neon(rt4_ctx, v4, v4_num, 0, false);
			route6_pkts_neon(rt6_ctx, v6, v6_num);
#else
			route4_pkts(rt4_ctx, v4, v4_num, 0, false);
			route6_pkts(rt6_ctx, v6, v6_num);
#endif
		}
	}
}

/* Poll mode worker when all SAs are of type inline protocol
 * and single SA mode is enabled.
 */
void
ipsec_poll_mode_wrkr_inl_pr_ss(void)
{
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	uint16_t sa_out_portid = 0, sa_out_proto = 0;
	struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	struct rte_ipsec_session *ips = NULL;
	struct lcore_rx_queue *rxql;
	struct ipsec_sa *sa = NULL;
	struct lcore_conf *qconf;
	struct sa_ctx *sa_out;
	uint32_t i, nb_rx, j;
	int32_t socket_id;
	uint32_t lcore_id;
	uint16_t portid;
	uint8_t queueid;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Get SA info */
	sa_out = socket_ctx[socket_id].sa_out;
	if (sa_out && single_sa_idx < sa_out->nb_sa) {
		sa = &sa_out->sa[single_sa_idx];
		ips = ipsec_get_primary_session(sa);
		sa_out_portid = sa->portid;
		if (sa->flags & IP6_TUNNEL)
			sa_out_proto = IPPROTO_IPV6;
		else
			sa_out_proto = IPPROTO_IP;
	}

	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
			lcore_id);
		return;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	while (!force_quit) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			/* Read packets from RX queues */
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);

			if (nb_rx <= 0)
				continue;

			core_stats_update_rx(nb_rx);

			if (is_unprotected_port(portid)) {
				/* Nothing much to do for inbound inline
				 * decrypted traffic.
				 */
				for (j = 0; j < nb_rx; j++) {
					uint32_t ptype, proto;

					pkt = pkts[j];
					ptype = pkt->packet_type &
						RTE_PTYPE_L3_MASK;
					if (ptype == RTE_PTYPE_L3_IPV4)
						proto = IPPROTO_IP;
					else
						proto = IPPROTO_IPV6;

					send_single_packet(pkt, portid, proto);
				}

				continue;
			}

			/* Free packets if there are no outbound sessions */
			if (unlikely(!ips)) {
				rte_pktmbuf_free_bulk(pkts, nb_rx);
				continue;
			}

			rte_ipsec_pkt_process(ips, pkts, nb_rx);

			/* Send pkts out */
			for (j = 0; j < nb_rx; j++) {
				pkt = pkts[j];

				pkt->l2_len = RTE_ETHER_HDR_LEN;
				send_single_packet(pkt, sa_out_portid,
						   sa_out_proto);
			}
		}
	}
}

static void
ipsec_poll_mode_wrkr_launch(void)
{
	static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
		[INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
		[INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
	};
	ipsec_worker_fn_t fn;

	if (!app_sa_prm.enable) {
		fn = ipsec_poll_mode_worker;
	} else {
		fn = poll_mode_wrkrs[wrkr_flags];

		/* Always default to all mode worker */
		if (!fn)
			fn = ipsec_poll_mode_worker;
	}

	/* Launch worker */
	(*fn)();
}

int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_wrkr_launch();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}