/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}

static inline void
update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
{
	struct rte_ether_hdr *ethhdr;

	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}

static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
		struct rte_security_session **sess_tbl, uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];
		if (!sa)
			continue;

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id %d >= session table size %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only the first inline session found for a given port */
		if (sess_tbl[sa->portid])
			continue;
		sess_tbl[sa->portid] = pri_sess->security.ses;
	}
}

static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}

static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}
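/*
 * Illustrative sketch, not called by any worker: how an IPv4 route that
 * route4_pkt() can later resolve might be installed via rte_lpm_add().
 * In the real application the LPM tables are built elsewhere from the
 * configuration file; the prefix, depth and next-hop below are made up.
 */
static int __rte_unused
example_add_route4(struct rte_lpm *lpm)
{
	/* Hypothetical entry: 192.168.1.0/24 -> ethdev port 1 */
	uint32_t ip = (192U << 24) | (168U << 16) | (1U << 8);

	return rte_lpm_add(lpm, ip, 24, 1);
}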
/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}

static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
					PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = pkt->userdata;
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
					PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = pkt->userdata;
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	default:
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match with the one in SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}
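/*
 * Process one outbound event (app mode). Plain IPv4/IPv6 packets received
 * on a protected port are matched against the outbound SP: the lookup
 * yields BYPASS (route and send as-is), DISCARD (drop), or an SA index
 * selecting the inline protocol session used for Tx security offload.
 * Returns PKT_FORWARDED on success, PKT_DROPPED otherwise.
 */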
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Allow only inline protocol for now */
	if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
		goto drop_pkt_and_exit;
	}

	if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
		pkt->userdata = sess->security.ses;

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */
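/*
 * Two such modes are implemented below. In driver mode the worker performs
 * no SP/SA/route lookups itself: each packet is forwarded on the port it
 * arrived on, and if that port is protected, the first configured outbound
 * inline session for it is attached and the packet is marked for Tx
 * security offload. In app mode the worker runs the full inbound/outbound
 * processing implemented above (SP check, SA validation, routing) for
 * every dequeued event.
 */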
/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct rte_security_session *sess_tbl[RTE_MAX_ETHPORTS] = { NULL };
	unsigned int nb_rx = 0;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use the first session configured for a given port.
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, sess_tbl,
			RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* We have valid links */

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {

			if (unlikely(!sess_tbl[port_id])) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			pkt->userdata = sess_tbl[port_id];

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
		}

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}
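/*
 * Note: rte_event_eth_tx_adapter_enqueue() returns the number of events
 * actually enqueued; the workers above and below do not check it. A
 * defensive variant (a sketch, assuming the same single-event usage)
 * could be:
 *
 *	if (rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
 *			links[0].event_port_id, &ev, 1, 0) != 1)
 *		rte_pktmbuf_free(ev.mbuf);
 */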
/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* We have valid links */

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save routing table */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
				ev.event_type);

			continue;
		}

		if (is_unprotected_port(ev.mbuf->port))
			ret = process_ipsec_ev_inbound(&lconf.inbound,
					&lconf.rt, &ev);
		else
			ret = process_ipsec_ev_outbound(&lconf.outbound,
					&lconf.rt, &ev);
		if (ret != PKT_FORWARDED)
			/* The pkt has been dropped */
			continue;

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}

static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate ipsec_wrkr params */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch the correct worker after checking
	 * the event device's capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_worker();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}
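/*
 * Usage sketch: ipsec_launch_one_lcore() is the per-lcore entry point and
 * is typically launched on every lcore from main() with the standard DPDK
 * pattern below (eh_conf being the application's struct eh_conf pointer):
 *
 *	rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER);
 *	RTE_LCORE_FOREACH_SLAVE(lcore_id)
 *		if (rte_eal_wait_lcore(lcore_id) < 0)
 *			return -1;
 */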