xref: /dpdk/examples/ipsec-secgw/ipsec_worker.c (revision 515cd4a488b6a0c6e40d20e6b10d8e89657dc23f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  * Copyright (C) 2020 Marvell International Ltd.
4  */
5 #include <rte_acl.h>
6 #include <rte_event_eth_tx_adapter.h>
7 #include <rte_lpm.h>
8 #include <rte_lpm6.h>
9 
10 #include "event_helper.h"
11 #include "ipsec.h"
12 #include "ipsec-secgw.h"
13 #include "ipsec_worker.h"
14 
15 #if defined(__ARM_NEON)
16 #include "ipsec_lpm_neon.h"
17 #endif
18 
19 struct port_drv_mode_data {
20 	struct rte_security_session *sess;
21 	struct rte_security_ctx *ctx;
22 };
23 
24 typedef void (*ipsec_worker_fn_t)(void);
25 
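/*
 * Classify the mbuf as plain/ESP IPv4 or IPv6 based on its packet_type and
 * point *nlp at the next-protocol field, which is used as the data pointer
 * for the SP (ACL) classification.
 */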
26 static inline enum pkt_type
27 process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
28 {
29 	struct rte_ether_hdr *eth;
30 	uint32_t ptype = pkt->packet_type;
31 
32 	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
33 	rte_prefetch0(eth);
34 
35 	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
36 		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
37 				offsetof(struct ip, ip_p));
38 		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
39 			return PKT_TYPE_IPSEC_IPV4;
40 		else
41 			return PKT_TYPE_PLAIN_IPV4;
42 	} else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
43 		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
44 				offsetof(struct ip6_hdr, ip6_nxt));
45 		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
46 			return PKT_TYPE_IPSEC_IPV6;
47 		else
48 			return PKT_TYPE_PLAIN_IPV6;
49 	}
50 
51 	/* Unknown/Unsupported type */
52 	return PKT_TYPE_INVALID;
53 }
54 
55 static inline void
56 update_mac_addrs(struct rte_ether_hdr *ethhdr, uint16_t portid)
57 {
58 	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
59 	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
60 }
61 
62 static inline void
63 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
64 {
65 	/* Save the destination port in the mbuf */
66 	m->port = port_id;
67 
68 	/* Save eth queue for Tx */
69 	rte_event_eth_tx_adapter_txq_set(m, 0);
70 }
71 
72 static inline void
73 ev_vector_attr_init(struct rte_event_vector *vec)
74 {
75 	vec->attr_valid = 1;
76 	vec->port = 0xFFFF;
77 	vec->queue = 0;
78 }
79 
80 static inline void
81 ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
82 {
83 	if (vec->port == 0xFFFF) {
84 		vec->port = pkt->port;
85 		return;
86 	}
87 	if (vec->attr_valid && (vec->port != pkt->port))
88 		vec->attr_valid = 0;
89 }
90 
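/*
 * Build the per-port table of outbound sessions used in driver mode.
 * Only inline protocol sessions are considered and only the first one
 * found for each port is recorded.
 */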
91 static inline void
92 prepare_out_sessions_tbl(struct sa_ctx *sa_out,
93 			 struct port_drv_mode_data *data,
94 			 uint16_t size)
95 {
96 	struct rte_ipsec_session *pri_sess;
97 	struct ipsec_sa *sa;
98 	uint32_t i;
99 
100 	if (!sa_out)
101 		return;
102 
103 	for (i = 0; i < sa_out->nb_sa; i++) {
104 
105 		sa = &sa_out->sa[i];
106 		if (!sa)
107 			continue;
108 
109 		pri_sess = ipsec_get_primary_session(sa);
110 		if (!pri_sess)
111 			continue;
112 
113 		if (pri_sess->type !=
114 			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
115 
116 			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
117 				pri_sess->type);
118 			continue;
119 		}
120 
121 		if (sa->portid >= size) {
122 			RTE_LOG(ERR, IPSEC,
123 				"Port id >= table size %d, %d\n",
124 				sa->portid, size);
125 			continue;
126 		}
127 
128 		/* Use only first inline session found for a given port */
129 		if (data[sa->portid].sess)
130 			continue;
131 		data[sa->portid].sess = pri_sess->security.ses;
132 		data[sa->portid].ctx = pri_sess->security.ctx;
133 	}
134 }
135 
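/*
 * SP (ACL) lookup for a single packet. Returns 0 when there is no SP
 * context or the verdict is DISCARD; otherwise returns 1 and *sa_idx
 * carries the verdict (SA index, or bypass).
 */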
136 static inline int
137 check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
138 {
139 	uint32_t res;
140 
141 	if (unlikely(sp == NULL))
142 		return 0;
143 
144 	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
145 			DEFAULT_MAX_CATEGORIES);
146 
147 	if (unlikely(res == DISCARD))
148 		return 0;
149 	else if (res == BYPASS) {
150 		*sa_idx = -1;
151 		return 1;
152 	}
153 
154 	*sa_idx = res - 1;
155 	return 1;
156 }
157 
158 static inline void
159 check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
160 	      struct traffic_type *ipsec)
161 {
162 	uint32_t i, j, res;
163 	struct rte_mbuf *m;
164 
165 	if (unlikely(sp == NULL || ip->num == 0))
166 		return;
167 
168 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
169 			 DEFAULT_MAX_CATEGORIES);
170 
171 	j = 0;
172 	for (i = 0; i < ip->num; i++) {
173 		m = ip->pkts[i];
174 		res = ip->res[i];
175 		if (unlikely(res == DISCARD))
176 			free_pkts(&m, 1);
177 		else if (res == BYPASS)
178 			ip->pkts[j++] = m;
179 		else {
180 			ipsec->res[ipsec->num] = res - 1;
181 			ipsec->pkts[ipsec->num++] = m;
182 		}
183 	}
184 	ip->num = j;
185 }
186 
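/*
 * SP (ACL) lookup for inline-decrypted inbound packets. Packets whose
 * verdict is an SA index must carry a security dynfield SA whose SPI
 * matches the SA selected by the SP; otherwise they are dropped.
 * DISCARD packets are freed, BYPASS packets are kept.
 */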
187 static inline void
188 check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
189 		 struct traffic_type *ip)
190 {
191 	struct ipsec_sa *sa;
192 	uint32_t i, j, res;
193 	struct rte_mbuf *m;
194 
195 	if (unlikely(sp == NULL || ip->num == 0))
196 		return;
197 
198 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
199 			 DEFAULT_MAX_CATEGORIES);
200 
201 	j = 0;
202 	for (i = 0; i < ip->num; i++) {
203 		m = ip->pkts[i];
204 		res = ip->res[i];
205 		if (unlikely(res == DISCARD))
206 			free_pkts(&m, 1);
207 		else if (res == BYPASS)
208 			ip->pkts[j++] = m;
209 		else {
210 			sa = *(struct ipsec_sa **)rte_security_dynfield(m);
211 			if (sa == NULL) {
212 				free_pkts(&m, 1);
213 				continue;
214 			}
215 
216 			/* SPI on the packet should match the one in the SA */
217 			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
218 				free_pkts(&m, 1);
219 				continue;
220 			}
221 
222 			ip->pkts[j++] = m;
223 		}
224 	}
225 	ip->num = j;
226 }
227 
228 static inline uint16_t
229 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
230 {
231 	uint32_t dst_ip;
232 	uint16_t offset;
233 	uint32_t hop;
234 	int ret;
235 
236 	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
237 	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
238 	dst_ip = rte_be_to_cpu_32(dst_ip);
239 
240 	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
241 
242 	if (ret == 0) {
243 		/* We have a hit */
244 		return hop;
245 	}
246 
247 	/* else */
248 	return RTE_MAX_ETHPORTS;
249 }
250 
251 /* TODO: To be tested */
252 static inline uint16_t
253 route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
254 {
255 	uint8_t dst_ip[16];
256 	uint8_t *ip6_dst;
257 	uint16_t offset;
258 	uint32_t hop;
259 	int ret;
260 
261 	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
262 	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
263 	memcpy(&dst_ip[0], ip6_dst, 16);
264 
265 	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
266 
267 	if (ret == 0) {
268 		/* We have a hit */
269 		return hop;
270 	}
271 
272 	/* else */
273 	return RTE_MAX_ETHPORTS;
274 }
275 
276 static inline uint16_t
277 get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
278 {
279 	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
280 		return route4_pkt(pkt, rt->rt4_ctx);
281 	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
282 		return route6_pkt(pkt, rt->rt6_ctx);
283 
284 	return RTE_MAX_ETHPORTS;
285 }
286 
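/*
 * App mode handler for a single inbound event: classify the packet, run
 * the inbound SP lookup, verify that protected traffic was inline-decrypted
 * against the expected SA (SPI check), then route it and set the destination
 * port in the event. Returns PKT_FORWARDED or PKT_DROPPED.
 */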
287 static inline int
288 process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
289 		struct rte_event *ev)
290 {
291 	struct ipsec_sa *sa = NULL;
292 	struct rte_mbuf *pkt;
293 	uint16_t port_id = 0;
294 	enum pkt_type type;
295 	uint32_t sa_idx;
296 	uint8_t *nlp;
297 
298 	/* Get pkt from event */
299 	pkt = ev->mbuf;
300 
301 	/* Check the packet type */
302 	type = process_ipsec_get_pkt_type(pkt, &nlp);
303 
304 	switch (type) {
305 	case PKT_TYPE_PLAIN_IPV4:
306 		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
307 			if (unlikely(pkt->ol_flags &
308 				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
309 				RTE_LOG(ERR, IPSEC,
310 					"Inbound security offload failed\n");
311 				goto drop_pkt_and_exit;
312 			}
313 			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
314 		}
315 
316 		/* Check if we have a match */
317 		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
318 			/* No valid match */
319 			goto drop_pkt_and_exit;
320 		}
321 		break;
322 
323 	case PKT_TYPE_PLAIN_IPV6:
324 		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
325 			if (unlikely(pkt->ol_flags &
326 				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
327 				RTE_LOG(ERR, IPSEC,
328 					"Inbound security offload failed\n");
329 				goto drop_pkt_and_exit;
330 			}
331 			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
332 		}
333 
334 		/* Check if we have a match */
335 		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
336 			/* No valid match */
337 			goto drop_pkt_and_exit;
338 		}
339 		break;
340 
341 	default:
342 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
343 			   type);
344 		goto drop_pkt_and_exit;
345 	}
346 
347 	/* Check if the packet has to be bypassed */
348 	if (sa_idx == BYPASS)
349 		goto route_and_send_pkt;
350 
351 	/* Validate sa_idx */
352 	if (sa_idx >= ctx->sa_ctx->nb_sa)
353 		goto drop_pkt_and_exit;
354 
355 	/* Else the packet has to be protected with SA */
356 
357 	/* If the packet was IPsec processed, then SA pointer should be set */
358 	if (sa == NULL)
359 		goto drop_pkt_and_exit;
360 
361 	/* SPI on the packet should match the one in the SA */
362 	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
363 		goto drop_pkt_and_exit;
364 
365 route_and_send_pkt:
366 	port_id = get_route(pkt, rt, type);
367 	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
368 		/* no match */
369 		goto drop_pkt_and_exit;
370 	}
371 	/* else, we have a matching route */
372 
373 	/* Update mac addresses */
374 	update_mac_addrs(rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *), port_id);
375 
376 	/* Update the event with the dest port */
377 	ipsec_event_pre_forward(pkt, port_id);
378 	return PKT_FORWARDED;
379 
380 drop_pkt_and_exit:
381 	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
382 	rte_pktmbuf_free(pkt);
383 	ev->mbuf = NULL;
384 	return PKT_DROPPED;
385 }
386 
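/*
 * App mode handler for a single outbound event: run the outbound SP lookup,
 * attach the inline protocol session and Tx security offload flag for
 * protected traffic (bypass traffic is only routed), update the Ethernet
 * header and destination port. Returns PKT_FORWARDED or PKT_DROPPED.
 */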
387 static inline int
388 process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
389 		struct rte_event *ev)
390 {
391 	struct rte_ipsec_session *sess;
392 	struct rte_ether_hdr *ethhdr;
393 	struct sa_ctx *sa_ctx;
394 	struct rte_mbuf *pkt;
395 	uint16_t port_id = 0;
396 	struct ipsec_sa *sa;
397 	enum pkt_type type;
398 	uint32_t sa_idx;
399 	uint8_t *nlp;
400 
401 	/* Get pkt from event */
402 	pkt = ev->mbuf;
403 
404 	/* Check the packet type */
405 	type = process_ipsec_get_pkt_type(pkt, &nlp);
406 
407 	switch (type) {
408 	case PKT_TYPE_PLAIN_IPV4:
409 		/* Check if we have a match */
410 		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
411 			/* No valid match */
412 			goto drop_pkt_and_exit;
413 		}
414 		break;
415 	case PKT_TYPE_PLAIN_IPV6:
416 		/* Check if we have a match */
417 		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
418 			/* No valid match */
419 			goto drop_pkt_and_exit;
420 		}
421 		break;
422 	default:
423 		/*
424 		 * Only plain IPv4 & IPv6 packets are allowed
425 		 * on protected port. Drop the rest.
426 		 */
427 		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
428 		goto drop_pkt_and_exit;
429 	}
430 
431 	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
432 	/* Check if the packet has to be bypassed */
433 	if (sa_idx == BYPASS) {
434 		port_id = get_route(pkt, rt, type);
435 		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
436 			/* no match */
437 			goto drop_pkt_and_exit;
438 		}
439 		/* else, we have a matching route */
440 		goto send_pkt;
441 	}
442 
443 	/* Validate sa_idx */
444 	if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
445 		goto drop_pkt_and_exit;
446 
447 	/* Else the packet has to be protected */
448 
449 	/* Get SA ctx */
450 	sa_ctx = ctx->sa_ctx;
451 
452 	/* Get SA */
453 	sa = &(sa_ctx->sa[sa_idx]);
454 
455 	/* Get IPsec session */
456 	sess = ipsec_get_primary_session(sa);
457 
458 	/* Allow only inline protocol for now */
459 	if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
460 		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
461 		goto drop_pkt_and_exit;
462 	}
463 
464 	rte_security_set_pkt_metadata(sess->security.ctx,
465 				      sess->security.ses, pkt, NULL);
466 
467 	/* Mark the packet for Tx security offload */
468 	pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
469 	/* Update ether type */
470 	ethhdr->ether_type = (IS_IP4(sa->flags) ? rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
471 			      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));
472 
473 	/* Get the port to which this pkt needs to be submitted */
474 	port_id = sa->portid;
475 
476 send_pkt:
477 	/* Provide L2 len for Outbound processing */
478 	pkt->l2_len = RTE_ETHER_HDR_LEN;
479 
480 	/* Update mac addresses */
481 	update_mac_addrs(ethhdr, port_id);
482 
483 	/* Update the event with the dest port */
484 	ipsec_event_pre_forward(pkt, port_id);
485 	return PKT_FORWARDED;
486 
487 drop_pkt_and_exit:
488 	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
489 	rte_pktmbuf_free(pkt);
490 	ev->mbuf = NULL;
491 	return PKT_DROPPED;
492 }
493 
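/*
 * Route the classified traffic: plain IPv4/IPv6 packets go through the LPM
 * lookup, outbound protected packets additionally get their inline protocol
 * session metadata attached. Surviving mbufs are compacted back into the
 * event vector and their count is returned.
 */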
494 static inline int
495 ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
496 		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
497 {
498 	struct rte_ipsec_session *sess;
499 	struct rte_ether_hdr *ethhdr;
500 	uint32_t sa_idx, i, j = 0;
501 	uint16_t port_id = 0;
502 	struct rte_mbuf *pkt;
503 	struct ipsec_sa *sa;
504 
505 	/* Route IPv4 packets */
506 	for (i = 0; i < t->ip4.num; i++) {
507 		pkt = t->ip4.pkts[i];
508 		port_id = route4_pkt(pkt, rt->rt4_ctx);
509 		if (port_id != RTE_MAX_ETHPORTS) {
510 			/* Update mac addresses */
511 			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
512 			update_mac_addrs(ethhdr, port_id);
513 			/* Update the event with the dest port */
514 			ipsec_event_pre_forward(pkt, port_id);
515 			ev_vector_attr_update(vec, pkt);
516 			vec->mbufs[j++] = pkt;
517 		} else
518 			free_pkts(&pkt, 1);
519 	}
520 
521 	/* Route IPv6 packets */
522 	for (i = 0; i < t->ip6.num; i++) {
523 		pkt = t->ip6.pkts[i];
524 		port_id = route6_pkt(pkt, rt->rt6_ctx);
525 		if (port_id != RTE_MAX_ETHPORTS) {
526 			/* Update mac addresses */
527 			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
528 			update_mac_addrs(ethhdr, port_id);
529 			/* Update the event with the dest port */
530 			ipsec_event_pre_forward(pkt, port_id);
531 			ev_vector_attr_update(vec, pkt);
532 			vec->mbufs[j++] = pkt;
533 		} else
534 			free_pkts(&pkt, 1);
535 	}
536 
537 	/* Route ESP packets */
538 	for (i = 0; i < t->ipsec.num; i++) {
539 		/* Validate sa_idx */
540 		sa_idx = t->ipsec.res[i];
541 		pkt = t->ipsec.pkts[i];
542 		if (unlikely(sa_idx >= sa_ctx->nb_sa))
543 			free_pkts(&pkt, 1);
544 		else {
545 			/* Else the packet has to be protected */
546 			sa = &(sa_ctx->sa[sa_idx]);
547 			/* Get IPsec session */
548 			sess = ipsec_get_primary_session(sa);
549 			/* Allow only inline protocol for now */
550 			if (unlikely(sess->type !=
551 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
552 				RTE_LOG(ERR, IPSEC, "SA type not supported\n");
553 				free_pkts(&pkt, 1);
554 				continue;
555 			}
556 			rte_security_set_pkt_metadata(sess->security.ctx,
557 						sess->security.ses, pkt, NULL);
558 
559 			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
560 			port_id = sa->portid;
561 
562 			/* Fetch outer ip type and update */
563 			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
564 			ethhdr->ether_type = (IS_IP4(sa->flags) ?
565 					      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
566 					      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));
567 			update_mac_addrs(ethhdr, port_id);
568 
569 			ipsec_event_pre_forward(pkt, port_id);
570 			ev_vector_attr_update(vec, pkt);
571 			vec->mbufs[j++] = pkt;
572 		}
573 	}
574 
575 	return j;
576 }
577 
578 static inline void
579 classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
580 {
581 	enum pkt_type type;
582 	uint8_t *nlp;
583 
584 	/* Check the packet type */
585 	type = process_ipsec_get_pkt_type(pkt, &nlp);
586 
587 	switch (type) {
588 	case PKT_TYPE_PLAIN_IPV4:
589 		t->ip4.data[t->ip4.num] = nlp;
590 		t->ip4.pkts[(t->ip4.num)++] = pkt;
591 		break;
592 	case PKT_TYPE_PLAIN_IPV6:
593 		t->ip6.data[t->ip6.num] = nlp;
594 		t->ip6.pkts[(t->ip6.num)++] = pkt;
595 		break;
596 	default:
597 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
598 			   type);
599 		free_pkts(&pkt, 1);
600 		break;
601 	}
602 }
603 
604 static inline int
605 process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
606 				struct rte_event_vector *vec)
607 {
608 	struct ipsec_traffic t;
609 	struct rte_mbuf *pkt;
610 	uint32_t i;
611 
612 	t.ip4.num = 0;
613 	t.ip6.num = 0;
614 	t.ipsec.num = 0;
615 
616 	for (i = 0; i < vec->nb_elem; i++) {
617 		/* Get pkt from event */
618 		pkt = vec->mbufs[i];
619 
620 		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
621 			if (unlikely(pkt->ol_flags &
622 				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
623 				RTE_LOG(ERR, IPSEC,
624 					"Inbound security offload failed\n");
625 				free_pkts(&pkt, 1);
626 				continue;
627 			}
628 		}
629 
630 		classify_pkt(pkt, &t);
631 	}
632 
633 	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
634 	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
635 
636 	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
637 }
638 
639 static inline int
640 process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
641 				 struct rte_event_vector *vec)
642 {
643 	struct ipsec_traffic t;
644 	struct rte_mbuf *pkt;
645 	uint32_t i;
646 
647 	t.ip4.num = 0;
648 	t.ip6.num = 0;
649 	t.ipsec.num = 0;
650 
651 	for (i = 0; i < vec->nb_elem; i++) {
652 		/* Get pkt from event */
653 		pkt = vec->mbufs[i];
654 
655 		classify_pkt(pkt, &t);
656 
657 		/* Provide L2 len for Outbound processing */
658 		pkt->l2_len = RTE_ETHER_HDR_LEN;
659 	}
660 
661 	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
662 	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
663 
664 	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
665 }
666 
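/*
 * Driver mode outbound vector handler: attach the per-port inline session,
 * mark each packet for Tx security offload and set its L2 length. Packets
 * on ports with no session are dropped. Returns the compacted packet count.
 */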
667 static inline int
668 process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
669 					  struct port_drv_mode_data *data)
670 {
671 	struct rte_mbuf *pkt;
672 	int16_t port_id;
673 	uint32_t i;
674 	int j = 0;
675 
676 	for (i = 0; i < vec->nb_elem; i++) {
677 		pkt = vec->mbufs[i];
678 		port_id = pkt->port;
679 
680 		if (unlikely(!data[port_id].sess)) {
681 			free_pkts(&pkt, 1);
682 			continue;
683 		}
684 		ipsec_event_pre_forward(pkt, port_id);
685 		/* Save security session */
686 		rte_security_set_pkt_metadata(data[port_id].ctx,
687 					      data[port_id].sess, pkt,
688 					      NULL);
689 
690 		/* Mark the packet for Tx security offload */
691 		pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
692 
693 		/* Provide L2 len for Outbound processing */
694 		pkt->l2_len = RTE_ETHER_HDR_LEN;
695 
696 		vec->mbufs[j++] = pkt;
697 	}
698 
699 	return j;
700 }
701 
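/*
 * App mode vector handler: process the vector as inbound or outbound based
 * on the originating port, then enqueue it to the Tx adapter or return it
 * to its mempool when no packets are left.
 */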
702 static inline void
703 ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
704 			struct eh_event_link_info *links,
705 			struct rte_event *ev)
706 {
707 	struct rte_event_vector *vec = ev->vec;
708 	struct rte_mbuf *pkt;
709 	int ret;
710 
711 	pkt = vec->mbufs[0];
712 
713 	ev_vector_attr_init(vec);
714 	if (is_unprotected_port(pkt->port))
715 		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
716 						      &lconf->rt, vec);
717 	else
718 		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
719 						       &lconf->rt, vec);
720 
721 	if (likely(ret > 0)) {
722 		vec->nb_elem = ret;
723 		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
724 						 links[0].event_port_id,
725 						 ev, 1, 0);
726 	} else {
727 		rte_mempool_put(rte_mempool_from_obj(vec), vec);
728 	}
729 }
730 
731 static inline void
732 ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
733 				 struct rte_event *ev,
734 				 struct port_drv_mode_data *data)
735 {
736 	struct rte_event_vector *vec = ev->vec;
737 	struct rte_mbuf *pkt;
738 
739 	pkt = vec->mbufs[0];
740 
741 	if (!is_unprotected_port(pkt->port))
742 		vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
743 									 data);
744 	if (vec->nb_elem > 0)
745 		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
746 						 links[0].event_port_id,
747 						 ev, 1, 0);
748 	else
749 		rte_mempool_put(rte_mempool_from_obj(vec), vec);
750 }
751 
752 /*
753  * Event mode exposes various operating modes depending on the
754  * capabilities of the event device and the operating mode
755  * selected.
756  */
757 
758 static void
759 ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
760 		       void *args __rte_unused)
761 {
762 	rte_pktmbuf_free(ev.mbuf);
763 }
764 
765 /* Workers registered */
766 #define IPSEC_EVENTMODE_WORKERS		2
767 
768 /*
769  * Event mode worker
770  * Operating parameters : non-burst - Tx internal port - driver mode
771  */
772 static void
773 ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
774 		uint8_t nb_links)
775 {
776 	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
777 	unsigned int nb_rx = 0, nb_tx;
778 	struct rte_mbuf *pkt;
779 	struct rte_event ev;
780 	uint32_t lcore_id;
781 	int32_t socket_id;
782 	int16_t port_id;
783 
784 	/* Check if we have links registered for this lcore */
785 	if (nb_links == 0) {
786 		/* No links registered - exit */
787 		return;
788 	}
789 
790 	memset(data, 0, sizeof(data)); /* zero the whole per-port table */
791 
792 	/* Get core ID */
793 	lcore_id = rte_lcore_id();
794 
795 	/* Get socket ID */
796 	socket_id = rte_lcore_to_socket_id(lcore_id);
797 
798 	/*
799 	 * Prepare the security sessions table. In outbound driver mode
800 	 * we always use the first session configured for a given port.
801 	 */
802 	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
803 				 RTE_MAX_ETHPORTS);
804 
805 	RTE_LOG(INFO, IPSEC,
806 		"Launching event mode worker (non-burst - Tx internal port - "
807 		"driver mode) on lcore %d\n", lcore_id);
808 
809 	/* We have valid links */
810 
811 	/* Check if it's single link */
812 	if (nb_links != 1) {
813 		RTE_LOG(INFO, IPSEC,
814 			"Multiple links not supported. Using first link\n");
815 	}
816 
817 	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
818 			links[0].event_port_id);
819 	while (!force_quit) {
820 		/* Read packet from event queues */
821 		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
822 				links[0].event_port_id,
823 				&ev,	/* events */
824 				1,	/* nb_events */
825 				0	/* timeout_ticks */);
826 
827 		if (nb_rx == 0)
828 			continue;
829 
830 		switch (ev.event_type) {
831 		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
832 		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
833 			ipsec_ev_vector_drv_mode_process(links, &ev, data);
834 			continue;
835 		case RTE_EVENT_TYPE_ETHDEV:
836 			break;
837 		default:
838 			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
839 				ev.event_type);
840 			continue;
841 		}
842 
843 		pkt = ev.mbuf;
844 		port_id = pkt->port;
845 
846 		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
847 
848 		/* Process packet */
849 		ipsec_event_pre_forward(pkt, port_id);
850 
851 		if (!is_unprotected_port(port_id)) {
852 
853 			if (unlikely(!data[port_id].sess)) {
854 				rte_pktmbuf_free(pkt);
855 				continue;
856 			}
857 
858 			/* Save security session */
859 			rte_security_set_pkt_metadata(data[port_id].ctx,
860 						      data[port_id].sess, pkt,
861 						      NULL);
862 
863 			/* Mark the packet for Tx security offload */
864 			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
865 
866 			/* Provide L2 len for Outbound processing */
867 			pkt->l2_len = RTE_ETHER_HDR_LEN;
868 		}
869 
870 		/*
871 		 * Since the Tx internal port is available, events can be
872 		 * enqueued directly to the adapter, which submits them
873 		 * internally to the eth device.
874 		 */
875 		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
876 							 links[0].event_port_id,
877 							 &ev, /* events */
878 							 1,   /* nb_events */
879 							 0 /* flags */);
880 		if (!nb_tx)
881 			rte_pktmbuf_free(ev.mbuf);
882 	}
883 
884 	if (ev.u64) {
885 		ev.op = RTE_EVENT_OP_RELEASE;
886 		rte_event_enqueue_burst(links[0].eventdev_id,
887 					links[0].event_port_id, &ev, 1);
888 	}
889 
890 	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
891 			       ipsec_event_port_flush, NULL);
892 }
893 
894 /*
895  * Event mode worker
896  * Operating parameters : non-burst - Tx internal port - app mode
897  */
898 static void
899 ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
900 		uint8_t nb_links)
901 {
902 	struct lcore_conf_ev_tx_int_port_wrkr lconf;
903 	unsigned int nb_rx = 0, nb_tx;
904 	struct rte_event ev;
905 	uint32_t lcore_id;
906 	int32_t socket_id;
907 	int ret;
908 
909 	/* Check if we have links registered for this lcore */
910 	if (nb_links == 0) {
911 		/* No links registered - exit */
912 		return;
913 	}
914 
915 	/* We have valid links */
916 
917 	/* Get core ID */
918 	lcore_id = rte_lcore_id();
919 
920 	/* Get socket ID */
921 	socket_id = rte_lcore_to_socket_id(lcore_id);
922 
923 	/* Save routing table */
924 	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
925 	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
926 	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
927 	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
928 	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
929 	lconf.inbound.lcore_id = lcore_id;
930 	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
931 	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
932 	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
933 	lconf.outbound.lcore_id = lcore_id;
934 
935 	RTE_LOG(INFO, IPSEC,
936 		"Launching event mode worker (non-burst - Tx internal port - "
937 		"app mode) on lcore %d\n", lcore_id);
938 
939 	/* Check if it's single link */
940 	if (nb_links != 1) {
941 		RTE_LOG(INFO, IPSEC,
942 			"Multiple links not supported. Using first link\n");
943 	}
944 
945 	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
946 		links[0].event_port_id);
947 
948 	while (!force_quit) {
949 		/* Read packet from event queues */
950 		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
951 				links[0].event_port_id,
952 				&ev,     /* events */
953 				1,       /* nb_events */
954 				0        /* timeout_ticks */);
955 
956 		if (nb_rx == 0)
957 			continue;
958 
959 		switch (ev.event_type) {
960 		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
961 		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
962 			ipsec_ev_vector_process(&lconf, links, &ev);
963 			continue;
964 		case RTE_EVENT_TYPE_ETHDEV:
965 			break;
966 		default:
967 			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
968 				ev.event_type);
969 			continue;
970 		}
971 
972 		if (is_unprotected_port(ev.mbuf->port))
973 			ret = process_ipsec_ev_inbound(&lconf.inbound,
974 							&lconf.rt, &ev);
975 		else
976 			ret = process_ipsec_ev_outbound(&lconf.outbound,
977 							&lconf.rt, &ev);
978 		if (ret != 1)
979 			/* The pkt has been dropped */
980 			continue;
981 
982 		/*
983 		 * Since the Tx internal port is available, events can be
984 		 * enqueued directly to the adapter, which submits them
985 		 * internally to the eth device.
986 		 */
987 		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
988 							 links[0].event_port_id,
989 							 &ev, /* events */
990 							 1,   /* nb_events */
991 							 0 /* flags */);
992 		if (!nb_tx)
993 			rte_pktmbuf_free(ev.mbuf);
994 	}
995 
996 	if (ev.u64) {
997 		ev.op = RTE_EVENT_OP_RELEASE;
998 		rte_event_enqueue_burst(links[0].eventdev_id,
999 					links[0].event_port_id, &ev, 1);
1000 	}
1001 
1002 	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
1003 			       ipsec_event_port_flush, NULL);
1004 }
1005 
1006 static uint8_t
1007 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
1008 {
1009 	struct eh_app_worker_params *wrkr;
1010 	uint8_t nb_wrkr_param = 0;
1011 
1012 	/* Save workers */
1013 	wrkr = wrkrs;
1014 
1015 	/* Non-burst - Tx internal port - driver mode */
1016 	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1017 	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1018 	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1019 	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
1020 	wrkr++;
1021 	nb_wrkr_param++;
1022 
1023 	/* Non-burst - Tx internal port - app mode */
1024 	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1025 	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1026 	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
1027 	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
1028 	nb_wrkr_param++;
1029 
1030 	return nb_wrkr_param;
1031 }
1032 
1033 static void
1034 ipsec_eventmode_worker(struct eh_conf *conf)
1035 {
1036 	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
1037 					{{{0} }, NULL } };
1038 	uint8_t nb_wrkr_param;
1039 
1040 	/* Populate ipsec_wrkr params */
1041 	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
1042 
1043 	/*
1044 	 * Launch correct worker after checking
1045 	 * the event device's capabilities.
1046 	 */
1047 	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
1048 }
1049 
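/*
 * Run the outbound SPD (ACL) over a burst and process protected packets with
 * their inline protocol sessions, grouping consecutive packets that share an
 * SA. Packets are split between 'match' and 'mismatch' based on the outer IP
 * version of the SA; match_flag tells whether 'match' is the IPv4 list.
 */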
1050 static __rte_always_inline void
1051 outb_inl_pro_spd_process(struct sp_ctx *sp,
1052 			 struct sa_ctx *sa_ctx,
1053 			 struct traffic_type *ip,
1054 			 struct traffic_type *match,
1055 			 struct traffic_type *mismatch,
1056 			 bool match_flag,
1057 			 struct ipsec_spd_stats *stats)
1058 {
1059 	uint32_t prev_sa_idx = UINT32_MAX;
1060 	struct rte_mbuf *ipsec[MAX_PKT_BURST];
1061 	struct rte_ipsec_session *ips;
1062 	uint32_t i, j, j_mis, sa_idx;
1063 	struct ipsec_sa *sa = NULL;
1064 	uint32_t ipsec_num = 0;
1065 	struct rte_mbuf *m;
1066 	uint64_t satp;
1067 
1068 	if (ip->num == 0 || sp == NULL)
1069 		return;
1070 
1071 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
1072 			ip->num, DEFAULT_MAX_CATEGORIES);
1073 
1074 	j = match->num;
1075 	j_mis = mismatch->num;
1076 
1077 	for (i = 0; i < ip->num; i++) {
1078 		m = ip->pkts[i];
1079 		sa_idx = ip->res[i] - 1;
1080 
1081 		if (unlikely(ip->res[i] == DISCARD)) {
1082 			free_pkts(&m, 1);
1083 
1084 			stats->discard++;
1085 		} else if (unlikely(ip->res[i] == BYPASS)) {
1086 			match->pkts[j++] = m;
1087 
1088 			stats->bypass++;
1089 		} else {
1090 			if (prev_sa_idx == UINT32_MAX) {
1091 				prev_sa_idx = sa_idx;
1092 				sa = &sa_ctx->sa[sa_idx];
1093 				ips = ipsec_get_primary_session(sa);
1094 				satp = rte_ipsec_sa_type(ips->sa);
1095 			}
1096 
1097 			if (sa_idx != prev_sa_idx) {
1098 				prep_process_group(sa, ipsec, ipsec_num);
1099 
1100 				/* Prepare packets for outbound */
1101 				rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
1102 
1103 				/* Copy to current tr or a different tr */
1104 				if (SATP_OUT_IPV4(satp) == match_flag) {
1105 					memcpy(&match->pkts[j], ipsec,
1106 					       ipsec_num * sizeof(void *));
1107 					j += ipsec_num;
1108 				} else {
1109 					memcpy(&mismatch->pkts[j_mis], ipsec,
1110 					       ipsec_num * sizeof(void *));
1111 					j_mis += ipsec_num;
1112 				}
1113 
1114 				/* Update to new SA */
1115 				sa = &sa_ctx->sa[sa_idx];
1116 				ips = ipsec_get_primary_session(sa);
1117 				satp = rte_ipsec_sa_type(ips->sa);
1118 				ipsec_num = 0;
1119 			}
1120 
1121 			ipsec[ipsec_num++] = m;
1122 			stats->protect++;
1123 		}
1124 	}
1125 
1126 	if (ipsec_num) {
1127 		prep_process_group(sa, ipsec, ipsec_num);
1128 
1129 		/* Prepare packets for outbound */
1130 		rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
1131 
1132 		/* Copy to current tr or a different tr */
1133 		if (SATP_OUT_IPV4(satp) == match_flag) {
1134 			memcpy(&match->pkts[j], ipsec,
1135 			       ipsec_num * sizeof(void *));
1136 			j += ipsec_num;
1137 		} else {
1138 			memcpy(&mismatch->pkts[j_mis], ipsec,
1139 			       ipsec_num * sizeof(void *));
1140 			j_mis += ipsec_num;
1141 		}
1142 	}
1143 	match->num = j;
1144 	mismatch->num = j_mis;
1145 }
1146 
1147 /* Poll mode worker when all SAs are of type inline protocol */
1148 void
1149 ipsec_poll_mode_wrkr_inl_pr(void)
1150 {
1151 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1152 			/ US_PER_S * BURST_TX_DRAIN_US;
1153 	struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
1154 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1155 	uint64_t prev_tsc, diff_tsc, cur_tsc;
1156 	struct ipsec_core_statistics *stats;
1157 	struct rt_ctx *rt4_ctx, *rt6_ctx;
1158 	struct sa_ctx *sa_in, *sa_out;
1159 	struct traffic_type ip4, ip6;
1160 	struct lcore_rx_queue *rxql;
1161 	struct rte_mbuf **v4, **v6;
1162 	struct ipsec_traffic trf;
1163 	struct lcore_conf *qconf;
1164 	uint16_t v4_num, v6_num;
1165 	int32_t socket_id;
1166 	uint32_t lcore_id;
1167 	int32_t i, nb_rx;
1168 	uint16_t portid;
1169 	uint8_t queueid;
1170 
1171 	prev_tsc = 0;
1172 	lcore_id = rte_lcore_id();
1173 	qconf = &lcore_conf[lcore_id];
1174 	rxql = qconf->rx_queue_list;
1175 	socket_id = rte_lcore_to_socket_id(lcore_id);
1176 	stats = &core_statistics[lcore_id];
1177 
1178 	rt4_ctx = socket_ctx[socket_id].rt_ip4;
1179 	rt6_ctx = socket_ctx[socket_id].rt_ip6;
1180 
1181 	sp4_in = socket_ctx[socket_id].sp_ip4_in;
1182 	sp6_in = socket_ctx[socket_id].sp_ip6_in;
1183 	sa_in = socket_ctx[socket_id].sa_in;
1184 
1185 	sp4_out = socket_ctx[socket_id].sp_ip4_out;
1186 	sp6_out = socket_ctx[socket_id].sp_ip6_out;
1187 	sa_out = socket_ctx[socket_id].sa_out;
1188 
1189 	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1190 
1191 	if (qconf->nb_rx_queue == 0) {
1192 		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1193 			lcore_id);
1194 		return;
1195 	}
1196 
1197 	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1198 
1199 	for (i = 0; i < qconf->nb_rx_queue; i++) {
1200 		portid = rxql[i].port_id;
1201 		queueid = rxql[i].queue_id;
1202 		RTE_LOG(INFO, IPSEC,
1203 			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1204 			lcore_id, portid, queueid);
1205 	}
1206 
1207 	while (!force_quit) {
1208 		cur_tsc = rte_rdtsc();
1209 
1210 		/* TX queue buffer drain */
1211 		diff_tsc = cur_tsc - prev_tsc;
1212 
1213 		if (unlikely(diff_tsc > drain_tsc)) {
1214 			drain_tx_buffers(qconf);
1215 			prev_tsc = cur_tsc;
1216 		}
1217 
1218 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
1219 			/* Read packets from RX queues */
1220 			portid = rxql[i].port_id;
1221 			queueid = rxql[i].queue_id;
1222 			nb_rx = rte_eth_rx_burst(portid, queueid,
1223 					pkts, MAX_PKT_BURST);
1224 
1225 			if (nb_rx <= 0)
1226 				continue;
1227 
1228 			core_stats_update_rx(nb_rx);
1229 
1230 			prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
1231 
1232 			/* Drop any IPsec traffic */
1233 			free_pkts(trf.ipsec.pkts, trf.ipsec.num);
1234 
1235 			if (is_unprotected_port(portid)) {
1236 				inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
1237 					      trf.ip4.num,
1238 					      &stats->inbound.spd4);
1239 
1240 				inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
1241 					      trf.ip6.num,
1242 					      &stats->inbound.spd6);
1243 
1244 				v4 = trf.ip4.pkts;
1245 				v4_num = trf.ip4.num;
1246 				v6 = trf.ip6.pkts;
1247 				v6_num = trf.ip6.num;
1248 			} else {
1249 				ip4.num = 0;
1250 				ip6.num = 0;
1251 
1252 				outb_inl_pro_spd_process(sp4_out, sa_out,
1253 							 &trf.ip4, &ip4, &ip6,
1254 							 true,
1255 							 &stats->outbound.spd4);
1256 
1257 				outb_inl_pro_spd_process(sp6_out, sa_out,
1258 							 &trf.ip6, &ip6, &ip4,
1259 							 false,
1260 							 &stats->outbound.spd6);
1261 				v4 = ip4.pkts;
1262 				v4_num = ip4.num;
1263 				v6 = ip6.pkts;
1264 				v6_num = ip6.num;
1265 			}
1266 
1267 #if defined __ARM_NEON
1268 			route4_pkts_neon(rt4_ctx, v4, v4_num, 0, false);
1269 			route6_pkts_neon(rt6_ctx, v6, v6_num);
1270 #else
1271 			route4_pkts(rt4_ctx, v4, v4_num, 0, false);
1272 			route6_pkts(rt6_ctx, v6, v6_num);
1273 #endif
1274 		}
1275 	}
1276 }
1277 
1278 /* Poll mode worker when all SAs are of type inline protocol
1279  * and single SA mode is enabled.
1280  */
1281 void
1282 ipsec_poll_mode_wrkr_inl_pr_ss(void)
1283 {
1284 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1285 			/ US_PER_S * BURST_TX_DRAIN_US;
1286 	uint16_t sa_out_portid = 0, sa_out_proto = 0;
1287 	struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
1288 	uint64_t prev_tsc, diff_tsc, cur_tsc;
1289 	struct rte_ipsec_session *ips = NULL;
1290 	struct lcore_rx_queue *rxql;
1291 	struct ipsec_sa *sa = NULL;
1292 	struct lcore_conf *qconf;
1293 	struct sa_ctx *sa_out;
1294 	uint32_t i, nb_rx, j;
1295 	int32_t socket_id;
1296 	uint32_t lcore_id;
1297 	uint16_t portid;
1298 	uint8_t queueid;
1299 
1300 	prev_tsc = 0;
1301 	lcore_id = rte_lcore_id();
1302 	qconf = &lcore_conf[lcore_id];
1303 	rxql = qconf->rx_queue_list;
1304 	socket_id = rte_lcore_to_socket_id(lcore_id);
1305 
1306 	/* Get SA info */
1307 	sa_out = socket_ctx[socket_id].sa_out;
1308 	if (sa_out && single_sa_idx < sa_out->nb_sa) {
1309 		sa = &sa_out->sa[single_sa_idx];
1310 		ips = ipsec_get_primary_session(sa);
1311 		sa_out_portid = sa->portid;
1312 		if (sa->flags & IP6_TUNNEL)
1313 			sa_out_proto = IPPROTO_IPV6;
1314 		else
1315 			sa_out_proto = IPPROTO_IP;
1316 	}
1317 
1318 	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1319 
1320 	if (qconf->nb_rx_queue == 0) {
1321 		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1322 			lcore_id);
1323 		return;
1324 	}
1325 
1326 	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1327 
1328 	for (i = 0; i < qconf->nb_rx_queue; i++) {
1329 		portid = rxql[i].port_id;
1330 		queueid = rxql[i].queue_id;
1331 		RTE_LOG(INFO, IPSEC,
1332 			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1333 			lcore_id, portid, queueid);
1334 	}
1335 
1336 	while (!force_quit) {
1337 		cur_tsc = rte_rdtsc();
1338 
1339 		/* TX queue buffer drain */
1340 		diff_tsc = cur_tsc - prev_tsc;
1341 
1342 		if (unlikely(diff_tsc > drain_tsc)) {
1343 			drain_tx_buffers(qconf);
1344 			prev_tsc = cur_tsc;
1345 		}
1346 
1347 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
1348 			/* Read packets from RX queues */
1349 			portid = rxql[i].port_id;
1350 			queueid = rxql[i].queue_id;
1351 			nb_rx = rte_eth_rx_burst(portid, queueid,
1352 						 pkts, MAX_PKT_BURST);
1353 
1354 			if (nb_rx <= 0)
1355 				continue;
1356 
1357 			core_stats_update_rx(nb_rx);
1358 
1359 			if (is_unprotected_port(portid)) {
1360 				/* Nothing much to do for inbound inline
1361 				 * decrypted traffic.
1362 				 */
1363 				for (j = 0; j < nb_rx; j++) {
1364 					uint32_t ptype, proto;
1365 
1366 					pkt = pkts[j];
1367 					ptype = pkt->packet_type &
1368 						RTE_PTYPE_L3_MASK;
1369 					if (ptype == RTE_PTYPE_L3_IPV4)
1370 						proto = IPPROTO_IP;
1371 					else
1372 						proto = IPPROTO_IPV6;
1373 
1374 					send_single_packet(pkt, portid, proto);
1375 				}
1376 
1377 				continue;
1378 			}
1379 
1380 			/* Free packets if there are no outbound sessions */
1381 			if (unlikely(!ips)) {
1382 				rte_pktmbuf_free_bulk(pkts, nb_rx);
1383 				continue;
1384 			}
1385 
1386 			rte_ipsec_pkt_process(ips, pkts, nb_rx);
1387 
1388 			/* Send pkts out */
1389 			for (j = 0; j < nb_rx; j++) {
1390 				pkt = pkts[j];
1391 
1392 				pkt->l2_len = RTE_ETHER_HDR_LEN;
1393 				send_single_packet(pkt, sa_out_portid,
1394 						   sa_out_proto);
1395 			}
1396 		}
1397 	}
1398 }
1399 
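/*
 * Pick a specialized poll mode worker based on wrkr_flags (e.g. all SAs
 * inline protocol, optionally single SA), falling back to the generic
 * ipsec_poll_mode_worker.
 */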
1400 static void
1401 ipsec_poll_mode_wrkr_launch(void)
1402 {
1403 	static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
1404 		[INL_PR_F]        = ipsec_poll_mode_wrkr_inl_pr,
1405 		[INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
1406 	};
1407 	ipsec_worker_fn_t fn;
1408 
1409 	if (!app_sa_prm.enable) {
1410 		fn = ipsec_poll_mode_worker;
1411 	} else {
1412 		fn = poll_mode_wrkrs[wrkr_flags];
1413 
1414 		/* Fall back to the generic worker if no specialized one matches */
1415 		if (!fn)
1416 			fn = ipsec_poll_mode_worker;
1417 	}
1418 
1419 	/* Launch worker */
1420 	(*fn)();
1421 }
1422 
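/*
 * Per-lcore entry point, launched on every worker lcore (typically via
 * rte_eal_mp_remote_launch()) with the event helper configuration as
 * argument.
 */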
1423 int ipsec_launch_one_lcore(void *args)
1424 {
1425 	struct eh_conf *conf;
1426 
1427 	conf = (struct eh_conf *)args;
1428 
1429 	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
1430 		/* Run in poll mode */
1431 		ipsec_poll_mode_wrkr_launch();
1432 	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
1433 		/* Run in event mode */
1434 		ipsec_eventmode_worker(conf);
1435 	}
1436 	return 0;
1437 }
1438