xref: /dpdk/examples/ipsec-secgw/ipsec_worker.c (revision 42a8fc7daa46256d150278fc9a7a846e27945a0c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  * Copyright (C) 2020 Marvell International Ltd.
4  */
5 #include <rte_acl.h>
6 #include <rte_event_eth_tx_adapter.h>
7 #include <rte_lpm.h>
8 #include <rte_lpm6.h>
9 
10 #include "event_helper.h"
11 #include "ipsec.h"
12 #include "ipsec-secgw.h"
13 #include "ipsec_worker.h"
14 
15 struct port_drv_mode_data {
16 	struct rte_security_session *sess;
17 	struct rte_security_ctx *ctx;
18 };
19 
20 typedef void (*ipsec_worker_fn_t)(void);
21 
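/*
 * Determine the packet type from the mbuf packet_type flags and point
 * *nlp at the next-layer protocol field of the IP header (ip_p/ip6_nxt),
 * which the SP ACL lookup uses as the start of its match key.
 */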
22 static inline enum pkt_type
23 process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
24 {
25 	struct rte_ether_hdr *eth;
26 	uint32_t ptype = pkt->packet_type;
27 
28 	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
29 	rte_prefetch0(eth);
30 
31 	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
32 		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
33 				offsetof(struct ip, ip_p));
34 		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
35 			return PKT_TYPE_IPSEC_IPV4;
36 		else
37 			return PKT_TYPE_PLAIN_IPV4;
38 	} else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
39 		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
40 				offsetof(struct ip6_hdr, ip6_nxt));
41 		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
42 			return PKT_TYPE_IPSEC_IPV6;
43 		else
44 			return PKT_TYPE_PLAIN_IPV6;
45 	}
46 
47 	/* Unknown/Unsupported type */
48 	return PKT_TYPE_INVALID;
49 }
50 
51 static inline void
52 update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
53 {
54 	struct rte_ether_hdr *ethhdr;
55 
56 	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
57 	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
58 	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
59 }
60 
61 static inline void
62 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
63 {
64 	/* Save the destination port in the mbuf */
65 	m->port = port_id;
66 
67 	/* Save eth queue for Tx */
68 	rte_event_eth_tx_adapter_txq_set(m, 0);
69 }
70 
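/*
 * Vector event attributes are kept valid only while every mbuf in the
 * vector comes from the same port. 0xFFFF marks the port as not yet set;
 * once a packet from a different port is seen, attr_valid is cleared.
 */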
71 static inline void
72 ev_vector_attr_init(struct rte_event_vector *vec)
73 {
74 	vec->attr_valid = 1;
75 	vec->port = 0xFFFF;
76 	vec->queue = 0;
77 }
78 
79 static inline void
80 ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
81 {
82 	if (vec->port == 0xFFFF) {
83 		vec->port = pkt->port;
84 		return;
85 	}
86 	if (vec->attr_valid && (vec->port != pkt->port))
87 		vec->attr_valid = 0;
88 }
89 
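/*
 * Build a per-port table of outbound inline protocol sessions for driver
 * mode. Only the first inline session found for each port is recorded;
 * SAs with other session types are skipped with an error log.
 */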
90 static inline void
91 prepare_out_sessions_tbl(struct sa_ctx *sa_out,
92 			 struct port_drv_mode_data *data,
93 			 uint16_t size)
94 {
95 	struct rte_ipsec_session *pri_sess;
96 	struct ipsec_sa *sa;
97 	uint32_t i;
98 
99 	if (!sa_out)
100 		return;
101 
102 	for (i = 0; i < sa_out->nb_sa; i++) {
103 
104 		sa = &sa_out->sa[i];
105 		if (!sa)
106 			continue;
107 
108 		pri_sess = ipsec_get_primary_session(sa);
109 		if (!pri_sess)
110 			continue;
111 
112 		if (pri_sess->type !=
113 			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
114 
115 			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
116 				pri_sess->type);
117 			continue;
118 		}
119 
120 		if (sa->portid >= size) {
121 			RTE_LOG(ERR, IPSEC,
122 				"Port id %d >= table size %d\n",
123 				sa->portid, size);
124 			continue;
125 		}
126 
127 		/* Use only the first inline session found for a given port */
128 		if (data[sa->portid].sess)
129 			continue;
130 		data[sa->portid].sess = pri_sess->security.ses;
131 		data[sa->portid].ctx = pri_sess->security.ctx;
132 	}
133 }
134 
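/*
 * Run a single SP (ACL) lookup. Returns 0 when the packet must be dropped
 * (no SP context or DISCARD result), 1 otherwise. On BYPASS, *sa_idx is set
 * to -1 (all ones); for PROTECT, it is set to the SA index (ACL result - 1).
 */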
135 static inline int
136 check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
137 {
138 	uint32_t res;
139 
140 	if (unlikely(sp == NULL))
141 		return 0;
142 
143 	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
144 			DEFAULT_MAX_CATEGORIES);
145 
146 	if (unlikely(res == DISCARD))
147 		return 0;
148 	else if (res == BYPASS) {
149 		*sa_idx = -1;
150 		return 1;
151 	}
152 
153 	*sa_idx = res - 1;
154 	return 1;
155 }
156 
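/*
 * Bulk SP (ACL) lookup for outbound traffic: DISCARD packets are freed,
 * BYPASS packets are compacted in place in 'ip', and PROTECT packets are
 * moved to 'ipsec' with res[] holding the SA index (ACL result - 1).
 */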
157 static inline void
158 check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
159 	      struct traffic_type *ipsec)
160 {
161 	uint32_t i, j, res;
162 	struct rte_mbuf *m;
163 
164 	if (unlikely(sp == NULL || ip->num == 0))
165 		return;
166 
167 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
168 			 DEFAULT_MAX_CATEGORIES);
169 
170 	j = 0;
171 	for (i = 0; i < ip->num; i++) {
172 		m = ip->pkts[i];
173 		res = ip->res[i];
174 		if (unlikely(res == DISCARD))
175 			free_pkts(&m, 1);
176 		else if (res == BYPASS)
177 			ip->pkts[j++] = m;
178 		else {
179 			ipsec->res[ipsec->num] = res - 1;
180 			ipsec->pkts[ipsec->num++] = m;
181 		}
182 	}
183 	ip->num = j;
184 }
185 
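/*
 * Bulk SP (ACL) lookup for inbound traffic already handled by inline IPsec.
 * For PROTECT results, the SA pointer stored in the security dynfield must
 * be present and its SPI must match the SA selected by the SP; packets
 * failing either check are freed.
 */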
186 static inline void
187 check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
188 		 struct traffic_type *ip)
189 {
190 	struct ipsec_sa *sa;
191 	uint32_t i, j, res;
192 	struct rte_mbuf *m;
193 
194 	if (unlikely(sp == NULL || ip->num == 0))
195 		return;
196 
197 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
198 			 DEFAULT_MAX_CATEGORIES);
199 
200 	j = 0;
201 	for (i = 0; i < ip->num; i++) {
202 		m = ip->pkts[i];
203 		res = ip->res[i];
204 		if (unlikely(res == DISCARD))
205 			free_pkts(&m, 1);
206 		else if (res == BYPASS)
207 			ip->pkts[j++] = m;
208 		else {
209 			sa = *(struct ipsec_sa **)rte_security_dynfield(m);
210 			if (sa == NULL) {
211 				free_pkts(&m, 1);
212 				continue;
213 			}
214 
215 			/* SPI on the packet should match the one in the SA */
216 			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
217 				free_pkts(&m, 1);
218 				continue;
219 			}
220 
221 			ip->pkts[j++] = m;
222 		}
223 	}
224 	ip->num = j;
225 }
226 
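/*
 * LPM lookup on the destination IPv4 address. Returns the next-hop port id,
 * or RTE_MAX_ETHPORTS when no route matches.
 */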
227 static inline uint16_t
228 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
229 {
230 	uint32_t dst_ip;
231 	uint16_t offset;
232 	uint32_t hop;
233 	int ret;
234 
235 	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
236 	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
237 	dst_ip = rte_be_to_cpu_32(dst_ip);
238 
239 	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
240 
241 	if (ret == 0) {
242 		/* We have a hit */
243 		return hop;
244 	}
245 
246 	/* else */
247 	return RTE_MAX_ETHPORTS;
248 }
249 
250 /* TODO: To be tested */
251 static inline uint16_t
252 route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
253 {
254 	uint8_t dst_ip[16];
255 	uint8_t *ip6_dst;
256 	uint16_t offset;
257 	uint32_t hop;
258 	int ret;
259 
260 	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
261 	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
262 	memcpy(&dst_ip[0], ip6_dst, 16);
263 
264 	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
265 
266 	if (ret == 0) {
267 		/* We have a hit */
268 		return hop;
269 	}
270 
271 	/* else */
272 	return RTE_MAX_ETHPORTS;
273 }
274 
275 static inline uint16_t
276 get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
277 {
278 	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
279 		return route4_pkt(pkt, rt->rt4_ctx);
280 	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
281 		return route6_pkt(pkt, rt->rt6_ctx);
282 
283 	return RTE_MAX_ETHPORTS;
284 }
285 
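/*
 * Process a single inbound event (app mode): classify the packet, consult
 * the SP, validate inline-decrypted packets against the selected SA (SPI
 * match), then route and set the destination port in the event. Returns
 * PKT_FORWARDED on success or PKT_DROPPED after freeing the mbuf.
 */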
286 static inline int
287 process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
288 		struct rte_event *ev)
289 {
290 	struct ipsec_sa *sa = NULL;
291 	struct rte_mbuf *pkt;
292 	uint16_t port_id = 0;
293 	enum pkt_type type;
294 	uint32_t sa_idx;
295 	uint8_t *nlp;
296 
297 	/* Get pkt from event */
298 	pkt = ev->mbuf;
299 
300 	/* Check the packet type */
301 	type = process_ipsec_get_pkt_type(pkt, &nlp);
302 
303 	switch (type) {
304 	case PKT_TYPE_PLAIN_IPV4:
305 		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
306 			if (unlikely(pkt->ol_flags &
307 				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
308 				RTE_LOG(ERR, IPSEC,
309 					"Inbound security offload failed\n");
310 				goto drop_pkt_and_exit;
311 			}
312 			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
313 		}
314 
315 		/* Check if we have a match */
316 		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
317 			/* No valid match */
318 			goto drop_pkt_and_exit;
319 		}
320 		break;
321 
322 	case PKT_TYPE_PLAIN_IPV6:
323 		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
324 			if (unlikely(pkt->ol_flags &
325 				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
326 				RTE_LOG(ERR, IPSEC,
327 					"Inbound security offload failed\n");
328 				goto drop_pkt_and_exit;
329 			}
330 			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
331 		}
332 
333 		/* Check if we have a match */
334 		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
335 			/* No valid match */
336 			goto drop_pkt_and_exit;
337 		}
338 		break;
339 
340 	default:
341 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
342 			   type);
343 		goto drop_pkt_and_exit;
344 	}
345 
346 	/* Check if the packet has to be bypassed */
347 	if (sa_idx == BYPASS)
348 		goto route_and_send_pkt;
349 
350 	/* Validate sa_idx */
351 	if (sa_idx >= ctx->sa_ctx->nb_sa)
352 		goto drop_pkt_and_exit;
353 
354 	/* Else the packet has to be protected with an SA */
355 
356 	/* If the packet was IPsec processed, then SA pointer should be set */
357 	if (sa == NULL)
358 		goto drop_pkt_and_exit;
359 
360 	/* SPI on the packet should match the one in the SA */
361 	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
362 		goto drop_pkt_and_exit;
363 
364 route_and_send_pkt:
365 	port_id = get_route(pkt, rt, type);
366 	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
367 		/* no match */
368 		goto drop_pkt_and_exit;
369 	}
370 	/* else, we have a matching route */
371 
372 	/* Update mac addresses */
373 	update_mac_addrs(pkt, port_id);
374 
375 	/* Update the event with the dest port */
376 	ipsec_event_pre_forward(pkt, port_id);
377 	return PKT_FORWARDED;
378 
379 drop_pkt_and_exit:
380 	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
381 	rte_pktmbuf_free(pkt);
382 	ev->mbuf = NULL;
383 	return PKT_DROPPED;
384 }
385 
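/*
 * Process a single outbound event (app mode): consult the SP and either
 * bypass-route the packet or attach the inline protocol session metadata,
 * mark it for Tx security offload and forward it on the SA's port. Returns
 * PKT_FORWARDED on success or PKT_DROPPED after freeing the mbuf.
 */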
386 static inline int
387 process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
388 		struct rte_event *ev)
389 {
390 	struct rte_ipsec_session *sess;
391 	struct sa_ctx *sa_ctx;
392 	struct rte_mbuf *pkt;
393 	uint16_t port_id = 0;
394 	struct ipsec_sa *sa;
395 	enum pkt_type type;
396 	uint32_t sa_idx;
397 	uint8_t *nlp;
398 
399 	/* Get pkt from event */
400 	pkt = ev->mbuf;
401 
402 	/* Check the packet type */
403 	type = process_ipsec_get_pkt_type(pkt, &nlp);
404 
405 	switch (type) {
406 	case PKT_TYPE_PLAIN_IPV4:
407 		/* Check if we have a match */
408 		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
409 			/* No valid match */
410 			goto drop_pkt_and_exit;
411 		}
412 		break;
413 	case PKT_TYPE_PLAIN_IPV6:
414 		/* Check if we have a match */
415 		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
416 			/* No valid match */
417 			goto drop_pkt_and_exit;
418 		}
419 		break;
420 	default:
421 		/*
422 		 * Only plain IPv4 & IPv6 packets are allowed
423 		 * on protected port. Drop the rest.
424 		 */
425 		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
426 		goto drop_pkt_and_exit;
427 	}
428 
429 	/* Check if the packet has to be bypassed */
430 	if (sa_idx == BYPASS) {
431 		port_id = get_route(pkt, rt, type);
432 		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
433 			/* no match */
434 			goto drop_pkt_and_exit;
435 		}
436 		/* else, we have a matching route */
437 		goto send_pkt;
438 	}
439 
440 	/* Validate sa_idx */
441 	if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
442 		goto drop_pkt_and_exit;
443 
444 	/* Else the packet has to be protected */
445 
446 	/* Get SA ctx */
447 	sa_ctx = ctx->sa_ctx;
448 
449 	/* Get SA */
450 	sa = &(sa_ctx->sa[sa_idx]);
451 
452 	/* Get IPsec session */
453 	sess = ipsec_get_primary_session(sa);
454 
455 	/* Allow only inline protocol for now */
456 	if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
457 		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
458 		goto drop_pkt_and_exit;
459 	}
460 
461 	rte_security_set_pkt_metadata(sess->security.ctx,
462 				      sess->security.ses, pkt, NULL);
463 
464 	/* Mark the packet for Tx security offload */
465 	pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
466 
467 	/* Get the port to which this pkt needs to be submitted */
468 	port_id = sa->portid;
469 
470 send_pkt:
471 	/* Provide L2 len for Outbound processing */
472 	pkt->l2_len = RTE_ETHER_HDR_LEN;
473 
474 	/* Update mac addresses */
475 	update_mac_addrs(pkt, port_id);
476 
477 	/* Update the event with the dest port */
478 	ipsec_event_pre_forward(pkt, port_id);
479 	return PKT_FORWARDED;
480 
481 drop_pkt_and_exit:
482 	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
483 	rte_pktmbuf_free(pkt);
484 	ev->mbuf = NULL;
485 	return PKT_DROPPED;
486 }
487 
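/*
 * Route the classified traffic of an event vector: plain IPv4/IPv6 packets
 * are routed via LPM, while packets selected for protection get their
 * inline session metadata attached and are sent on the SA's port.
 * Surviving mbufs are compacted at the front of vec->mbufs; the new count
 * is returned.
 */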
488 static inline int
489 ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
490 		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
491 {
492 	struct rte_ipsec_session *sess;
493 	uint32_t sa_idx, i, j = 0;
494 	uint16_t port_id = 0;
495 	struct rte_mbuf *pkt;
496 	struct ipsec_sa *sa;
497 
498 	/* Route IPv4 packets */
499 	for (i = 0; i < t->ip4.num; i++) {
500 		pkt = t->ip4.pkts[i];
501 		port_id = route4_pkt(pkt, rt->rt4_ctx);
502 		if (port_id != RTE_MAX_ETHPORTS) {
503 			/* Update mac addresses */
504 			update_mac_addrs(pkt, port_id);
505 			/* Update the event with the dest port */
506 			ipsec_event_pre_forward(pkt, port_id);
507 			ev_vector_attr_update(vec, pkt);
508 			vec->mbufs[j++] = pkt;
509 		} else
510 			free_pkts(&pkt, 1);
511 	}
512 
513 	/* Route IPv6 packets */
514 	for (i = 0; i < t->ip6.num; i++) {
515 		pkt = t->ip6.pkts[i];
516 		port_id = route6_pkt(pkt, rt->rt6_ctx);
517 		if (port_id != RTE_MAX_ETHPORTS) {
518 			/* Update mac addresses */
519 			update_mac_addrs(pkt, port_id);
520 			/* Update the event with the dest port */
521 			ipsec_event_pre_forward(pkt, port_id);
522 			ev_vector_attr_update(vec, pkt);
523 			vec->mbufs[j++] = pkt;
524 		} else
525 			free_pkts(&pkt, 1);
526 	}
527 
528 	/* Route ESP packets */
529 	for (i = 0; i < t->ipsec.num; i++) {
530 		/* Validate sa_idx */
531 		sa_idx = t->ipsec.res[i];
532 		pkt = t->ipsec.pkts[i];
533 		if (unlikely(sa_idx >= sa_ctx->nb_sa))
534 			free_pkts(&pkt, 1);
535 		else {
536 			/* Else the packet has to be protected */
537 			sa = &(sa_ctx->sa[sa_idx]);
538 			/* Get IPsec session */
539 			sess = ipsec_get_primary_session(sa);
540 			/* Allow only inline protocol for now */
541 			if (unlikely(sess->type !=
542 				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
543 				RTE_LOG(ERR, IPSEC, "SA type not supported\n");
544 				free_pkts(&pkt, 1);
545 				continue;
546 			}
547 			rte_security_set_pkt_metadata(sess->security.ctx,
548 						sess->security.ses, pkt, NULL);
549 
550 			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
551 			port_id = sa->portid;
552 			update_mac_addrs(pkt, port_id);
553 			ipsec_event_pre_forward(pkt, port_id);
554 			ev_vector_attr_update(vec, pkt);
555 			vec->mbufs[j++] = pkt;
556 		}
557 	}
558 
559 	return j;
560 }
561 
562 static inline void
563 classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
564 {
565 	enum pkt_type type;
566 	uint8_t *nlp;
567 
568 	/* Check the packet type */
569 	type = process_ipsec_get_pkt_type(pkt, &nlp);
570 
571 	switch (type) {
572 	case PKT_TYPE_PLAIN_IPV4:
573 		t->ip4.data[t->ip4.num] = nlp;
574 		t->ip4.pkts[(t->ip4.num)++] = pkt;
575 		break;
576 	case PKT_TYPE_PLAIN_IPV6:
577 		t->ip6.data[t->ip6.num] = nlp;
578 		t->ip6.pkts[(t->ip6.num)++] = pkt;
579 		break;
580 	default:
581 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
582 			   type);
583 		free_pkts(&pkt, 1);
584 		break;
585 	}
586 }
587 
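/*
 * Inbound vector processing (app mode): drop packets with failed inline
 * security offload, classify the rest, run bulk SP+SA checks and route.
 */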
588 static inline int
589 process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
590 				struct rte_event_vector *vec)
591 {
592 	struct ipsec_traffic t;
593 	struct rte_mbuf *pkt;
594 	int i;
595 
596 	t.ip4.num = 0;
597 	t.ip6.num = 0;
598 	t.ipsec.num = 0;
599 
600 	for (i = 0; i < vec->nb_elem; i++) {
601 		/* Get pkt from event */
602 		pkt = vec->mbufs[i];
603 
604 		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
605 			if (unlikely(pkt->ol_flags &
606 				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
607 				RTE_LOG(ERR, IPSEC,
608 					"Inbound security offload failed\n");
609 				free_pkts(&pkt, 1);
610 				continue;
611 			}
612 		}
613 
614 		classify_pkt(pkt, &t);
615 	}
616 
617 	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
618 	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
619 
620 	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
621 }
622 
623 static inline int
624 process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
625 				 struct rte_event_vector *vec)
626 {
627 	struct ipsec_traffic t;
628 	struct rte_mbuf *pkt;
629 	uint32_t i;
630 
631 	t.ip4.num = 0;
632 	t.ip6.num = 0;
633 	t.ipsec.num = 0;
634 
635 	for (i = 0; i < vec->nb_elem; i++) {
636 		/* Get pkt from event */
637 		pkt = vec->mbufs[i];
638 
639 		classify_pkt(pkt, &t);
640 
641 		/* Provide L2 len for Outbound processing */
642 		pkt->l2_len = RTE_ETHER_HDR_LEN;
643 	}
644 
645 	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
646 	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
647 
648 	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
649 }
650 
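/*
 * Outbound vector processing in driver mode: attach the per-port session
 * prepared by prepare_out_sessions_tbl(), mark packets for Tx security
 * offload and compact them in place; packets without a session are freed.
 */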
651 static inline int
652 process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
653 					  struct port_drv_mode_data *data)
654 {
655 	struct rte_mbuf *pkt;
656 	int16_t port_id;
657 	uint32_t i;
658 	int j = 0;
659 
660 	for (i = 0; i < vec->nb_elem; i++) {
661 		pkt = vec->mbufs[i];
662 		port_id = pkt->port;
663 
664 		if (unlikely(!data[port_id].sess)) {
665 			free_pkts(&pkt, 1);
666 			continue;
667 		}
668 		ipsec_event_pre_forward(pkt, port_id);
669 		/* Save security session */
670 		rte_security_set_pkt_metadata(data[port_id].ctx,
671 					      data[port_id].sess, pkt,
672 					      NULL);
673 
674 		/* Mark the packet for Tx security offload */
675 		pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
676 
677 		/* Provide L2 len for Outbound processing */
678 		pkt->l2_len = RTE_ETHER_HDR_LEN;
679 
680 		vec->mbufs[j++] = pkt;
681 	}
682 
683 	return j;
684 }
685 
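/*
 * App mode vector handler: process the vector as inbound or outbound based
 * on the port of the first mbuf, then enqueue it to the Tx adapter, or
 * return the vector to its mempool if nothing is left to send.
 */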
686 static inline void
687 ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
688 			struct eh_event_link_info *links,
689 			struct rte_event *ev)
690 {
691 	struct rte_event_vector *vec = ev->vec;
692 	struct rte_mbuf *pkt;
693 	int ret;
694 
695 	pkt = vec->mbufs[0];
696 
697 	ev_vector_attr_init(vec);
698 	if (is_unprotected_port(pkt->port))
699 		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
700 						      &lconf->rt, vec);
701 	else
702 		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
703 						       &lconf->rt, vec);
704 
705 	if (likely(ret > 0)) {
706 		vec->nb_elem = ret;
707 		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
708 						 links[0].event_port_id,
709 						 ev, 1, 0);
710 	} else {
711 		rte_mempool_put(rte_mempool_from_obj(vec), vec);
712 	}
713 }
714 
715 static inline void
716 ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
717 				 struct rte_event *ev,
718 				 struct port_drv_mode_data *data)
719 {
720 	struct rte_event_vector *vec = ev->vec;
721 	struct rte_mbuf *pkt;
722 
723 	pkt = vec->mbufs[0];
724 
725 	if (!is_unprotected_port(pkt->port))
726 		vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
727 									 data);
728 	if (vec->nb_elem > 0)
729 		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
730 						 links[0].event_port_id,
731 						 ev, 1, 0);
732 	else
733 		rte_mempool_put(rte_mempool_from_obj(vec), vec);
734 }
735 
736 /*
737  * Event mode exposes various operating modes depending on the
738  * capabilities of the event device and the operating mode
739  * selected.
740  */
741 
742 static void
743 ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
744 		       void *args __rte_unused)
745 {
746 	rte_pktmbuf_free(ev.mbuf);
747 }
748 
749 /* Workers registered */
750 #define IPSEC_EVENTMODE_WORKERS		2
751 
752 /*
753  * Event mode worker
754  * Operating parameters : non-burst - Tx internal port - driver mode
755  */
756 static void
757 ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
758 		uint8_t nb_links)
759 {
760 	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
761 	unsigned int nb_rx = 0, nb_tx;
762 	struct rte_mbuf *pkt;
763 	struct rte_event ev;
764 	uint32_t lcore_id;
765 	int32_t socket_id;
766 	int16_t port_id;
767 
768 	/* Check if we have links registered for this lcore */
769 	if (nb_links == 0) {
770 		/* No links registered - exit */
771 		return;
772 	}
773 
774 	memset(data, 0, sizeof(data));
775 
776 	/* Get core ID */
777 	lcore_id = rte_lcore_id();
778 
779 	/* Get socket ID */
780 	socket_id = rte_lcore_to_socket_id(lcore_id);
781 
782 	/*
783 	 * Prepare security sessions table. In outbound driver mode
784 	 * we always use the first session configured for a given port.
785 	 */
786 	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
787 				 RTE_MAX_ETHPORTS);
788 
789 	RTE_LOG(INFO, IPSEC,
790 		"Launching event mode worker (non-burst - Tx internal port - "
791 		"driver mode) on lcore %d\n", lcore_id);
792 
793 	/* We have valid links */
794 
795 	/* Check if it's single link */
796 	if (nb_links != 1) {
797 		RTE_LOG(INFO, IPSEC,
798 			"Multiple links not supported. Using first link\n");
799 	}
800 
801 	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
802 			links[0].event_port_id);
803 	while (!force_quit) {
804 		/* Read packet from event queues */
805 		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
806 				links[0].event_port_id,
807 				&ev,	/* events */
808 				1,	/* nb_events */
809 				0	/* timeout_ticks */);
810 
811 		if (nb_rx == 0)
812 			continue;
813 
814 		switch (ev.event_type) {
815 		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
816 		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
817 			ipsec_ev_vector_drv_mode_process(links, &ev, data);
818 			continue;
819 		case RTE_EVENT_TYPE_ETHDEV:
820 			break;
821 		default:
822 			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
823 				ev.event_type);
824 			continue;
825 		}
826 
827 		pkt = ev.mbuf;
828 		port_id = pkt->port;
829 
830 		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
831 
832 		/* Process packet */
833 		ipsec_event_pre_forward(pkt, port_id);
834 
835 		if (!is_unprotected_port(port_id)) {
836 
837 			if (unlikely(!data[port_id].sess)) {
838 				rte_pktmbuf_free(pkt);
839 				continue;
840 			}
841 
842 			/* Save security session */
843 			rte_security_set_pkt_metadata(data[port_id].ctx,
844 						      data[port_id].sess, pkt,
845 						      NULL);
846 
847 			/* Mark the packet for Tx security offload */
848 			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
849 
850 			/* Provide L2 len for Outbound processing */
851 			pkt->l2_len = RTE_ETHER_HDR_LEN;
852 		}
853 
854 		/*
855 		 * Since a Tx internal port is available, events can be
856 		 * enqueued directly to the adapter, which submits them
857 		 * internally to the eth device.
858 		 */
859 		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
860 							 links[0].event_port_id,
861 							 &ev, /* events */
862 							 1,   /* nb_events */
863 							 0 /* flags */);
864 		if (!nb_tx)
865 			rte_pktmbuf_free(ev.mbuf);
866 	}
867 
868 	if (ev.u64) {
869 		ev.op = RTE_EVENT_OP_RELEASE;
870 		rte_event_enqueue_burst(links[0].eventdev_id,
871 					links[0].event_port_id, &ev, 1);
872 	}
873 
874 	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
875 			       ipsec_event_port_flush, NULL);
876 }
877 
878 /*
879  * Event mode worker
880  * Operating parameters : non-burst - Tx internal port - app mode
881  */
882 static void
883 ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
884 		uint8_t nb_links)
885 {
886 	struct lcore_conf_ev_tx_int_port_wrkr lconf;
887 	unsigned int nb_rx = 0, nb_tx;
888 	struct rte_event ev;
889 	uint32_t lcore_id;
890 	int32_t socket_id;
891 	int ret;
892 
893 	/* Check if we have links registered for this lcore */
894 	if (nb_links == 0) {
895 		/* No links registered - exit */
896 		return;
897 	}
898 
899 	/* We have valid links */
900 
901 	/* Get core ID */
902 	lcore_id = rte_lcore_id();
903 
904 	/* Get socket ID */
905 	socket_id = rte_lcore_to_socket_id(lcore_id);
906 
907 	/* Save routing table */
908 	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
909 	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
910 	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
911 	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
912 	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
913 	lconf.inbound.lcore_id = lcore_id;
914 	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
915 	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
916 	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
917 	lconf.outbound.lcore_id = lcore_id;
918 
919 	RTE_LOG(INFO, IPSEC,
920 		"Launching event mode worker (non-burst - Tx internal port - "
921 		"app mode) on lcore %d\n", lcore_id);
922 
923 	/* Check if it's single link */
924 	if (nb_links != 1) {
925 		RTE_LOG(INFO, IPSEC,
926 			"Multiple links not supported. Using first link\n");
927 	}
928 
929 	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
930 		links[0].event_port_id);
931 
932 	while (!force_quit) {
933 		/* Read packet from event queues */
934 		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
935 				links[0].event_port_id,
936 				&ev,     /* events */
937 				1,       /* nb_events */
938 				0        /* timeout_ticks */);
939 
940 		if (nb_rx == 0)
941 			continue;
942 
943 		switch (ev.event_type) {
944 		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
945 		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
946 			ipsec_ev_vector_process(&lconf, links, &ev);
947 			continue;
948 		case RTE_EVENT_TYPE_ETHDEV:
949 			break;
950 		default:
951 			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
952 				ev.event_type);
953 			continue;
954 		}
955 
956 		if (is_unprotected_port(ev.mbuf->port))
957 			ret = process_ipsec_ev_inbound(&lconf.inbound,
958 							&lconf.rt, &ev);
959 		else
960 			ret = process_ipsec_ev_outbound(&lconf.outbound,
961 							&lconf.rt, &ev);
962 		if (ret != 1)
963 			/* The pkt has been dropped */
964 			continue;
965 
966 		/*
967 		 * Since a Tx internal port is available, events can be
968 		 * enqueued directly to the adapter, which submits them
969 		 * internally to the eth device.
970 		 */
971 		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
972 							 links[0].event_port_id,
973 							 &ev, /* events */
974 							 1,   /* nb_events */
975 							 0 /* flags */);
976 		if (!nb_tx)
977 			rte_pktmbuf_free(ev.mbuf);
978 	}
979 
980 	if (ev.u64) {
981 		ev.op = RTE_EVENT_OP_RELEASE;
982 		rte_event_enqueue_burst(links[0].eventdev_id,
983 					links[0].event_port_id, &ev, 1);
984 	}
985 
986 	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
987 			       ipsec_event_port_flush, NULL);
988 }
989 
990 static uint8_t
991 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
992 {
993 	struct eh_app_worker_params *wrkr;
994 	uint8_t nb_wrkr_param = 0;
995 
996 	/* Save workers */
997 	wrkr = wrkrs;
998 
999 	/* Non-burst - Tx internal port - driver mode */
1000 	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1001 	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1002 	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1003 	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
1004 	wrkr++;
1005 	nb_wrkr_param++;
1006 
1007 	/* Non-burst - Tx internal port - app mode */
1008 	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1009 	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1010 	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
1011 	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
1012 	nb_wrkr_param++;
1013 
1014 	return nb_wrkr_param;
1015 }
1016 
1017 static void
1018 ipsec_eventmode_worker(struct eh_conf *conf)
1019 {
1020 	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
1021 					{{{0} }, NULL } };
1022 	uint8_t nb_wrkr_param;
1023 
1024 	/* Populate ipsec_wrkr params */
1025 	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
1026 
1027 	/*
1028 	 * Launch correct worker after checking
1029 	 * the event device's capabilities.
1030 	 */
1031 	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
1032 }
1033 
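/*
 * Outbound SPD processing for inline protocol SAs in poll mode: classify
 * plain traffic against the SP, free DISCARD packets, pass BYPASS packets
 * through, and run rte_ipsec_pkt_process() on groups of consecutive packets
 * sharing the same SA. Processed packets are appended to 'match' or
 * 'mismatch' depending on whether the SA's outer IP version agrees with
 * match_flag.
 */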
1034 static __rte_always_inline void
1035 outb_inl_pro_spd_process(struct sp_ctx *sp,
1036 			 struct sa_ctx *sa_ctx,
1037 			 struct traffic_type *ip,
1038 			 struct traffic_type *match,
1039 			 struct traffic_type *mismatch,
1040 			 bool match_flag,
1041 			 struct ipsec_spd_stats *stats)
1042 {
1043 	uint32_t prev_sa_idx = UINT32_MAX;
1044 	struct rte_mbuf *ipsec[MAX_PKT_BURST];
1045 	struct rte_ipsec_session *ips;
1046 	uint32_t i, j, j_mis, sa_idx;
1047 	struct ipsec_sa *sa = NULL;
1048 	uint32_t ipsec_num = 0;
1049 	struct rte_mbuf *m;
1050 	uint64_t satp;
1051 
1052 	if (ip->num == 0 || sp == NULL)
1053 		return;
1054 
1055 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
1056 			ip->num, DEFAULT_MAX_CATEGORIES);
1057 
1058 	j = match->num;
1059 	j_mis = mismatch->num;
1060 
1061 	for (i = 0; i < ip->num; i++) {
1062 		m = ip->pkts[i];
1063 		sa_idx = ip->res[i] - 1;
1064 
1065 		if (unlikely(ip->res[i] == DISCARD)) {
1066 			free_pkts(&m, 1);
1067 
1068 			stats->discard++;
1069 		} else if (unlikely(ip->res[i] == BYPASS)) {
1070 			match->pkts[j++] = m;
1071 
1072 			stats->bypass++;
1073 		} else {
1074 			if (prev_sa_idx == UINT32_MAX) {
1075 				prev_sa_idx = sa_idx;
1076 				sa = &sa_ctx->sa[sa_idx];
1077 				ips = ipsec_get_primary_session(sa);
1078 				satp = rte_ipsec_sa_type(ips->sa);
1079 			}
1080 
1081 			if (sa_idx != prev_sa_idx) {
1082 				prep_process_group(sa, ipsec, ipsec_num);
1083 
1084 				/* Prepare packets for outbound */
1085 				rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
1086 
1087 				/* Copy to the current traffic type or the other one */
1088 				if (SATP_OUT_IPV4(satp) == match_flag) {
1089 					memcpy(&match->pkts[j], ipsec,
1090 					       ipsec_num * sizeof(void *));
1091 					j += ipsec_num;
1092 				} else {
1093 					memcpy(&mismatch->pkts[j_mis], ipsec,
1094 					       ipsec_num * sizeof(void *));
1095 					j_mis += ipsec_num;
1096 				}
1097 
1098 				/* Update to new SA */
1099 				sa = &sa_ctx->sa[sa_idx];
1100 				ips = ipsec_get_primary_session(sa);
1101 				satp = rte_ipsec_sa_type(ips->sa);
1102 				ipsec_num = 0;
1103 			}
1104 
1105 			ipsec[ipsec_num++] = m;
1106 			stats->protect++;
1107 		}
1108 	}
1109 
1110 	if (ipsec_num) {
1111 		prep_process_group(sa, ipsec, ipsec_num);
1112 
1113 		/* Prepare packets for outbound */
1114 		rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
1115 
1116 		/* Copy to the current traffic type or the other one */
1117 		if (SATP_OUT_IPV4(satp) == match_flag) {
1118 			memcpy(&match->pkts[j], ipsec,
1119 			       ipsec_num * sizeof(void *));
1120 			j += ipsec_num;
1121 		} else {
1122 			memcpy(&mismatch->pkts[j_mis], ipsec,
1123 			       ipsec_num * sizeof(void *));
1124 			j_mis += ipsec_num;
1125 		}
1126 	}
1127 	match->num = j;
1128 	mismatch->num = j_mis;
1129 }
1130 
1131 /* Poll mode worker when all SAs are of type inline protocol */
1132 void
1133 ipsec_poll_mode_wrkr_inl_pr(void)
1134 {
1135 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1136 			/ US_PER_S * BURST_TX_DRAIN_US;
1137 	struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
1138 	struct rte_mbuf *pkts[MAX_PKT_BURST];
1139 	uint64_t prev_tsc, diff_tsc, cur_tsc;
1140 	struct ipsec_core_statistics *stats;
1141 	struct rt_ctx *rt4_ctx, *rt6_ctx;
1142 	struct sa_ctx *sa_in, *sa_out;
1143 	struct traffic_type ip4, ip6;
1144 	struct lcore_rx_queue *rxql;
1145 	struct rte_mbuf **v4, **v6;
1146 	struct ipsec_traffic trf;
1147 	struct lcore_conf *qconf;
1148 	uint16_t v4_num, v6_num;
1149 	int32_t socket_id;
1150 	uint32_t lcore_id;
1151 	int32_t i, nb_rx;
1152 	uint16_t portid;
1153 	uint8_t queueid;
1154 
1155 	prev_tsc = 0;
1156 	lcore_id = rte_lcore_id();
1157 	qconf = &lcore_conf[lcore_id];
1158 	rxql = qconf->rx_queue_list;
1159 	socket_id = rte_lcore_to_socket_id(lcore_id);
1160 	stats = &core_statistics[lcore_id];
1161 
1162 	rt4_ctx = socket_ctx[socket_id].rt_ip4;
1163 	rt6_ctx = socket_ctx[socket_id].rt_ip6;
1164 
1165 	sp4_in = socket_ctx[socket_id].sp_ip4_in;
1166 	sp6_in = socket_ctx[socket_id].sp_ip6_in;
1167 	sa_in = socket_ctx[socket_id].sa_in;
1168 
1169 	sp4_out = socket_ctx[socket_id].sp_ip4_out;
1170 	sp6_out = socket_ctx[socket_id].sp_ip6_out;
1171 	sa_out = socket_ctx[socket_id].sa_out;
1172 
1173 	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1174 
1175 	if (qconf->nb_rx_queue == 0) {
1176 		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1177 			lcore_id);
1178 		return;
1179 	}
1180 
1181 	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1182 
1183 	for (i = 0; i < qconf->nb_rx_queue; i++) {
1184 		portid = rxql[i].port_id;
1185 		queueid = rxql[i].queue_id;
1186 		RTE_LOG(INFO, IPSEC,
1187 			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1188 			lcore_id, portid, queueid);
1189 	}
1190 
1191 	while (!force_quit) {
1192 		cur_tsc = rte_rdtsc();
1193 
1194 		/* TX queue buffer drain */
1195 		diff_tsc = cur_tsc - prev_tsc;
1196 
1197 		if (unlikely(diff_tsc > drain_tsc)) {
1198 			drain_tx_buffers(qconf);
1199 			prev_tsc = cur_tsc;
1200 		}
1201 
1202 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
1203 			/* Read packets from RX queues */
1204 			portid = rxql[i].port_id;
1205 			queueid = rxql[i].queue_id;
1206 			nb_rx = rte_eth_rx_burst(portid, queueid,
1207 					pkts, MAX_PKT_BURST);
1208 
1209 			if (nb_rx <= 0)
1210 				continue;
1211 
1212 			core_stats_update_rx(nb_rx);
1213 
1214 			prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
1215 
1216 			/* Drop any IPsec traffic */
1217 			free_pkts(trf.ipsec.pkts, trf.ipsec.num);
1218 
1219 			if (is_unprotected_port(portid)) {
1220 				inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
1221 					      trf.ip4.num,
1222 					      &stats->inbound.spd4);
1223 
1224 				inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
1225 					      trf.ip6.num,
1226 					      &stats->inbound.spd6);
1227 
1228 				v4 = trf.ip4.pkts;
1229 				v4_num = trf.ip4.num;
1230 				v6 = trf.ip6.pkts;
1231 				v6_num = trf.ip6.num;
1232 			} else {
1233 				ip4.num = 0;
1234 				ip6.num = 0;
1235 
1236 				outb_inl_pro_spd_process(sp4_out, sa_out,
1237 							 &trf.ip4, &ip4, &ip6,
1238 							 true,
1239 							 &stats->outbound.spd4);
1240 
1241 				outb_inl_pro_spd_process(sp6_out, sa_out,
1242 							 &trf.ip6, &ip6, &ip4,
1243 							 false,
1244 							 &stats->outbound.spd6);
1245 				v4 = ip4.pkts;
1246 				v4_num = ip4.num;
1247 				v6 = ip6.pkts;
1248 				v6_num = ip6.num;
1249 			}
1250 
1251 			route4_pkts(rt4_ctx, v4, v4_num, 0, false);
1252 			route6_pkts(rt6_ctx, v6, v6_num);
1253 		}
1254 	}
1255 }
1256 
1257 /* Poll mode worker when all SAs are of type inline protocol
1258  * and single SA mode is enabled.
1259  */
1260 void
1261 ipsec_poll_mode_wrkr_inl_pr_ss(void)
1262 {
1263 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
1264 			/ US_PER_S * BURST_TX_DRAIN_US;
1265 	uint16_t sa_out_portid = 0, sa_out_proto = 0;
1266 	struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
1267 	uint64_t prev_tsc, diff_tsc, cur_tsc;
1268 	struct rte_ipsec_session *ips = NULL;
1269 	struct lcore_rx_queue *rxql;
1270 	struct ipsec_sa *sa = NULL;
1271 	struct lcore_conf *qconf;
1272 	struct sa_ctx *sa_out;
1273 	uint32_t i, nb_rx, j;
1274 	int32_t socket_id;
1275 	uint32_t lcore_id;
1276 	uint16_t portid;
1277 	uint8_t queueid;
1278 
1279 	prev_tsc = 0;
1280 	lcore_id = rte_lcore_id();
1281 	qconf = &lcore_conf[lcore_id];
1282 	rxql = qconf->rx_queue_list;
1283 	socket_id = rte_lcore_to_socket_id(lcore_id);
1284 
1285 	/* Get SA info */
1286 	sa_out = socket_ctx[socket_id].sa_out;
1287 	if (sa_out && single_sa_idx < sa_out->nb_sa) {
1288 		sa = &sa_out->sa[single_sa_idx];
1289 		ips = ipsec_get_primary_session(sa);
1290 		sa_out_portid = sa->portid;
1291 		if (sa->flags & IP6_TUNNEL)
1292 			sa_out_proto = IPPROTO_IPV6;
1293 		else
1294 			sa_out_proto = IPPROTO_IP;
1295 	}
1296 
1297 	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
1298 
1299 	if (qconf->nb_rx_queue == 0) {
1300 		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
1301 			lcore_id);
1302 		return;
1303 	}
1304 
1305 	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
1306 
1307 	for (i = 0; i < qconf->nb_rx_queue; i++) {
1308 		portid = rxql[i].port_id;
1309 		queueid = rxql[i].queue_id;
1310 		RTE_LOG(INFO, IPSEC,
1311 			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
1312 			lcore_id, portid, queueid);
1313 	}
1314 
1315 	while (!force_quit) {
1316 		cur_tsc = rte_rdtsc();
1317 
1318 		/* TX queue buffer drain */
1319 		diff_tsc = cur_tsc - prev_tsc;
1320 
1321 		if (unlikely(diff_tsc > drain_tsc)) {
1322 			drain_tx_buffers(qconf);
1323 			prev_tsc = cur_tsc;
1324 		}
1325 
1326 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
1327 			/* Read packets from RX queues */
1328 			portid = rxql[i].port_id;
1329 			queueid = rxql[i].queue_id;
1330 			nb_rx = rte_eth_rx_burst(portid, queueid,
1331 						 pkts, MAX_PKT_BURST);
1332 
1333 			if (nb_rx <= 0)
1334 				continue;
1335 
1336 			core_stats_update_rx(nb_rx);
1337 
1338 			if (is_unprotected_port(portid)) {
1339 				/* Nothing much to do for inbound inline
1340 				 * decrypted traffic.
1341 				 */
1342 				for (j = 0; j < nb_rx; j++) {
1343 					uint32_t ptype, proto;
1344 
1345 					pkt = pkts[j];
1346 					ptype = pkt->packet_type &
1347 						RTE_PTYPE_L3_MASK;
1348 					if (ptype == RTE_PTYPE_L3_IPV4)
1349 						proto = IPPROTO_IP;
1350 					else
1351 						proto = IPPROTO_IPV6;
1352 
1353 					send_single_packet(pkt, portid, proto);
1354 				}
1355 
1356 				continue;
1357 			}
1358 
1359 			/* Free packets if there are no outbound sessions */
1360 			if (unlikely(!ips)) {
1361 				rte_pktmbuf_free_bulk(pkts, nb_rx);
1362 				continue;
1363 			}
1364 
1365 			rte_ipsec_pkt_process(ips, pkts, nb_rx);
1366 
1367 			/* Send pkts out */
1368 			for (j = 0; j < nb_rx; j++) {
1369 				pkt = pkts[j];
1370 
1371 				pkt->l2_len = RTE_ETHER_HDR_LEN;
1372 				send_single_packet(pkt, sa_out_portid,
1373 						   sa_out_proto);
1374 			}
1375 		}
1376 	}
1377 }
1378 
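/*
 * Pick the poll mode worker variant from wrkr_flags (e.g. inline protocol
 * only, single SA) and fall back to the generic worker when no specialized
 * one exists, then run it.
 */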
1379 static void
1380 ipsec_poll_mode_wrkr_launch(void)
1381 {
1382 	static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
1383 		[INL_PR_F]        = ipsec_poll_mode_wrkr_inl_pr,
1384 		[INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
1385 	};
1386 	ipsec_worker_fn_t fn;
1387 
1388 	if (!app_sa_prm.enable) {
1389 		fn = ipsec_poll_mode_worker;
1390 	} else {
1391 		fn = poll_mode_wrkrs[wrkr_flags];
1392 
1393 		/* Default to the generic worker that handles all modes */
1394 		if (!fn)
1395 			fn = ipsec_poll_mode_worker;
1396 	}
1397 
1398 	/* Launch worker */
1399 	(*fn)();
1400 }
1401 
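/* Per-lcore entry point: run the poll mode or event mode worker depending
 * on the packet transfer mode selected in the event helper configuration.
 */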
1402 int ipsec_launch_one_lcore(void *args)
1403 {
1404 	struct eh_conf *conf;
1405 
1406 	conf = (struct eh_conf *)args;
1407 
1408 	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
1409 		/* Run in poll mode */
1410 		ipsec_poll_mode_wrkr_launch();
1411 	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
1412 		/* Run in event mode */
1413 		ipsec_eventmode_worker(conf);
1414 	}
1415 	return 0;
1416 }
1417