/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"
#include "sad.h"

#if defined(__ARM_NEON)
#include "ipsec_lpm_neon.h"
#endif

struct port_drv_mode_data {
	void *sess;
	void *ctx;
};

typedef void (*ipsec_worker_fn_t)(void);

int ip_reassembly_dynfield_offset = -1;
uint64_t ip_reassembly_dynflag;

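/*
 * Classify the packet from its parsed ptype and return, via *nlp, a
 * pointer to the next-protocol field used for the SP (ACL) lookup.
 */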
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;
	uint32_t ptype = pkt->packet_type;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	rte_prefetch0(eth);

	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}

static inline void
update_mac_addrs(struct rte_ether_hdr *ethhdr, uint16_t portid)
{
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}

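/*
 * Event vector attributes: start from a sentinel port (0xFFFF); the first
 * packet seen sets vec->port, and any later packet from a different port
 * clears attr_valid, i.e. the vector no longer has one common port.
 */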
static inline void
ev_vector_attr_init(struct rte_event_vector *vec)
{
	vec->attr_valid = 1;
	vec->port = 0xFFFF;
	vec->queue = 0;
}

static inline void
ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
{
	if (vec->port == 0xFFFF) {
		vec->port = pkt->port;
		return;
	}
	if (vec->attr_valid && (vec->port != pkt->port))
		vec->attr_valid = 0;
}

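/*
 * Build the per-port session table used by the driver-mode worker: for
 * each port, the first outbound SA with an inline-protocol session wins.
 */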
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
			 struct port_drv_mode_data *data,
			 uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];
		if (!sa)
			continue;

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id %d >= table size %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only the first inline session found for a given port */
		if (data[sa->portid].sess)
			continue;
		data[sa->portid].sess = pri_sess->security.ses;
		data[sa->portid].ctx = pri_sess->security.ctx;
	}
}

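/*
 * Single-packet SP (ACL) lookup. Returns 0 to drop the packet (no SP ctx
 * or DISCARD), else 1 with *sa_idx set to the SA index, or to UINT32_MAX
 * for BYPASS.
 */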
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}

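/*
 * Bulk SP lookup for plain traffic: DISCARD frees the packet, BYPASS keeps
 * it in the ip list (compacted in place), and anything else moves it to
 * the ipsec list with its SA index stored in ipsec->res.
 */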
static inline void
check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
	      struct traffic_type *ipsec)
{
	uint32_t i, j, res;
	struct rte_mbuf *m;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			ipsec->res[ipsec->num] = res - 1;
			ipsec->pkts[ipsec->num++] = m;
		}
	}
	ip->num = j;
}

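/*
 * Bulk SP lookup for inline-decrypted inbound traffic: packets that the SP
 * marks as protected are checked against the SA pointer saved in the
 * security dynfield and dropped on a missing SA or SPI mismatch.
 */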
static inline void
check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
		 struct traffic_type *ip)
{
	struct ipsec_sa *sa;
	uint32_t i, j, res;
	struct rte_mbuf *m;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			sa = *(struct ipsec_sa **)rte_security_dynfield(m);
			if (sa == NULL) {
				free_pkts(&m, 1);
				continue;
			}

			/* SPI on the packet should match the one in the SA */
			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
				free_pkts(&m, 1);
				continue;
			}

			ip->pkts[j++] = m;
		}
	}
	ip->num = j;
}

static inline void
ipv4_pkt_l3_len_set(struct rte_mbuf *pkt)
{
	struct rte_ipv4_hdr *ipv4;

	ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
	pkt->l3_len = ipv4->ihl * 4;
}

static inline int
ipv6_pkt_l3_len_set(struct rte_mbuf *pkt)
{
	struct rte_ipv6_hdr *ipv6;
	size_t l3_len, ext_len;
	uint32_t l3_type;
	int next_proto;
	uint8_t *p;

	ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
	l3_len = sizeof(struct rte_ipv6_hdr);
	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;

	if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
		l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
		p = rte_pktmbuf_mtod(pkt, uint8_t *);
		next_proto = ipv6->proto;
		while (next_proto != IPPROTO_ESP &&
			l3_len < pkt->data_len &&
			(next_proto = rte_ipv6_get_next_ext(p + l3_len,
					next_proto, &ext_len)) >= 0)
			l3_len += ext_len;

		/* Drop pkt when IPv6 header exceeds first seg size */
		if (unlikely(l3_len > pkt->data_len))
			return -EINVAL;
	}
	pkt->l3_len = l3_len;

	return 0;
}

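/*
 * LPM lookup on the IPv4 destination address; returns the output port or
 * RTE_MAX_ETHPORTS when no route exists. route6_pkt below is the IPv6
 * (LPM6) counterpart.
 */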
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}

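/*
 * Reset each crypto op to a fresh, not-yet-processed symmetric op bound to
 * the session's security session, with the mbuf attached as m_src.
 */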
static inline void
crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
		struct rte_crypto_op *cop[], uint16_t num)
{
	struct rte_crypto_sym_op *sop;
	uint32_t i;

	const struct rte_crypto_op unproc_cop = {
		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
		.sess_type = RTE_CRYPTO_OP_SECURITY_SESSION,
	};

	for (i = 0; i != num; i++) {
		cop[i]->raw = unproc_cop.raw;
		sop = cop[i]->sym;
		sop->m_src = mb[i];
		sop->m_dst = NULL;
		__rte_security_attach_session(sop, ss->security.ses);
	}
}

static inline void
crypto_prepare_event(struct rte_mbuf *pkt, struct rte_ipsec_session *sess, struct rte_event *ev)
{
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cop;

	/* Get pkt private data */
	priv = get_priv(pkt);
	cop = &priv->cop;

	/* Reset crypto operation data */
	crypto_op_reset(sess, &pkt, &cop, 1);

	/* Update event_ptr with rte_crypto_op */
	ev->event = 0;
	ev->event_ptr = cop;
}

static inline void
free_pkts_from_events(struct rte_event events[], uint16_t count)
{
	struct rte_crypto_op *cop;
	int i;

	for (i = 0; i < count; i++) {
		cop = events[i].event_ptr;
		free_pkts(&cop->sym->m_src, 1);
	}
}

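/*
 * Wrap a packet into a crypto op event and enqueue it to the event crypto
 * adapter. On failure the packet is left to the caller to free.
 */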
static inline int
event_crypto_enqueue(struct rte_mbuf *pkt,
		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
{
	struct rte_ipsec_session *sess;
	struct rte_event ev;
	int ret;

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	crypto_prepare_event(pkt, sess, &ev);

	/* Enqueue event to crypto adapter */
	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
			ev_link->event_port_id, &ev, 1);
	if (unlikely(ret != 1)) {
		/* pkt will be freed by the caller */
		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
		return rte_errno;
	}

	return 0;
}

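/*
 * App-mode inbound path for a single event: plain packets go through SP
 * lookup (and SA cross-check if inline-decrypted) and are routed; ESP
 * packets are resolved via the SAD and posted to the crypto adapter.
 */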
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
	const struct eh_event_link_info *ev_link, struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;
	if (is_ip_reassembly_incomplete(pkt) > 0) {
		free_reassembly_fail_pkt(pkt);
		return PKT_DROPPED;
	}

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_IPSEC_IPV4:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		ipv4_pkt_l3_len_set(pkt);
		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
		sa = ipsec_mask_saptr(sa);
		if (unlikely(sa == NULL)) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
			goto drop_pkt_and_exit;
		}

		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
			goto drop_pkt_and_exit;

		return PKT_POSTED;
	case PKT_TYPE_IPSEC_IPV6:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		if (unlikely(ipv6_pkt_l3_len_set(pkt) != 0))
			goto drop_pkt_and_exit;
		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
		sa = ipsec_mask_saptr(sa);
		if (unlikely(sa == NULL)) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
			goto drop_pkt_and_exit;
		}

		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
			goto drop_pkt_and_exit;

		return PKT_POSTED;
	default:
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
			   type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match the one in the SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *), port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	free_pkts(&pkt, 1);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

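/*
 * App-mode outbound path for a single event: SP lookup selects bypass,
 * inline-protocol offload (packet tagged and routed here), or lookaside
 * (packet posted to the crypto adapter).
 */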
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		const struct eh_event_link_info *ev_link, struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct rte_ether_hdr *ethhdr;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG_DP(DEBUG, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Determine protocol type */
	if (sess->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
		goto lookaside;

	rte_security_set_pkt_metadata(sess->security.ctx,
				      sess->security.ses, pkt, NULL);

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
	/* Update ether type */
	ethhdr->ether_type = (IS_IP4(sa->flags) ? rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
			      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Provide L2 len for Outbound processing */
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	/* Update mac addresses */
	update_mac_addrs(ethhdr, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

lookaside:
	/* prepare pkt - advance start to L3 */
	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);

	if (likely(event_crypto_enqueue(pkt, sa, ev_link) == 0))
		return PKT_POSTED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	free_pkts(&pkt, 1);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

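/*
 * Route the plain IPv4/IPv6 lists of a classified traffic bundle back into
 * the event vector, refreshing MAC addresses and vector attributes.
 * Returns the number of packets kept in the vector.
 */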
static inline int
ipsec_ev_route_ip_pkts(struct rte_event_vector *vec, struct route_table *rt,
		    struct ipsec_traffic *t)
{
	struct rte_ether_hdr *ethhdr;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	uint32_t i, j = 0;

	/* Route IPv4 packets */
	for (i = 0; i < t->ip4.num; i++) {
		pkt = t->ip4.pkts[i];
		port_id = route4_pkt(pkt, rt->rt4_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			update_mac_addrs(ethhdr, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	/* Route IPv6 packets */
	for (i = 0; i < t->ip6.num; i++) {
		pkt = t->ip6.pkts[i];
		port_id = route6_pkt(pkt, rt->rt6_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			update_mac_addrs(ethhdr, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	return j;
}

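/*
 * Inbound vector path: route plain traffic, then submit every ESP packet
 * that resolved to an SA to the crypto adapter in a single enqueue burst.
 */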
static inline int
ipsec_ev_inbound_route_pkts(struct rte_event_vector *vec,
			    struct route_table *rt,
			    struct ipsec_traffic *t,
			    const struct eh_event_link_info *ev_link)
{
	uint32_t ret, i, j, ev_len = 0;
	struct rte_event events[MAX_PKTS];
	struct rte_ipsec_session *sess;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	j = ipsec_ev_route_ip_pkts(vec, rt, t);

	/* Route ESP packets */
	for (i = 0; i < t->ipsec.num; i++) {
		pkt = t->ipsec.pkts[i];
		sa = ipsec_mask_saptr(t->ipsec.saptr[i]);
		if (unlikely(sa == NULL)) {
			free_pkts(&pkt, 1);
			continue;
		}
		sess = ipsec_get_primary_session(sa);
		crypto_prepare_event(pkt, sess, &events[ev_len]);
		ev_len++;
	}

	if (ev_len) {
		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
				ev_link->event_port_id, events, ev_len);
		if (ret < ev_len) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
					ev_len, rte_errno);
			free_pkts_from_events(&events[ret], ev_len - ret);
			return -rte_errno;
		}
	}

	return j;
}

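/*
 * Outbound vector path: route bypass traffic, then either post packets to
 * the crypto adapter (lookaside) or tag them for inline Tx security
 * offload and keep them in the vector.
 */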
static inline int
ipsec_ev_outbound_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx,
		    const struct eh_event_link_info *ev_link)
{
	uint32_t sa_idx, ret, i, j, ev_len = 0;
	struct rte_event events[MAX_PKTS];
	struct rte_ipsec_session *sess;
	struct rte_ether_hdr *ethhdr;
	uint16_t port_id = 0;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	j = ipsec_ev_route_ip_pkts(vec, rt, t);

	/* Handle IPsec packets.
	 * For lookaside IPsec packets, submit to cryptodev queue.
	 * For inline IPsec packets, route the packet.
	 */
	for (i = 0; i < t->ipsec.num; i++) {
		/* Validate sa_idx */
		sa_idx = t->ipsec.res[i];
		pkt = t->ipsec.pkts[i];
		if (unlikely(sa_idx >= sa_ctx->nb_sa)) {
			free_pkts(&pkt, 1);
			continue;
		}
		/* Else the packet has to be protected */
		sa = &(sa_ctx->sa[sa_idx]);
		/* Get IPsec session */
		sess = ipsec_get_primary_session(sa);
		switch (sess->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
			crypto_prepare_event(pkt, sess, &events[ev_len]);
			ev_len++;
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			rte_security_set_pkt_metadata(sess->security.ctx,
						sess->security.ses, pkt, NULL);
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
			port_id = sa->portid;

			/* Fetch outer ip type and update */
			ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
			ethhdr->ether_type = (IS_IP4(sa->flags) ?
					      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
					      rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6));
			update_mac_addrs(ethhdr, port_id);

			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
			break;
		default:
			RTE_LOG(ERR, IPSEC, "SA type not supported\n");
			free_pkts(&pkt, 1);
			break;
		}
	}

	if (ev_len) {
		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
				ev_link->event_port_id, events, ev_len);
		if (ret < ev_len) {
			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
				   ev_len, rte_errno);
			free_pkts_from_events(&events[ret], ev_len - ret);
			return -rte_errno;
		}
	}

	return j;
}

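/* Sort one packet into the plain IPv4/IPv6 or ESP list of the bundle. */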
static inline void
classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	enum pkt_type type;
	uint8_t *nlp;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		t->ip4.data[t->ip4.num] = nlp;
		t->ip4.pkts[(t->ip4.num)++] = pkt;
		break;
	case PKT_TYPE_PLAIN_IPV6:
		t->ip6.data[t->ip6.num] = nlp;
		t->ip6.pkts[(t->ip6.num)++] = pkt;
		break;
	case PKT_TYPE_IPSEC_IPV4:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		ipv4_pkt_l3_len_set(pkt);
		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		break;
	case PKT_TYPE_IPSEC_IPV6:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		if (ipv6_pkt_l3_len_set(pkt) != 0) {
			free_pkts(&pkt, 1);
			return;
		}
		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		break;
	default:
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
			   type);
		free_pkts(&pkt, 1);
		break;
	}
}

static inline int
process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
				struct rte_event_vector *vec,
				const struct eh_event_link_info *ev_link)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	int i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];
		if (is_ip_reassembly_incomplete(pkt) > 0) {
			free_reassembly_fail_pkt(pkt);
			continue;
		}

		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				free_pkts(&pkt, 1);
				continue;
			}
		}

		classify_pkt(pkt, &t);
	}

	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);

	if (t.ipsec.num != 0)
		sad_lookup(&ctx->sa_ctx->sad, t.ipsec.pkts, t.ipsec.saptr, t.ipsec.num);

	return ipsec_ev_inbound_route_pkts(vec, rt, &t, ev_link);
}

static inline int
process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
				 struct rte_event_vector *vec,
				 const struct eh_event_link_info *ev_link)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	uint32_t i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];

		classify_pkt(pkt, &t);

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;
	}

	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);

	return ipsec_ev_outbound_route_pkts(vec, rt, &t, ctx->sa_ctx, ev_link);
}

static inline int
process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
					  struct port_drv_mode_data *data)
{
	struct rte_mbuf *pkt;
	int16_t port_id;
	uint32_t i;
	int j = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		pkt = vec->mbufs[i];
		port_id = pkt->port;

		if (unlikely(!data[port_id].sess)) {
			free_pkts(&pkt, 1);
			continue;
		}
		ipsec_event_pre_forward(pkt, port_id);
		/* Save security session */
		rte_security_set_pkt_metadata(data[port_id].ctx,
					      data[port_id].sess, pkt,
					      NULL);

		/* Mark the packet for Tx security offload */
		pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		vec->mbufs[j++] = pkt;
	}

	return j;
}

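/*
 * Free every packet carried by an event vector (crypto op vectors keep the
 * mbufs behind the ops) and return the vector to its mempool.
 */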
static void
ipsec_event_vector_free(struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;

	if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV_VECTOR) {
		struct rte_crypto_op *cop;
		int i;

		for (i = 0; i < vec->nb_elem; i++) {
			cop = vec->ptrs[i];
			rte_pktmbuf_free(cop->sym->m_src);
		}
	} else {
		rte_pktmbuf_free_bulk(vec->mbufs + vec->elem_offset, vec->nb_elem);
	}
	rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

static inline void
ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			struct eh_event_link_info *links,
			struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	int ret;

	pkt = vec->mbufs[0];

	ev_vector_attr_init(vec);
	core_stats_update_rx(vec->nb_elem);

	if (is_unprotected_port(pkt->port))
		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
						      &lconf->rt, vec, links);
	else
		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
						       &lconf->rt, vec, links);

	if (likely(ret > 0)) {
		core_stats_update_tx(vec->nb_elem);
		vec->nb_elem = ret;
		ret = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						       links[0].event_port_id, ev, 1, 0);
		if (unlikely(ret == 0))
			ipsec_event_vector_free(ev);
	} else {
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	}
}

static inline void
ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
				 struct rte_event *ev,
				 struct port_drv_mode_data *data)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	uint16_t ret;

	pkt = vec->mbufs[0];
	vec->attr_valid = 1;
	vec->port = pkt->port;

	if (!is_unprotected_port(pkt->port))
		vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
									 data);
	if (likely(vec->nb_elem > 0)) {
		ret = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						       links[0].event_port_id, ev, 1, 0);
		if (unlikely(ret == 0))
			ipsec_event_vector_free(ev);
	} else
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

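/*
 * Finish one crypto adapter completion: drop failed ops, prepend a fresh
 * Ethernet header, route by IP version and prime the packet for Tx.
 */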
static inline int
ipsec_ev_cryptodev_process_one_pkt(
		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
		const struct rte_crypto_op *cop, struct rte_mbuf *pkt)
{
	struct rte_ether_hdr *ethhdr;
	uint16_t port_id;
	struct ip *ip;

	/* If operation was not successful, free the packet */
	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
		free_pkts(&pkt, 1);
		return -1;
	}

	ip = rte_pktmbuf_mtod(pkt, struct ip *);

	/* Prepend Ether layer */
	ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);

	/* Route pkt and update required fields */
	if (ip->ip_v == IPVERSION) {
		pkt->ol_flags |= lconf->outbound.ipv4_offloads;
		pkt->l3_len = sizeof(struct ip);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

		port_id = route4_pkt(pkt, lconf->rt.rt4_ctx);
	} else {
		pkt->ol_flags |= lconf->outbound.ipv6_offloads;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);

		port_id = route6_pkt(pkt, lconf->rt.rt6_ctx);
	}

	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
		free_pkts(&pkt, 1);
		return -1;
	}

	/* Update Ether with port's MAC addresses */
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));

	ipsec_event_pre_forward(pkt, port_id);

	return 0;
}

static inline void
ipsec_ev_cryptodev_vector_process(
		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
		const struct eh_event_link_info *links,
		struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	const uint16_t nb_events = 1;
	struct rte_crypto_op *cop;
	struct rte_mbuf *pkt;
	uint16_t enqueued;
	int i, n = 0;

	ev_vector_attr_init(vec);
	/* Transform cop vec into pkt vec */
	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt data */
		cop = vec->ptrs[i];
		pkt = cop->sym->m_src;
		if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
			continue;

		vec->mbufs[n++] = pkt;
		ev_vector_attr_update(vec, pkt);
	}

	if (n == 0) {
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
		return;
	}

	vec->nb_elem = n;
	enqueued = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
			links[0].event_port_id, ev, nb_events, 0);
	if (enqueued != nb_events) {
		RTE_LOG_DP(DEBUG, IPSEC, "Failed to enqueue to tx, ret = %u,"
				" errno = %i\n", enqueued, rte_errno);
		free_pkts(vec->mbufs, vec->nb_elem);
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	} else {
		core_stats_update_tx(n);
	}
}

static inline int
ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			   struct rte_event *ev)
{
	struct rte_crypto_op *cop;
	struct rte_mbuf *pkt;

	/* Get pkt data */
	cop = ev->event_ptr;
	pkt = cop->sym->m_src;

	if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
		return PKT_DROPPED;

	/* Update event */
	ev->mbuf = pkt;

	return PKT_FORWARDED;
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

static void
ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
		       void *args __rte_unused)
{
	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
		ipsec_event_vector_free(&ev);
	else
		rte_pktmbuf_free(ev.mbuf);
}

/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

static void
ipsec_ip_reassembly_dyn_offset_get(void)
{
	/* Retrieve reassembly dynfield offset if available */
	if (ip_reassembly_dynfield_offset < 0)
		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);

	if (ip_reassembly_dynflag == 0) {
		int ip_reassembly_dynflag_offset;
		ip_reassembly_dynflag_offset = rte_mbuf_dynflag_lookup(
				RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
		if (ip_reassembly_dynflag_offset >= 0)
			ip_reassembly_dynflag = RTE_BIT64(ip_reassembly_dynflag_offset);
	}
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
	unsigned int nb_rx = 0, nb_tx;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Zero the whole per-port table, not just its first entry */
	memset(data, 0, sizeof(data));

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use the first session configured for a given port
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
				 RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* We have valid links */

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
			links[0].event_port_id);
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_drv_mode_process(links, &ev, data);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			break;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
				ev.event_type);
			continue;
		}

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {

			if (unlikely(!data[port_id].sess)) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			rte_security_set_pkt_metadata(data[port_id].ctx,
						      data[port_id].sess, pkt,
						      NULL);

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

			/* Provide L2 len for Outbound processing */
			pkt->l2_len = RTE_ETHER_HDR_LEN;
		}

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
							 links[0].event_port_id,
							 &ev, /* events */
							 1,   /* nb_events */
							 0 /* flags */);
		if (!nb_tx)
			rte_pktmbuf_free(ev.mbuf);
	}

	if (ev.u64) {
		ev.op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(links[0].eventdev_id,
					links[0].event_port_id, &ev, 1);
	}

	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
			       ipsec_event_port_flush, NULL);
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0, nb_tx;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* We have valid links */

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save routing table */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.lcore_id = lcore_id;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.ipv4_offloads = tx_offloads.ipv4_offloads;
	lconf.outbound.ipv6_offloads = tx_offloads.ipv6_offloads;
	lconf.outbound.lcore_id = lcore_id;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	ret = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
	if (ret != 0) {
		RTE_LOG(ERR, IPSEC,
			"SAD cache init on lcore %u, failed with code: %d\n",
			lcore_id, ret);
		return;
	}

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	ipsec_ip_reassembly_dyn_offset_get();

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,     /* events */
				1,       /* nb_events */
				0        /* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_process(&lconf, links, &ev);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			core_stats_update_rx(1);
			if (is_unprotected_port(ev.mbuf->port))
				ret = process_ipsec_ev_inbound(&lconf.inbound,
								&lconf.rt, links, &ev);
			else
				ret = process_ipsec_ev_outbound(&lconf.outbound,
								&lconf.rt, links, &ev);
			if (ret != 1)
				/* The pkt has been dropped or posted */
				continue;
			break;
		case RTE_EVENT_TYPE_CRYPTODEV:
			ret = ipsec_ev_cryptodev_process(&lconf, &ev);
			if (unlikely(ret != PKT_FORWARDED))
				continue;
			break;
		case RTE_EVENT_TYPE_CRYPTODEV_VECTOR:
			ipsec_ev_cryptodev_vector_process(&lconf, links, &ev);
			continue;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
				ev.event_type);
			continue;
		}

		core_stats_update_tx(1);
		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
							 links[0].event_port_id,
							 &ev, /* events */
							 1,   /* nb_events */
							 0 /* flags */);
		if (!nb_tx)
			rte_pktmbuf_free(ev.mbuf);
	}

	if (ev.u64) {
		ev.op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(links[0].eventdev_id,
					links[0].event_port_id, &ev, 1);
	}

	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
			       ipsec_event_port_flush, NULL);
}

static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}

static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate ipsec_wrkr params */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch the correct worker after checking
	 * the event device's capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

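/*
 * Outbound SPD processing for inline-protocol SAs: classify with the ACL,
 * batch runs of packets that hit the same SA through
 * rte_ipsec_pkt_process(), and split the results between the match and
 * mismatch traffic types by outer IP version.
 */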
static __rte_always_inline void
outb_inl_pro_spd_process(struct sp_ctx *sp,
			 struct sa_ctx *sa_ctx,
			 struct traffic_type *ip,
			 struct traffic_type *match,
			 struct traffic_type *mismatch,
			 bool match_flag,
			 struct ipsec_spd_stats *stats)
{
	uint32_t prev_sa_idx = UINT32_MAX;
	struct rte_mbuf *ipsec[MAX_PKT_BURST];
	struct rte_ipsec_session *ips;
	uint32_t i, j, j_mis, sa_idx;
	struct ipsec_sa *sa = NULL;
	uint32_t ipsec_num = 0;
	struct rte_mbuf *m;
	uint64_t satp;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = match->num;
	j_mis = mismatch->num;

	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		sa_idx = ip->res[i] - 1;

		if (unlikely(ip->res[i] == DISCARD)) {
			free_pkts(&m, 1);

			stats->discard++;
		} else if (unlikely(ip->res[i] == BYPASS)) {
			match->pkts[j++] = m;

			stats->bypass++;
		} else {
			if (prev_sa_idx == UINT32_MAX) {
				prev_sa_idx = sa_idx;
				sa = &sa_ctx->sa[sa_idx];
				ips = ipsec_get_primary_session(sa);
				satp = rte_ipsec_sa_type(ips->sa);
			}

			if (sa_idx != prev_sa_idx) {
				prep_process_group(sa, ipsec, ipsec_num);

				/* Prepare packets for outbound */
				rte_ipsec_pkt_process(ips, ipsec, ipsec_num);

				/* Copy to current tr or a different tr */
				if (SATP_OUT_IPV4(satp) == match_flag) {
					memcpy(&match->pkts[j], ipsec,
					       ipsec_num * sizeof(void *));
					j += ipsec_num;
				} else {
					memcpy(&mismatch->pkts[j_mis], ipsec,
					       ipsec_num * sizeof(void *));
					j_mis += ipsec_num;
				}

				/* Update to new SA */
				sa = &sa_ctx->sa[sa_idx];
				ips = ipsec_get_primary_session(sa);
				satp = rte_ipsec_sa_type(ips->sa);
				ipsec_num = 0;
			}

			ipsec[ipsec_num++] = m;
			stats->protect++;
		}
	}

	if (ipsec_num) {
		prep_process_group(sa, ipsec, ipsec_num);

		/* Prepare packets for outbound */
		rte_ipsec_pkt_process(ips, ipsec, ipsec_num);

		/* Copy to current tr or a different tr */
		if (SATP_OUT_IPV4(satp) == match_flag) {
			memcpy(&match->pkts[j], ipsec,
			       ipsec_num * sizeof(void *));
			j += ipsec_num;
		} else {
			memcpy(&mismatch->pkts[j_mis], ipsec,
			       ipsec_num * sizeof(void *));
			j_mis += ipsec_num;
		}
	}
	match->num = j;
	mismatch->num = j_mis;
}

/* Poll mode worker when all SAs are of type inline protocol */
void
ipsec_poll_mode_wrkr_inl_pr(void)
{
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	struct ipsec_core_statistics *stats;
	struct rt_ctx *rt4_ctx, *rt6_ctx;
	struct sa_ctx *sa_in, *sa_out;
	struct traffic_type ip4, ip6;
	struct lcore_rx_queue *rxql;
	struct rte_mbuf **v4, **v6;
	struct ipsec_traffic trf;
	struct lcore_conf *qconf;
	uint16_t v4_num, v6_num;
	int32_t socket_id;
	uint32_t lcore_id;
	int32_t i, nb_rx;
	uint16_t portid, queueid;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);
	stats = &core_statistics[lcore_id];

	rt4_ctx = socket_ctx[socket_id].rt_ip4;
	rt6_ctx = socket_ctx[socket_id].rt_ip6;

	sp4_in = socket_ctx[socket_id].sp_ip4_in;
	sp6_in = socket_ctx[socket_id].sp_ip6_in;
	sa_in = socket_ctx[socket_id].sa_in;

	sp4_out = socket_ctx[socket_id].sp_ip4_out;
	sp6_out = socket_ctx[socket_id].sp_ip6_out;
	sa_out = socket_ctx[socket_id].sa_out;

	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
			lcore_id);
		return;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
			lcore_id, portid, queueid);
	}

	ipsec_ip_reassembly_dyn_offset_get();

	while (!force_quit) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			/* Read packets from RX queues */
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
					pkts, MAX_PKT_BURST);

			if (nb_rx <= 0)
				continue;

			core_stats_update_rx(nb_rx);

			prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);

			/* Drop any IPsec traffic */
			free_pkts(trf.ipsec.pkts, trf.ipsec.num);

			if (is_unprotected_port(portid)) {
				inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
					      trf.ip4.num,
					      &stats->inbound.spd4);

				inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
					      trf.ip6.num,
					      &stats->inbound.spd6);

				v4 = trf.ip4.pkts;
				v4_num = trf.ip4.num;
				v6 = trf.ip6.pkts;
				v6_num = trf.ip6.num;
			} else {
				ip4.num = 0;
				ip6.num = 0;

				outb_inl_pro_spd_process(sp4_out, sa_out,
							 &trf.ip4, &ip4, &ip6,
							 true,
							 &stats->outbound.spd4);

				outb_inl_pro_spd_process(sp6_out, sa_out,
							 &trf.ip6, &ip6, &ip4,
							 false,
							 &stats->outbound.spd6);
				v4 = ip4.pkts;
				v4_num = ip4.num;
				v6 = ip6.pkts;
				v6_num = ip6.num;
			}

#if defined __ARM_NEON
			route4_pkts_neon(rt4_ctx, v4, v4_num, 0, false);
			route6_pkts_neon(rt6_ctx, v6, v6_num);
#else
			route4_pkts(rt4_ctx, v4, v4_num, 0, false);
			route6_pkts(rt6_ctx, v6, v6_num);
#endif
		}
	}
}

/* Poll mode worker when all SAs are of type inline protocol
 * and single sa mode is enabled.
 */
void
ipsec_poll_mode_wrkr_inl_pr_ss(void)
{
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;
	uint16_t sa_out_portid = 0, sa_out_proto = 0;
	struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	struct rte_ipsec_session *ips = NULL;
	struct lcore_rx_queue *rxql;
	struct ipsec_sa *sa = NULL;
	struct lcore_conf *qconf;
	struct sa_ctx *sa_out;
	uint32_t i, nb_rx, j;
	int32_t socket_id;
	uint32_t lcore_id;
	uint16_t portid, queueid;

	prev_tsc = 0;
	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];
	rxql = qconf->rx_queue_list;
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Get SA info */
	sa_out = socket_ctx[socket_id].sa_out;
	if (sa_out && single_sa_idx < sa_out->nb_sa) {
		sa = &sa_out->sa[single_sa_idx];
		ips = ipsec_get_primary_session(sa);
		sa_out_portid = sa->portid;
		if (sa->flags & IP6_TUNNEL)
			sa_out_proto = IPPROTO_IPV6;
		else
			sa_out_proto = IPPROTO_IP;
	}

	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;

	if (qconf->nb_rx_queue == 0) {
		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
			lcore_id);
		return;
	}

	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < qconf->nb_rx_queue; i++) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		RTE_LOG(INFO, IPSEC,
			" -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
			lcore_id, portid, queueid);
	}

	while (!force_quit) {
		cur_tsc = rte_rdtsc();

		/* TX queue buffer drain */
		diff_tsc = cur_tsc - prev_tsc;

		if (unlikely(diff_tsc > drain_tsc)) {
			drain_tx_buffers(qconf);
			prev_tsc = cur_tsc;
		}

		for (i = 0; i < qconf->nb_rx_queue; ++i) {
			/* Read packets from RX queues */
			portid = rxql[i].port_id;
			queueid = rxql[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid,
						 pkts, MAX_PKT_BURST);

			if (nb_rx <= 0)
				continue;

			core_stats_update_rx(nb_rx);

			if (is_unprotected_port(portid)) {
				/* Nothing much to do for inbound inline
				 * decrypted traffic.
				 */
				for (j = 0; j < nb_rx; j++) {
					uint32_t ptype, proto;

					pkt = pkts[j];
					ptype = pkt->packet_type &
						RTE_PTYPE_L3_MASK;
					if (ptype == RTE_PTYPE_L3_IPV4)
						proto = IPPROTO_IP;
					else
						proto = IPPROTO_IPV6;

					send_single_packet(pkt, portid, proto);
				}

				continue;
			}

			/* Free packets if there are no outbound sessions */
			if (unlikely(!ips)) {
				rte_pktmbuf_free_bulk(pkts, nb_rx);
				continue;
			}

			rte_ipsec_pkt_process(ips, pkts, nb_rx);

			/* Send pkts out */
			for (j = 0; j < nb_rx; j++) {
				pkt = pkts[j];

				pkt->l2_len = RTE_ETHER_HDR_LEN;
				send_single_packet(pkt, sa_out_portid,
						   sa_out_proto);
			}
		}
	}
}

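/*
 * Select the specialised poll-mode worker matching the global wrkr_flags;
 * combinations without a dedicated worker fall back to the generic
 * ipsec_poll_mode_worker.
 */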
static void
ipsec_poll_mode_wrkr_launch(void)
{
	static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
		[INL_PR_F]        = ipsec_poll_mode_wrkr_inl_pr,
		[INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
	};
	ipsec_worker_fn_t fn;

	if (!app_sa_prm.enable) {
		fn = ipsec_poll_mode_worker;
	} else {
		fn = poll_mode_wrkrs[wrkr_flags];

		/* Always default to all mode worker */
		if (!fn)
			fn = ipsec_poll_mode_worker;
	}

	/* Launch worker */
	(*fn)();
}

int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_wrkr_launch();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}