/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */
#ifndef _IPSEC_WORKER_H_
#define _IPSEC_WORKER_H_

#include <rte_acl.h>
#include <rte_ethdev.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "ipsec.h"

/* Configure how many packets ahead to prefetch when reading packets */
#define PREFETCH_OFFSET	3
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

enum pkt_type {
	PKT_TYPE_PLAIN_IPV4 = 1,
	PKT_TYPE_IPSEC_IPV4,
	PKT_TYPE_PLAIN_IPV6,
	PKT_TYPE_IPSEC_IPV6,
	PKT_TYPE_INVALID
};

enum {
	PKT_DROPPED = 0,
	PKT_FORWARDED,
	PKT_POSTED	/* for lookaside case */
};

struct route_table {
	struct rt_ctx *rt4_ctx;
	struct rt_ctx *rt6_ctx;
};

/*
 * Configuration required by the event mode worker with TX internal port.
 */
struct __rte_cache_aligned lcore_conf_ev_tx_int_port_wrkr {
	struct ipsec_ctx inbound;
	struct ipsec_ctx outbound;
	struct route_table rt;
};

void ipsec_poll_mode_worker(void);
void ipsec_poll_mode_wrkr_inl_pr(void);
void ipsec_poll_mode_wrkr_inl_pr_ss(void);

int ipsec_launch_one_lcore(void *args);

/*
 * Helper routine for inline and CPU (synchronous) processing.
 * It exists only to satisfy inbound_sa_check() and get_hop_for_offload_pkt()
 * and should be removed in the future.
 */
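/*
 * Illustrative use (a sketch, not taken from this file; 'sa', 'mb' and 'n'
 * are hypothetical locals): once a group of mbufs known to share the same
 * SA has been gathered, tag them so that the SP/routing helpers can find
 * the SA in the mbuf private area:
 *
 *	prep_process_group(sa, mb, n);
 */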
static inline void
prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint32_t j;
	struct ipsec_mbuf_metadata *priv;

	for (j = 0; j != cnt; j++) {
		priv = get_priv(mb[j]);
		priv->sa = sa;
		/* Setup TSO related fields if TSO is enabled */
		if (priv->sa->mss) {
			uint32_t ptype = mb[j]->packet_type;
			/* only TCP is supported */
			if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
				mb[j]->tso_segsz = priv->sa->mss;
				if ((IS_TUNNEL(priv->sa->flags))) {
					mb[j]->outer_l3_len = mb[j]->l3_len;
					mb[j]->outer_l2_len = mb[j]->l2_len;
					mb[j]->ol_flags |=
						RTE_MBUF_F_TX_TUNNEL_ESP;
					if (RTE_ETH_IS_IPV4_HDR(ptype))
						mb[j]->ol_flags |=
						RTE_MBUF_F_TX_OUTER_IP_CKSUM;
				}
				mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
				mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
						RTE_MBUF_F_TX_TCP_CKSUM);
				if (RTE_ETH_IS_IPV4_HDR(ptype))
					mb[j]->ol_flags |=
						RTE_MBUF_F_TX_OUTER_IPV4;
				else
					mb[j]->ol_flags |=
						RTE_MBUF_F_TX_OUTER_IPV6;
			}
		}
	}
}

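/* Trim any bytes (e.g. Ethernet padding) that extend past the length
 * advertised in the IPv4 header; l2_len is the L2 byte count still present
 * in front of the header.
 */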
static __rte_always_inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
	uint32_t l2_len)
{
	uint32_t plen, trim;

	plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

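/* IPv6 counterpart of adjust_ipv4_pktlen(): payload_len excludes the fixed
 * IPv6 header, hence the additional sizeof(*iph) term.
 */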
static __rte_always_inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
	uint32_t l2_len)
{
	uint32_t plen, trim;

	plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

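/*
 * Classify a single packet: the Ethernet header is stripped, the mbuf is
 * sorted into the plain IPv4, plain IPv6 or IPsec sub-flow of 't', and the
 * L3/L4 header lengths are recorded in pkt->tx_offload (l2_len stays zero
 * because the Ethernet header has been removed). Packets of unsupported
 * types and incompletely reassembled packets are freed. For packets already
 * handled by inline security offload (RTE_MBUF_F_RX_SEC_OFFLOAD), the SA
 * registered through the security dynfield is saved in the mbuf private
 * area for later SP-SA checks.
 */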
static __rte_always_inline void
prepare_one_packet(void *ctx, struct rte_mbuf *pkt,
		   struct ipsec_traffic *t)
{
	uint32_t ptype = pkt->packet_type;
	const struct rte_ipv4_hdr *iph4;
	const struct rte_ipv6_hdr *iph6;
	uint32_t tun_type, l3_type;
	uint64_t tx_offload;
	uint16_t l3len;

	if (is_ip_reassembly_incomplete(pkt) > 0) {
		free_reassembly_fail_pkt(pkt);
		return;
	}

	tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
	l3_type = ptype & RTE_PTYPE_L3_MASK;

	if (RTE_ETH_IS_IPV4_HDR(l3_type)) {
		iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
			RTE_ETHER_HDR_LEN);
		adjust_ipv4_pktlen(pkt, iph4, 0);

		if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		} else {
			t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
			t->ip4.pkts[(t->ip4.num)++] = pkt;
		}
		tx_offload = sizeof(*iph4) << RTE_MBUF_L2_LEN_BITS;
	} else if (RTE_ETH_IS_IPV6_HDR(l3_type)) {
		int next_proto;
		size_t ext_len;
		uint8_t *p;

		/* get protocol type */
		iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
			RTE_ETHER_HDR_LEN);
		adjust_ipv6_pktlen(pkt, iph6, 0);

		l3len = sizeof(struct ip6_hdr);

		if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		} else {
			t->ip6.data[t->ip6.num] = &iph6->proto;
			t->ip6.pkts[(t->ip6.num)++] = pkt;
		}

		/* Determine l3 header size up to ESP extension by walking
		 * through extension headers.
		 */
		if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
		     l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
			p = rte_pktmbuf_mtod(pkt, uint8_t *);
			next_proto = iph6->proto;
			while (next_proto != IPPROTO_ESP &&
			       l3len < pkt->data_len &&
			       (next_proto = rte_ipv6_get_next_ext(p + l3len,
						next_proto, &ext_len)) >= 0)
				l3len += ext_len;

			/* Drop pkt when IPv6 header exceeds first seg size */
			if (unlikely(l3len > pkt->data_len)) {
				free_pkts(&pkt, 1);
				return;
			}
		}
		tx_offload = l3len << RTE_MBUF_L2_LEN_BITS;
	} else {
		/* Unknown/Unsupported type, drop the packet */
		RTE_LOG_DP(DEBUG, IPSEC, "Unsupported packet type 0x%x\n", ptype);
		free_pkts(&pkt, 1);
		return;
	}

	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
		tx_offload |= (sizeof(struct rte_tcp_hdr) <<
			       (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
	else if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
		tx_offload |= (sizeof(struct rte_udp_hdr) <<
			       (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
	pkt->tx_offload = tx_offload;

	/* Check if the packet has been processed inline. For inline protocol
	 * processed packets, the metadata in the mbuf can be used to identify
	 * the security processing done on the packet. The metadata will be
	 * used to retrieve the application registered userdata associated
	 * with the security session.
	 */

	if (ctx && pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
		struct ipsec_sa *sa;
		struct ipsec_mbuf_metadata *priv;

		sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		if (sa == NULL) {
			/* userdata could not be retrieved */
			return;
		}

		/* Save SA as priv member in mbuf. This will be used in the
		 * IPsec selector (SP-SA) check.
		 */

		priv = get_priv(pkt);
		priv->sa = sa;
	}
}

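/*
 * Classify a burst of received packets into the ipsec/ip4/ip6 sub-flows of
 * 't', prefetching PREFETCH_OFFSET packets ahead. A typical call site is a
 * poll-mode RX loop (a sketch only; 'portid', 'queueid', 'sec_ctx' and
 * 'traffic' are illustrative names):
 *
 *	nb_rx = rte_eth_rx_burst(portid, queueid, pkts, MAX_PKT_BURST);
 *	if (nb_rx > 0)
 *		prepare_traffic(sec_ctx, pkts, &traffic, nb_rx);
 *
 * 'sec_ctx' may be NULL when no inline security offload is in use.
 */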
static __rte_always_inline void
prepare_traffic(void *ctx, struct rte_mbuf **pkts,
		struct ipsec_traffic *t, uint16_t nb_pkts)
{
	int32_t i;

	t->ipsec.num = 0;
	t->ip4.num = 0;
	t->ip6.num = 0;

	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
					void *));
		prepare_one_packet(ctx, pkts[i], t);
	}
	/* Process remaining packets */
	for (; i < nb_pkts; i++)
		prepare_one_packet(ctx, pkts[i], t);
}

/* Send burst of packets on an output interface */
static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
	struct rte_mbuf **m_table;
	int32_t ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);

	core_stats_update_tx(ret);

	if (unlikely(ret < n)) {
		do {
			free_pkts(&m_table[ret], 1);
		} while (++ret < n);
	}

	return 0;
}

/*
 * Helper function to fragment one packet and queue the fragments for TX.
 */
static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
	uint16_t port, uint8_t proto)
{
	struct rte_ether_hdr *ethhdr;
	struct rte_ipv4_hdr *ip;
	struct rte_mbuf *pkt;
	struct buffer *tbl;
	uint32_t len, n, i;
	int32_t rc;

	tbl = qconf->tx_mbufs + port;
	len = tbl->len;

	/* free space for new fragments */
	if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
		send_burst(qconf, len, port);
		len = 0;
	}

	n = RTE_DIM(tbl->m_table) - len;

	/* Strip the ethernet header that was prepended earlier */
	rte_pktmbuf_adj(m, RTE_ETHER_HDR_LEN);

	if (proto == IPPROTO_IP)
		rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
			n, mtu_size, m->pool, qconf->frag.pool_indir);
	else
		rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
			n, mtu_size, m->pool, qconf->frag.pool_indir);

	if (rc < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: failed to fragment packet with size %u, "
			"error code: %d\n",
			__func__, m->pkt_len, rte_errno);
		rc = 0;
	}

	i = len;
	len += rc;
	for (; i < len; i++) {
		pkt = tbl->m_table[i];

		/* Update Ethernet header */
		ethhdr = (struct rte_ether_hdr *)
			rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		if (proto == IPPROTO_IP) {
			ethhdr->ether_type =
				rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
			/* Update minimum offload data */
			pkt->l3_len = sizeof(struct rte_ipv4_hdr);
			pkt->ol_flags |= qconf->outbound.ipv4_offloads;

			ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
			ip->hdr_checksum = 0;

			/* calculate IPv4 cksum in SW */
			if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
				ip->hdr_checksum = rte_ipv4_cksum(ip);
		} else {
			ethhdr->ether_type =
				rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);

			/* Update minimum offload data */
			pkt->l3_len = sizeof(struct rte_ipv6_hdr);
			pkt->ol_flags |= qconf->outbound.ipv6_offloads;
		}

		memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
		       sizeof(struct rte_ether_addr));
		memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
		       sizeof(struct rte_ether_addr));
	}

	free_pkts(&m, 1);
	return len;
}

/* Enqueue a single packet, and send burst if queue is filled */
static __rte_always_inline int32_t
send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;

	/* L2 header is already part of packet */
	if (m->pkt_len - RTE_ETHER_HDR_LEN <= mtu_size) {
		qconf->tx_mbufs[port].m_table[len] = m;
		len++;

	/* need to fragment the packet */
	} else if (frag_tbl_sz > 0)
		len = send_fragment_packet(qconf, m, port, proto);
	else
		free_pkts(&m, 1);

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}

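/*
 * Inbound SPD check: classify the packets in 'ip' against the SP ACL
 * context. The ACL verdict is BYPASS, DISCARD or the matching SA index
 * plus one; protected packets are additionally verified against the SA
 * table with inbound_sa_check(). Surviving packets are compacted in place
 * and ip->num is updated.
 */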
static __rte_always_inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
		uint16_t lim, struct ipsec_spd_stats *stats)
{
	struct rte_mbuf *m;
	uint32_t i, j, res, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (res == BYPASS) {
			ip->pkts[j++] = m;
			stats->bypass++;
			continue;
		}
		if (res == DISCARD) {
			free_pkts(&m, 1);
			stats->discard++;
			continue;
		}

		/* Only check SPI match for processed IPSec packets */
		if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
			stats->discard++;
			free_pkts(&m, 1);
			continue;
		}

		sa_idx = res - 1;
		if (!inbound_sa_check(sa, m, sa_idx)) {
			stats->discard++;
			free_pkts(&m, 1);
			continue;
		}
		ip->pkts[j++] = m;
		stats->protect++;
	}
	ip->num = j;
}

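/*
 * For offloaded packets the output port comes from the SA saved in the mbuf
 * private area rather than from an LPM lookup. For IPv4 the port is returned
 * with RTE_LPM_LOOKUP_SUCCESS set (0 on failure); for IPv6 the raw port id
 * is returned (BAD_PORT on failure), matching the conventions used by the
 * route4_pkts()/route6_pkts() callers below.
 */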
static __rte_always_inline uint32_t
get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(pkt);

	sa = priv->sa;
	if (unlikely(sa == NULL)) {
		RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
		goto fail;
	}

	if (is_ipv6)
		return sa->portid;

	/* else */
	return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);

fail:
	if (is_ipv6)
		return BAD_PORT;

	/* else */
	return 0;
}

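/*
 * Route and transmit a burst of IPv4 packets: an LPM lookup provides the
 * next hop for non-inline packets, inline packets take the port from their
 * SA, then the Ethernet header is prepended and each packet is queued via
 * send_single_packet().
 */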
static __rte_always_inline void
route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
	    uint32_t nb_pkts, uint64_t tx_offloads, bool ip_cksum)
{
	uint32_t hop[MAX_PKT_BURST * 2];
	uint32_t dst_ip[MAX_PKT_BURST * 2];
	struct rte_ether_hdr *ethhdr;
	uint32_t pkt_hop = 0;
	uint16_t i, offset;
	uint16_t lpm_pkts = 0;
	unsigned int lcoreid = rte_lcore_id();
	struct rte_mbuf *pkt;
	uint16_t port;

	if (nb_pkts == 0)
		return;

	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	 */

	for (i = 0; i < nb_pkts; i++) {
		pkt = pkts[i];
		if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			 */
			offset = offsetof(struct ip, ip_dst);
			dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkt,
					uint32_t *, offset);
			dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
			lpm_pkts++;
		}
	}

	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);

	lpm_pkts = 0;

	for (i = 0; i < nb_pkts; i++) {
		pkt = pkts[i];
		if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkt, 0);
		} else {
			/* Need to use hop returned by lookup */
			pkt_hop = hop[lpm_pkts++];
		}

		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
			core_statistics[lcoreid].lpm4.miss++;
			free_pkts(&pkt, 1);
			continue;
		}

		port = pkt_hop & 0xff;

		/* Update minimum offload data */
		pkt->l3_len = sizeof(struct rte_ipv4_hdr);
		pkt->l2_len = RTE_ETHER_HDR_LEN;
		pkt->ol_flags |= RTE_MBUF_F_TX_IPV4;

		/* Update Ethernet header */
		ethhdr = (struct rte_ether_hdr *)
			rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);

		if (ip_cksum) {
			struct rte_ipv4_hdr *ip;

			pkt->ol_flags |= tx_offloads;

			ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
			ip->hdr_checksum = 0;

			/* calculate IPv4 cksum in SW */
			if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
				ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
		       sizeof(struct rte_ether_addr));
		memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
		       sizeof(struct rte_ether_addr));

		send_single_packet(pkt, port, IPPROTO_IP);
	}
}

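/* IPv6 counterpart of route4_pkts(); uses the LPM6 table and BAD_PORT to
 * signal a lookup miss.
 */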
static __rte_always_inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t nb_pkts)
{
	int32_t hop[MAX_PKT_BURST * 2];
	struct rte_ipv6_addr dst_ip[MAX_PKT_BURST * 2];
	struct rte_ether_hdr *ethhdr;
	uint8_t *ip6_dst;
	uint32_t pkt_hop = 0;
	uint16_t i, offset;
	uint16_t lpm_pkts = 0;
	unsigned int lcoreid = rte_lcore_id();
	struct rte_mbuf *pkt;
	uint16_t port;

	if (nb_pkts == 0)
		return;

	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	 */

	for (i = 0; i < nb_pkts; i++) {
		pkt = pkts[i];
		if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			 */
			offset = offsetof(struct ip6_hdr, ip6_dst);
			ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
					offset);
			memcpy(&dst_ip[lpm_pkts], ip6_dst, 16);
			lpm_pkts++;
		}
	}

	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
			lpm_pkts);

	lpm_pkts = 0;

	for (i = 0; i < nb_pkts; i++) {
		pkt = pkts[i];
		if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkt, 1);
		} else {
			/* Need to use hop returned by lookup */
			pkt_hop = (uint16_t)hop[lpm_pkts++];
		}

		if (pkt_hop == BAD_PORT) {
			core_statistics[lcoreid].lpm6.miss++;
			free_pkts(&pkt, 1);
			continue;
		}

		port = pkt_hop & 0xff;

		/* Update minimum offload data */
		pkt->ol_flags |= RTE_MBUF_F_TX_IPV6;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		/* Update Ethernet header */
		ethhdr = (struct rte_ether_hdr *)
			rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
		       sizeof(struct rte_ether_addr));
		memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
		       sizeof(struct rte_ether_addr));

		send_single_packet(pkt, port, IPPROTO_IPV6);
	}
}

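/*
 * Flush any partially filled per-port TX buffers. Poll-mode workers are
 * expected to call this periodically; a common pattern (a sketch only,
 * 'prev_tsc' and 'cur_tsc' are illustrative locals) converts
 * BURST_TX_DRAIN_US to TSC cycles once and drains when that interval has
 * elapsed:
 *
 *	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
 *		US_PER_S * BURST_TX_DRAIN_US;
 *
 *	cur_tsc = rte_rdtsc();
 *	if (unlikely(cur_tsc - prev_tsc > drain_tsc)) {
 *		drain_tx_buffers(qconf);
 *		prev_tsc = cur_tsc;
 *	}
 */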
static __rte_always_inline void
drain_tx_buffers(struct lcore_conf *qconf)
{
	struct buffer *buf;
	uint32_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		buf = &qconf->tx_mbufs[portid];
		if (buf->len == 0)
			continue;
		send_burst(qconf, buf->len, portid);
		buf->len = 0;
	}
}

#endif /* _IPSEC_WORKER_H_ */