/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */
#ifndef _IPSEC_WORKER_H_
#define _IPSEC_WORKER_H_

#include <rte_acl.h>
#include <rte_ethdev.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "ipsec.h"

/* Configure how many packets ahead to prefetch when reading packets */
#define PREFETCH_OFFSET	3
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

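/* Packet classification: plain vs. IPsec traffic, per IP version. */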
enum pkt_type {
	PKT_TYPE_PLAIN_IPV4 = 1,
	PKT_TYPE_IPSEC_IPV4,
	PKT_TYPE_PLAIN_IPV6,
	PKT_TYPE_IPSEC_IPV6,
	PKT_TYPE_INVALID
};

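/* Per-packet processing result codes. */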
enum {
	PKT_DROPPED = 0,
	PKT_FORWARDED,
	PKT_POSTED	/* for lookaside case */
};

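/* IPv4 and IPv6 routing (LPM) contexts used by a worker. */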
struct route_table {
	struct rt_ctx *rt4_ctx;
	struct rt_ctx *rt6_ctx;
};

/*
 * Configuration required by the event mode worker with Tx internal port
 */
struct __rte_cache_aligned lcore_conf_ev_tx_int_port_wrkr {
	struct ipsec_ctx inbound;
	struct ipsec_ctx outbound;
	struct route_table rt;
};

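/*
 * Poll mode worker main loops. The _inl_pr variants are dedicated fast paths
 * for inline protocol offload; the _ss variant additionally assumes the
 * single-SA mode.
 */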
void ipsec_poll_mode_worker(void);
void ipsec_poll_mode_wrkr_inl_pr(void);
void ipsec_poll_mode_wrkr_inl_pr_ss(void);

int ipsec_launch_one_lcore(void *args);

/*
 * Helper routine for inline and CPU (synchronous) crypto processing.
 * It exists only to satisfy inbound_sa_check() and get_hop_for_offload_pkt()
 * and should be removed in the future.
 */
static inline void
prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint32_t j;
	struct ipsec_mbuf_metadata *priv;

	for (j = 0; j != cnt; j++) {
		priv = get_priv(mb[j]);
		priv->sa = sa;
		/* Set up TSO related fields if TSO is enabled */
		if (priv->sa->mss) {
			uint32_t ptype = mb[j]->packet_type;
			/* only TCP is supported */
			if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
				mb[j]->tso_segsz = priv->sa->mss;
				if (IS_TUNNEL(priv->sa->flags)) {
					mb[j]->outer_l3_len = mb[j]->l3_len;
					mb[j]->outer_l2_len = mb[j]->l2_len;
					mb[j]->ol_flags |=
						RTE_MBUF_F_TX_TUNNEL_ESP;
					if (RTE_ETH_IS_IPV4_HDR(ptype))
						mb[j]->ol_flags |=
						RTE_MBUF_F_TX_OUTER_IP_CKSUM;
				}
				mb[j]->l4_len = sizeof(struct rte_tcp_hdr);
				mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG |
						RTE_MBUF_F_TX_TCP_CKSUM);
				if (RTE_ETH_IS_IPV4_HDR(ptype))
					mb[j]->ol_flags |=
						RTE_MBUF_F_TX_OUTER_IPV4;
				else
					mb[j]->ol_flags |=
						RTE_MBUF_F_TX_OUTER_IPV6;
			}
		}
	}
}

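/*
 * Trim any L2 padding so the mbuf length matches the length declared in the
 * IPv4 header.
 */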
static __rte_always_inline void
adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph,
	uint32_t l2_len)
{
	uint32_t plen, trim;

	plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

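/* IPv6 variant: payload length plus the fixed header size. */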
static __rte_always_inline void
adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
	uint32_t l2_len)
{
	uint32_t plen, trim;

	plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	}
}

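/*
 * Classify one received packet: strip the Ethernet header, sort the mbuf into
 * the ipsec/ip4/ip6 traffic groups, pre-compute the tx_offload lengths and,
 * for inline-processed packets, save the SA pointer in the mbuf private area.
 */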
static __rte_always_inline void
prepare_one_packet(void *ctx, struct rte_mbuf *pkt,
		   struct ipsec_traffic *t)
{
	uint32_t ptype = pkt->packet_type;
	const struct rte_ipv4_hdr *iph4;
	const struct rte_ipv6_hdr *iph6;
	uint32_t tun_type, l3_type;
	uint64_t tx_offload;
	uint16_t l3len;

	if (is_ip_reassembly_incomplete(pkt) > 0) {
		free_reassembly_fail_pkt(pkt);
		return;
	}

	tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
	l3_type = ptype & RTE_PTYPE_L3_MASK;

	if (RTE_ETH_IS_IPV4_HDR(l3_type)) {
		iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt,
			RTE_ETHER_HDR_LEN);
		adjust_ipv4_pktlen(pkt, iph4, 0);

		if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		} else {
			t->ip4.data[t->ip4.num] = &iph4->next_proto_id;
			t->ip4.pkts[(t->ip4.num)++] = pkt;
		}
		tx_offload = sizeof(*iph4) << RTE_MBUF_L2_LEN_BITS;
	} else if (RTE_ETH_IS_IPV6_HDR(l3_type)) {
		int next_proto;
		size_t ext_len;
		uint8_t *p;

		/* get protocol type */
		iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt,
			RTE_ETHER_HDR_LEN);
		adjust_ipv6_pktlen(pkt, iph6, 0);

		l3len = sizeof(struct ip6_hdr);

		if (tun_type == RTE_PTYPE_TUNNEL_ESP) {
			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
		} else {
			t->ip6.data[t->ip6.num] = &iph6->proto;
			t->ip6.pkts[(t->ip6.num)++] = pkt;
		}

		/* Determine l3 header size up to ESP extension by walking
		 * through extension headers.
		 */
		if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
		    l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
			p = rte_pktmbuf_mtod(pkt, uint8_t *);
			next_proto = iph6->proto;
			while (next_proto != IPPROTO_ESP &&
			       l3len < pkt->data_len &&
			       (next_proto = rte_ipv6_get_next_ext(p + l3len,
						next_proto, &ext_len)) >= 0)
				l3len += ext_len;

			/* Drop pkt when IPv6 header exceeds first seg size */
			if (unlikely(l3len > pkt->data_len)) {
				free_pkts(&pkt, 1);
				return;
			}
		}
		tx_offload = l3len << RTE_MBUF_L2_LEN_BITS;
	} else {
		/* Unknown/Unsupported type, drop the packet */
		RTE_LOG_DP(DEBUG, IPSEC, "Unsupported packet type 0x%x\n", ptype);
		free_pkts(&pkt, 1);
		return;
	}

	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
		tx_offload |= (sizeof(struct rte_tcp_hdr) <<
			       (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
	else if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
		tx_offload |= (sizeof(struct rte_udp_hdr) <<
			       (RTE_MBUF_L2_LEN_BITS + RTE_MBUF_L3_LEN_BITS));
	pkt->tx_offload = tx_offload;

	/* Check if the packet has been processed inline. For inline protocol
	 * processed packets, the metadata in the mbuf can be used to identify
	 * the security processing done on the packet. The metadata will be
	 * used to retrieve the application registered userdata associated
	 * with the security session.
	 */

	if (ctx && pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
		struct ipsec_sa *sa;
		struct ipsec_mbuf_metadata *priv;

		sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		if (sa == NULL) {
			/* userdata could not be retrieved */
			return;
		}

		/* Save SA as priv member in mbuf. This will be used in the
		 * IPsec selector (SP-SA) check.
		 */

		priv = get_priv(pkt);
		priv->sa = sa;
	}
}

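/*
 * Classify a burst of received packets into the ipsec/ip4/ip6 groups of 't',
 * prefetching PREFETCH_OFFSET packets ahead of the one being processed.
 */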
static __rte_always_inline void
prepare_traffic(void *ctx, struct rte_mbuf **pkts,
		struct ipsec_traffic *t, uint16_t nb_pkts)
{
	int32_t i;

	t->ipsec.num = 0;
	t->ip4.num = 0;
	t->ip6.num = 0;

	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
					void *));
		prepare_one_packet(ctx, pkts[i], t);
	}
	/* Process remaining packets */
	for (; i < nb_pkts; i++)
		prepare_one_packet(ctx, pkts[i], t);
}

/* Send burst of packets on an output interface */
static __rte_always_inline int32_t
send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
	struct rte_mbuf **m_table;
	int32_t ret;
	uint16_t queueid;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);

	core_stats_update_tx(ret);

	if (unlikely(ret < n)) {
		do {
			free_pkts(&m_table[ret], 1);
		} while (++ret < n);
	}

	return 0;
}

/*
 * Helper function to fragment and queue for TX one packet.
 */
static __rte_always_inline uint32_t
send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
	uint16_t port, uint8_t proto)
{
	struct rte_ether_hdr *ethhdr;
	struct rte_ipv4_hdr *ip;
	struct rte_mbuf *pkt;
	struct buffer *tbl;
	uint32_t len, n, i;
	int32_t rc;

	tbl = qconf->tx_mbufs + port;
	len = tbl->len;

	/* Make room for the new fragments, flushing the TX buffer if needed */
	if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) {
		send_burst(qconf, len, port);
		len = 0;
	}

	n = RTE_DIM(tbl->m_table) - len;

	/* Strip the Ethernet header that was prepended earlier */
	rte_pktmbuf_adj(m, RTE_ETHER_HDR_LEN);

	if (proto == IPPROTO_IP)
		rc = rte_ipv4_fragment_packet(m, tbl->m_table + len,
			n, mtu_size, m->pool, qconf->frag.pool_indir);
	else
		rc = rte_ipv6_fragment_packet(m, tbl->m_table + len,
			n, mtu_size, m->pool, qconf->frag.pool_indir);

	if (rc < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: failed to fragment packet with size %u, "
			"error code: %d\n",
			__func__, m->pkt_len, rte_errno);
		rc = 0;
	}

	i = len;
	len += rc;
	for (; i < len; i++) {
		pkt = tbl->m_table[i];

		/* Update Ethernet header */
		ethhdr = (struct rte_ether_hdr *)
			rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		if (proto == IPPROTO_IP) {
			ethhdr->ether_type =
				rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
			/* Update minimum offload data */
			pkt->l3_len = sizeof(struct rte_ipv4_hdr);
			pkt->ol_flags |= qconf->outbound.ipv4_offloads;

			ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
			ip->hdr_checksum = 0;

			/* calculate IPv4 cksum in SW */
			if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
				ip->hdr_checksum = rte_ipv4_cksum(ip);
		} else {
			ethhdr->ether_type =
				rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);

			/* Update minimum offload data */
			pkt->l3_len = sizeof(struct rte_ipv6_hdr);
			pkt->ol_flags |= qconf->outbound.ipv6_offloads;
		}

		memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
		       sizeof(struct rte_ether_addr));
		memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
		       sizeof(struct rte_ether_addr));
	}

	free_pkts(&m, 1);
	return len;
}

/* Enqueue a single packet, and send burst if queue is filled */
static __rte_always_inline int32_t
send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
{
	uint32_t lcore_id;
	uint16_t len;
	struct lcore_conf *qconf;

	lcore_id = rte_lcore_id();

	qconf = &lcore_conf[lcore_id];
	len = qconf->tx_mbufs[port].len;

	/* L2 header is already part of packet */
	if (m->pkt_len - RTE_ETHER_HDR_LEN <= mtu_size) {
		qconf->tx_mbufs[port].m_table[len] = m;
		len++;

	/* need to fragment the packet */
	} else if (frag_tbl_sz > 0)
		len = send_fragment_packet(qconf, m, port, proto);
	else
		free_pkts(&m, 1);

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		send_burst(qconf, MAX_PKT_BURST, port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
	return 0;
}

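/*
 * Inbound security policy check: classify packets against the SP ACL, keep
 * BYPASS packets, drop DISCARD ones, and for protected packets verify that
 * the SA index returned by the policy matches the SA that processed them.
 */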
static __rte_always_inline void
inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
		uint16_t lim, struct ipsec_spd_stats *stats)
{
	struct rte_mbuf *m;
	uint32_t i, j, res, sa_idx;

	if (ip->num == 0 || sp == NULL)
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
			ip->num, DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (res == BYPASS) {
			ip->pkts[j++] = m;
			stats->bypass++;
			continue;
		}
		if (res == DISCARD) {
			free_pkts(&m, 1);
			stats->discard++;
			continue;
		}

		/* Only check SPI match for processed IPSec packets */
		if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) {
			stats->discard++;
			free_pkts(&m, 1);
			continue;
		}

		sa_idx = res - 1;
		if (!inbound_sa_check(sa, m, sa_idx)) {
			stats->discard++;
			free_pkts(&m, 1);
			continue;
		}
		ip->pkts[j++] = m;
		stats->protect++;
	}
	ip->num = j;
}

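/*
 * For packets processed with security offload, the egress port is taken from
 * the SA saved in the mbuf private area instead of an LPM lookup.
 */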
static __rte_always_inline uint32_t
get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(pkt);

	sa = priv->sa;
	if (unlikely(sa == NULL)) {
		RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
		goto fail;
	}

	if (is_ipv6)
		return sa->portid;

	/* else */
	return (sa->portid | RTE_LPM_LOOKUP_SUCCESS);

fail:
	if (is_ipv6)
		return BAD_PORT;

	/* else */
	return 0;
}

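/*
 * Route a burst of IPv4 packets: resolve the egress port from the SA (inline
 * offload) or an LPM lookup, rebuild the Ethernet header and queue each
 * packet for transmission.
 */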
static __rte_always_inline void
route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[],
	    uint32_t nb_pkts, uint64_t tx_offloads, bool ip_cksum)
{
	uint32_t hop[MAX_PKT_BURST * 2];
	uint32_t dst_ip[MAX_PKT_BURST * 2];
	struct rte_ether_hdr *ethhdr;
	uint32_t pkt_hop = 0;
	uint16_t i, offset;
	uint16_t lpm_pkts = 0;
	unsigned int lcoreid = rte_lcore_id();
	struct rte_mbuf *pkt;
	uint16_t port;

	if (nb_pkts == 0)
		return;

	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	 */

	for (i = 0; i < nb_pkts; i++) {
		pkt = pkts[i];
		if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			 */
			offset = offsetof(struct ip, ip_dst);
			dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkt,
					uint32_t *, offset);
			dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]);
			lpm_pkts++;
		}
	}

	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts);

	lpm_pkts = 0;

	for (i = 0; i < nb_pkts; i++) {
		pkt = pkts[i];
		if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkt, 0);
		} else {
			/* Need to use hop returned by lookup */
			pkt_hop = hop[lpm_pkts++];
		}

		if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
			core_statistics[lcoreid].lpm4.miss++;
			free_pkts(&pkt, 1);
			continue;
		}

		port = pkt_hop & 0xff;

		/* Update minimum offload data */
		pkt->l3_len = sizeof(struct rte_ipv4_hdr);
		pkt->l2_len = RTE_ETHER_HDR_LEN;
		pkt->ol_flags |= RTE_MBUF_F_TX_IPV4;

		/* Update Ethernet header */
		ethhdr = (struct rte_ether_hdr *)
			rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);

		if (ip_cksum) {
			struct rte_ipv4_hdr *ip;

			pkt->ol_flags |= tx_offloads;

			ip = (struct rte_ipv4_hdr *)(ethhdr + 1);
			ip->hdr_checksum = 0;

			/* calculate IPv4 cksum in SW */
			if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
				ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
		       sizeof(struct rte_ether_addr));
		memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
		       sizeof(struct rte_ether_addr));

		send_single_packet(pkt, port, IPPROTO_IP);
	}
}

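/*
 * IPv6 counterpart of route4_pkts(): resolve the egress port from the SA or
 * an LPM6 lookup, rebuild the Ethernet header and queue for transmission.
 */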
static __rte_always_inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t nb_pkts)
{
	int32_t hop[MAX_PKT_BURST * 2];
	struct rte_ipv6_addr dst_ip[MAX_PKT_BURST * 2];
	struct rte_ether_hdr *ethhdr;
	uint8_t *ip6_dst;
	uint32_t pkt_hop = 0;
	uint16_t i, offset;
	uint16_t lpm_pkts = 0;
	unsigned int lcoreid = rte_lcore_id();
	struct rte_mbuf *pkt;
	uint16_t port;

	if (nb_pkts == 0)
		return;

	/* Need to do an LPM lookup for non-inline packets. Inline packets will
	 * have port ID in the SA
	 */

	for (i = 0; i < nb_pkts; i++) {
		pkt = pkts[i];
		if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
			/* Security offload not enabled. So an LPM lookup is
			 * required to get the hop
			 */
			offset = offsetof(struct ip6_hdr, ip6_dst);
			ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
					offset);
			memcpy(&dst_ip[lpm_pkts], ip6_dst, 16);
			lpm_pkts++;
		}
	}

	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop,
			lpm_pkts);

	lpm_pkts = 0;

	for (i = 0; i < nb_pkts; i++) {
		pkt = pkts[i];
		if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
			/* Read hop from the SA */
			pkt_hop = get_hop_for_offload_pkt(pkt, 1);
		} else {
			/* Need to use hop returned by lookup */
			pkt_hop = (uint16_t)hop[lpm_pkts++];
		}

		if (pkt_hop == BAD_PORT) {
			core_statistics[lcoreid].lpm6.miss++;
			free_pkts(&pkt, 1);
			continue;
		}

		port = pkt_hop & 0xff;

		/* Update minimum offload data */
		pkt->ol_flags |= RTE_MBUF_F_TX_IPV6;
		pkt->l3_len = sizeof(struct ip6_hdr);
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		/* Update Ethernet header */
		ethhdr = (struct rte_ether_hdr *)
			rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);

		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		memcpy(&ethhdr->src_addr, &ethaddr_tbl[port].src,
		       sizeof(struct rte_ether_addr));
		memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port].dst,
		       sizeof(struct rte_ether_addr));

		send_single_packet(pkt, port, IPPROTO_IPV6);
	}
}

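/* Flush any packets still buffered for transmission, on all ports. */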
static __rte_always_inline void
drain_tx_buffers(struct lcore_conf *qconf)
{
	struct buffer *buf;
	uint32_t portid;

	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
		buf = &qconf->tx_mbufs[portid];
		if (buf->len == 0)
			continue;
		send_burst(qconf, buf->len, portid);
		buf->len = 0;
	}
}

#endif /* _IPSEC_WORKER_H_ */