xref: /dpdk/app/test-pmd/csumonly.c (revision ae2e4c4885c8a185beaf209d7b7c122700cc966f)
17faa7292SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause
27faa7292SOlivier Matz  * Copyright(c) 2010-2014 Intel Corporation.
351f694ddSOlivier Matz  * Copyright 2014 6WIND S.A.
4af75078fSIntel  */
5af75078fSIntel 
6af75078fSIntel #include <stdarg.h>
7af75078fSIntel #include <stdio.h>
8af75078fSIntel #include <errno.h>
9af75078fSIntel #include <stdint.h>
10af75078fSIntel #include <unistd.h>
11af75078fSIntel #include <inttypes.h>
12af75078fSIntel 
13af75078fSIntel #include <sys/queue.h>
14af75078fSIntel #include <sys/stat.h>
15af75078fSIntel 
16af75078fSIntel #include <rte_common.h>
17af75078fSIntel #include <rte_byteorder.h>
18af75078fSIntel #include <rte_log.h>
19af75078fSIntel #include <rte_debug.h>
20af75078fSIntel #include <rte_cycles.h>
21af75078fSIntel #include <rte_memory.h>
22af75078fSIntel #include <rte_memcpy.h>
23af75078fSIntel #include <rte_launch.h>
24af75078fSIntel #include <rte_eal.h>
25af75078fSIntel #include <rte_per_lcore.h>
26af75078fSIntel #include <rte_lcore.h>
27af75078fSIntel #include <rte_branch_prediction.h>
28af75078fSIntel #include <rte_mempool.h>
29af75078fSIntel #include <rte_mbuf.h>
30af75078fSIntel #include <rte_interrupts.h>
31af75078fSIntel #include <rte_ether.h>
32af75078fSIntel #include <rte_ethdev.h>
33af75078fSIntel #include <rte_ip.h>
34af75078fSIntel #include <rte_tcp.h>
35af75078fSIntel #include <rte_udp.h>
36512d873fSFlavia Musatescu #include <rte_vxlan.h>
37af75078fSIntel #include <rte_sctp.h>
38d8e5e69fSTing Xu #include <rte_gtp.h>
39af75078fSIntel #include <rte_prefetch.h>
40af75078fSIntel #include <rte_string_fns.h>
41938a184aSAdrien Mazarguil #include <rte_flow.h>
426970401eSDavid Marchand #ifdef RTE_LIB_GRO
43b40f8d78SJiayu Hu #include <rte_gro.h>
446970401eSDavid Marchand #endif
456970401eSDavid Marchand #ifdef RTE_LIB_GSO
4652f38a20SJiayu Hu #include <rte_gso.h>
476970401eSDavid Marchand #endif
48ea0e711bSOphir Munk #include <rte_geneve.h>
4952f38a20SJiayu Hu 
50af75078fSIntel #include "testpmd.h"
51af75078fSIntel 
#define IP_DEFTTL  64   /* from RFC 1340. */

/* GRE header flag bits (the flags field is stored in network byte order). */
#define GRE_CHECKSUM_PRESENT	0x8000
#define GRE_KEY_PRESENT		0x2000
#define GRE_SEQUENCE_PRESENT	0x1000
/* Each optional GRE field (checksum, key, sequence) occupies 4 bytes. */
#define GRE_EXT_LEN		4
#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
				 GRE_SEQUENCE_PRESENT)

/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif

/* UDP destination ports used below to recognize VXLAN-GPE and GENEVE
 * tunnels. Non-static: presumably overridden from elsewhere in testpmd
 * (e.g. command-line handling) — confirm against the callers. */
uint16_t vxlan_gpe_udp_port = RTE_VXLAN_GPE_DEFAULT_PORT;
uint16_t geneve_udp_port = RTE_GENEVE_DEFAULT_PORT;
7039e5e20fSXueming Li 
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
	uint16_t ethertype;	/* innermost L3 ethertype, network byte order */
#ifdef RTE_LIB_GSO
	uint8_t gso_enable;	/* nonzero: request TCP/UDP segmentation flags */
#endif
	uint16_t l2_len;	/* inner L2 length; tunnel encap bytes are
				 * folded in here by the parse_* helpers */
	uint16_t l3_len;	/* inner L3 header length */
	uint16_t l4_len;	/* inner L4 header length (set for TCP/UDP only) */
	uint8_t l4_proto;	/* inner L4 protocol (IPPROTO_*) */
	uint8_t is_tunnel;	/* set once a tunnel encapsulation is parsed */
	uint16_t outer_ethertype;	/* outer L3 ethertype, network order */
	uint16_t outer_l2_len;	/* outer L2 header length */
	uint16_t outer_l3_len;	/* outer L3 header length */
	uint8_t outer_l4_proto;	/* outer L4 protocol (IPPROTO_*) */
	uint16_t tso_segsz;	/* TSO segment size for non-tunnel packets */
	uint16_t tunnel_tso_segsz;	/* TSO segment size for tunnel packets */
	uint32_t pkt_len;	/* total packet length */
};
90c5b60331SOlivier Matz 
/* simplified GRE header */
struct simple_gre_hdr {
	uint16_t flags;	/* GRE_*_PRESENT bits, network byte order */
	uint16_t proto;	/* ethertype of the encapsulated payload */
};
9674e929a7SOlivier Matz 
9751f694ddSOlivier Matz static uint16_t
98e6b9d641SXiaoyun Li get_udptcp_checksum(struct rte_mbuf *m, void *l3_hdr, uint16_t l4_off,
99e6b9d641SXiaoyun Li 		    uint16_t ethertype)
10051f694ddSOlivier Matz {
1010c9da755SDavid Marchand 	if (ethertype == _htons(RTE_ETHER_TYPE_IPV4))
102e6b9d641SXiaoyun Li 		return rte_ipv4_udptcp_cksum_mbuf(m, l3_hdr, l4_off);
1030c9da755SDavid Marchand 	else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
104e6b9d641SXiaoyun Li 		return rte_ipv6_udptcp_cksum_mbuf(m, l3_hdr, l4_off);
10551f694ddSOlivier Matz }
106af75078fSIntel 
107160c3dc9SOlivier Matz /* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
108160c3dc9SOlivier Matz static void
109a7c528e5SOlivier Matz parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
110160c3dc9SOlivier Matz {
111f41b5156SOlivier Matz 	struct rte_tcp_hdr *tcp_hdr;
112160c3dc9SOlivier Matz 
1139863627fSMichael Pfeiffer 	info->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
114c5b60331SOlivier Matz 	info->l4_proto = ipv4_hdr->next_proto_id;
115160c3dc9SOlivier Matz 
116160c3dc9SOlivier Matz 	/* only fill l4_len for TCP, it's useful for TSO */
117c5b60331SOlivier Matz 	if (info->l4_proto == IPPROTO_TCP) {
118f41b5156SOlivier Matz 		tcp_hdr = (struct rte_tcp_hdr *)
119f41b5156SOlivier Matz 			((char *)ipv4_hdr + info->l3_len);
120c5b60331SOlivier Matz 		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1212b5651c0SRaslan Darawsheh 	} else if (info->l4_proto == IPPROTO_UDP)
122e73e3547SOlivier Matz 		info->l4_len = sizeof(struct rte_udp_hdr);
1232b5651c0SRaslan Darawsheh 	else
124c5b60331SOlivier Matz 		info->l4_len = 0;
125160c3dc9SOlivier Matz }
126160c3dc9SOlivier Matz 
127160c3dc9SOlivier Matz /* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
128160c3dc9SOlivier Matz static void
129a7c528e5SOlivier Matz parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
130160c3dc9SOlivier Matz {
131f41b5156SOlivier Matz 	struct rte_tcp_hdr *tcp_hdr;
132160c3dc9SOlivier Matz 
133a7c528e5SOlivier Matz 	info->l3_len = sizeof(struct rte_ipv6_hdr);
134c5b60331SOlivier Matz 	info->l4_proto = ipv6_hdr->proto;
135160c3dc9SOlivier Matz 
136160c3dc9SOlivier Matz 	/* only fill l4_len for TCP, it's useful for TSO */
137c5b60331SOlivier Matz 	if (info->l4_proto == IPPROTO_TCP) {
138f41b5156SOlivier Matz 		tcp_hdr = (struct rte_tcp_hdr *)
139f41b5156SOlivier Matz 			((char *)ipv6_hdr + info->l3_len);
140c5b60331SOlivier Matz 		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1412b5651c0SRaslan Darawsheh 	} else if (info->l4_proto == IPPROTO_UDP)
142e73e3547SOlivier Matz 		info->l4_len = sizeof(struct rte_udp_hdr);
1432b5651c0SRaslan Darawsheh 	else
144c5b60331SOlivier Matz 		info->l4_len = 0;
145160c3dc9SOlivier Matz }
146160c3dc9SOlivier Matz 
147af75078fSIntel /*
14851f694ddSOlivier Matz  * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
149f16d3771SRaslan Darawsheh  * ipproto. This function is able to recognize IPv4/IPv6 with optional VLAN
150f16d3771SRaslan Darawsheh  * headers. The l4_len argument is only set in case of TCP (useful for TSO).
15151f694ddSOlivier Matz  */
15251f694ddSOlivier Matz static void
1536d13ea8eSOlivier Matz parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)
15451f694ddSOlivier Matz {
155a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ipv4_hdr;
156a7c528e5SOlivier Matz 	struct rte_ipv6_hdr *ipv6_hdr;
157f16d3771SRaslan Darawsheh 	struct rte_vlan_hdr *vlan_hdr;
15851f694ddSOlivier Matz 
1596d13ea8eSOlivier Matz 	info->l2_len = sizeof(struct rte_ether_hdr);
160c5b60331SOlivier Matz 	info->ethertype = eth_hdr->ether_type;
16151f694ddSOlivier Matz 
162f16d3771SRaslan Darawsheh 	while (info->ethertype == _htons(RTE_ETHER_TYPE_VLAN) ||
163f16d3771SRaslan Darawsheh 	       info->ethertype == _htons(RTE_ETHER_TYPE_QINQ)) {
164f16d3771SRaslan Darawsheh 		vlan_hdr = (struct rte_vlan_hdr *)
165f16d3771SRaslan Darawsheh 			((char *)eth_hdr + info->l2_len);
1666d13ea8eSOlivier Matz 		info->l2_len  += sizeof(struct rte_vlan_hdr);
167c5b60331SOlivier Matz 		info->ethertype = vlan_hdr->eth_proto;
16851f694ddSOlivier Matz 	}
16951f694ddSOlivier Matz 
170c5b60331SOlivier Matz 	switch (info->ethertype) {
1710c9da755SDavid Marchand 	case _htons(RTE_ETHER_TYPE_IPV4):
172a7c528e5SOlivier Matz 		ipv4_hdr = (struct rte_ipv4_hdr *)
173a7c528e5SOlivier Matz 			((char *)eth_hdr + info->l2_len);
174c5b60331SOlivier Matz 		parse_ipv4(ipv4_hdr, info);
17551f694ddSOlivier Matz 		break;
1760c9da755SDavid Marchand 	case _htons(RTE_ETHER_TYPE_IPV6):
177a7c528e5SOlivier Matz 		ipv6_hdr = (struct rte_ipv6_hdr *)
178a7c528e5SOlivier Matz 			((char *)eth_hdr + info->l2_len);
179c5b60331SOlivier Matz 		parse_ipv6(ipv6_hdr, info);
18051f694ddSOlivier Matz 		break;
18151f694ddSOlivier Matz 	default:
182c5b60331SOlivier Matz 		info->l4_len = 0;
183c5b60331SOlivier Matz 		info->l3_len = 0;
184c5b60331SOlivier Matz 		info->l4_proto = 0;
18551f694ddSOlivier Matz 		break;
18651f694ddSOlivier Matz 	}
18751f694ddSOlivier Matz }
18851f694ddSOlivier Matz 
/* Fill in outer layers length: the headers parsed so far (ethertype,
 * l2/l3 lengths, l4 proto) become the tunnel's outer layers, and the
 * packet is marked as tunneled. */
static void
update_tunnel_outer(struct testpmd_offload_info *info)
{
	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;
	info->outer_l4_proto = info->l4_proto;
}
199df655504SOphir Munk 
/*
 * Parse a GTP protocol header.
 * No optional fields and next extension header type.
 */
static void
parse_gtp(struct rte_udp_hdr *udp_hdr,
	  struct testpmd_offload_info *info)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_gtp_hdr *gtp_hdr;
	uint8_t gtp_len = sizeof(*gtp_hdr);
	uint8_t ip_ver;

	/* Check udp destination port. */
	if (udp_hdr->dst_port != _htons(RTE_GTPC_UDP_PORT) &&
	    udp_hdr->src_port != _htons(RTE_GTPC_UDP_PORT) &&
	    udp_hdr->dst_port != _htons(RTE_GTPU_UDP_PORT))
		return;

	/* Promote the headers parsed so far to outer tunnel layers. */
	update_tunnel_outer(info);
	info->l2_len = 0;	/* GTP carries raw IP, no inner Ethernet */

	gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr +
		  sizeof(struct rte_udp_hdr));
	/* E/S/PN flags share a single 4-byte extension word. */
	if (gtp_hdr->e || gtp_hdr->s || gtp_hdr->pn)
		gtp_len += sizeof(struct rte_gtp_hdr_ext_word);
	/*
	 * Check message type. If message type is 0xff, it is
	 * a GTP data packet. If not, it is a GTP control packet
	 */
	if (gtp_hdr->msg_type == 0xff) {
		/* Peek at the first payload byte for the IP version nibble. */
		ip_ver = *(uint8_t *)((char *)gtp_hdr + gtp_len);
		ip_ver = (ip_ver) & 0xf0;

		if (ip_ver == RTE_GTP_TYPE_IPV4) {
			ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gtp_hdr +
				   gtp_len);
			info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
			parse_ipv4(ipv4_hdr, info);
		} else if (ip_ver == RTE_GTP_TYPE_IPV6) {
			ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gtp_hdr +
				   gtp_len);
			info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
			parse_ipv6(ipv6_hdr, info);
		}
	} else {
		/* Control packet: no inner L3/L4 headers to report. */
		info->ethertype = 0;
		info->l4_len = 0;
		info->l3_len = 0;
		info->l4_proto = 0;
	}

	/* The UDP + GTP encapsulation counts as inner L2 length. */
	info->l2_len += gtp_len + sizeof(*udp_hdr);
}
255d8e5e69fSTing Xu 
256c10a026cSOlivier Matz /* Parse a vxlan header */
257c10a026cSOlivier Matz static void
258e73e3547SOlivier Matz parse_vxlan(struct rte_udp_hdr *udp_hdr,
259993677afSRaja Zidane 	    struct testpmd_offload_info *info)
260c10a026cSOlivier Matz {
2616d13ea8eSOlivier Matz 	struct rte_ether_hdr *eth_hdr;
262c10a026cSOlivier Matz 
263df655504SOphir Munk 	/* check udp destination port, RTE_VXLAN_DEFAULT_PORT (4789) is the
264df655504SOphir Munk 	 * default vxlan port (rfc7348) or that the rx offload flag is set
265df655504SOphir Munk 	 * (i40e only currently)
266df655504SOphir Munk 	 */
267993677afSRaja Zidane 	if (udp_hdr->dst_port != _htons(RTE_VXLAN_DEFAULT_PORT))
268c10a026cSOlivier Matz 		return;
269c10a026cSOlivier Matz 
270df655504SOphir Munk 	update_tunnel_outer(info);
271c10a026cSOlivier Matz 
2726d13ea8eSOlivier Matz 	eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr +
273e73e3547SOlivier Matz 		sizeof(struct rte_udp_hdr) +
2746d13ea8eSOlivier Matz 		sizeof(struct rte_vxlan_hdr));
275c10a026cSOlivier Matz 
276c10a026cSOlivier Matz 	parse_ethernet(eth_hdr, info);
27735b2d13fSOlivier Matz 	info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */
278c10a026cSOlivier Matz }
279c10a026cSOlivier Matz 
/* Parse a vxlan-gpe header */
static void
parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr,
	    struct testpmd_offload_info *info)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr;
	uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);

	/* Check udp destination port. */
	if (udp_hdr->dst_port != _htons(vxlan_gpe_udp_port))
		return;

	vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr +
				sizeof(struct rte_udp_hdr));

	/* A zero next-protocol field is treated as IPv4. */
	if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
	    RTE_VXLAN_GPE_TYPE_IPV4) {
		update_tunnel_outer(info);

		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr +
			   vxlan_gpe_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
		info->l2_len = 0;	/* raw IP payload, no inner Ethernet */

	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {
		update_tunnel_outer(info);

		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr +
			   vxlan_gpe_len);

		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;	/* raw IP payload, no inner Ethernet */

	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) {
		update_tunnel_outer(info);

		eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_gpe_hdr +
			  vxlan_gpe_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;	/* unknown next protocol: not treated as a tunnel */

	/* Account for UDP + VXLAN-GPE headers in the inner L2 length. */
	info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;
}
33139e5e20fSXueming Li 
332ea0e711bSOphir Munk /* Parse a geneve header */
333ea0e711bSOphir Munk static void
334ea0e711bSOphir Munk parse_geneve(struct rte_udp_hdr *udp_hdr,
335ea0e711bSOphir Munk 	    struct testpmd_offload_info *info)
336ea0e711bSOphir Munk {
337ea0e711bSOphir Munk 	struct rte_ether_hdr *eth_hdr;
338ea0e711bSOphir Munk 	struct rte_ipv4_hdr *ipv4_hdr;
339ea0e711bSOphir Munk 	struct rte_ipv6_hdr *ipv6_hdr;
340ea0e711bSOphir Munk 	struct rte_geneve_hdr *geneve_hdr;
341ea0e711bSOphir Munk 	uint16_t geneve_len;
342ea0e711bSOphir Munk 
343ea0e711bSOphir Munk 	/* Check udp destination port. */
344ea0e711bSOphir Munk 	if (udp_hdr->dst_port != _htons(geneve_udp_port))
345ea0e711bSOphir Munk 		return;
346ea0e711bSOphir Munk 
347ea0e711bSOphir Munk 	geneve_hdr = (struct rte_geneve_hdr *)((char *)udp_hdr +
348ea0e711bSOphir Munk 				sizeof(struct rte_udp_hdr));
349ea0e711bSOphir Munk 	geneve_len = sizeof(struct rte_geneve_hdr) + geneve_hdr->opt_len * 4;
350ea0e711bSOphir Munk 	if (!geneve_hdr->proto || geneve_hdr->proto ==
351ea0e711bSOphir Munk 	    _htons(RTE_ETHER_TYPE_IPV4)) {
352ea0e711bSOphir Munk 		update_tunnel_outer(info);
353ea0e711bSOphir Munk 		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)geneve_hdr +
354ea0e711bSOphir Munk 			   geneve_len);
355ea0e711bSOphir Munk 		parse_ipv4(ipv4_hdr, info);
356ea0e711bSOphir Munk 		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
357ea0e711bSOphir Munk 		info->l2_len = 0;
358ea0e711bSOphir Munk 	} else if (geneve_hdr->proto == _htons(RTE_ETHER_TYPE_IPV6)) {
359ea0e711bSOphir Munk 		update_tunnel_outer(info);
360ea0e711bSOphir Munk 		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)geneve_hdr +
361ea0e711bSOphir Munk 			   geneve_len);
362ea0e711bSOphir Munk 		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
363ea0e711bSOphir Munk 		parse_ipv6(ipv6_hdr, info);
364ea0e711bSOphir Munk 		info->l2_len = 0;
365ea0e711bSOphir Munk 
366ea0e711bSOphir Munk 	} else if (geneve_hdr->proto == _htons(RTE_GENEVE_TYPE_ETH)) {
367ea0e711bSOphir Munk 		update_tunnel_outer(info);
368ea0e711bSOphir Munk 		eth_hdr = (struct rte_ether_hdr *)((char *)geneve_hdr +
369ea0e711bSOphir Munk 			  geneve_len);
370ea0e711bSOphir Munk 		parse_ethernet(eth_hdr, info);
371ea0e711bSOphir Munk 	} else
372ea0e711bSOphir Munk 		return;
373ea0e711bSOphir Munk 
374ea0e711bSOphir Munk 	info->l2_len +=
375ea0e711bSOphir Munk 		(sizeof(struct rte_udp_hdr) + sizeof(struct rte_geneve_hdr) +
376ea0e711bSOphir Munk 		((struct rte_geneve_hdr *)geneve_hdr)->opt_len * 4);
377ea0e711bSOphir Munk }
378ea0e711bSOphir Munk 
/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	uint8_t gre_len = 0;

	gre_len += sizeof(struct simple_gre_hdr);

	/* Each optional field (key, sequence, checksum) adds 4 bytes. */
	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
		gre_len += GRE_EXT_LEN;
	if (gre_hdr->flags & _htons(GRE_SEQUENCE_PRESENT))
		gre_len += GRE_EXT_LEN;
	if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))
		gre_len += GRE_EXT_LEN;

	if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPV4)) {
		update_tunnel_outer(info);

		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
		info->l2_len = 0;	/* raw IP payload, no inner Ethernet */

	} else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPV6)) {
		update_tunnel_outer(info);

		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len);

		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;	/* raw IP payload, no inner Ethernet */

	} else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_TEB)) {
		/* Transparent Ethernet Bridging: full frame encapsulated. */
		update_tunnel_outer(info);

		eth_hdr = (struct rte_ether_hdr *)((char *)gre_hdr + gre_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;	/* unknown payload protocol: not treated as a tunnel */

	/* The GRE header counts as part of the inner L2 length. */
	info->l2_len += gre_len;
}
42674e929a7SOlivier Matz 
4279075b0f1SOlivier Matz 
4289075b0f1SOlivier Matz /* Parse an encapsulated ip or ipv6 header */
4299075b0f1SOlivier Matz static void
4309075b0f1SOlivier Matz parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
4319075b0f1SOlivier Matz {
432a7c528e5SOlivier Matz 	struct rte_ipv4_hdr *ipv4_hdr = encap_ip;
433a7c528e5SOlivier Matz 	struct rte_ipv6_hdr *ipv6_hdr = encap_ip;
4349075b0f1SOlivier Matz 	uint8_t ip_version;
4359075b0f1SOlivier Matz 
4369075b0f1SOlivier Matz 	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;
4379075b0f1SOlivier Matz 
4389075b0f1SOlivier Matz 	if (ip_version != 4 && ip_version != 6)
4399075b0f1SOlivier Matz 		return;
4409075b0f1SOlivier Matz 
4419075b0f1SOlivier Matz 	info->is_tunnel = 1;
4429075b0f1SOlivier Matz 	info->outer_ethertype = info->ethertype;
4439075b0f1SOlivier Matz 	info->outer_l2_len = info->l2_len;
4449075b0f1SOlivier Matz 	info->outer_l3_len = info->l3_len;
4459075b0f1SOlivier Matz 
4469075b0f1SOlivier Matz 	if (ip_version == 4) {
4479075b0f1SOlivier Matz 		parse_ipv4(ipv4_hdr, info);
4480c9da755SDavid Marchand 		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
4499075b0f1SOlivier Matz 	} else {
4509075b0f1SOlivier Matz 		parse_ipv6(ipv6_hdr, info);
4510c9da755SDavid Marchand 		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
4529075b0f1SOlivier Matz 	}
4539075b0f1SOlivier Matz 	info->l2_len = 0;
4549075b0f1SOlivier Matz }
4559075b0f1SOlivier Matz 
/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration.
 *
 * l3_hdr points at the (inner) L3 header inside the mbuf m; info holds
 * the parsed header lengths/protocols; tx_offloads are the port's
 * RTE_ETH_TX_OFFLOAD_* capabilities. Returns the RTE_MBUF_F_TX_* flags
 * to set on the mbuf (0 if the ethertype is not IPv4/IPv6). */
static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
	uint64_t tx_offloads, struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *ipv4_hdr = l3_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	struct rte_sctp_hdr *sctp_hdr;
	uint64_t ol_flags = 0;
	uint32_t max_pkt_len, tso_segsz = 0;
	uint16_t l4_off;
	/* union of every tunnel TSO capability bit */
	uint64_t all_tunnel_tso = RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
				RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
				RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
				RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
				RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
				RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO;

	/* ensure packet is large enough to require tso: tso_segsz stays 0
	 * (TSO disabled) unless the payload exceeds one segment */
	if (!info->is_tunnel) {
		max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
			info->tso_segsz;
		if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tso_segsz;
	} else {
		max_pkt_len = info->outer_l2_len + info->outer_l3_len +
			info->l2_len + info->l3_len + info->l4_len +
			info->tunnel_tso_segsz;
		if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tunnel_tso_segsz;
	}

	if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
		ipv4_hdr = l3_hdr;

		ol_flags |= RTE_MBUF_F_TX_IPV4;
		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
			/* TSO requires HW IP checksum regeneration per segment */
			ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
		} else {
			if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
				ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
			} else {
				/* SW fallback: field must be zero before
				 * computing the checksum */
				ipv4_hdr->hdr_checksum = 0;
				ipv4_hdr->hdr_checksum =
					rte_ipv4_cksum(ipv4_hdr);
			}
		}
	} else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV6))
		ol_flags |= RTE_MBUF_F_TX_IPV6;
	else
		return 0; /* packet type not supported, nothing to do */

	if (info->l4_proto == IPPROTO_UDP) {
		udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
		/* do not recalculate udp cksum if it was 0 */
		if (udp_hdr->dgram_cksum != 0) {
			if (tso_segsz && (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_TSO))
				ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
			else if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
				ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
			} else {
				/* SW checksum needs the L4 offset from the
				 * start of the frame (incl. outer headers
				 * for tunneled packets) */
				if (info->is_tunnel)
					l4_off = info->outer_l2_len +
						 info->outer_l3_len +
						 info->l2_len + info->l3_len;
				else
					l4_off = info->l2_len +	info->l3_len;
				udp_hdr->dgram_cksum = 0;
				udp_hdr->dgram_cksum =
					get_udptcp_checksum(m, l3_hdr, l4_off,
						info->ethertype);
			}
		}
#ifdef RTE_LIB_GSO
		if (info->gso_enable)
			ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
#endif
	} else if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
		if (tso_segsz &&
		    (tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO | all_tunnel_tso)))
			ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
		else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
			ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
		} else {
			/* SW checksum: same offset computation as for UDP */
			if (info->is_tunnel)
				l4_off = info->outer_l2_len + info->outer_l3_len +
					 info->l2_len + info->l3_len;
			else
				l4_off = info->l2_len + info->l3_len;
			tcp_hdr->cksum = 0;
			tcp_hdr->cksum =
				get_udptcp_checksum(m, l3_hdr, l4_off,
					info->ethertype);
		}
#ifdef RTE_LIB_GSO
		if (info->gso_enable)
			ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
#endif
	} else if (info->l4_proto == IPPROTO_SCTP) {
		sctp_hdr = (struct rte_sctp_hdr *)
			((char *)l3_hdr + info->l3_len);
		/* sctp payload must be a multiple of 4 to be
		 * offloaded */
		if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
			((ipv4_hdr->total_length & 0x3) == 0)) {
			ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
		} else {
			sctp_hdr->cksum = 0;
			/* XXX implement CRC32c, example available in
			 * RFC3309 */
		}
	}

	return ol_flags;
}
57451f694ddSOlivier Matz 
/* Calculate the checksum of outer header.
 *
 * outer_l3_hdr points at the outer L3 header of a tunneled packet in
 * mbuf m; tso_enabled is nonzero when the packet will be segmented by
 * the NIC. Returns the RTE_MBUF_F_TX_OUTER_* (and TSO) flags to set. */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
	uint64_t tx_offloads, int tso_enabled, struct rte_mbuf *m)
{
	struct rte_udp_hdr *udp_hdr;
	uint64_t ol_flags = 0;

	if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
		ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;

		/* Outer IPv4 checksum: HW offload when available, else SW. */
		if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
			ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
		} else {
			struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;

			ipv4_hdr->hdr_checksum = 0;
			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
		}
	} else {
		ol_flags |= RTE_MBUF_F_TX_OUTER_IPV6;
	}

	/* Only UDP-based tunnels carry an outer L4 checksum to handle. */
	if (info->outer_l4_proto != IPPROTO_UDP)
		return ol_flags;

	udp_hdr = (struct rte_udp_hdr *)
		((char *)outer_l3_hdr + info->outer_l3_len);

	if (tso_enabled && info->l4_proto == IPPROTO_TCP)
		ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
	else if (tso_enabled && info->l4_proto == IPPROTO_UDP)
		ol_flags |= RTE_MBUF_F_TX_UDP_SEG;

	/* Skip SW outer UDP checksum generation if HW supports it */
	if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
		ol_flags |= RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
		return ol_flags;
	}

	/* Outer UDP checksum is done in software.
	 *
	 * If a packet will be TSOed into small packets by NIC, we cannot
	 * set/calculate a non-zero checksum, because it will be a wrong
	 * value after the packet be split into several small packets.
	 */
	if (!tso_enabled && udp_hdr->dgram_cksum != 0) {
		udp_hdr->dgram_cksum = 0;
		udp_hdr->dgram_cksum = get_udptcp_checksum(m, outer_l3_hdr,
					info->outer_l2_len + info->outer_l3_len,
					info->outer_ethertype);
	}

	return ol_flags;
}
63051f694ddSOlivier Matz 
63151f694ddSOlivier Matz /*
63279bec05bSKonstantin Ananyev  * Helper function.
63379bec05bSKonstantin Ananyev  * Performs actual copying.
63479bec05bSKonstantin Ananyev  * Returns number of segments in the destination mbuf on success,
63579bec05bSKonstantin Ananyev  * or negative error code on failure.
63679bec05bSKonstantin Ananyev  */
63779bec05bSKonstantin Ananyev static int
63879bec05bSKonstantin Ananyev mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
63979bec05bSKonstantin Ananyev 	uint16_t seglen[], uint8_t nb_seg)
64079bec05bSKonstantin Ananyev {
64179bec05bSKonstantin Ananyev 	uint32_t dlen, slen, tlen;
64279bec05bSKonstantin Ananyev 	uint32_t i, len;
64379bec05bSKonstantin Ananyev 	const struct rte_mbuf *m;
64479bec05bSKonstantin Ananyev 	const uint8_t *src;
64579bec05bSKonstantin Ananyev 	uint8_t *dst;
64679bec05bSKonstantin Ananyev 
64779bec05bSKonstantin Ananyev 	dlen = 0;
64879bec05bSKonstantin Ananyev 	slen = 0;
64979bec05bSKonstantin Ananyev 	tlen = 0;
65079bec05bSKonstantin Ananyev 
65179bec05bSKonstantin Ananyev 	dst = NULL;
65279bec05bSKonstantin Ananyev 	src = NULL;
65379bec05bSKonstantin Ananyev 
65479bec05bSKonstantin Ananyev 	m = ms;
65579bec05bSKonstantin Ananyev 	i = 0;
65679bec05bSKonstantin Ananyev 	while (ms != NULL && i != nb_seg) {
65779bec05bSKonstantin Ananyev 
65879bec05bSKonstantin Ananyev 		if (slen == 0) {
65979bec05bSKonstantin Ananyev 			slen = rte_pktmbuf_data_len(ms);
66079bec05bSKonstantin Ananyev 			src = rte_pktmbuf_mtod(ms, const uint8_t *);
66179bec05bSKonstantin Ananyev 		}
66279bec05bSKonstantin Ananyev 
66379bec05bSKonstantin Ananyev 		if (dlen == 0) {
66479bec05bSKonstantin Ananyev 			dlen = RTE_MIN(seglen[i], slen);
66579bec05bSKonstantin Ananyev 			md[i]->data_len = dlen;
66679bec05bSKonstantin Ananyev 			md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1];
66779bec05bSKonstantin Ananyev 			dst = rte_pktmbuf_mtod(md[i], uint8_t *);
66879bec05bSKonstantin Ananyev 		}
66979bec05bSKonstantin Ananyev 
67079bec05bSKonstantin Ananyev 		len = RTE_MIN(slen, dlen);
67179bec05bSKonstantin Ananyev 		memcpy(dst, src, len);
67279bec05bSKonstantin Ananyev 		tlen += len;
67379bec05bSKonstantin Ananyev 		slen -= len;
67479bec05bSKonstantin Ananyev 		dlen -= len;
67579bec05bSKonstantin Ananyev 		src += len;
67679bec05bSKonstantin Ananyev 		dst += len;
67779bec05bSKonstantin Ananyev 
67879bec05bSKonstantin Ananyev 		if (slen == 0)
67979bec05bSKonstantin Ananyev 			ms = ms->next;
68079bec05bSKonstantin Ananyev 		if (dlen == 0)
68179bec05bSKonstantin Ananyev 			i++;
68279bec05bSKonstantin Ananyev 	}
68379bec05bSKonstantin Ananyev 
68479bec05bSKonstantin Ananyev 	if (ms != NULL)
68579bec05bSKonstantin Ananyev 		return -ENOBUFS;
68679bec05bSKonstantin Ananyev 	else if (tlen != m->pkt_len)
68779bec05bSKonstantin Ananyev 		return -EINVAL;
68879bec05bSKonstantin Ananyev 
68979bec05bSKonstantin Ananyev 	md[0]->nb_segs = nb_seg;
69079bec05bSKonstantin Ananyev 	md[0]->pkt_len = tlen;
69179bec05bSKonstantin Ananyev 	md[0]->vlan_tci = m->vlan_tci;
69279bec05bSKonstantin Ananyev 	md[0]->vlan_tci_outer = m->vlan_tci_outer;
69379bec05bSKonstantin Ananyev 	md[0]->ol_flags = m->ol_flags;
69479bec05bSKonstantin Ananyev 	md[0]->tx_offload = m->tx_offload;
69579bec05bSKonstantin Ananyev 
69679bec05bSKonstantin Ananyev 	return nb_seg;
69779bec05bSKonstantin Ananyev }
69879bec05bSKonstantin Ananyev 
69979bec05bSKonstantin Ananyev /*
70079bec05bSKonstantin Ananyev  * Allocate a new mbuf with up to tx_pkt_nb_segs segments.
70151db3a89SDekel Peled  * Copy packet contents and offload information into the new segmented mbuf.
70279bec05bSKonstantin Ananyev  */
70379bec05bSKonstantin Ananyev static struct rte_mbuf *
70479bec05bSKonstantin Ananyev pkt_copy_split(const struct rte_mbuf *pkt)
70579bec05bSKonstantin Ananyev {
70679bec05bSKonstantin Ananyev 	int32_t n, rc;
70779bec05bSKonstantin Ananyev 	uint32_t i, len, nb_seg;
70879bec05bSKonstantin Ananyev 	struct rte_mempool *mp;
70979bec05bSKonstantin Ananyev 	uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
71079bec05bSKonstantin Ananyev 	struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];
71179bec05bSKonstantin Ananyev 
71279bec05bSKonstantin Ananyev 	mp = current_fwd_lcore()->mbp;
71379bec05bSKonstantin Ananyev 
71479bec05bSKonstantin Ananyev 	if (tx_pkt_split == TX_PKT_SPLIT_RND)
715761f7ae1SJie Zhou 		nb_seg = rte_rand() % tx_pkt_nb_segs + 1;
71679bec05bSKonstantin Ananyev 	else
71779bec05bSKonstantin Ananyev 		nb_seg = tx_pkt_nb_segs;
71879bec05bSKonstantin Ananyev 
71979bec05bSKonstantin Ananyev 	memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));
72079bec05bSKonstantin Ananyev 
72179bec05bSKonstantin Ananyev 	/* calculate number of segments to use and their length. */
72279bec05bSKonstantin Ananyev 	len = 0;
72379bec05bSKonstantin Ananyev 	for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
72479bec05bSKonstantin Ananyev 		len += seglen[i];
72579bec05bSKonstantin Ananyev 		md[i] = NULL;
72679bec05bSKonstantin Ananyev 	}
72779bec05bSKonstantin Ananyev 
72879bec05bSKonstantin Ananyev 	n = pkt->pkt_len - len;
72979bec05bSKonstantin Ananyev 
73079bec05bSKonstantin Ananyev 	/* update size of the last segment to fit rest of the packet */
73179bec05bSKonstantin Ananyev 	if (n >= 0) {
73279bec05bSKonstantin Ananyev 		seglen[i - 1] += n;
73379bec05bSKonstantin Ananyev 		len += n;
73479bec05bSKonstantin Ananyev 	}
73579bec05bSKonstantin Ananyev 
73679bec05bSKonstantin Ananyev 	nb_seg = i;
73779bec05bSKonstantin Ananyev 	while (i != 0) {
73879bec05bSKonstantin Ananyev 		p = rte_pktmbuf_alloc(mp);
73979bec05bSKonstantin Ananyev 		if (p == NULL) {
740285fd101SOlivier Matz 			TESTPMD_LOG(ERR,
74179bec05bSKonstantin Ananyev 				"failed to allocate %u-th of %u mbuf "
74279bec05bSKonstantin Ananyev 				"from mempool: %s\n",
74379bec05bSKonstantin Ananyev 				nb_seg - i, nb_seg, mp->name);
74479bec05bSKonstantin Ananyev 			break;
74579bec05bSKonstantin Ananyev 		}
74679bec05bSKonstantin Ananyev 
74779bec05bSKonstantin Ananyev 		md[--i] = p;
74879bec05bSKonstantin Ananyev 		if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
749285fd101SOlivier Matz 			TESTPMD_LOG(ERR, "mempool %s, %u-th segment: "
75079bec05bSKonstantin Ananyev 				"expected seglen: %u, "
75179bec05bSKonstantin Ananyev 				"actual mbuf tailroom: %u\n",
75279bec05bSKonstantin Ananyev 				mp->name, i, seglen[i],
75379bec05bSKonstantin Ananyev 				rte_pktmbuf_tailroom(md[i]));
75479bec05bSKonstantin Ananyev 			break;
75579bec05bSKonstantin Ananyev 		}
75679bec05bSKonstantin Ananyev 	}
75779bec05bSKonstantin Ananyev 
75879bec05bSKonstantin Ananyev 	/* all mbufs successfully allocated, do copy */
75979bec05bSKonstantin Ananyev 	if (i == 0) {
76079bec05bSKonstantin Ananyev 		rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
76179bec05bSKonstantin Ananyev 		if (rc < 0)
762285fd101SOlivier Matz 			TESTPMD_LOG(ERR,
76397cb466dSOlivier Matz 				"mbuf_copy_split for %p(len=%u, nb_seg=%u) "
76479bec05bSKonstantin Ananyev 				"into %u segments failed with error code: %d\n",
76579bec05bSKonstantin Ananyev 				pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);
76679bec05bSKonstantin Ananyev 
76779bec05bSKonstantin Ananyev 		/* figure out how many mbufs to free. */
76879bec05bSKonstantin Ananyev 		i = RTE_MAX(rc, 0);
76979bec05bSKonstantin Ananyev 	}
77079bec05bSKonstantin Ananyev 
77179bec05bSKonstantin Ananyev 	/* free unused mbufs */
77279bec05bSKonstantin Ananyev 	for (; i != nb_seg; i++) {
77379bec05bSKonstantin Ananyev 		rte_pktmbuf_free_seg(md[i]);
77479bec05bSKonstantin Ananyev 		md[i] = NULL;
77579bec05bSKonstantin Ananyev 	}
77679bec05bSKonstantin Ananyev 
77779bec05bSKonstantin Ananyev 	return md[0];
77879bec05bSKonstantin Ananyev }
77979bec05bSKonstantin Ananyev 
7801945c646SWenwu Ma #if defined(RTE_LIB_GRO) || defined(RTE_LIB_GSO)
7811945c646SWenwu Ma /*
7821945c646SWenwu Ma  * Re-calculate IP checksum for merged/fragmented packets.
7831945c646SWenwu Ma  */
7841945c646SWenwu Ma static void
7851945c646SWenwu Ma pkts_ip_csum_recalc(struct rte_mbuf **pkts_burst, const uint16_t nb_pkts, uint64_t tx_offloads)
7861945c646SWenwu Ma {
7871945c646SWenwu Ma 	int i;
7881945c646SWenwu Ma 	struct rte_ipv4_hdr *ipv4_hdr;
7891945c646SWenwu Ma 	for (i = 0; i < nb_pkts; i++) {
7901945c646SWenwu Ma 		if ((pkts_burst[i]->ol_flags & RTE_MBUF_F_TX_IPV4) &&
7911945c646SWenwu Ma 			(tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
7921945c646SWenwu Ma 			ipv4_hdr = rte_pktmbuf_mtod_offset(pkts_burst[i],
7931945c646SWenwu Ma 						struct rte_ipv4_hdr *,
7941945c646SWenwu Ma 						pkts_burst[i]->l2_len);
7951945c646SWenwu Ma 			ipv4_hdr->hdr_checksum = 0;
7961945c646SWenwu Ma 			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
7971945c646SWenwu Ma 		}
7981945c646SWenwu Ma 	}
7991945c646SWenwu Ma }
8001945c646SWenwu Ma #endif
8011945c646SWenwu Ma 
80279bec05bSKonstantin Ananyev /*
80351f694ddSOlivier Matz  * Receive a burst of packets, and for each packet:
80451f694ddSOlivier Matz  *  - parse packet, and try to recognize a supported packet type (1)
80551f694ddSOlivier Matz  *  - if it's not a supported packet type, don't touch the packet, else:
80651f694ddSOlivier Matz  *  - reprocess the checksum of all supported layers. This is done in SW
80751f694ddSOlivier Matz  *    or HW, depending on testpmd command line configuration
808b51c4753SOlivier Matz  *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
809b51c4753SOlivier Matz  *    segmentation offload (this implies HW TCP checksum)
81051f694ddSOlivier Matz  * Then transmit packets on the output port.
81151f694ddSOlivier Matz  *
81251f694ddSOlivier Matz  * (1) Supported packets are:
81351f694ddSOlivier Matz  *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
81451f694ddSOlivier Matz  *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
81551f694ddSOlivier Matz  *           UDP|TCP|SCTP
81639e5e20fSXueming Li  *   Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
81739e5e20fSXueming Li  *           UDP|TCP|SCTP
81839e5e20fSXueming Li  *   Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
81939e5e20fSXueming Li  *           UDP|TCP|SCTP
820d8e5e69fSTing Xu  *   Ether / (vlan) / outer IP / outer UDP / GTP / IP|IP6 / UDP|TCP|SCTP
82174e929a7SOlivier Matz  *   Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
82274e929a7SOlivier Matz  *   Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
8239075b0f1SOlivier Matz  *   Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
82451f694ddSOlivier Matz  *
82551f694ddSOlivier Matz  * The testpmd command line for this forward engine sets the flags
82651f694ddSOlivier Matz  * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
8277be78d02SJosh Soref  * whether a checksum must be calculated in software or in hardware. The
82851f694ddSOlivier Matz  * IP, UDP, TCP and SCTP flags always concern the inner layer. The
8293994a3e8SOlivier Matz  * OUTER_IP is only useful for tunnel packets.
830af75078fSIntel  */
static bool
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
#ifdef RTE_LIB_GSO
	struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST];
	struct rte_gso_ctx *gso_ctx;
#endif
	struct rte_mbuf **tx_pkts_burst;
	struct rte_port *txp;
	struct rte_mbuf *m, *p;
	struct rte_ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
#ifdef RTE_LIB_GRO
	void **gro_ctx;
	uint16_t gro_pkts_num;
	uint8_t gro_enable;
#endif
	uint16_t nb_rx;
	uint16_t nb_prep;
	uint16_t i;
	uint64_t rx_ol_flags, tx_ol_flags;
	uint64_t tx_offloads;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;
	uint32_t rx_bad_outer_l4_csum;
	uint32_t rx_bad_outer_ip_csum;
	struct testpmd_offload_info info;

	/* receive a burst of packet */
	nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
	if (unlikely(nb_rx == 0)) {
#ifndef RTE_LIB_GRO
		return false;
#else
		gro_enable = gro_ports[fs->rx_port].enable;
		/*
		 * Check if packets need to be flushed in the GRO context
		 * due to a timeout.
		 *
		 * Continue only in GRO heavyweight mode and if there are
		 * packets in the GRO context.
		 */
		if (!gro_enable || (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) ||
			(rte_gro_get_pkt_count(current_fwd_lcore()->gro_ctx) == 0))
			return false;
#endif
	}

	/* per-burst counters of RX checksum errors, accumulated into the
	 * stream statistics at the end of the function */
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;
	rx_bad_outer_l4_csum = 0;
	rx_bad_outer_ip_csum = 0;

	txp = &ports[fs->tx_port];
	tx_offloads = txp->dev_conf.txmode.offloads;
	memset(&info, 0, sizeof(info));
	info.tso_segsz = txp->tso_segsz;
	info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
#ifdef RTE_LIB_GSO
	if (gso_ports[fs->tx_port].enable)
		info.gso_enable = 1;
#endif

	for (i = 0; i < nb_rx; i++) {
		/* prefetch the next packet's headers while processing this one */
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
						       void *));

		m = pkts_burst[i];
		info.is_tunnel = 0;
		info.pkt_len = rte_pktmbuf_pkt_len(m);
		/* keep only the buffer-ownership flags from RX; all other TX
		 * flags are rebuilt from scratch below */
		tx_ol_flags = m->ol_flags &
			      (RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL);
		rx_ol_flags = m->ol_flags;

		/* Update the L3/L4 checksum error packet statistics */
		if ((rx_ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) == RTE_MBUF_F_RX_IP_CKSUM_BAD)
			rx_bad_ip_csum += 1;
		if ((rx_ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) == RTE_MBUF_F_RX_L4_CKSUM_BAD)
			rx_bad_l4_csum += 1;
		if (rx_ol_flags & RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD)
			rx_bad_outer_l4_csum += 1;
		if (rx_ol_flags & RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD)
			rx_bad_outer_ip_csum += 1;

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
		 * and inner headers */

		eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
		if (ports[fs->tx_port].fwd_mac_swap) {
			rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
					    &eth_hdr->dst_addr);
			rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
					    &eth_hdr->src_addr);
		}
		parse_ethernet(eth_hdr, &info);
		l3_hdr = (char *)eth_hdr + info.l2_len;

		/* check if it's a supported tunnel */
		if (txp->parse_tunnel) {
			if (info.l4_proto == IPPROTO_UDP) {
				struct rte_udp_hdr *udp_hdr;

				/* try UDP-based tunnels in order: GTP,
				 * VXLAN-GPE, VXLAN, GENEVE; first match wins */
				udp_hdr = (struct rte_udp_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gtp(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_GTP;
					goto tunnel_update;
				}
				parse_vxlan_gpe(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |=
						RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE;
					goto tunnel_update;
				}
				parse_vxlan(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |=
						RTE_MBUF_F_TX_TUNNEL_VXLAN;
					goto tunnel_update;
				}
				parse_geneve(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |=
						RTE_MBUF_F_TX_TUNNEL_GENEVE;
					goto tunnel_update;
				}
				/* Always keep last. */
				if (unlikely(RTE_ETH_IS_TUNNEL_PKT(
							m->packet_type) != 0)) {
					TESTPMD_LOG(DEBUG, "Unknown tunnel packet. UDP dst port: %hu",
						udp_hdr->dst_port);
				}
			} else if (info.l4_proto == IPPROTO_GRE) {
				struct simple_gre_hdr *gre_hdr;

				gre_hdr = (struct simple_gre_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gre(gre_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_GRE;
			} else if (info.l4_proto == IPPROTO_IPIP) {
				void *encap_ip_hdr;

				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
				parse_encap_ip(encap_ip_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_IPIP;
			}
		}

tunnel_update:
		/* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
		if (info.is_tunnel) {
			outer_l3_hdr = l3_hdr;
			l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
		}

		/* step 2: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */

		/* process checksums of inner headers first */
		tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
			tx_offloads, m);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (info.is_tunnel == 1) {
			tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
					tx_offloads,
					!!(tx_ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
						RTE_MBUF_F_TX_UDP_SEG)),
					m);
		}

		/* step 3: fill the mbuf meta data (flags and header lengths) */

		m->tx_offload = 0;
		if (info.is_tunnel == 1) {
			/* outer header lengths are only meaningful to the NIC
			 * when an outer offload (or tunnel TSO) is requested */
			if (info.tunnel_tso_segsz ||
			    (tx_offloads &
			     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
			    (tx_offloads &
			     RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
				m->outer_l2_len = info.outer_l2_len;
				m->outer_l3_len = info.outer_l3_len;
				m->l2_len = info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
				m->tso_segsz = info.tunnel_tso_segsz;
			}
			else {
				/* if there is a outer UDP cksum
				   processed in sw and the inner in hw,
				   the outer checksum will be wrong as
				   the payload will be modified by the
				   hardware */
				m->l2_len = info.outer_l2_len +
					info.outer_l3_len + info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = info.l2_len;
			m->l3_len = info.l3_len;
			m->l4_len = info.l4_len;
			m->tso_segsz = info.tso_segsz;
		}
		m->ol_flags = tx_ol_flags;

		/* Do split & copy for the packet. */
		if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
			p = pkt_copy_split(m);
			if (p != NULL) {
				rte_pktmbuf_free(m);
				m = p;
				pkts_burst[i] = m;
			}
		}

		/* if verbose mode is enabled, dump debug info */
		if (verbose_level > 0) {
			char buf[256];

			printf("-----------------\n");
			printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%u:\n",
				fs->rx_port, m, m->pkt_len, m->nb_segs);
			/* dump rx parsed packet info */
			rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
				"l4_proto=%d l4_len=%d flags=%s\n",
				info.l2_len, rte_be_to_cpu_16(info.ethertype),
				info.l3_len, info.l4_proto, info.l4_len, buf);
			if (rx_ol_flags & RTE_MBUF_F_RX_LRO)
				printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
			if (info.is_tunnel == 1)
				printf("rx: outer_l2_len=%d outer_ethertype=%x "
					"outer_l3_len=%d\n", info.outer_l2_len,
					rte_be_to_cpu_16(info.outer_ethertype),
					info.outer_l3_len);
			/* dump tx packet info */
			if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
					    RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
					    RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
					    RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
				info.tso_segsz != 0)
				printf("tx: m->l2_len=%d m->l3_len=%d "
					"m->l4_len=%d\n",
					m->l2_len, m->l3_len, m->l4_len);
			if (info.is_tunnel == 1) {
				if ((tx_offloads &
				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
				    (tx_offloads &
				    RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
				    (tx_ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))
					printf("tx: m->outer_l2_len=%d "
						"m->outer_l3_len=%d\n",
						m->outer_l2_len,
						m->outer_l3_len);
				if (info.tunnel_tso_segsz != 0 &&
						(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
							RTE_MBUF_F_TX_UDP_SEG)))
					printf("tx: m->tso_segsz=%d\n",
						m->tso_segsz);
			} else if (info.tso_segsz != 0 &&
					(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
						RTE_MBUF_F_TX_UDP_SEG)))
				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
			rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
			printf("tx: flags=%s", buf);
			printf("\n");
		}
	}

#ifdef RTE_LIB_GRO
	/* step 4 (optional): run the burst through GRO, either per-burst
	 * (lightweight mode) or via the lcore GRO context with periodic
	 * timeout flushes (heavyweight mode); merged packets need their
	 * IP checksum recomputed. */
	gro_enable = gro_ports[fs->rx_port].enable;
	if (unlikely(gro_enable)) {
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
					&(gro_ports[fs->rx_port].param));
		} else {
			gro_ctx = current_fwd_lcore()->gro_ctx;
			nb_rx = rte_gro_reassemble(pkts_burst, nb_rx, gro_ctx);

			if (++fs->gro_times >= gro_flush_cycles) {
				gro_pkts_num = rte_gro_get_pkt_count(gro_ctx);
				if (gro_pkts_num > MAX_PKT_BURST - nb_rx)
					gro_pkts_num = MAX_PKT_BURST - nb_rx;

				nb_rx += rte_gro_timeout_flush(gro_ctx, 0,
						RTE_GRO_TCP_IPV4,
						&pkts_burst[nb_rx],
						gro_pkts_num);
				fs->gro_times = 0;
			}
			if (nb_rx == 0)
				return false;
		}

		pkts_ip_csum_recalc(pkts_burst, nb_rx, tx_offloads);
	}
#endif

#ifdef RTE_LIB_GSO
	/* step 5 (optional): segment large packets in software with GSO;
	 * the resulting segments (or unchanged packets) replace the
	 * original burst for transmission. */
	if (gso_ports[fs->tx_port].enable != 0) {
		uint16_t nb_segments = 0;

		gso_ctx = &(current_fwd_lcore()->gso_ctx);
		gso_ctx->gso_size = gso_max_segment_size;
		for (i = 0; i < nb_rx; i++) {
			int ret;

			ret = rte_gso_segment(pkts_burst[i], gso_ctx,
					&gso_segments[nb_segments],
					GSO_MAX_PKT_BURST - nb_segments);
			if (ret >= 1) {
				/* pkts_burst[i] can be freed safely here. */
				rte_pktmbuf_free(pkts_burst[i]);
				nb_segments += ret;
			} else if (ret == 0) {
				/* 0 means it can be transmitted directly
				 * without gso.
				 */
				gso_segments[nb_segments] = pkts_burst[i];
				nb_segments += 1;
			} else {
				TESTPMD_LOG(DEBUG, "Unable to segment packet");
				rte_pktmbuf_free(pkts_burst[i]);
			}
		}

		tx_pkts_burst = gso_segments;
		nb_rx = nb_segments;

		pkts_ip_csum_recalc(tx_pkts_burst, nb_rx, tx_offloads);
	} else
#endif
		tx_pkts_burst = pkts_burst;

	/* step 6: let the driver prepare the burst for the requested
	 * offloads, drop and free any packets it rejects, then transmit
	 * the rest. */
	nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
			tx_pkts_burst, nb_rx);
	if (nb_prep != nb_rx) {
		fprintf(stderr,
			"Preparing packet burst to transmit failed: %s\n",
			rte_strerror(rte_errno));
		fs->fwd_dropped += (nb_rx - nb_prep);
		rte_pktmbuf_free_bulk(&tx_pkts_burst[nb_prep], nb_rx - nb_prep);
	}

	common_fwd_stream_transmit(fs, tx_pkts_burst, nb_prep);

	/* fold this burst's checksum-error counters into the stream stats */
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;
	fs->rx_bad_outer_l4_csum += rx_bad_outer_l4_csum;
	fs->rx_bad_outer_ip_csum += rx_bad_outer_ip_csum;

	return true;
}
1197af75078fSIntel 
/* Forwarding engine descriptor registered under the "csum" forwarding mode. */
struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.stream_init    = common_fwd_stream_init,
	.packet_fwd     = pkt_burst_checksum_forward,
};
1203