xref: /dpdk/app/test-pmd/csumonly.c (revision 8205e241b2b01c05f2cffe5158c053d614d1f68c)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include "testpmd.h"

#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bit words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)

#define GRE_KEY_PRESENT 0x2000
#define GRE_KEY_LEN     4
#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT

/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif
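
/*
 * This lets the parsers below match big-endian ethertypes directly in
 * case labels, which must be integer constant expressions, e.g.:
 *
 *	switch (info->ethertype) {
 *	case _htons(ETHER_TYPE_IPv4):
 *		...
 *	}
 */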

/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
	uint16_t ethertype;	/* ethertype of the innermost L3 header */
	uint16_t l2_len;	/* for tunnels, includes encap. headers */
	uint16_t l3_len;	/* innermost L3 header length */
	uint16_t l4_len;	/* only set for TCP (useful for TSO) */
	uint8_t l4_proto;	/* innermost L4 protocol (IPPROTO_*) */
	uint8_t is_tunnel;	/* 1 if a supported tunnel was parsed */
	/* the outer_* fields are only valid when is_tunnel is set */
	uint16_t outer_ethertype;
	uint16_t outer_l2_len;
	uint16_t outer_l3_len;
	uint8_t outer_l4_proto;
	uint16_t tso_segsz;	/* TSO segment size, 0 if TSO is disabled */
};

/* simplified GRE header */
struct simple_gre_hdr {
	uint16_t flags;
	uint16_t proto;
} __attribute__((__packed__));

static uint16_t
get_psd_sum(void *l3_hdr, uint16_t ethertype, uint64_t ol_flags)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}

static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
}
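
/*
 * Note the convention used by the two helpers above, which follows the
 * mbuf transmit offload API: when the L4 checksum is computed in
 * hardware, the checksum field must be seeded with the pseudo-header
 * sum only (get_psd_sum); when it is computed in software, the full
 * checksum over the pseudo-header and the L4 data is written
 * (get_udptcp_checksum).
 */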

/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
	struct tcp_hdr *tcp_hdr;

	info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
	info->l4_proto = ipv4_hdr->next_proto_id;

	/* only fill l4_len for TCP; it is useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len);
		/* the data_off upper nibble is the header length in 32-bit
		 * words: (x >> 4) * 4 == x >> 2 */
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else
		info->l4_len = 0;
}

/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto.
 * Extension headers are not supported. */
static void
parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
	struct tcp_hdr *tcp_hdr;

	info->l3_len = sizeof(struct ipv6_hdr);
	info->l4_proto = ipv6_hdr->proto;

	/* only fill l4_len for TCP; it is useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else
		info->l4_len = 0;
}

/*
 * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
 * l4_proto fields. This function recognizes IPv4/IPv6 with one optional
 * vlan header. The l4_len field is only set for TCP (useful for TSO).
 */
static void
parse_ethernet(struct ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	info->l2_len = sizeof(struct ether_hdr);
	info->ethertype = eth_hdr->ether_type;

	if (info->ethertype == _htons(ETHER_TYPE_VLAN)) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		info->l2_len  += sizeof(struct vlan_hdr);
		info->ethertype = vlan_hdr->eth_proto;
	}

	switch (info->ethertype) {
	case _htons(ETHER_TYPE_IPv4):
		ipv4_hdr = (struct ipv4_hdr *) ((char *)eth_hdr + info->l2_len);
		parse_ipv4(ipv4_hdr, info);
		break;
	case _htons(ETHER_TYPE_IPv6):
		ipv6_hdr = (struct ipv6_hdr *) ((char *)eth_hdr + info->l2_len);
		parse_ipv6(ipv6_hdr, info);
		break;
	default:
		info->l4_len = 0;
		info->l3_len = 0;
		info->l4_proto = 0;
		break;
	}
}

/* Parse a vxlan header */
static void
parse_vxlan(struct udp_hdr *udp_hdr,
	    struct testpmd_offload_info *info,
	    uint32_t pkt_type)
{
	struct ether_hdr *eth_hdr;

	/* Check the UDP destination port: 4789 is the default vxlan port
	 * (rfc7348). Also accept the packet if the rx tunnel packet type
	 * flag is set (currently i40e only). */
	if (udp_hdr->dst_port != _htons(4789) &&
		RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;
	info->outer_l4_proto = info->l4_proto;

	eth_hdr = (struct ether_hdr *)((char *)udp_hdr +
		sizeof(struct udp_hdr) +
		sizeof(struct vxlan_hdr));

	parse_ethernet(eth_hdr, info);
	info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
}
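
/*
 * For reference, the vxlan encapsulation recognized above is:
 *
 *   Ether / (vlan) / IP|IP6 / UDP (dst 4789) / VXLAN (8 bytes) / Ether / ...
 *
 * After parsing, the inner l2_len covers the UDP + VXLAN headers
 * (ETHER_VXLAN_HLEN) plus the inner ethernet header.
 */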

/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	uint8_t gre_len = 0;

	/* check which fields are supported */
	if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0)
		return;

	gre_len += sizeof(struct simple_gre_hdr);

	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
		gre_len += GRE_KEY_LEN;

	if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(ETHER_TYPE_IPv6)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);

		info->ethertype = _htons(ETHER_TYPE_IPv6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(ETHER_TYPE_TEB)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		eth_hdr = (struct ether_hdr *)((char *)gre_hdr + gre_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += gre_len;
}
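
/*
 * For reference, the GRE header parsed above is the 4-byte base header
 * (flags + protocol), optionally followed by a 4-byte key when the K bit
 * (GRE_KEY_PRESENT) is set; any other optional field makes the packet
 * unsupported and it is left untouched. The protocol field selects the
 * payload: IPv4, IPv6, or a full ethernet frame (TEB).
 */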

/* Parse an encapsulated IPv4 or IPv6 header */
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
	struct ipv4_hdr *ipv4_hdr = encap_ip;
	struct ipv6_hdr *ipv6_hdr = encap_ip;
	uint8_t ip_version;

	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;

	if (ip_version != 4 && ip_version != 6)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;

	if (ip_version == 4) {
		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
	} else {
		parse_ipv6(ipv6_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv6);
	}
	info->l2_len = 0;
}

/* modify the IPv4 or IPv6 source address of a packet */
static void
change_ip_addresses(void *l3_hdr, uint16_t ethertype)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct ipv6_hdr *ipv6_hdr = l3_hdr;

	if (ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->src_addr =
			rte_cpu_to_be_32(rte_be_to_cpu_32(ipv4_hdr->src_addr) + 1);
	} else if (ethertype == _htons(ETHER_TYPE_IPv6)) {
		ipv6_hdr->src_addr[15] = ipv6_hdr->src_addr[15] + 1;
	}
}

/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
	uint16_t testpmd_ol_flags)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct udp_hdr *udp_hdr;
	struct tcp_hdr *tcp_hdr;
	struct sctp_hdr *sctp_hdr;
	uint64_t ol_flags = 0;

	if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;

		ol_flags |= PKT_TX_IPV4;
		if (info->tso_segsz != 0 && info->l4_proto == IPPROTO_TCP) {
			ol_flags |= PKT_TX_IP_CKSUM;
		} else {
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
				ol_flags |= PKT_TX_IP_CKSUM;
			else
				ipv4_hdr->hdr_checksum =
					rte_ipv4_cksum(ipv4_hdr);
		}
	} else if (info->ethertype == _htons(ETHER_TYPE_IPv6))
		ol_flags |= PKT_TX_IPV6;
	else
		return 0; /* packet type not supported, nothing to do */

	if (info->l4_proto == IPPROTO_UDP) {
		udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len);
		/* do not recalculate udp cksum if it was 0 */
		if (udp_hdr->dgram_cksum != 0) {
			udp_hdr->dgram_cksum = 0;
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
				ol_flags |= PKT_TX_UDP_CKSUM;
				udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
					info->ethertype, ol_flags);
			} else {
				udp_hdr->dgram_cksum =
					get_udptcp_checksum(l3_hdr, udp_hdr,
						info->ethertype);
			}
		}
	} else if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
		tcp_hdr->cksum = 0;
		if (info->tso_segsz != 0) {
			ol_flags |= PKT_TX_TCP_SEG;
			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
				ol_flags);
		} else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
			ol_flags |= PKT_TX_TCP_CKSUM;
			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
				ol_flags);
		} else {
			tcp_hdr->cksum =
				get_udptcp_checksum(l3_hdr, tcp_hdr,
					info->ethertype);
		}
	} else if (info->l4_proto == IPPROTO_SCTP) {
		sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len);
		sctp_hdr->cksum = 0;
		/* sctp payload must be a multiple of 4 to be offloaded;
		 * total_length is stored in network byte order */
		if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
			((rte_be_to_cpu_16(ipv4_hdr->total_length) & 0x3) == 0)) {
			ol_flags |= PKT_TX_SCTP_CKSUM;
		} else {
			/* XXX implement CRC32c, example available in
			 * RFC3309 */
		}
	}

	return ol_flags;
}
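
/*
 * For reference, a minimal sketch of what the flags returned above ask
 * the application to do for hardware TCP checksum offload on a plain
 * IPv4/TCP packet (the header pointers are illustrative):
 *
 *	m->l2_len = sizeof(struct ether_hdr);
 *	m->l3_len = sizeof(struct ipv4_hdr);
 *	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *	ipv4_hdr->hdr_checksum = 0;
 *	tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr, m->ol_flags);
 */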

/* Calculate the checksum of the outer header (outer IP, plus outer UDP
 * for vxlan packets). The caller already checked that a tunnel was
 * parsed (info->is_tunnel is set); for gre and ipip tunnels only the
 * outer IP checksum is processed here. */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
	uint16_t testpmd_ol_flags)
{
	struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
	struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
	struct udp_hdr *udp_hdr;
	uint64_t ol_flags = 0;

	if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;
		ol_flags |= PKT_TX_OUTER_IPV4;

		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
		else
			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
		ol_flags |= PKT_TX_OUTER_IPV6;

	if (info->outer_l4_proto != IPPROTO_UDP)
		return ol_flags;

	/* outer UDP checksum is always done in software as we have no
	 * hardware supporting it today, and no API for it. */

	udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);
	/* do not recalculate udp cksum if it was 0 */
	if (udp_hdr->dgram_cksum != 0) {
		udp_hdr->dgram_cksum = 0;
		if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4))
			udp_hdr->dgram_cksum =
				rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
		else
			udp_hdr->dgram_cksum =
				rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
	}

	return ol_flags;
}

/*
 * Receive a burst of packets, and for each packet:
 *  - parse packet, and try to recognize a supported packet type (1)
 *  - if it's not a supported packet type, don't touch the packet, else:
 *  - modify the IPs in inner headers and in outer headers if any
 *  - reprocess the checksum of all supported layers. This is done in SW
 *    or HW, depending on testpmd command line configuration
 *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
 *    segmentation offload (this implies HW TCP checksum)
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * OUTER_IP flag is only useful for tunnel packets.
 */
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *m;
	struct ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
	uint64_t ol_flags;
	uint16_t testpmd_ol_flags;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;
	struct testpmd_offload_info info;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* receive a burst of packets */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;

	txp = &ports[fs->tx_port];
	testpmd_ol_flags = txp->tx_ol_flags;
	memset(&info, 0, sizeof(info));
	info.tso_segsz = txp->tso_segsz;

	for (i = 0; i < nb_rx; i++) {

		ol_flags = 0;
		info.is_tunnel = 0;
		m = pkts_burst[i];

		/* Update the L3/L4 checksum error packet statistics */
		rx_bad_ip_csum += ((m->ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
		rx_bad_l4_csum += ((m->ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6,
		 * tunnel (vxlan, gre, ipip) and inner headers */

		eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				&eth_hdr->d_addr);
		ether_addr_copy(&ports[fs->tx_port].eth_addr,
				&eth_hdr->s_addr);
		parse_ethernet(eth_hdr, &info);
		l3_hdr = (char *)eth_hdr + info.l2_len;

		/* check if it's a supported tunnel */
		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) {
			if (info.l4_proto == IPPROTO_UDP) {
				struct udp_hdr *udp_hdr;
				udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
					info.l3_len);
				parse_vxlan(udp_hdr, &info, m->packet_type);
			} else if (info.l4_proto == IPPROTO_GRE) {
				struct simple_gre_hdr *gre_hdr;
				gre_hdr = (struct simple_gre_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gre(gre_hdr, &info);
			} else if (info.l4_proto == IPPROTO_IPIP) {
				void *encap_ip_hdr;
				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
				parse_encap_ip(encap_ip_hdr, &info);
			}
		}

		/* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
		if (info.is_tunnel) {
			outer_l3_hdr = l3_hdr;
			l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
		}

		/* step 2: change all source IPs (v4 or v6) so that the
		 * checksums must be recomputed even if they were correct */

		change_ip_addresses(l3_hdr, info.ethertype);
		if (info.is_tunnel == 1)
			change_ip_addresses(outer_l3_hdr, info.outer_ethertype);

		/* step 3: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */

		/* process checksums of inner headers first */
		ol_flags |= process_inner_cksums(l3_hdr, &info, testpmd_ol_flags);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (info.is_tunnel == 1) {
			ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
				testpmd_ol_flags);
		}

		/* step 4: fill the mbuf metadata (flags and header lengths) */

		if (info.is_tunnel == 1) {
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) {
				m->outer_l2_len = info.outer_l2_len;
				m->outer_l3_len = info.outer_l3_len;
				m->l2_len = info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			} else {
				/* if an outer UDP cksum is processed in sw
				 * and the inner one in hw, the outer checksum
				 * will be wrong as the payload will be
				 * modified by the hardware */
				m->l2_len = info.outer_l2_len +
					info.outer_l3_len + info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = info.l2_len;
			m->l3_len = info.l3_len;
			m->l4_len = info.l4_len;
		}
		m->tso_segsz = info.tso_segsz;
		m->ol_flags = ol_flags;

		/* if verbose mode is enabled, dump debug info */
		if (verbose_level > 0) {
			struct {
				uint64_t flag;
				uint64_t mask;
			} tx_flags[] = {
				{ PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM },
				{ PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK },
				{ PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK },
				{ PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK },
				{ PKT_TX_IPV4, PKT_TX_IPV4 },
				{ PKT_TX_IPV6, PKT_TX_IPV6 },
				{ PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM },
				{ PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4 },
				{ PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6 },
				{ PKT_TX_TCP_SEG, PKT_TX_TCP_SEG },
			};
			unsigned j;
			const char *name;

			printf("-----------------\n");
			/* dump rx parsed packet info */
			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
				"l4_proto=%d l4_len=%d\n",
				info.l2_len, rte_be_to_cpu_16(info.ethertype),
				info.l3_len, info.l4_proto, info.l4_len);
			if (info.is_tunnel == 1)
				printf("rx: outer_l2_len=%d outer_ethertype=%x "
					"outer_l3_len=%d\n", info.outer_l2_len,
					rte_be_to_cpu_16(info.outer_ethertype),
					info.outer_l3_len);
			/* dump tx packet info */
			if ((testpmd_ol_flags & (TESTPMD_TX_OFFLOAD_IP_CKSUM |
						TESTPMD_TX_OFFLOAD_UDP_CKSUM |
						TESTPMD_TX_OFFLOAD_TCP_CKSUM |
						TESTPMD_TX_OFFLOAD_SCTP_CKSUM)) ||
				info.tso_segsz != 0)
				printf("tx: m->l2_len=%d m->l3_len=%d "
					"m->l4_len=%d\n",
					m->l2_len, m->l3_len, m->l4_len);
			if ((info.is_tunnel == 1) &&
				(testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM))
				printf("tx: m->outer_l2_len=%d m->outer_l3_len=%d\n",
					m->outer_l2_len, m->outer_l3_len);
			if (info.tso_segsz != 0)
				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
			printf("tx: flags=");
			for (j = 0; j < sizeof(tx_flags)/sizeof(*tx_flags); j++) {
				name = rte_get_tx_ol_flag_name(tx_flags[j].flag);
				if ((m->ol_flags & tx_flags[j].mask) ==
					tx_flags[j].flag)
					printf("%s ", name);
			}
			printf("\n");
		}
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}

struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};
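
/*
 * To exercise this engine from the testpmd prompt (the option syntax may
 * differ between testpmd versions; see the testpmd user guide):
 *
 *	testpmd> set fwd csum
 *	testpmd> csum set tcp hw 0	(request HW TCP checksum on port 0)
 *	testpmd> tso set 800 0		(enable TSO with 800-byte segments)
 *	testpmd> start
 */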