/* app/test-pmd/txonly.c */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdarg.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
#include <rte_flow.h>

#include "testpmd.h"

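/*
 * Timestamp marker written into the packet payload (right after the
 * Ethernet/IPv4/UDP headers) when Tx timestamping is enabled.
 * All fields are stored in network byte order.
 */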
struct tx_timestamp {
	rte_be32_t signature;
	rte_be16_t pkt_idx;
	rte_be16_t queue_idx;
	rte_be64_t ts;
};

/* use RFC863 Discard Protocol */
uint16_t tx_udp_src_port = 9;
uint16_t tx_udp_dst_port = 9;

/* use RFC5735 / RFC2544 reserved network test addresses */
uint32_t tx_ip_src_addr = (198U << 24) | (18 << 16) | (0 << 8) | 1;
uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2;

#define IP_DEFTTL  64   /* from RFC 1340. */

static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
RTE_DEFINE_PER_LCORE(uint8_t, _src_port_var); /**< Source port variation */
static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */

static uint64_t timestamp_mask; /**< Timestamp dynamic flag mask */
static int32_t timestamp_off; /**< Timestamp dynamic field offset */
static bool timestamp_enable; /**< Timestamp enable */
static uint64_t timestamp_initial[RTE_MAX_ETHPORTS];

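/*
 * Copy "len" bytes from "buf" into a (possibly multi-segment) mbuf,
 * starting "offset" bytes into the packet and crossing segment
 * boundaries as needed.
 */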
static void
copy_buf_to_pkt_segs(void *buf, unsigned int len, struct rte_mbuf *pkt,
		     unsigned int offset)
{
	struct rte_mbuf *seg;
	void *seg_buf;
	unsigned int copy_len;

	seg = pkt;
	while (offset >= seg->data_len) {
		offset -= seg->data_len;
		seg = seg->next;
	}
	copy_len = seg->data_len - offset;
	seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset);
	while (len > copy_len) {
		rte_memcpy(seg_buf, buf, (size_t) copy_len);
		len -= copy_len;
		buf = ((char *)buf + copy_len);
		seg = seg->next;
		seg_buf = rte_pktmbuf_mtod(seg, char *);
		copy_len = seg->data_len;
	}
	rte_memcpy(seg_buf, buf, (size_t) len);
}

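/*
 * Fast path of the copy above: write directly into the first segment
 * when the region fits there, otherwise fall back to the segmented copy.
 */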
static inline void
copy_buf_to_pkt(void *buf, unsigned int len, struct rte_mbuf *pkt,
		unsigned int offset)
{
	if (offset + len <= pkt->data_len) {
		rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset),
			buf, (size_t) len);
		return;
	}
	copy_buf_to_pkt_segs(buf, len, pkt, offset);
}

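/*
 * Fill in the template UDP and IPv4 headers that are copied into every
 * transmitted packet. Addresses and ports come from the globals above,
 * lengths are derived from "pkt_data_len", and the IPv4 header checksum
 * is computed once here.
 */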
static void
setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
			 struct rte_udp_hdr *udp_hdr,
			 uint16_t pkt_data_len)
{
	uint16_t pkt_len;

	/*
	 * Initialize UDP header.
	 */
	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
	udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port);
	udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port);
	udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
	udp_hdr->dgram_cksum = 0; /* No UDP checksum. */

	/*
	 * Initialize IP header.
	 */
	pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
	ip_hdr->version_ihl = RTE_IPV4_VHL_DEF;
	ip_hdr->type_of_service = 0;
	ip_hdr->fragment_offset = 0;
	ip_hdr->time_to_live = IP_DEFTTL;
	ip_hdr->next_proto_id = IPPROTO_UDP;
	ip_hdr->packet_id = 0;
	ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
	ip_hdr->src_addr = rte_cpu_to_be_32(tx_ip_src_addr);
	ip_hdr->dst_addr = rte_cpu_to_be_32(tx_ip_dst_addr);

	/*
	 * Compute IP header checksum.
	 */
	ip_hdr->hdr_checksum = rte_ipv4_cksum_simple(ip_hdr);
}

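/*
 * Refresh the UDP datagram length and the IPv4 total length and header
 * checksum of an already-built packet. Needed when the packet length
 * varies per packet (random Tx split) or headers are edited per flow.
 */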
static inline void
update_pkt_header(struct rte_mbuf *pkt, uint32_t total_pkt_len)
{
	struct rte_ipv4_hdr *ip_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint16_t pkt_data_len;
	uint16_t pkt_len;

	pkt_data_len = (uint16_t) (total_pkt_len - (
					sizeof(struct rte_ether_hdr) +
					sizeof(struct rte_ipv4_hdr) +
					sizeof(struct rte_udp_hdr)));
	/* update UDP packet length */
	udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
				sizeof(struct rte_ether_hdr) +
				sizeof(struct rte_ipv4_hdr));
	pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
	udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);

	/* update IP packet length and checksum */
	ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));
	ip_hdr->hdr_checksum = 0;
	pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
	ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len);
	ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
}

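/*
 * Prepare a single packet of the burst: chain extra segments if needed,
 * copy in the Ethernet/IPv4/UDP header templates, optionally vary the
 * UDP source port (multi-flow mode) and insert the scheduling timestamp.
 * Returns false on segment allocation or device info failure.
 */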
static inline bool
pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
		struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci,
		const uint16_t vlan_tci_outer, const uint64_t ol_flags,
		const uint16_t idx, struct fwd_stream *fs)
{
	struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT];
	struct rte_mbuf *pkt_seg;
	uint32_t nb_segs, pkt_len;
	uint8_t i;

	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND))
		nb_segs = rte_rand() % tx_pkt_nb_segs + 1;
	else
		nb_segs = tx_pkt_nb_segs;

	if (nb_segs > 1) {
		if (rte_mempool_get_bulk(mbp, (void **)pkt_segs, nb_segs - 1))
			return false;
	}

	rte_pktmbuf_reset_headroom(pkt);
	pkt->data_len = tx_pkt_seg_lengths[0];
	pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
	pkt->ol_flags |= ol_flags;
	pkt->vlan_tci = vlan_tci;
	pkt->vlan_tci_outer = vlan_tci_outer;
	pkt->l2_len = sizeof(struct rte_ether_hdr);
	pkt->l3_len = sizeof(struct rte_ipv4_hdr);

	pkt_len = pkt->data_len;
	pkt_seg = pkt;
	for (i = 1; i < nb_segs; i++) {
		pkt_seg->next = pkt_segs[i - 1];
		pkt_seg = pkt_seg->next;
		pkt_seg->data_len = tx_pkt_seg_lengths[i];
		pkt_len += pkt_seg->data_len;
	}
	pkt_seg->next = NULL; /* Last segment of packet. */
	/*
	 * Copy headers in first packet segment(s).
	 */
	copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0);
	copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
			sizeof(struct rte_ether_hdr));
	copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
			sizeof(struct rte_ether_hdr) +
			sizeof(struct rte_ipv4_hdr));
	if (txonly_multi_flow) {
		uint16_t src_var = RTE_PER_LCORE(_src_port_var);
		struct rte_udp_hdr *udp_hdr;
		uint16_t src_port;

		udp_hdr = rte_pktmbuf_mtod_offset(pkt,
				struct rte_udp_hdr *,
				sizeof(struct rte_ether_hdr) +
				sizeof(struct rte_ipv4_hdr));
		/*
		 * Generate multiple flows by varying the UDP source port.
		 * This lets RSS on the receiving side, if any, distribute
		 * the packets across queues, so txonly mode can serve as a
		 * decent packet generator for quick performance regression
		 * tests.
		 *
		 * Only ports in the range 49152 (0xC000) to 65535 (0xFFFF)
		 * are used, with the least significant byte holding the
		 * lcore ID. As such, the most significant byte cycles
		 * through 0xC0 to 0xFF.
		 */
		src_port = ((src_var++ | 0xC0) << 8) + rte_lcore_id();
		udp_hdr->src_port = rte_cpu_to_be_16(src_port);
		RTE_PER_LCORE(_src_port_var) = src_var;
	}

	if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND) || txonly_multi_flow)
		update_pkt_header(pkt, pkt_len);

	if (unlikely(timestamp_enable)) {
		uint64_t skew = fs->ts_skew;
		struct tx_timestamp timestamp_mark;

		if (unlikely(!skew)) {
			struct rte_eth_dev_info dev_info;
			unsigned int txqs_n;
			uint64_t phase;
			int ret;

			ret = eth_dev_info_get_print_err(fs->tx_port, &dev_info);
			if (ret != 0) {
				TESTPMD_LOG(ERR,
					"Failed to get device info for port %d, "
					"could not finish timestamp init\n",
					fs->tx_port);
				return false;
			}
			txqs_n = dev_info.nb_tx_queues;
			phase = tx_pkt_times_inter * fs->tx_queue /
					 (txqs_n ? txqs_n : 1);
			/*
			 * Initialize the scheduling time phase shift
			 * depending on the queue index.
			 */
			skew = timestamp_initial[fs->tx_port] +
			       tx_pkt_times_inter + phase;
			fs->ts_skew = skew;
		}
		timestamp_mark.pkt_idx = rte_cpu_to_be_16(idx);
		timestamp_mark.queue_idx = rte_cpu_to_be_16(fs->tx_queue);
		timestamp_mark.signature = rte_cpu_to_be_32(0xBEEFC0DE);
		if (unlikely(!idx)) {
			skew += tx_pkt_times_inter;
			pkt->ol_flags |= timestamp_mask;
			*RTE_MBUF_DYNFIELD
				(pkt, timestamp_off, uint64_t *) = skew;
			fs->ts_skew = skew;
			timestamp_mark.ts = rte_cpu_to_be_64(skew);
		} else if (tx_pkt_times_intra) {
			skew += tx_pkt_times_intra;
			pkt->ol_flags |= timestamp_mask;
			*RTE_MBUF_DYNFIELD
				(pkt, timestamp_off, uint64_t *) = skew;
			fs->ts_skew = skew;
			timestamp_mark.ts = rte_cpu_to_be_64(skew);
		} else {
			timestamp_mark.ts = RTE_BE64(0);
		}
		copy_buf_to_pkt(&timestamp_mark, sizeof(timestamp_mark), pkt,
			sizeof(struct rte_ether_hdr) +
			sizeof(struct rte_ipv4_hdr) +
			sizeof(pkt_udp_hdr));
	}
	/*
	 * Complete first mbuf of packet and append it to the
	 * burst of packets to be transmitted.
	 */
	pkt->nb_segs = nb_segs;
	pkt->pkt_len = pkt_len;

	return true;
}

/*
 * Transmit a burst of multi-segment packets.
 */
static bool
pkt_burst_transmit(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *pkt;
	struct rte_mempool *mbp;
	struct rte_ether_hdr eth_hdr;
	uint16_t nb_tx;
	uint16_t nb_pkt;
	uint16_t vlan_tci, vlan_tci_outer;
	uint64_t ol_flags = 0;
	uint64_t tx_offloads;

	mbp = current_fwd_lcore()->mbp;
	txp = &ports[fs->tx_port];
	tx_offloads = txp->dev_conf.txmode.offloads;
	vlan_tci = txp->tx_vlan_id;
	vlan_tci_outer = txp->tx_vlan_id_outer;
	if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
		ol_flags = RTE_MBUF_F_TX_VLAN;
	if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
		ol_flags |= RTE_MBUF_F_TX_QINQ;
	if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
		ol_flags |= RTE_MBUF_F_TX_MACSEC;

	/*
	 * Initialize Ethernet header.
	 */
	rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.dst_addr);
	rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.src_addr);
	eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);

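	/*
	 * Try to allocate the whole burst from the mempool in one bulk
	 * operation; fall back to per-packet allocation if that fails.
	 */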
	if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
				nb_pkt_per_burst) == 0) {
		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
			if (unlikely(!pkt_burst_prepare(pkts_burst[nb_pkt], mbp,
							&eth_hdr, vlan_tci,
							vlan_tci_outer,
							ol_flags,
							nb_pkt, fs))) {
				rte_mempool_put_bulk(mbp,
						(void **)&pkts_burst[nb_pkt],
						nb_pkt_per_burst - nb_pkt);
				break;
			}
		}
	} else {
		for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
			pkt = rte_mbuf_raw_alloc(mbp);
			if (pkt == NULL)
				break;
			if (unlikely(!pkt_burst_prepare(pkt, mbp, &eth_hdr,
							vlan_tci,
							vlan_tci_outer,
							ol_flags,
							nb_pkt, fs))) {
				rte_pktmbuf_free(pkt);
				break;
			}
			pkts_burst[nb_pkt] = pkt;
		}
	}

	if (nb_pkt == 0)
		return false;

	nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_pkt);

	if (txonly_multi_flow)
		RTE_PER_LCORE(_src_port_var) -= nb_pkt - nb_tx;

	if (unlikely(nb_tx < nb_pkt)) {
		if (verbose_level > 0 && fs->fwd_dropped == 0)
			printf("port %d tx_queue %d - drop "
			       "(nb_pkt:%u - nb_tx:%u)=%u packets\n",
			       fs->tx_port, fs->tx_queue,
			       (unsigned int) nb_pkt, (unsigned int) nb_tx,
			       (unsigned int) (nb_pkt - nb_tx));
	}

	return true;
}

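/*
 * Per-port initialization for txonly mode: validate that the configured
 * segment lengths can hold the headers, build the header templates and
 * look up the mbuf dynamic flag/field used for Tx scheduling timestamps.
 */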
static int
tx_only_begin(portid_t pi)
{
	uint16_t pkt_hdr_len, pkt_data_len;
	int dynf;

	pkt_hdr_len = (uint16_t)(sizeof(struct rte_ether_hdr) +
				 sizeof(struct rte_ipv4_hdr) +
				 sizeof(struct rte_udp_hdr));
	pkt_data_len = tx_pkt_length - pkt_hdr_len;

	if ((tx_pkt_split == TX_PKT_SPLIT_RND || txonly_multi_flow) &&
	    tx_pkt_seg_lengths[0] < pkt_hdr_len) {
		TESTPMD_LOG(ERR,
			    "Random segment number or multiple flows are enabled, "
			    "but tx_pkt_seg_lengths[0] %u < %u (needed)\n",
			    tx_pkt_seg_lengths[0], pkt_hdr_len);
		return -EINVAL;
	}

	setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);

	timestamp_enable = false;
	timestamp_mask = 0;
	timestamp_off = -1;
	dynf = rte_mbuf_dynflag_lookup
				(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
	if (dynf >= 0)
		timestamp_mask = 1ULL << dynf;
	dynf = rte_mbuf_dynfield_lookup
				(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
	if (dynf >= 0)
		timestamp_off = dynf;
	timestamp_enable = tx_pkt_times_inter &&
			   timestamp_mask &&
			   timestamp_off >= 0 &&
			   !rte_eth_read_clock(pi, &timestamp_initial[pi]);

	if (timestamp_enable) {
		pkt_hdr_len += sizeof(struct tx_timestamp);

		if (tx_pkt_split == TX_PKT_SPLIT_RND) {
			if (tx_pkt_seg_lengths[0] < pkt_hdr_len) {
				TESTPMD_LOG(ERR,
					    "Timestamp and random segment number are enabled, "
					    "but tx_pkt_seg_lengths[0] %u < %u (needed)\n",
					    tx_pkt_seg_lengths[0], pkt_hdr_len);
				return -EINVAL;
			}
		} else {
			uint16_t total = 0;
			uint8_t i;

			for (i = 0; i < tx_pkt_nb_segs; i++) {
				total += tx_pkt_seg_lengths[i];
				if (total >= pkt_hdr_len)
					break;
			}

			if (total < pkt_hdr_len) {
				TESTPMD_LOG(ERR,
					    "Not enough Tx segment space for timestamp info, "
					    "total %u < %u (needed)\n",
					    total, pkt_hdr_len);
				return -EINVAL;
			}
		}
	}

	/* Make sure all settings are visible on the forwarding cores. */
	rte_wmb();
	return 0;
}

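/* Disable the stream if its Tx queue has been stopped. */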
static void
tx_only_stream_init(struct fwd_stream *fs)
{
	fs->disabled = ports[fs->tx_port].txq[fs->tx_queue].state ==
						RTE_ETH_QUEUE_STATE_STOPPED;
}

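/* Tx-only forwarding engine: generates and transmits UDP/IPv4 packets. */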
struct fwd_engine tx_only_engine = {
	.fwd_mode_name  = "txonly",
	.port_fwd_begin = tx_only_begin,
	.stream_init    = tx_only_stream_init,
	.packet_fwd     = pkt_burst_transmit,
};