/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright(c) 2018 Mellanox Technology
 */

#include <stdio.h>

#include <rte_net.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_vxlan.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

#include "testpmd.h"

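/* Print an Ethernet address preceded by the given label, without a newline. */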
static inline void
print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", what, buf);
}

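/*
 * Dump the contents of an Rx or Tx packet burst: Ethernet addresses and
 * type, offload flags (RSS, FDIR, timestamp, VLAN/QinQ, metadata),
 * HW and SW packet types with the per-layer header lengths reported by
 * rte_net_get_ptype(), and basic VXLAN information for encapsulated UDP
 * packets. Each mbuf is also sanity-checked with rte_mbuf_check().
 * is_rx only selects the "received"/"sent" wording of the output.
 */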
static inline void
dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
	      uint16_t nb_pkts, int is_rx)
{
	struct rte_mbuf  *mb;
	const struct rte_ether_hdr *eth_hdr;
	struct rte_ether_hdr _eth_hdr;
	uint16_t eth_type;
	uint64_t ol_flags;
	uint16_t i, packet_type;
	uint16_t is_encapsulation;
	char buf[256];
	struct rte_net_hdr_lens hdr_lens;
	uint32_t sw_packet_type;
	uint16_t udp_port;
	uint32_t vx_vni;
	const char *reason;

	if (!nb_pkts)
		return;
	printf("port %u/queue %u: %s %u packets\n",
	       port_id, queue,
	       is_rx ? "received" : "sent",
	       (unsigned int) nb_pkts);
	for (i = 0; i < nb_pkts; i++) {
		mb = pkts[i];
		eth_hdr = rte_pktmbuf_read(mb, 0, sizeof(_eth_hdr), &_eth_hdr);
		eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
		ol_flags = mb->ol_flags;
		packet_type = mb->packet_type;
		is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type);

		print_ether_addr("  src=", &eth_hdr->s_addr);
		print_ether_addr(" - dst=", &eth_hdr->d_addr);
		printf(" - type=0x%04x - length=%u - nb_segs=%d",
		       eth_type, (unsigned int) mb->pkt_len,
		       (int)mb->nb_segs);
		if (ol_flags & PKT_RX_RSS_HASH) {
			printf(" - RSS hash=0x%x", (unsigned int) mb->hash.rss);
			printf(" - RSS queue=0x%x", (unsigned int) queue);
		}
		if (ol_flags & PKT_RX_FDIR) {
			printf(" - FDIR matched ");
			if (ol_flags & PKT_RX_FDIR_ID)
				printf("ID=0x%x",
				       mb->hash.fdir.hi);
			else if (ol_flags & PKT_RX_FDIR_FLX)
				printf("flex bytes=0x%08x %08x",
				       mb->hash.fdir.hi, mb->hash.fdir.lo);
			else
				printf("hash=0x%x ID=0x%x ",
				       mb->hash.fdir.hash, mb->hash.fdir.id);
		}
		if (ol_flags & PKT_RX_TIMESTAMP)
			printf(" - timestamp %"PRIu64" ", mb->timestamp);
		if (ol_flags & PKT_RX_QINQ)
			printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
			       mb->vlan_tci, mb->vlan_tci_outer);
		else if (ol_flags & PKT_RX_VLAN)
			printf(" - VLAN tci=0x%x", mb->vlan_tci);
		if (!is_rx && (ol_flags & PKT_TX_DYNF_METADATA))
			printf(" - Tx metadata: 0x%x",
			       *RTE_FLOW_DYNF_METADATA(mb));
		if (is_rx && (ol_flags & PKT_RX_DYNF_METADATA))
			printf(" - Rx metadata: 0x%x",
			       *RTE_FLOW_DYNF_METADATA(mb));
		if (mb->packet_type) {
			rte_get_ptype_name(mb->packet_type, buf, sizeof(buf));
			printf(" - hw ptype: %s", buf);
		}
		sw_packet_type = rte_net_get_ptype(mb, &hdr_lens,
					RTE_PTYPE_ALL_MASK);
		rte_get_ptype_name(sw_packet_type, buf, sizeof(buf));
		printf(" - sw ptype: %s", buf);
		if (sw_packet_type & RTE_PTYPE_L2_MASK)
			printf(" - l2_len=%d", hdr_lens.l2_len);
		if (sw_packet_type & RTE_PTYPE_L3_MASK)
			printf(" - l3_len=%d", hdr_lens.l3_len);
		if (sw_packet_type & RTE_PTYPE_L4_MASK)
			printf(" - l4_len=%d", hdr_lens.l4_len);
		if (sw_packet_type & RTE_PTYPE_TUNNEL_MASK)
			printf(" - tunnel_len=%d", hdr_lens.tunnel_len);
		if (sw_packet_type & RTE_PTYPE_INNER_L2_MASK)
			printf(" - inner_l2_len=%d", hdr_lens.inner_l2_len);
		if (sw_packet_type & RTE_PTYPE_INNER_L3_MASK)
			printf(" - inner_l3_len=%d", hdr_lens.inner_l3_len);
		if (sw_packet_type & RTE_PTYPE_INNER_L4_MASK)
			printf(" - inner_l4_len=%d", hdr_lens.inner_l4_len);
		if (is_encapsulation) {
			struct rte_ipv4_hdr *ipv4_hdr;
			struct rte_ipv6_hdr *ipv6_hdr;
			struct rte_udp_hdr *udp_hdr;
			uint8_t l2_len;
			uint8_t l3_len;
			uint8_t l4_len;
			uint8_t l4_proto;
			struct rte_vxlan_hdr *vxlan_hdr;

			l2_len = sizeof(struct rte_ether_hdr);

			/* Do not support ipv4 option field */
			if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
				l3_len = sizeof(struct rte_ipv4_hdr);
				ipv4_hdr = rte_pktmbuf_mtod_offset(mb,
					struct rte_ipv4_hdr *, l2_len);
				l4_proto = ipv4_hdr->next_proto_id;
			} else {
				l3_len = sizeof(struct rte_ipv6_hdr);
				ipv6_hdr = rte_pktmbuf_mtod_offset(mb,
					struct rte_ipv6_hdr *, l2_len);
				l4_proto = ipv6_hdr->proto;
			}
			if (l4_proto == IPPROTO_UDP) {
				udp_hdr = rte_pktmbuf_mtod_offset(mb,
					struct rte_udp_hdr *,
					l2_len + l3_len);
				l4_len = sizeof(struct rte_udp_hdr);
				vxlan_hdr = rte_pktmbuf_mtod_offset(mb,
					struct rte_vxlan_hdr *,
					l2_len + l3_len + l4_len);
				udp_port = RTE_BE_TO_CPU_16(udp_hdr->dst_port);
				vx_vni = rte_be_to_cpu_32(vxlan_hdr->vx_vni);
				printf(" - VXLAN packet: packet type =%d, "
				       "Destination UDP port =%d, VNI = %d",
				       packet_type, udp_port, vx_vni >> 8);
			}
		}
		printf(" - %s queue=0x%x", is_rx ? "Receive" : "Send",
			(unsigned int) queue);
		printf("\n");
		rte_get_rx_ol_flag_list(mb->ol_flags, buf, sizeof(buf));
		printf("  ol_flags: %s\n", buf);
		if (rte_mbuf_check(mb, 1, &reason) < 0)
			printf("INVALID mbuf: %s\n", reason);
	}
}

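/*
 * Rx burst dump callback. The signature matches rte_rx_callback_fn, so it is
 * typically installed with rte_eth_add_rx_callback(), e.g. (sketch):
 *   rte_eth_add_rx_callback(port_id, queue, dump_rx_pkts, NULL);
 * testpmd does this when its verbose mode is enabled. The burst is printed
 * and nb_pkts is returned unchanged.
 */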
uint16_t
dump_rx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
	     uint16_t nb_pkts, __rte_unused uint16_t max_pkts,
	     __rte_unused void *user_param)
{
	dump_pkt_burst(port_id, queue, pkts, nb_pkts, 1);
	return nb_pkts;
}

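/*
 * Tx counterpart of dump_rx_pkts(); matches rte_tx_callback_fn for use with
 * rte_eth_add_tx_callback() and returns nb_pkts unchanged.
 */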
uint16_t
dump_tx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
	     uint16_t nb_pkts, __rte_unused void *user_param)
{
	dump_pkt_burst(port_id, queue, pkts, nb_pkts, 0);
	return nb_pkts;
}

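/*
 * Tx callback that stamps the port's configured tx_metadata value into the
 * dynamic metadata field of every packet in the burst, provided the dynamic
 * field has been registered (rte_flow_dynf_metadata_avail()).
 */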
uint16_t
tx_pkt_set_md(uint16_t port_id, __rte_unused uint16_t queue,
	      struct rte_mbuf *pkts[], uint16_t nb_pkts,
	      __rte_unused void *user_param)
{
	uint16_t i = 0;

	/*
	 * Add metadata value to every Tx packet,
	 * and set ol_flags accordingly.
	 */
	if (rte_flow_dynf_metadata_avail())
		for (i = 0; i < nb_pkts; i++) {
			*RTE_FLOW_DYNF_METADATA(pkts[i]) =
						ports[port_id].tx_metadata;
			pkts[i]->ol_flags |= PKT_TX_DYNF_METADATA;
		}
	return nb_pkts;
}

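/*
 * Install tx_pkt_set_md() as a Tx callback on every Tx queue of the port
 * that does not already have one, saving the callback handles in
 * ports[portid].tx_set_md_cb so they can be removed later.
 */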
void
add_tx_md_callback(portid_t portid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queue;
	int ret;

	if (port_id_is_invalid(portid, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(portid, &dev_info);
	if (ret != 0)
		return;

	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
		if (!ports[portid].tx_set_md_cb[queue])
			ports[portid].tx_set_md_cb[queue] =
				rte_eth_add_tx_callback(portid, queue,
							tx_pkt_set_md, NULL);
}

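/*
 * Remove the Tx metadata callbacks previously installed by
 * add_tx_md_callback() and clear the saved handles.
 */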
void
remove_tx_md_callback(portid_t portid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queue;
	int ret;

	if (port_id_is_invalid(portid, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(portid, &dev_info);
	if (ret != 0)
		return;

	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
		if (ports[portid].tx_set_md_cb[queue]) {
			rte_eth_remove_tx_callback(portid, queue,
				ports[portid].tx_set_md_cb[queue]);
			ports[portid].tx_set_md_cb[queue] = NULL;
		}
}

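/*
 * Wrapper around rte_eth_dev_info_get() that prints an error message on
 * failure and returns the original error code.
 */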
int
eth_dev_info_get_print_err(uint16_t port_id,
					struct rte_eth_dev_info *dev_info)
{
	int ret;

	ret = rte_eth_dev_info_get(port_id, dev_info);
	if (ret != 0)
		printf("Error getting device (port %u) info: %s\n",
				port_id, rte_strerror(-ret));

	return ret;
}

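/* Enable or disable promiscuous mode on a port, reporting any failure. */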
void
eth_set_promisc_mode(uint16_t port, int enable)
{
	int ret;

	if (enable)
		ret = rte_eth_promiscuous_enable(port);
	else
		ret = rte_eth_promiscuous_disable(port);

	if (ret != 0)
		printf("Error %s promiscuous mode for port %u: %s\n",
			enable ? "enabling" : "disabling",
			port, rte_strerror(-ret));
}

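/* Enable or disable all-multicast mode on a port, reporting any failure. */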
void
eth_set_allmulticast_mode(uint16_t port, int enable)
{
	int ret;

	if (enable)
		ret = rte_eth_allmulticast_enable(port);
	else
		ret = rte_eth_allmulticast_disable(port);

	if (ret != 0)
		printf("Error %s all-multicast mode for port %u: %s\n",
			enable ? "enabling" : "disabling",
			port, rte_strerror(-ret));
}

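/*
 * Wrapper around rte_eth_link_get_nowait() that prints an error message on
 * failure and returns the original error code.
 */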
int
eth_link_get_nowait_print_err(uint16_t port_id, struct rte_eth_link *link)
{
	int ret;

	ret = rte_eth_link_get_nowait(port_id, link);
	if (ret < 0)
		printf("Device (port %u) link get (without wait) failed: %s\n",
			port_id, rte_strerror(-ret));

	return ret;
}

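/*
 * Wrapper around rte_eth_macaddr_get() that prints an error message on
 * failure and returns the original error code.
 */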
int
eth_macaddr_get_print_err(uint16_t port_id, struct rte_ether_addr *mac_addr)
{
	int ret;

	ret = rte_eth_macaddr_get(port_id, mac_addr);
	if (ret != 0)
		printf("Error getting device (port %u) MAC address: %s\n",
				port_id, rte_strerror(-ret));

	return ret;
}