/* Source: /dpdk/app/test-pmd/config.c (revision 3bb3ebb51b789d4ecb417cbdb1dce5c7211f6f18) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
#include <rte_hexdump.h>

#include "testpmd.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
		ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
		ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
		ETH_RSS_GTPU | ETH_RSS_ECPRI},
	{ "none", 0 },
	{ "eth", ETH_RSS_ETH },
	{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
	{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
	{ "vlan", ETH_RSS_VLAN },
	{ "s-vlan", ETH_RSS_S_VLAN },
	{ "c-vlan", ETH_RSS_C_VLAN },
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
	{ "ip", ETH_RSS_IP },
	{ "udp", ETH_RSS_UDP },
	{ "tcp", ETH_RSS_TCP },
	{ "sctp", ETH_RSS_SCTP },
	{ "tunnel", ETH_RSS_TUNNEL },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
	{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
	{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
	{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
	{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
	{ "esp", ETH_RSS_ESP },
	{ "ah", ETH_RSS_AH },
	{ "l2tpv3", ETH_RSS_L2TPV3 },
	{ "pfcp", ETH_RSS_PFCP },
	{ "pppoe", ETH_RSS_PPPOE },
	{ "gtpu", ETH_RSS_GTPU },
	{ "ecpri", ETH_RSS_ECPRI },
	{ NULL, 0 },
};

static const struct {
	enum rte_eth_fec_mode mode;
	const char *name;
} fec_mode_name[] = {
	{
		.mode = RTE_ETH_FEC_NOFEC,
		.name = "off",
	},
	{
		.mode = RTE_ETH_FEC_AUTO,
		.name = "auto",
	},
	{
		.mode = RTE_ETH_FEC_BASER,
		.name = "baser",
	},
	{
		.mode = RTE_ETH_FEC_RS,
		.name = "rs",
	},
};

static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

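/* Display basic NIC statistics, plus the Rx/Tx throughput computed from
 * the deltas since the previous call (per-port state is kept in statics).
 */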
void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
								diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
	printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
	printf("  RX-nombuf:  %-10"PRIu64"\n", stats.rx_nombuf);
	printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n  Throughput (since last show)\n");
	printf("  Rx-pps: %12"PRIu64"          Rx-bps: %12"PRIu64"\n  Tx-pps: %12"
	       PRIu64"          Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

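/* Reset the basic statistics of a port and re-sync the cached snapshot. */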
void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		printf("%s: Error: failed to reset stats (port %u): %s\n",
		       __func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		printf("%s: Error: failed to get stats (port %u): %s\n",
		       __func__, port_id, strerror(ret));
		return;
	}
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}

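/* Display the extended statistics (xstats) of a port by name. */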
void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
			xstats_names[idx_xstat].name,
			xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

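/* Reset the extended statistics of a port and re-sync the cached snapshot. */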
void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		printf("%s: Error: failed to reset xstats (port %u): %s\n",
		       __func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		printf("%s: Error: failed to get stats (port %u): %s\n",
		       __func__, port_id, strerror(ret));
		return;
	}
}

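/* Display configuration and status of one Rx queue. */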
void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
			"RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
		(qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
		(qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
		(qinfo.scattered_rx != 0) ? "on" : "off");
	if (qinfo.rx_buf_size != 0)
		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

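/* Display configuration and status of one Tx queue. */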
void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
			"TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
		(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

static int bus_match_all(const struct rte_bus *bus, const void *data)
{
	RTE_SET_USED(bus);
	RTE_SET_USED(data);
	return 0;
}

static void
device_infos_display_speeds(uint32_t speed_capa)
{
	printf("\n\tDevice speed capability:");
	if (speed_capa == ETH_LINK_SPEED_AUTONEG)
		printf(" Autonegotiate (all speeds)");
	if (speed_capa & ETH_LINK_SPEED_FIXED)
		printf(" Disable autonegotiate (fixed speed)  ");
	if (speed_capa & ETH_LINK_SPEED_10M_HD)
		printf(" 10 Mbps half-duplex  ");
	if (speed_capa & ETH_LINK_SPEED_10M)
		printf(" 10 Mbps full-duplex  ");
	if (speed_capa & ETH_LINK_SPEED_100M_HD)
		printf(" 100 Mbps half-duplex  ");
	if (speed_capa & ETH_LINK_SPEED_100M)
		printf(" 100 Mbps full-duplex  ");
	if (speed_capa & ETH_LINK_SPEED_1G)
		printf(" 1 Gbps  ");
	if (speed_capa & ETH_LINK_SPEED_2_5G)
		printf(" 2.5 Gbps  ");
	if (speed_capa & ETH_LINK_SPEED_5G)
		printf(" 5 Gbps  ");
	if (speed_capa & ETH_LINK_SPEED_10G)
		printf(" 10 Gbps  ");
	if (speed_capa & ETH_LINK_SPEED_20G)
		printf(" 20 Gbps  ");
	if (speed_capa & ETH_LINK_SPEED_25G)
		printf(" 25 Gbps  ");
	if (speed_capa & ETH_LINK_SPEED_40G)
		printf(" 40 Gbps  ");
	if (speed_capa & ETH_LINK_SPEED_50G)
		printf(" 50 Gbps  ");
	if (speed_capa & ETH_LINK_SPEED_56G)
		printf(" 56 Gbps  ");
	if (speed_capa & ETH_LINK_SPEED_100G)
		printf(" 100 Gbps  ");
	if (speed_capa & ETH_LINK_SPEED_200G)
		printf(" 200 Gbps  ");
}

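/* Display bus/driver information for every device matching the optional
 * devargs identifier, and the ethdev ports attached to each of them.
 */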
void
device_infos_display(const char *identifier)
{
	static const char *info_border = "*********************";
	struct rte_bus *start = NULL, *next;
	struct rte_dev_iterator dev_iter;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_ether_addr mac_addr;
	struct rte_device *dev;
	struct rte_devargs da;
	portid_t port_id;
	struct rte_eth_dev_info dev_info;
	char devstr[128];

	memset(&da, 0, sizeof(da));
	if (!identifier)
		goto skip_parse;

	if (rte_devargs_parsef(&da, "%s", identifier)) {
		printf("cannot parse identifier\n");
		if (da.args)
			free(da.args);
		return;
	}

skip_parse:
	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {

		start = next;
		if (identifier && da.bus != next)
			continue;

		/* Skip buses that don't have iterate method */
		if (!next->dev_iterate)
			continue;

		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
		RTE_DEV_FOREACH(dev, devstr, &dev_iter) {

			if (!dev->driver)
				continue;
			/* Check for matching device if identifier is present */
			if (identifier &&
			    strncmp(da.name, dev->name, strlen(dev->name)))
				continue;
			printf("\n%s Infos for device %s %s\n",
			       info_border, dev->name, info_border);
			printf("Bus name: %s", dev->bus->name);
			printf("\nDriver name: %s", dev->driver->name);
			printf("\nDevargs: %s",
			       dev->devargs ? dev->devargs->args : "");
			printf("\nConnect to socket: %d", dev->numa_node);
			printf("\n");

			/* List ports with matching device name */
			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
				printf("\n\tPort id: %-2d", port_id);
				if (eth_macaddr_get_print_err(port_id,
							      &mac_addr) == 0)
					print_ethaddr("\n\tMAC address: ",
						      &mac_addr);
				rte_eth_dev_get_name_by_port(port_id, name);
				printf("\n\tDevice name: %s", name);
				if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
					device_infos_display_speeds(dev_info.speed_capa);
				printf("\n");
			}
		}
	}
}

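/* Display a detailed report for one port: link, MTU, offloads,
 * RSS capabilities, descriptor limits and switch information.
 */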
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	uint16_t mtu;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;
	char fw_version[ETHDEV_FWVERS_LEN];

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	port = &ports[port_id];
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
		print_ethaddr("MAC address: ", &mac_addr);
	rte_eth_dev_get_name_by_port(port_id, name);
	printf("\nDevice name: %s", name);
	printf("\nDriver name: %s", dev_info.driver_name);

	if (rte_eth_dev_fw_version_get(port_id, fw_version,
						ETHDEV_FWVERS_LEN) == 0)
		printf("\nFirmware-version: %s", fw_version);
	else
		printf("\nFirmware-version: %s", "not available");

	if (dev_info.device->devargs && dev_info.device->devargs->args)
		printf("\nDevargs: %s", dev_info.device->devargs->args);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id], 0);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
							port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload:\n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on, ");
		else
			printf("  strip off, ");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("filter on, ");
		else
			printf("filter off, ");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("extend on, ");
		else
			printf("extend off, ");

		if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
			printf("qinq strip on\n");
		else
			printf("qinq strip off\n");
	}

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No RSS offload flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported RSS offload flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
		     i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			if (p)
				printf("  %s\n", p);
			else
				printf("  user defined %d\n", i);
		}
	}

	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
	printf("Maximum configurable length of RX packet: %u\n",
		dev_info.max_rx_pktlen);
	printf("Maximum configurable size of LRO aggregated packet: %u\n",
		dev_info.max_lro_pkt_size);
	if (dev_info.max_vfs)
		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
	if (dev_info.max_vmdq_pools)
		printf("Maximum number of VMDq pools: %u\n",
			dev_info.max_vmdq_pools);

	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
	printf("Max segment number per packet: %hu\n",
		dev_info.tx_desc_lim.nb_seg_max);
	printf("Max segment number per MTU/TSO: %hu\n",
		dev_info.tx_desc_lim.nb_mtu_seg_max);

	/* Show switch info only if valid switch domain and port id is set */
	if (dev_info.switch_info.domain_id !=
		RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		if (dev_info.switch_info.name)
			printf("Switch name: %s\n", dev_info.switch_info.name);

		printf("Switch domain Id: %u\n",
			dev_info.switch_info.domain_id);
		printf("Switch Port Id: %u\n",
			dev_info.switch_info.port_id);
	}
}

void
port_summary_header_display(void)
{
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
			"Driver", "Status", "Link");
}

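/* Display a one-line port summary matching the columns printed by
 * port_summary_header_display().
 */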
void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %s\n",
		port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name,
		dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
		rte_eth_link_speed_to_str(link.link_speed));
}

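/* Hexdump the device EEPROM of a port. */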
void
port_eeprom_display(portid_t port_id)
{
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
	if (len_eeprom < 0) {
		switch (len_eeprom) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get EEPROM: %d\n", len_eeprom);
			break;
		}
		return;
	}

	char buf[len_eeprom];
	einfo.offset = 0;
	einfo.length = len_eeprom;
	einfo.data = buf;

	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get EEPROM: %d\n", ret);
			break;
		}
		return;
	}
	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
}

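/* Hexdump the EEPROM of the module plugged into a port. */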
void
port_module_eeprom_display(portid_t port_id)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_dev_get_module_info(port_id, &minfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get module EEPROM: %d\n", ret);
			break;
		}
		return;
	}

	char buf[minfo.eeprom_len];
	einfo.offset = 0;
	einfo.length = minfo.eeprom_len;
	einfo.data = buf;

	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		case -EIO:
			printf("device is removed\n");
			break;
		default:
			printf("Unable to get module EEPROM: %d\n", ret);
			break;
		}
		return;
	}

	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
}

int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

void print_valid_ports(void)
{
	portid_t pid;

	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
		printf(" %d", pid);
	}
	printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

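/* Check that a register offset is 4-byte aligned and falls within the
 * first PCI memory resource of the port's device.
 */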
static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	const struct rte_pci_device *pci_dev;
	const struct rte_bus *bus;
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}

	if (!ports[port_id].dev_info.device) {
		printf("Invalid device\n");
		return 1;
	}

	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
	if (bus && !strcmp(bus->name, "pci")) {
		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
	} else {
		printf("Not a PCI device\n");
		return 1;
	}

	pci_len = pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

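/* Display the value of the register bit field [bit1_pos, bit2_pos];
 * the two positions may be given in either order.
 */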
void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

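/* Set bits [l_bit, h_bit] of a register to "value", preserving the other
 * bits. For example, writing value 0x5 to bits [4, 7] clears the 0xF0
 * mask with reg_v &= ~(0xF << 4) and then ORs in (0x5 << 4).
 */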
void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
				(unsigned)value, (unsigned)value,
				(unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

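/* Set the MTU of a port; when the device supports jumbo frames, also
 * toggle DEV_RX_OFFLOAD_JUMBO_FRAME and adjust max_rx_pkt_len to match.
 */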
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;
	struct rte_port *rte_port = &ports[port_id];
	struct rte_eth_dev_info dev_info;
	uint16_t eth_overhead;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
		printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
			mtu, dev_info.min_mtu, dev_info.max_mtu);
		return;
	}
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag)
		printf("Set MTU failed. diag=%d\n", diag);
	else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		/*
		 * Ether overhead in driver is equal to the difference of
		 * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
		 * device supports jumbo frame.
		 */
		eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
		if (mtu > RTE_ETHER_MTU) {
			rte_port->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
			rte_port->dev_conf.rxmode.max_rx_pkt_len =
						mtu + eth_overhead;
		} else
			rte_port->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;
	}
}

/* Generic flow management functions. */

static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (flow_tunnel->id == port_tunnel_id)
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
	const char *type;
	switch (tunnel->type) {
	default:
		type = "unknown";
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		type = "vxlan";
		break;
	}

	return type;
}

struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

void port_flow_tunnel_list(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		printf("port %u tunnel #%u type=%s",
			port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
		if (flt->tunnel.tun_id)
			printf(" id=%" PRIu64, flt->tunnel.tun_id);
		printf("\n");
	}
}

void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->id == tunnel_id)
			break;
	}
	if (flt) {
		LIST_REMOVE(flt, chain);
		free(flt);
		printf("port %u: flow tunnel #%u destroyed\n",
			port_id, tunnel_id);
	}
}

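/* Create a flow tunnel descriptor of the given type on a port, reusing
 * an existing descriptor of the same type when one is present.
 */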
void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
	struct rte_port *port = &ports[port_id];
	enum rte_flow_item_type	type;
	struct port_flow_tunnel *flt;

	if (!strcmp(ops->type, "vxlan"))
		type = RTE_FLOW_ITEM_TYPE_VXLAN;
	else {
		printf("cannot offload \"%s\" tunnel type\n", ops->type);
		return;
	}
	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->tunnel.type == type)
			break;
	}
	if (!flt) {
		flt = calloc(1, sizeof(*flt));
		if (!flt) {
			printf("failed to allocate port flt object\n");
			return;
		}
		flt->tunnel.type = type;
		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
				  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
	}
	printf("port %d: flow tunnel #%u type %s\n",
		port_id, flt->id, ops->type);
}

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__,
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)",
	       rte_strerror(err));
	return -err;
}

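/* Display an RSS action configuration: queues, hash function and types. */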
static void
rss_config_display(struct rte_flow_action_rss *rss_conf)
{
	uint8_t i;

	if (rss_conf == NULL) {
		printf("Invalid rule\n");
		return;
	}

	printf("RSS:\n"
	       " queues:");
	if (rss_conf->queue_num == 0)
		printf(" none");
	for (i = 0; i < rss_conf->queue_num; i++)
		printf(" %d", rss_conf->queue[i]);
	printf("\n");

	printf(" function: ");
	switch (rss_conf->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
		printf("default\n");
		break;
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		printf("toeplitz\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
		printf("simple_xor\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
		printf("symmetric_toeplitz\n");
		break;
	default:
		printf("Unknown function\n");
		return;
	}

	printf(" types:\n");
	if (rss_conf->types == 0) {
		printf("  none\n");
		return;
	}
	for (i = 0; rss_type_table[i].str; i++) {
		if ((rss_conf->types &
		    rss_type_table[i].rss_type) ==
		    rss_type_table[i].rss_type &&
		    rss_type_table[i].rss_type != 0)
			printf("  %s\n", rss_type_table[i].str);
	}
}

static struct port_shared_action *
action_get_by_id(portid_t port_id, uint32_t id)
{
	struct rte_port *port;
	struct port_shared_action **ppsa;
	struct port_shared_action *psa = NULL;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return NULL;
	port = &ports[port_id];
	ppsa = &port->actions_list;
	while (*ppsa) {
		if ((*ppsa)->id == id) {
			psa = *ppsa;
			break;
		}
		ppsa = &(*ppsa)->next;
	}
	if (!psa)
		printf("Failed to find shared action #%u on port %u\n",
		       id, port_id);
	return psa;
}

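/* Allocate a shared action entry on a port; id UINT32_MAX requests the
 * next free ID. The per-port list is kept sorted by decreasing ID.
 */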
static int
action_alloc(portid_t port_id, uint32_t id,
	     struct port_shared_action **action)
{
	struct rte_port *port;
	struct port_shared_action **ppsa;
	struct port_shared_action *psa = NULL;

	*action = NULL;
	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	if (id == UINT32_MAX) {
		/* taking first available ID */
		if (port->actions_list) {
			if (port->actions_list->id == UINT32_MAX - 1) {
				printf("Highest shared action ID is already"
				" assigned, delete it first\n");
				return -ENOMEM;
			}
			id = port->actions_list->id + 1;
		} else {
			id = 0;
		}
	}
	psa = calloc(1, sizeof(*psa));
	if (!psa) {
		printf("Allocation of port %u shared action failed\n",
		       port_id);
		return -ENOMEM;
	}
	ppsa = &port->actions_list;
	while (*ppsa && (*ppsa)->id > id)
		ppsa = &(*ppsa)->next;
	if (*ppsa && (*ppsa)->id == id) {
		printf("Shared action #%u is already assigned,"
			" delete it first\n", id);
		free(psa);
		return -EINVAL;
	}
	psa->next = *ppsa;
	psa->id = id;
	*ppsa = psa;
	*action = psa;
	return 0;
}

/** Create shared action */
int
port_shared_action_create(portid_t port_id, uint32_t id,
			  const struct rte_flow_shared_action_conf *conf,
			  const struct rte_flow_action *action)
{
	struct port_shared_action *psa;
	int ret;
	struct rte_flow_error error;

	ret = action_alloc(port_id, id, &psa);
	if (ret)
		return ret;
	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
		struct rte_flow_action_age *age =
			(struct rte_flow_action_age *)(uintptr_t)(action->conf);

		psa->age_type = ACTION_AGE_CONTEXT_TYPE_SHARED_ACTION;
		age->context = &psa->age_type;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	psa->action = rte_flow_shared_action_create(port_id, conf, action,
						    &error);
	if (!psa->action) {
		uint32_t destroy_id = psa->id;
		port_shared_action_destroy(port_id, 1, &destroy_id);
		return port_flow_complain(&error);
	}
	psa->type = action->type;
	printf("Shared action #%u created\n", psa->id);
	return 0;
}

/** Destroy shared action */
int
port_shared_action_destroy(portid_t port_id,
			   uint32_t n,
			   const uint32_t *actions)
{
	struct rte_port *port;
	struct port_shared_action **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->actions_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_shared_action *psa = *tmp;

			if (actions[i] != psa->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));

			if (psa->action && rte_flow_shared_action_destroy(
					port_id, psa->action, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			*tmp = psa->next;
			printf("Shared action #%u destroyed\n", psa->id);
			free(psa);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Get shared action by port + id */
struct rte_flow_shared_action *
port_shared_action_get_by_id(portid_t port_id, uint32_t id)
{
	struct port_shared_action *psa = action_get_by_id(port_id, id);

	return (psa) ? psa->action : NULL;
}

/** Update shared action */
int
port_shared_action_update(portid_t port_id, uint32_t id,
			  const struct rte_flow_action *action)
{
	struct rte_flow_error error;
	struct rte_flow_shared_action *shared_action;

	shared_action = port_shared_action_get_by_id(port_id, id);
	if (!shared_action)
		return -EINVAL;
	if (rte_flow_shared_action_update(port_id, shared_action, action,
					  &error)) {
		return port_flow_complain(&error);
	}
	printf("Shared action #%u updated\n", id);
	return 0;
}

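/* Query a shared action and display the result (RSS reference count or
 * AGE status); other action types do not support query.
 */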
int
port_shared_action_query(portid_t port_id, uint32_t id)
{
	struct rte_flow_error error;
	struct port_shared_action *psa;
	uint64_t default_data;
	void *data = NULL;
	int ret = 0;

	psa = action_get_by_id(port_id, id);
	if (!psa)
		return -EINVAL;
	switch (psa->type) {
	case RTE_FLOW_ACTION_TYPE_RSS:
	case RTE_FLOW_ACTION_TYPE_AGE:
		data = &default_data;
		break;
	default:
		printf("Shared action %u (type: %d) on port %u doesn't support"
		       " query\n", id, psa->type, port_id);
		return -1;
	}
	if (rte_flow_shared_action_query(port_id, psa->action, data, &error))
		ret = port_flow_complain(&error);
	switch (psa->type) {
	case RTE_FLOW_ACTION_TYPE_RSS:
		if (!ret)
			printf("Shared RSS action:\n\trefs:%u\n",
			       *((uint32_t *)data));
		data = NULL;
		break;
	case RTE_FLOW_ACTION_TYPE_AGE:
		if (!ret) {
			struct rte_flow_query_age *resp = data;

			printf("AGE:\n"
			       " aged: %u\n"
			       " sec_since_last_hit_valid: %u\n"
			       " sec_since_last_hit: %" PRIu32 "\n",
			       resp->aged,
			       resp->sec_since_last_hit_valid,
			       resp->sec_since_last_hit);
		}
		data = NULL;
		break;
	default:
		printf("Shared action %u (type: %d) on port %u doesn't support"
		       " query\n", id, psa->type, port_id);
		ret = -1;
	}
	return ret;
}

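/* Build the tunnel offload rule: prepend the PMD-provided items/actions
 * returned by rte_flow_tunnel_match()/rte_flow_tunnel_decap_set() to the
 * user-supplied pattern/actions.
 */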
static struct port_flow_tunnel *
port_flow_tunnel_offload_cmd_prep(portid_t port_id,
				  const struct rte_flow_item *pattern,
				  const struct rte_flow_action *actions,
				  const struct tunnel_ops *tunnel_ops)
{
	int ret;
	struct rte_port *port;
	struct port_flow_tunnel *pft;
	struct rte_flow_error error;

	port = &ports[port_id];
	pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
	if (!pft) {
		printf("failed to locate port flow tunnel #%u\n",
			tunnel_ops->id);
		return NULL;
	}
	if (tunnel_ops->actions) {
		uint32_t num_actions;
		const struct rte_flow_action *aptr;

		ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
						&pft->pmd_actions,
						&pft->num_pmd_actions,
						&error);
		if (ret) {
			port_flow_complain(&error);
			return NULL;
		}
		for (aptr = actions, num_actions = 1;
		     aptr->type != RTE_FLOW_ACTION_TYPE_END;
		     aptr++, num_actions++);
		pft->actions = malloc(
				(num_actions + pft->num_pmd_actions) *
				sizeof(actions[0]));
		if (!pft->actions) {
			rte_flow_tunnel_action_decap_release(
					port_id, pft->pmd_actions,
					pft->num_pmd_actions, &error);
			return NULL;
		}
		rte_memcpy(pft->actions, pft->pmd_actions,
			   pft->num_pmd_actions * sizeof(actions[0]));
		rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
			   num_actions * sizeof(actions[0]));
	}
	if (tunnel_ops->items) {
		uint32_t num_items;
		const struct rte_flow_item *iptr;

		ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
					    &pft->pmd_items,
					    &pft->num_pmd_items,
					    &error);
		if (ret) {
			port_flow_complain(&error);
			return NULL;
		}
		for (iptr = pattern, num_items = 1;
		     iptr->type != RTE_FLOW_ITEM_TYPE_END;
		     iptr++, num_items++);
		pft->items = malloc((num_items + pft->num_pmd_items) *
				    sizeof(pattern[0]));
		if (!pft->items) {
			rte_flow_tunnel_item_release(
					port_id, pft->pmd_items,
					pft->num_pmd_items, &error);
			return NULL;
		}
		rte_memcpy(pft->items, pft->pmd_items,
			   pft->num_pmd_items * sizeof(pattern[0]));
		rte_memcpy(pft->items + pft->num_pmd_items, pattern,
			   num_items * sizeof(pattern[0]));
	}

	return pft;
}

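/* Release the merged arrays and the PMD items/actions obtained from
 * port_flow_tunnel_offload_cmd_prep().
 */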
1711 static void
1712 port_flow_tunnel_offload_cmd_release(portid_t port_id,
1713 				     const struct tunnel_ops *tunnel_ops,
1714 				     struct port_flow_tunnel *pft)
1715 {
1716 	struct rte_flow_error error;
1717 
1718 	if (tunnel_ops->actions) {
1719 		free(pft->actions);
1720 		rte_flow_tunnel_action_decap_release(
1721 			port_id, pft->pmd_actions,
1722 			pft->num_pmd_actions, &error);
1723 		pft->actions = NULL;
1724 		pft->pmd_actions = NULL;
1725 	}
1726 	if (tunnel_ops->items) {
1727 		free(pft->items);
1728 		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
1729 					     pft->num_pmd_items,
1730 					     &error);
1731 		pft->items = NULL;
1732 		pft->pmd_items = NULL;
1733 	}
1734 }
1735 
1736 /** Validate flow rule. */
1737 int
1738 port_flow_validate(portid_t port_id,
1739 		   const struct rte_flow_attr *attr,
1740 		   const struct rte_flow_item *pattern,
1741 		   const struct rte_flow_action *actions,
1742 		   const struct tunnel_ops *tunnel_ops)
1743 {
1744 	struct rte_flow_error error;
1745 	struct port_flow_tunnel *pft = NULL;
1746 
1747 	/* Poisoning to make sure PMDs update it in case of error. */
1748 	memset(&error, 0x11, sizeof(error));
1749 	if (tunnel_ops->enabled) {
1750 		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
1751 							actions, tunnel_ops);
1752 		if (!pft)
1753 			return -ENOENT;
1754 		if (pft->items)
1755 			pattern = pft->items;
1756 		if (pft->actions)
1757 			actions = pft->actions;
1758 	}
1759 	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1760 		return port_flow_complain(&error);
1761 	if (tunnel_ops->enabled)
1762 		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
1763 	printf("Flow rule validated\n");
1764 	return 0;
1765 }
1766 
1767 /** Return age action structure if exists, otherwise NULL. */
1768 static struct rte_flow_action_age *
1769 age_action_get(const struct rte_flow_action *actions)
1770 {
1771 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1772 		switch (actions->type) {
1773 		case RTE_FLOW_ACTION_TYPE_AGE:
1774 			return (struct rte_flow_action_age *)
1775 				(uintptr_t)actions->conf;
1776 		default:
1777 			break;
1778 		}
1779 	}
1780 	return NULL;
1781 }
1782 
1783 /** Create flow rule. */
1784 int
1785 port_flow_create(portid_t port_id,
1786 		 const struct rte_flow_attr *attr,
1787 		 const struct rte_flow_item *pattern,
1788 		 const struct rte_flow_action *actions,
1789 		 const struct tunnel_ops *tunnel_ops)
1790 {
1791 	struct rte_flow *flow;
1792 	struct rte_port *port;
1793 	struct port_flow *pf;
1794 	uint32_t id = 0;
1795 	struct rte_flow_error error;
1796 	struct port_flow_tunnel *pft = NULL;
1797 	struct rte_flow_action_age *age = age_action_get(actions);
1798 
1799 	port = &ports[port_id];
1800 	if (port->flow_list) {
1801 		if (port->flow_list->id == UINT32_MAX) {
1802 			printf("Highest rule ID is already assigned, delete"
1803 			       " it first");
1804 			return -ENOMEM;
1805 		}
1806 		id = port->flow_list->id + 1;
1807 	}
1808 	if (tunnel_ops->enabled) {
1809 		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
1810 							actions, tunnel_ops);
1811 		if (!pft)
1812 			return -ENOENT;
1813 		if (pft->items)
1814 			pattern = pft->items;
1815 		if (pft->actions)
1816 			actions = pft->actions;
1817 	}
1818 	pf = port_flow_new(attr, pattern, actions, &error);
1819 	if (!pf)
1820 		return port_flow_complain(&error);
1821 	if (age) {
1822 		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
1823 		age->context = &pf->age_type;
1824 	}
1825 	/* Poisoning to make sure PMDs update it in case of error. */
1826 	memset(&error, 0x22, sizeof(error));
1827 	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1828 	if (!flow) {
1829 		free(pf);
1830 		return port_flow_complain(&error);
1831 	}
1832 	pf->next = port->flow_list;
1833 	pf->id = id;
1834 	pf->flow = flow;
1835 	port->flow_list = pf;
1836 	if (tunnel_ops->enabled)
1837 		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
1838 	printf("Flow rule #%u created\n", pf->id);
1839 	return 0;
1840 }
1841 
1842 /** Destroy a number of flow rules. */
1843 int
1844 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
1845 {
1846 	struct rte_port *port;
1847 	struct port_flow **tmp;
1848 	uint32_t c = 0;
1849 	int ret = 0;
1850 
1851 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1852 	    port_id == (portid_t)RTE_PORT_ALL)
1853 		return -EINVAL;
1854 	port = &ports[port_id];
1855 	tmp = &port->flow_list;
1856 	while (*tmp) {
1857 		uint32_t i;
1858 
1859 		for (i = 0; i != n; ++i) {
1860 			struct rte_flow_error error;
1861 			struct port_flow *pf = *tmp;
1862 
1863 			if (rule[i] != pf->id)
1864 				continue;
1865 			/*
1866 			 * Poisoning to make sure PMDs update it in case
1867 			 * of error.
1868 			 */
1869 			memset(&error, 0x33, sizeof(error));
1870 			if (rte_flow_destroy(port_id, pf->flow, &error)) {
1871 				ret = port_flow_complain(&error);
1872 				continue;
1873 			}
1874 			printf("Flow rule #%u destroyed\n", pf->id);
1875 			*tmp = pf->next;
1876 			free(pf);
1877 			break;
1878 		}
1879 		if (i == n)
1880 			tmp = &(*tmp)->next;
1881 		++c;
1882 	}
1883 	return ret;
1884 }
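
/*
 * Sketch (illustrative) of the pointer-to-pointer unlink idiom used by
 * port_flow_destroy() above: iterating with a struct port_flow ** means
 * removing the list head needs no special case.
 */
static __rte_unused void
example_unlink_flow(struct rte_port *port, uint32_t id)
{
	struct port_flow **tmp;

	for (tmp = &port->flow_list; *tmp; tmp = &(*tmp)->next) {
		if ((*tmp)->id == id) {
			struct port_flow *pf = *tmp;

			*tmp = pf->next; /* works for head and middle alike */
			free(pf);
			return;
		}
	}
}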
1885 
1886 /** Remove all flow rules. */
1887 int
1888 port_flow_flush(portid_t port_id)
1889 {
1890 	struct rte_flow_error error;
1891 	struct rte_port *port;
1892 	int ret = 0;
1893 
1894 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1895 		port_id == (portid_t)RTE_PORT_ALL)
1896 		return -EINVAL;
1897 
1898 	port = &ports[port_id];
1899 
1900 	if (port->flow_list == NULL)
1901 		return ret;
1902 
1903 	/* Poisoning to make sure PMDs update it in case of error. */
1904 	memset(&error, 0x44, sizeof(error));
1905 	if (rte_flow_flush(port_id, &error)) {
1906 		port_flow_complain(&error);
1907 	}
1908 
1909 	while (port->flow_list) {
1910 		struct port_flow *pf = port->flow_list->next;
1911 
1912 		free(port->flow_list);
1913 		port->flow_list = pf;
1914 	}
1915 	return ret;
1916 }
1917 
1918 /** Dump all flow rules. */
1919 int
1920 port_flow_dump(portid_t port_id, const char *file_name)
1921 {
1922 	int ret = 0;
1923 	FILE *file = stdout;
1924 	struct rte_flow_error error;
1925 
1926 	if (file_name && strlen(file_name)) {
1927 		file = fopen(file_name, "w");
1928 		if (!file) {
1929 			printf("Failed to create file %s: %s\n", file_name,
1930 			       strerror(errno));
1931 			return -errno;
1932 		}
1933 	}
1934 	ret = rte_flow_dev_dump(port_id, file, &error);
1935 	if (ret) {
1936 		port_flow_complain(&error);
1937 		printf("Failed to dump flow: %s\n", strerror(-ret));
1938 	} else
1939 		printf("Flow dump finished\n");
1940 	if (file_name && strlen(file_name))
1941 		fclose(file);
1942 	return ret;
1943 }
1944 
1945 /** Query a flow rule. */
1946 int
1947 port_flow_query(portid_t port_id, uint32_t rule,
1948 		const struct rte_flow_action *action)
1949 {
1950 	struct rte_flow_error error;
1951 	struct rte_port *port;
1952 	struct port_flow *pf;
1953 	const char *name;
1954 	union {
1955 		struct rte_flow_query_count count;
1956 		struct rte_flow_action_rss rss_conf;
1957 		struct rte_flow_query_age age;
1958 	} query;
1959 	int ret;
1960 
1961 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1962 	    port_id == (portid_t)RTE_PORT_ALL)
1963 		return -EINVAL;
1964 	port = &ports[port_id];
1965 	for (pf = port->flow_list; pf; pf = pf->next)
1966 		if (pf->id == rule)
1967 			break;
1968 	if (!pf) {
1969 		printf("Flow rule #%u not found\n", rule);
1970 		return -ENOENT;
1971 	}
1972 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
1973 			    &name, sizeof(name),
1974 			    (void *)(uintptr_t)action->type, &error);
1975 	if (ret < 0)
1976 		return port_flow_complain(&error);
1977 	switch (action->type) {
1978 	case RTE_FLOW_ACTION_TYPE_COUNT:
1979 	case RTE_FLOW_ACTION_TYPE_RSS:
1980 	case RTE_FLOW_ACTION_TYPE_AGE:
1981 		break;
1982 	default:
1983 		printf("Cannot query action type %d (%s)\n",
1984 			action->type, name);
1985 		return -ENOTSUP;
1986 	}
1987 	/* Poisoning to make sure PMDs update it in case of error. */
1988 	memset(&error, 0x55, sizeof(error));
1989 	memset(&query, 0, sizeof(query));
1990 	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
1991 		return port_flow_complain(&error);
1992 	switch (action->type) {
1993 	case RTE_FLOW_ACTION_TYPE_COUNT:
1994 		printf("%s:\n"
1995 		       " hits_set: %u\n"
1996 		       " bytes_set: %u\n"
1997 		       " hits: %" PRIu64 "\n"
1998 		       " bytes: %" PRIu64 "\n",
1999 		       name,
2000 		       query.count.hits_set,
2001 		       query.count.bytes_set,
2002 		       query.count.hits,
2003 		       query.count.bytes);
2004 		break;
2005 	case RTE_FLOW_ACTION_TYPE_RSS:
2006 		rss_config_display(&query.rss_conf);
2007 		break;
2008 	case RTE_FLOW_ACTION_TYPE_AGE:
2009 		printf("%s:\n"
2010 		       " aged: %u\n"
2011 		       " sec_since_last_hit_valid: %u\n"
2012 		       " sec_since_last_hit: %" PRIu32 "\n",
2013 		       name,
2014 		       query.age.aged,
2015 		       query.age.sec_since_last_hit_valid,
2016 		       query.age.sec_since_last_hit);
2017 		break;
2018 	default:
2019 		printf("Cannot display result for action type %d (%s)\n",
2020 		       action->type, name);
2021 		break;
2022 	}
2023 	return 0;
2024 }
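
/*
 * Example (illustrative): querying the hit counters of an existing rule.
 * Rule #0 is assumed here to have been created with a COUNT action.
 */
static __rte_unused void
example_query_count(portid_t port_id)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};

	/* On success this prints hits/bytes via the COUNT branch above. */
	port_flow_query(port_id, 0 /* rule id */, &count_action);
}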
2025 
2026 /** List and optionally destroy all aged flows. */
2027 void
2028 port_flow_aged(portid_t port_id, uint8_t destroy)
2029 {
2030 	void **contexts;
2031 	int nb_context, total = 0, idx;
2032 	struct rte_flow_error error;
2033 	enum age_action_context_type *type;
2034 	union {
2035 		struct port_flow *pf;
2036 		struct port_shared_action *psa;
2037 	} ctx;
2038 
2039 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2040 	    port_id == (portid_t)RTE_PORT_ALL)
2041 		return;
2042 	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
2043 	if (total < 0) {
2044 		port_flow_complain(&error);
2045 		return;
2046 	}
2047 	printf("Port %u total aged flows: %d\n", port_id, total);
2048 	if (total == 0)
2049 		return;
2050 	contexts = malloc(sizeof(void *) * total);
2051 	if (contexts == NULL) {
2052 		printf("Cannot allocate contexts for aged flow\n");
2053 		return;
2054 	}
2055 	printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
2056 	nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
2057 	if (nb_context != total) {
2058 		printf("Port %d: aged flow count (%d) != total (%d)\n",
2059 			port_id, nb_context, total);
2060 		free(contexts);
2061 		return;
2062 	}
2063 	total = 0;
2064 	for (idx = 0; idx < nb_context; idx++) {
2065 		if (!contexts[idx]) {
2066 			printf("Error: got NULL context on port %u\n", port_id);
2067 			continue;
2068 		}
2069 		type = (enum age_action_context_type *)contexts[idx];
2070 		switch (*type) {
2071 		case ACTION_AGE_CONTEXT_TYPE_FLOW:
2072 			ctx.pf = container_of(type, struct port_flow, age_type);
2073 			printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32
2074 								 "\t%c%c%c\t\n",
2075 			       "Flow",
2076 			       ctx.pf->id,
2077 			       ctx.pf->rule.attr->group,
2078 			       ctx.pf->rule.attr->priority,
2079 			       ctx.pf->rule.attr->ingress ? 'i' : '-',
2080 			       ctx.pf->rule.attr->egress ? 'e' : '-',
2081 			       ctx.pf->rule.attr->transfer ? 't' : '-');
2082 			if (destroy && !port_flow_destroy(port_id, 1,
2083 							  &ctx.pf->id))
2084 				total++;
2085 			break;
2086 		case ACTION_AGE_CONTEXT_TYPE_SHARED_ACTION:
2087 			ctx.psa = container_of(type, struct port_shared_action,
2088 					       age_type);
2089 			printf("%-20s\t%" PRIu32 "\n", "Shared action",
2090 			       ctx.psa->id);
2091 			break;
2092 		default:
2093 			printf("Error: invalid context type on port %u\n", port_id);
2094 			break;
2095 		}
2096 	}
2097 	printf("\n%d flows destroyed\n", total);
2098 	free(contexts);
2099 }
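
/*
 * Sketch (illustrative) of the aging-context convention used above: the
 * context registered with an AGE action points at the age_type member of
 * the enclosing object, so container_of() recovers the full structure
 * whether it is a flow or a shared action.
 */
static __rte_unused struct port_flow *
example_flow_from_age_context(void *age_context)
{
	enum age_action_context_type *type = age_context;

	if (*type != ACTION_AGE_CONTEXT_TYPE_FLOW)
		return NULL;
	return container_of(type, struct port_flow, age_type);
}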
2100 
2101 /** List flow rules. */
2102 void
2103 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
2104 {
2105 	struct rte_port *port;
2106 	struct port_flow *pf;
2107 	struct port_flow *list = NULL;
2108 	uint32_t i;
2109 
2110 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2111 	    port_id == (portid_t)RTE_PORT_ALL)
2112 		return;
2113 	port = &ports[port_id];
2114 	if (!port->flow_list)
2115 		return;
2116 	/* Sort flows by group, priority and ID. */
2117 	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2118 		struct port_flow **tmp;
2119 		const struct rte_flow_attr *curr = pf->rule.attr;
2120 
2121 		if (n) {
2122 			/* Filter out unwanted groups. */
2123 			for (i = 0; i != n; ++i)
2124 				if (curr->group == group[i])
2125 					break;
2126 			if (i == n)
2127 				continue;
2128 		}
2129 		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
2130 			const struct rte_flow_attr *comp = (*tmp)->rule.attr;
2131 
2132 			if (curr->group > comp->group ||
2133 			    (curr->group == comp->group &&
2134 			     curr->priority > comp->priority) ||
2135 			    (curr->group == comp->group &&
2136 			     curr->priority == comp->priority &&
2137 			     pf->id > (*tmp)->id))
2138 				continue;
2139 			break;
2140 		}
2141 		pf->tmp = *tmp;
2142 		*tmp = pf;
2143 	}
2144 	printf("ID\tGroup\tPrio\tAttr\tRule\n");
2145 	for (pf = list; pf != NULL; pf = pf->tmp) {
2146 		const struct rte_flow_item *item = pf->rule.pattern;
2147 		const struct rte_flow_action *action = pf->rule.actions;
2148 		const char *name;
2149 
2150 		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
2151 		       pf->id,
2152 		       pf->rule.attr->group,
2153 		       pf->rule.attr->priority,
2154 		       pf->rule.attr->ingress ? 'i' : '-',
2155 		       pf->rule.attr->egress ? 'e' : '-',
2156 		       pf->rule.attr->transfer ? 't' : '-');
2157 		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
2158 			if ((uint32_t)item->type > INT_MAX)
2159 				name = "PMD_INTERNAL";
2160 			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
2161 					  &name, sizeof(name),
2162 					  (void *)(uintptr_t)item->type,
2163 					  NULL) <= 0)
2164 				name = "[UNKNOWN]";
2165 			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
2166 				printf("%s ", name);
2167 			++item;
2168 		}
2169 		printf("=>");
2170 		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
2171 			if ((uint32_t)action->type > INT_MAX)
2172 				name = "PMD_INTERNAL";
2173 			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
2174 					  &name, sizeof(name),
2175 					  (void *)(uintptr_t)action->type,
2176 					  NULL) <= 0)
2177 				name = "[UNKNOWN]";
2178 			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
2179 				printf(" %s", name);
2180 			++action;
2181 		}
2182 		printf("\n");
2183 	}
2184 }
2185 
2186 /** Restrict ingress traffic to the defined flow rules. */
2187 int
2188 port_flow_isolate(portid_t port_id, int set)
2189 {
2190 	struct rte_flow_error error;
2191 
2192 	/* Poisoning to make sure PMDs update it in case of error. */
2193 	memset(&error, 0x66, sizeof(error));
2194 	if (rte_flow_isolate(port_id, set, &error))
2195 		return port_flow_complain(&error);
2196 	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
2197 	       port_id,
2198 	       set ? "now restricted" : "not restricted anymore");
2199 	return 0;
2200 }
2201 
2202 /*
2203  * RX/TX ring descriptors display functions.
2204  */
2205 int
2206 rx_queue_id_is_invalid(queueid_t rxq_id)
2207 {
2208 	if (rxq_id < nb_rxq)
2209 		return 0;
2210 	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
2211 	return 1;
2212 }
2213 
2214 int
2215 tx_queue_id_is_invalid(queueid_t txq_id)
2216 {
2217 	if (txq_id < nb_txq)
2218 		return 0;
2219 	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
2220 	return 1;
2221 }
2222 
2223 static int
2224 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
2225 {
2226 	struct rte_port *port = &ports[port_id];
2227 	struct rte_eth_rxq_info rx_qinfo;
2228 	int ret;
2229 
2230 	ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
2231 	if (ret == 0) {
2232 		*ring_size = rx_qinfo.nb_desc;
2233 		return ret;
2234 	}
2235 
2236 	if (ret != -ENOTSUP)
2237 		return ret;
2238 	/*
2239 	 * If rte_eth_rx_queue_info_get() is not supported by this PMD, the
2240 	 * ring size stored in testpmd is used for validity verification.
2241 	 * When the rxq was configured by rte_eth_rx_queue_setup() with
2242 	 * nb_rx_desc set to 0, the PMD used its own default value to set up
2243 	 * this rxq; if that default value is 0, it used
2244 	 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE instead.
2245 	 */
2246 	if (port->nb_rx_desc[rxq_id])
2247 		*ring_size = port->nb_rx_desc[rxq_id];
2248 	else if (port->dev_info.default_rxportconf.ring_size)
2249 		*ring_size = port->dev_info.default_rxportconf.ring_size;
2250 	else
2251 		*ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2252 	return 0;
2253 }
2254 
2255 static int
2256 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
2257 {
2258 	struct rte_port *port = &ports[port_id];
2259 	struct rte_eth_txq_info tx_qinfo;
2260 	int ret;
2261 
2262 	ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
2263 	if (ret == 0) {
2264 		*ring_size = tx_qinfo.nb_desc;
2265 		return ret;
2266 	}
2267 
2268 	if (ret != -ENOTSUP)
2269 		return ret;
2270 	/*
2271 	 * If rte_eth_tx_queue_info_get() is not supported by this PMD, the
2272 	 * ring size stored in testpmd is used for validity verification.
2273 	 * When the txq was configured by rte_eth_tx_queue_setup() with
2274 	 * nb_tx_desc set to 0, the PMD used its own default value to set up
2275 	 * this txq; if that default value is 0, it used
2276 	 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE instead.
2277 	 */
2278 	if (port->nb_tx_desc[txq_id])
2279 		*ring_size = port->nb_tx_desc[txq_id];
2280 	else if (port->dev_info.default_txportconf.ring_size)
2281 		*ring_size = port->dev_info.default_txportconf.ring_size;
2282 	else
2283 		*ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2284 	return 0;
2285 }
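
/*
 * Ring-size resolution order shared by the two helpers above (TX case):
 *   1) tx_qinfo.nb_desc                  - PMD implements queue_info_get
 *   2) port->nb_tx_desc[txq_id]          - value given at queue setup
 *   3) default_txportconf.ring_size      - PMD-preferred default
 *   4) RTE_ETH_DEV_FALLBACK_TX_RINGSIZE  - last-resort constant
 * Worked example: a PMD without queue_info_get, set up with nb_tx_desc = 0
 * and advertising a preferred ring size of 256, resolves to 256 (step 3).
 */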
2286 
2287 static int
2288 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
2289 {
2290 	uint16_t ring_size;
2291 	int ret;
2292 
2293 	ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
2294 	if (ret)
2295 		return 1;
2296 
2297 	if (rxdesc_id < ring_size)
2298 		return 0;
2299 
2300 	printf("Invalid RX descriptor %u (must be < ring_size=%u)\n",
2301 	       rxdesc_id, ring_size);
2302 	return 1;
2303 }
2304 
2305 static int
2306 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
2307 {
2308 	uint16_t ring_size;
2309 	int ret;
2310 
2311 	ret = get_tx_ring_size(port_id, txq_id, &ring_size);
2312 	if (ret)
2313 		return 1;
2314 
2315 	if (txdesc_id < ring_size)
2316 		return 0;
2317 
2318 	printf("Invalid TX descriptor %u (must be < ring_size=%u)\n",
2319 	       txdesc_id, ring_size);
2320 	return 1;
2321 }
2322 
2323 static const struct rte_memzone *
2324 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
2325 {
2326 	char mz_name[RTE_MEMZONE_NAMESIZE];
2327 	const struct rte_memzone *mz;
2328 
2329 	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
2330 			port_id, q_id, ring_name);
2331 	mz = rte_memzone_lookup(mz_name);
2332 	if (mz == NULL)
2333 		printf("%s ring memory zone of (port %d, queue %d) not"
2334 		       " found (zone name = %s)\n",
2335 		       ring_name, port_id, q_id, mz_name);
2336 	return mz;
2337 }
2338 
2339 union igb_ring_dword {
2340 	uint64_t dword;
2341 	struct {
2342 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2343 		uint32_t lo;
2344 		uint32_t hi;
2345 #else
2346 		uint32_t hi;
2347 		uint32_t lo;
2348 #endif
2349 	} words;
2350 };
2351 
2352 struct igb_ring_desc_32_bytes {
2353 	union igb_ring_dword lo_dword;
2354 	union igb_ring_dword hi_dword;
2355 	union igb_ring_dword resv1;
2356 	union igb_ring_dword resv2;
2357 };
2358 
2359 struct igb_ring_desc_16_bytes {
2360 	union igb_ring_dword lo_dword;
2361 	union igb_ring_dword hi_dword;
2362 };
2363 
2364 static void
2365 ring_rxd_display_dword(union igb_ring_dword dword)
2366 {
2367 	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
2368 					(unsigned)dword.words.hi);
2369 }
2370 
2371 static void
2372 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
2373 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2374 			   portid_t port_id,
2375 #else
2376 			   __rte_unused portid_t port_id,
2377 #endif
2378 			   uint16_t desc_id)
2379 {
2380 	struct igb_ring_desc_16_bytes *ring =
2381 		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
2382 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2383 	int ret;
2384 	struct rte_eth_dev_info dev_info;
2385 
2386 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
2387 	if (ret != 0)
2388 		return;
2389 
2390 	if (strstr(dev_info.driver_name, "i40e") != NULL) {
2391 		/* 32-byte RX descriptor, i40e only */
2392 		struct igb_ring_desc_32_bytes *ring =
2393 			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
2394 		ring[desc_id].lo_dword.dword =
2395 			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2396 		ring_rxd_display_dword(ring[desc_id].lo_dword);
2397 		ring[desc_id].hi_dword.dword =
2398 			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2399 		ring_rxd_display_dword(ring[desc_id].hi_dword);
2400 		ring[desc_id].resv1.dword =
2401 			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
2402 		ring_rxd_display_dword(ring[desc_id].resv1);
2403 		ring[desc_id].resv2.dword =
2404 			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
2405 		ring_rxd_display_dword(ring[desc_id].resv2);
2406 
2407 		return;
2408 	}
2409 #endif
2410 	/* 16-byte RX descriptor */
2411 	ring[desc_id].lo_dword.dword =
2412 		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2413 	ring_rxd_display_dword(ring[desc_id].lo_dword);
2414 	ring[desc_id].hi_dword.dword =
2415 		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2416 	ring_rxd_display_dword(ring[desc_id].hi_dword);
2417 }
2418 
2419 static void
2420 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
2421 {
2422 	struct igb_ring_desc_16_bytes *ring;
2423 	struct igb_ring_desc_16_bytes txd;
2424 
2425 	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2426 	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2427 	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2428 	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
2429 			(unsigned)txd.lo_dword.words.lo,
2430 			(unsigned)txd.lo_dword.words.hi,
2431 			(unsigned)txd.hi_dword.words.lo,
2432 			(unsigned)txd.hi_dword.words.hi);
2433 }
2434 
2435 void
2436 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
2437 {
2438 	const struct rte_memzone *rx_mz;
2439 
2440 	if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
2441 		return;
2442 	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
2443 	if (rx_mz == NULL)
2444 		return;
2445 	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
2446 }
2447 
2448 void
2449 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
2450 {
2451 	const struct rte_memzone *tx_mz;
2452 
2453 	if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
2454 		return;
2455 	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
2456 	if (tx_mz == NULL)
2457 		return;
2458 	ring_tx_descriptor_display(tx_mz, txd_id);
2459 }
2460 
2461 void
2462 fwd_lcores_config_display(void)
2463 {
2464 	lcoreid_t lc_id;
2465 
2466 	printf("List of forwarding lcores:");
2467 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
2468 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
2469 	printf("\n");
2470 }
2471 void
2472 rxtx_config_display(void)
2473 {
2474 	portid_t pid;
2475 	queueid_t qid;
2476 
2477 	printf("  %s packet forwarding%s packets/burst=%d\n",
2478 	       cur_fwd_eng->fwd_mode_name,
2479 	       retry_enabled == 0 ? "" : " with retry",
2480 	       nb_pkt_per_burst);
2481 
2482 	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
2483 		printf("  packet len=%u - nb packet segments=%d\n",
2484 				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
2485 
2486 	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
2487 	       nb_fwd_lcores, nb_fwd_ports);
2488 
2489 	RTE_ETH_FOREACH_DEV(pid) {
2490 		struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
2491 		struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
2492 		uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
2493 		uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
2494 		struct rte_eth_rxq_info rx_qinfo;
2495 		struct rte_eth_txq_info tx_qinfo;
2496 		uint16_t rx_free_thresh_tmp;
2497 		uint16_t tx_free_thresh_tmp;
2498 		uint16_t tx_rs_thresh_tmp;
2499 		uint16_t nb_rx_desc_tmp;
2500 		uint16_t nb_tx_desc_tmp;
2501 		uint64_t offloads_tmp;
2502 		uint8_t pthresh_tmp;
2503 		uint8_t hthresh_tmp;
2504 		uint8_t wthresh_tmp;
2505 		int32_t rc;
2506 
2507 		/* per port config */
2508 		printf("  port %d: RX queue number: %d TX queue number: %d\n",
2509 				(unsigned int)pid, nb_rxq, nb_txq);
2510 
2511 		printf("    Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
2512 				ports[pid].dev_conf.rxmode.offloads,
2513 				ports[pid].dev_conf.txmode.offloads);
2514 
2515 		/* per RX queue config, only for the first queue to be less verbose */
2516 		for (qid = 0; qid < 1; qid++) {
2517 			rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
2518 			if (rc) {
2519 				nb_rx_desc_tmp = nb_rx_desc[qid];
2520 				rx_free_thresh_tmp =
2521 					rx_conf[qid].rx_free_thresh;
2522 				pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
2523 				hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
2524 				wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
2525 				offloads_tmp = rx_conf[qid].offloads;
2526 			} else {
2527 				nb_rx_desc_tmp = rx_qinfo.nb_desc;
2528 				rx_free_thresh_tmp =
2529 						rx_qinfo.conf.rx_free_thresh;
2530 				pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
2531 				hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
2532 				wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
2533 				offloads_tmp = rx_qinfo.conf.offloads;
2534 			}
2535 
2536 			printf("    RX queue: %d\n", qid);
2537 			printf("      RX desc=%d - RX free threshold=%d\n",
2538 				nb_rx_desc_tmp, rx_free_thresh_tmp);
2539 			printf("      RX threshold registers: pthresh=%d hthresh=%d"
2540 				" wthresh=%d\n",
2541 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
2542 			printf("      RX Offloads=0x%"PRIx64"\n", offloads_tmp);
2543 		}
2544 
2545 		/* per TX queue config, only for the first queue to be less verbose */
2546 		for (qid = 0; qid < 1; qid++) {
2547 			rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
2548 			if (rc) {
2549 				nb_tx_desc_tmp = nb_tx_desc[qid];
2550 				tx_free_thresh_tmp =
2551 					tx_conf[qid].tx_free_thresh;
2552 				pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
2553 				hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
2554 				wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
2555 				offloads_tmp = tx_conf[qid].offloads;
2556 				tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
2557 			} else {
2558 				nb_tx_desc_tmp = tx_qinfo.nb_desc;
2559 				tx_free_thresh_tmp =
2560 						tx_qinfo.conf.tx_free_thresh;
2561 				pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
2562 				hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
2563 				wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
2564 				offloads_tmp = tx_qinfo.conf.offloads;
2565 				tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
2566 			}
2567 
2568 			printf("    TX queue: %d\n", qid);
2569 			printf("      TX desc=%d - TX free threshold=%d\n",
2570 				nb_tx_desc_tmp, tx_free_thresh_tmp);
2571 			printf("      TX threshold registers: pthresh=%d hthresh=%d"
2572 				" wthresh=%d\n",
2573 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
2574 			printf("      TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
2575 				offloads_tmp, tx_rs_thresh_tmp);
2576 		}
2577 	}
2578 }
2579 
2580 void
2581 port_rss_reta_info(portid_t port_id,
2582 		   struct rte_eth_rss_reta_entry64 *reta_conf,
2583 		   uint16_t nb_entries)
2584 {
2585 	uint16_t i, idx, shift;
2586 	int ret;
2587 
2588 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2589 		return;
2590 
2591 	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
2592 	if (ret != 0) {
2593 		printf("Failed to get RSS RETA info, return code = %d\n", ret);
2594 		return;
2595 	}
2596 
2597 	for (i = 0; i < nb_entries; i++) {
2598 		idx = i / RTE_RETA_GROUP_SIZE;
2599 		shift = i % RTE_RETA_GROUP_SIZE;
2600 		if (!(reta_conf[idx].mask & (1ULL << shift)))
2601 			continue;
2602 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
2603 					i, reta_conf[idx].reta[shift]);
2604 	}
2605 }
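
/*
 * Example (illustrative): preparing reta_conf before calling the helper
 * above. Each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64)
 * table entries; the mask selects which of those 64 slots to query.
 */
static __rte_unused void
example_show_reta(portid_t port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	uint16_t i;

	if (reta_size > 8 * RTE_RETA_GROUP_SIZE)
		return;
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_RETA_GROUP_SIZE);
	port_rss_reta_info(port_id, reta_conf, reta_size);
}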
2606 
2607 /*
2608  * Displays the RSS hash functions of a port and, optionally, the RSS hash
2609  * key of the port.
2610  */
2611 void
2612 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
2613 {
2614 	struct rte_eth_rss_conf rss_conf = {0};
2615 	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
2616 	uint64_t rss_hf;
2617 	uint8_t i;
2618 	int diag;
2619 	struct rte_eth_dev_info dev_info;
2620 	uint8_t hash_key_size;
2621 	int ret;
2622 
2623 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2624 		return;
2625 
2626 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
2627 	if (ret != 0)
2628 		return;
2629 
2630 	if (dev_info.hash_key_size > 0 &&
2631 			dev_info.hash_key_size <= sizeof(rss_key))
2632 		hash_key_size = dev_info.hash_key_size;
2633 	else {
2634 		printf("dev_info did not provide a valid hash key size\n");
2635 		return;
2636 	}
2637 
2638 	/* Get RSS hash key if asked to display it */
2639 	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
2640 	rss_conf.rss_key_len = hash_key_size;
2641 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2642 	if (diag != 0) {
2643 		switch (diag) {
2644 		case -ENODEV:
2645 			printf("port index %d invalid\n", port_id);
2646 			break;
2647 		case -ENOTSUP:
2648 			printf("operation not supported by device\n");
2649 			break;
2650 		default:
2651 			printf("operation failed - diag=%d\n", diag);
2652 			break;
2653 		}
2654 		return;
2655 	}
2656 	rss_hf = rss_conf.rss_hf;
2657 	if (rss_hf == 0) {
2658 		printf("RSS disabled\n");
2659 		return;
2660 	}
2661 	printf("RSS functions:\n ");
2662 	for (i = 0; rss_type_table[i].str; i++) {
2663 		if (rss_hf & rss_type_table[i].rss_type)
2664 			printf("%s ", rss_type_table[i].str);
2665 	}
2666 	printf("\n");
2667 	if (!show_rss_key)
2668 		return;
2669 	printf("RSS key:\n");
2670 	for (i = 0; i < hash_key_size; i++)
2671 		printf("%02X", rss_key[i]);
2672 	printf("\n");
2673 }
2674 
2675 void
2676 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
2677 			 uint8_t hash_key_len)
2678 {
2679 	struct rte_eth_rss_conf rss_conf;
2680 	int diag;
2681 	unsigned int i;
2682 
2683 	rss_conf.rss_key = NULL;
2684 	rss_conf.rss_key_len = hash_key_len;
2685 	rss_conf.rss_hf = 0;
2686 	for (i = 0; rss_type_table[i].str; i++) {
2687 		if (!strcmp(rss_type_table[i].str, rss_type))
2688 			rss_conf.rss_hf = rss_type_table[i].rss_type;
2689 	}
2690 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
2691 	if (diag == 0) {
2692 		rss_conf.rss_key = hash_key;
2693 		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
2694 	}
2695 	if (diag == 0)
2696 		return;
2697 
2698 	switch (diag) {
2699 	case -ENODEV:
2700 		printf("port index %d invalid\n", port_id);
2701 		break;
2702 	case -ENOTSUP:
2703 		printf("operation not supported by device\n");
2704 		break;
2705 	default:
2706 		printf("operation failed - diag=%d\n", diag);
2707 		break;
2708 	}
2709 }
2710 
2711 /*
2712  * Setup forwarding configuration for each logical core.
2713  */
2714 static void
2715 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
2716 {
2717 	streamid_t nb_fs_per_lcore;
2718 	streamid_t nb_fs;
2719 	streamid_t sm_id;
2720 	lcoreid_t  nb_extra;
2721 	lcoreid_t  nb_fc;
2722 	lcoreid_t  nb_lc;
2723 	lcoreid_t  lc_id;
2724 
2725 	nb_fs = cfg->nb_fwd_streams;
2726 	nb_fc = cfg->nb_fwd_lcores;
2727 	if (nb_fs <= nb_fc) {
2728 		nb_fs_per_lcore = 1;
2729 		nb_extra = 0;
2730 	} else {
2731 		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
2732 		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
2733 	}
2734 
2735 	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
2736 	sm_id = 0;
2737 	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
2738 		fwd_lcores[lc_id]->stream_idx = sm_id;
2739 		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
2740 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2741 	}
2742 
2743 	/*
2744 	 * Assign extra remaining streams, if any.
2745 	 */
2746 	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
2747 	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
2748 		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
2749 		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
2750 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
2751 	}
2752 }
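
/*
 * Worked example for the distribution above: 10 streams over 4 lcores
 * gives nb_fs_per_lcore = 10 / 4 = 2 and nb_extra = 10 % 4 = 2, so the
 * first nb_lc = 4 - 2 = 2 lcores take 2 streams each (indices 0-1 and
 * 2-3) and the remaining 2 lcores take 3 each (indices 4-6 and 7-9).
 */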
2753 
2754 static portid_t
2755 fwd_topology_tx_port_get(portid_t rxp)
2756 {
2757 	static int warning_once = 1;
2758 
2759 	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
2760 
2761 	switch (port_topology) {
2762 	default:
2763 	case PORT_TOPOLOGY_PAIRED:
2764 		if ((rxp & 0x1) == 0) {
2765 			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
2766 				return rxp + 1;
2767 			if (warning_once) {
2768 				printf("\nWarning! port-topology=paired"
2769 				       " and an odd number of forwarding ports,"
2770 				       " the last port will pair with"
2771 				       " itself.\n\n");
2772 				warning_once = 0;
2773 			}
2774 			return rxp;
2775 		}
2776 		return rxp - 1;
2777 	case PORT_TOPOLOGY_CHAINED:
2778 		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
2779 	case PORT_TOPOLOGY_LOOP:
2780 		return rxp;
2781 	}
2782 }
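
/*
 * Worked example of the RX->TX port mapping above with 4 forwarding
 * ports (0..3):
 *   paired : 0->1, 1->0, 2->3, 3->2 (an odd last port pairs with itself)
 *   chained: 0->1, 1->2, 2->3, 3->0
 *   loop   : every port transmits on itself
 */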
2783 
2784 static void
2785 simple_fwd_config_setup(void)
2786 {
2787 	portid_t i;
2788 
2789 	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
2790 	cur_fwd_config.nb_fwd_streams =
2791 		(streamid_t) cur_fwd_config.nb_fwd_ports;
2792 
2793 	/* reinitialize forwarding streams */
2794 	init_fwd_streams();
2795 
2796 	/*
2797 	 * In the simple forwarding test, the number of forwarding cores
2798 	 * must be lower or equal to the number of forwarding ports.
2799 	 */
2800 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2801 	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
2802 		cur_fwd_config.nb_fwd_lcores =
2803 			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
2804 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
2805 
2806 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2807 		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
2808 		fwd_streams[i]->rx_queue  = 0;
2809 		fwd_streams[i]->tx_port   =
2810 				fwd_ports_ids[fwd_topology_tx_port_get(i)];
2811 		fwd_streams[i]->tx_queue  = 0;
2812 		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
2813 		fwd_streams[i]->retry_enabled = retry_enabled;
2814 	}
2815 }
2816 
2817 /**
2818  * For the RSS forwarding test, all streams are distributed over the lcores.
2819  * Each stream is composed of an RX queue to poll on an RX port for input
2820  * packets, associated with a TX queue of a TX port used to send them.
2821  */
2822 static void
2823 rss_fwd_config_setup(void)
2824 {
2825 	portid_t   rxp;
2826 	portid_t   txp;
2827 	queueid_t  rxq;
2828 	queueid_t  nb_q;
2829 	streamid_t  sm_id;
2830 
2831 	nb_q = nb_rxq;
2832 	if (nb_q > nb_txq)
2833 		nb_q = nb_txq;
2834 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2835 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2836 	cur_fwd_config.nb_fwd_streams =
2837 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
2838 
2839 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2840 		cur_fwd_config.nb_fwd_lcores =
2841 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
2842 
2843 	/* reinitialize forwarding streams */
2844 	init_fwd_streams();
2845 
2846 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
2847 	rxp = 0; rxq = 0;
2848 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2849 		struct fwd_stream *fs;
2850 
2851 		fs = fwd_streams[sm_id];
2852 		txp = fwd_topology_tx_port_get(rxp);
2853 		fs->rx_port = fwd_ports_ids[rxp];
2854 		fs->rx_queue = rxq;
2855 		fs->tx_port = fwd_ports_ids[txp];
2856 		fs->tx_queue = rxq;
2857 		fs->peer_addr = fs->tx_port;
2858 		fs->retry_enabled = retry_enabled;
2859 		rxp++;
2860 		if (rxp < nb_fwd_ports)
2861 			continue;
2862 		rxp = 0;
2863 		rxq++;
2864 	}
2865 }
2866 
2867 /**
2868  * For the DCB forwarding test, each core is assigned to one traffic class.
2869  *
2870  * Each core is assigned a group of streams, each stream being composed of
2871  * an RX queue to poll on an RX port for input packets, associated with
2872  * a TX queue of a TX port used to send the forwarded packets. All RX and
2873  * TX queues map to the same traffic class.
2874  * If VMDQ and DCB co-exist, the same traffic class of different pools
2875  * shares the same core.
2876  */
2877 static void
2878 dcb_fwd_config_setup(void)
2879 {
2880 	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2881 	portid_t txp, rxp = 0;
2882 	queueid_t txq, rxq = 0;
2883 	lcoreid_t  lc_id;
2884 	uint16_t nb_rx_queue, nb_tx_queue;
2885 	uint16_t i, j, k, sm_id = 0;
2886 	uint8_t tc = 0;
2887 
2888 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2889 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2890 	cur_fwd_config.nb_fwd_streams =
2891 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2892 
2893 	/* reinitialize forwarding streams */
2894 	init_fwd_streams();
2895 	sm_id = 0;
2896 	txp = 1;
2897 	/* get the dcb info on the first RX and TX ports */
2898 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2899 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2900 
2901 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2902 		fwd_lcores[lc_id]->stream_nb = 0;
2903 		fwd_lcores[lc_id]->stream_idx = sm_id;
2904 		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2905 			/* If nb_queue is zero, this TC is not enabled
2906 			 * on the pool.
2907 			 */
2908 			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2909 				break;
2910 			k = fwd_lcores[lc_id]->stream_nb +
2911 				fwd_lcores[lc_id]->stream_idx;
2912 			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2913 			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2914 			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2915 			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2916 			for (j = 0; j < nb_rx_queue; j++) {
2917 				struct fwd_stream *fs;
2918 
2919 				fs = fwd_streams[k + j];
2920 				fs->rx_port = fwd_ports_ids[rxp];
2921 				fs->rx_queue = rxq + j;
2922 				fs->tx_port = fwd_ports_ids[txp];
2923 				fs->tx_queue = txq + j % nb_tx_queue;
2924 				fs->peer_addr = fs->tx_port;
2925 				fs->retry_enabled = retry_enabled;
2926 			}
2927 			fwd_lcores[lc_id]->stream_nb +=
2928 				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2929 		}
2930 		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2931 
2932 		tc++;
2933 		if (tc < rxp_dcb_info.nb_tcs)
2934 			continue;
2935 		/* Restart from TC 0 on next RX port */
2936 		tc = 0;
2937 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2938 			rxp = (portid_t)
2939 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
2940 		else
2941 			rxp++;
2942 		if (rxp >= nb_fwd_ports)
2943 			return;
2944 		/* get the dcb information on next RX and TX ports */
2945 		if ((rxp & 0x1) == 0)
2946 			txp = (portid_t) (rxp + 1);
2947 		else
2948 			txp = (portid_t) (rxp - 1);
2949 		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2950 		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2951 	}
2952 }
2953 
2954 static void
2955 icmp_echo_config_setup(void)
2956 {
2957 	portid_t  rxp;
2958 	queueid_t rxq;
2959 	lcoreid_t lc_id;
2960 	uint16_t  sm_id;
2961 
2962 	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2963 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2964 			(nb_txq * nb_fwd_ports);
2965 	else
2966 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2967 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2968 	cur_fwd_config.nb_fwd_streams =
2969 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2970 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2971 		cur_fwd_config.nb_fwd_lcores =
2972 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
2973 	if (verbose_level > 0) {
2974 		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
2975 		       __FUNCTION__,
2976 		       cur_fwd_config.nb_fwd_lcores,
2977 		       cur_fwd_config.nb_fwd_ports,
2978 		       cur_fwd_config.nb_fwd_streams);
2979 	}
2980 
2981 	/* reinitialize forwarding streams */
2982 	init_fwd_streams();
2983 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
2984 	rxp = 0; rxq = 0;
2985 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2986 		if (verbose_level > 0)
2987 			printf("  core=%d:\n", lc_id);
2988 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2989 			struct fwd_stream *fs;
2990 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2991 			fs->rx_port = fwd_ports_ids[rxp];
2992 			fs->rx_queue = rxq;
2993 			fs->tx_port = fs->rx_port;
2994 			fs->tx_queue = rxq;
2995 			fs->peer_addr = fs->tx_port;
2996 			fs->retry_enabled = retry_enabled;
2997 			if (verbose_level > 0)
2998 				printf("  stream=%d port=%d rxq=%d txq=%d\n",
2999 				       sm_id, fs->rx_port, fs->rx_queue,
3000 				       fs->tx_queue);
3001 			rxq = (queueid_t) (rxq + 1);
3002 			if (rxq == nb_rxq) {
3003 				rxq = 0;
3004 				rxp = (portid_t) (rxp + 1);
3005 			}
3006 		}
3007 	}
3008 }
3009 
3010 void
3011 fwd_config_setup(void)
3012 {
3013 	cur_fwd_config.fwd_eng = cur_fwd_eng;
3014 	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
3015 		icmp_echo_config_setup();
3016 		return;
3017 	}
3018 
3019 	if ((nb_rxq > 1) && (nb_txq > 1)) {
3020 		if (dcb_config)
3021 			dcb_fwd_config_setup();
3022 		else
3023 			rss_fwd_config_setup();
3024 	} else {
3025 		simple_fwd_config_setup();
3026 	}
3027 }
3028 
3029 static const char *
3030 mp_alloc_to_str(uint8_t mode)
3031 {
3032 	switch (mode) {
3033 	case MP_ALLOC_NATIVE:
3034 		return "native";
3035 	case MP_ALLOC_ANON:
3036 		return "anon";
3037 	case MP_ALLOC_XMEM:
3038 		return "xmem";
3039 	case MP_ALLOC_XMEM_HUGE:
3040 		return "xmemhuge";
3041 	case MP_ALLOC_XBUF:
3042 		return "xbuf";
3043 	default:
3044 		return "invalid";
3045 	}
3046 }
3047 
3048 void
3049 pkt_fwd_config_display(struct fwd_config *cfg)
3050 {
3051 	struct fwd_stream *fs;
3052 	lcoreid_t  lc_id;
3053 	streamid_t sm_id;
3054 
3055 	printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
3056 		"NUMA support %s, MP allocation mode: %s\n",
3057 		cfg->fwd_eng->fwd_mode_name,
3058 		retry_enabled == 0 ? "" : " with retry",
3059 		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
3060 		numa_support == 1 ? "enabled" : "disabled",
3061 		mp_alloc_to_str(mp_alloc_type));
3062 
3063 	if (retry_enabled)
3064 		printf("TX retry num: %u, delay between TX retries: %uus\n",
3065 			burst_tx_retry_num, burst_tx_delay_time);
3066 	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
3067 		printf("Logical Core %u (socket %u) forwards packets on "
3068 		       "%d streams:",
3069 		       fwd_lcores_cpuids[lc_id],
3070 		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
3071 		       fwd_lcores[lc_id]->stream_nb);
3072 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3073 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3074 			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
3075 			       "P=%d/Q=%d (socket %u) ",
3076 			       fs->rx_port, fs->rx_queue,
3077 			       ports[fs->rx_port].socket_id,
3078 			       fs->tx_port, fs->tx_queue,
3079 			       ports[fs->tx_port].socket_id);
3080 			print_ethaddr("peer=",
3081 				      &peer_eth_addrs[fs->peer_addr]);
3082 		}
3083 		printf("\n");
3084 	}
3085 	printf("\n");
3086 }
3087 
3088 void
3089 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
3090 {
3091 	struct rte_ether_addr new_peer_addr;
3092 	if (!rte_eth_dev_is_valid_port(port_id)) {
3093 		printf("Error: Invalid port number %i\n", port_id);
3094 		return;
3095 	}
3096 	if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
3097 		printf("Error: Invalid ethernet address: %s\n", peer_addr);
3098 		return;
3099 	}
3100 	peer_eth_addrs[port_id] = new_peer_addr;
3101 }
3102 
3103 int
3104 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
3105 {
3106 	unsigned int i;
3107 	unsigned int lcore_cpuid;
3108 	int record_now;
3109 
3110 	record_now = 0;
3111  again:
3112 	for (i = 0; i < nb_lc; i++) {
3113 		lcore_cpuid = lcorelist[i];
3114 		if (! rte_lcore_is_enabled(lcore_cpuid)) {
3115 			printf("lcore %u not enabled\n", lcore_cpuid);
3116 			return -1;
3117 		}
3118 		if (lcore_cpuid == rte_get_main_lcore()) {
3119 			printf("lcore %u is the main lcore, which is "
3120 			       "reserved for command line parsing only "
3121 			       "and cannot be used for packet forwarding\n",
3122 			       lcore_cpuid);
3123 			return -1;
3124 		}
3125 		if (record_now)
3126 			fwd_lcores_cpuids[i] = lcore_cpuid;
3127 	}
3128 	if (record_now == 0) {
3129 		record_now = 1;
3130 		goto again;
3131 	}
3132 	nb_cfg_lcores = (lcoreid_t) nb_lc;
3133 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
3134 		printf("previous number of forwarding cores %u - changed to "
3135 		       "number of configured cores %u\n",
3136 		       (unsigned int) nb_fwd_lcores, nb_lc);
3137 		nb_fwd_lcores = (lcoreid_t) nb_lc;
3138 	}
3139 
3140 	return 0;
3141 }
3142 
3143 int
3144 set_fwd_lcores_mask(uint64_t lcoremask)
3145 {
3146 	unsigned int lcorelist[64];
3147 	unsigned int nb_lc;
3148 	unsigned int i;
3149 
3150 	if (lcoremask == 0) {
3151 		printf("Invalid NULL mask of cores\n");
3152 		return -1;
3153 	}
3154 	nb_lc = 0;
3155 	for (i = 0; i < 64; i++) {
3156 		if (! ((uint64_t)(1ULL << i) & lcoremask))
3157 			continue;
3158 		lcorelist[nb_lc++] = i;
3159 	}
3160 	return set_fwd_lcores_list(lcorelist, nb_lc);
3161 }
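
/*
 * Worked example for the mask expansion above: lcoremask 0x0D (binary
 * 1101) expands to the list {0, 2, 3} before being handed over to
 * set_fwd_lcores_list().
 */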
3162 
3163 void
3164 set_fwd_lcores_number(uint16_t nb_lc)
3165 {
3166 	if (test_done == 0) {
3167 		printf("Please stop forwarding first\n");
3168 		return;
3169 	}
3170 	if (nb_lc > nb_cfg_lcores) {
3171 		printf("nb fwd cores %u > %u (max. number of configured "
3172 		       "lcores) - ignored\n",
3173 		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
3174 		return;
3175 	}
3176 	nb_fwd_lcores = (lcoreid_t) nb_lc;
3177 	printf("Number of forwarding cores set to %u\n",
3178 	       (unsigned int) nb_fwd_lcores);
3179 }
3180 
3181 void
3182 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
3183 {
3184 	unsigned int i;
3185 	portid_t port_id;
3186 	int record_now;
3187 
3188 	record_now = 0;
3189  again:
3190 	for (i = 0; i < nb_pt; i++) {
3191 		port_id = (portid_t) portlist[i];
3192 		if (port_id_is_invalid(port_id, ENABLED_WARN))
3193 			return;
3194 		if (record_now)
3195 			fwd_ports_ids[i] = port_id;
3196 	}
3197 	if (record_now == 0) {
3198 		record_now = 1;
3199 		goto again;
3200 	}
3201 	nb_cfg_ports = (portid_t) nb_pt;
3202 	if (nb_fwd_ports != (portid_t) nb_pt) {
3203 		printf("previous number of forwarding ports %u - changed to "
3204 		       "number of configured ports %u\n",
3205 		       (unsigned int) nb_fwd_ports, nb_pt);
3206 		nb_fwd_ports = (portid_t) nb_pt;
3207 	}
3208 }
3209 
3210 /**
3211  * Parse the user input and obtain the list of forwarding ports
3212  *
3213  * @param[in] list
3214  *   String containing the user input. Ports can be specified
3215  *   in formats such as 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
3216  *   For example, to use all of the 4 available ports in the
3217  *   system, the input can be 0-3 or 0,1,2,3.
3218  *   To use only ports 1 and 2, the input
3219  *   is 1,2.
3220  *   Valid separators are '-' and ','.
3221  * @param[out] values
3222  *   This array will be filled with a list of port IDs
3223  *   based on the user input
3224  *   Note that duplicate entries are discarded and only the first
3225  *   count entries in this array are port IDs and all the rest
3226  *   will contain default values
3227  * @param[in] maxsize
3228  *   This parameter denotes 2 things
3229  *   1) Number of elements in the values array
3230  *   2) Maximum value of each element in the values array
3231  * @return
3232  *   On success, returns total count of parsed port IDs
3233  *   On failure, returns 0
3234  */
3235 static unsigned int
3236 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
3237 {
3238 	unsigned int count = 0;
3239 	char *end = NULL;
3240 	int min, max;
3241 	int value, i;
3242 	unsigned int marked[maxsize];
3243 
3244 	if (list == NULL || values == NULL)
3245 		return 0;
3246 
3247 	for (i = 0; i < (int)maxsize; i++)
3248 		marked[i] = 0;
3249 
3250 	min = INT_MAX;
3251 
3252 	do {
3253 		/* Remove blank spaces, if any */
3254 		while (isblank(*list))
3255 			list++;
3256 		if (*list == '\0')
3257 			break;
3258 		errno = 0;
3259 		value = strtol(list, &end, 10);
3260 		if (errno || end == NULL)
3261 			return 0;
3262 		if (value < 0 || value >= (int)maxsize)
3263 			return 0;
3264 		while (isblank(*end))
3265 			end++;
3266 		if (*end == '-' && min == INT_MAX) {
3267 			min = value;
3268 		} else if ((*end == ',') || (*end == '\0')) {
3269 			max = value;
3270 			if (min == INT_MAX)
3271 				min = value;
3272 			for (i = min; i <= max; i++) {
3273 				if (count < maxsize) {
3274 					if (marked[i])
3275 						continue;
3276 					values[count] = i;
3277 					marked[i] = 1;
3278 					count++;
3279 				}
3280 			}
3281 			min = INT_MAX;
3282 		} else
3283 			return 0;
3284 		list = end + 1;
3285 	} while (*end != '\0');
3286 
3287 	return count;
3288 }
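
/*
 * Examples for the parser above, assuming maxsize = 8:
 *   "0-2,5" -> values = {0, 1, 2, 5}, returns 4
 *   "1,1,3" -> values = {1, 3}, returns 2 (duplicates are discarded)
 *   "7-9"   -> returns 0 (9 is not < maxsize)
 */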
3289 
3290 void
3291 parse_fwd_portlist(const char *portlist)
3292 {
3293 	unsigned int portcount;
3294 	unsigned int portindex[RTE_MAX_ETHPORTS];
3295 	unsigned int i, valid_port_count = 0;
3296 
3297 	portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
3298 	if (!portcount)
3299 		rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
3300 
3301 	/*
3302 	 * Here we verify the validity of the ports
3303 	 * and thereby calculate the total number of
3304 	 * valid ports
3305 	 */
3306 	for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
3307 		if (rte_eth_dev_is_valid_port(portindex[i])) {
3308 			portindex[valid_port_count] = portindex[i];
3309 			valid_port_count++;
3310 		}
3311 	}
3312 
3313 	set_fwd_ports_list(portindex, valid_port_count);
3314 }
3315 
3316 void
3317 set_fwd_ports_mask(uint64_t portmask)
3318 {
3319 	unsigned int portlist[64];
3320 	unsigned int nb_pt;
3321 	unsigned int i;
3322 
3323 	if (portmask == 0) {
3324 		printf("Invalid NULL mask of ports\n");
3325 		return;
3326 	}
3327 	nb_pt = 0;
3328 	RTE_ETH_FOREACH_DEV(i) {
3329 		if (! ((uint64_t)(1ULL << i) & portmask))
3330 			continue;
3331 		portlist[nb_pt++] = i;
3332 	}
3333 	set_fwd_ports_list(portlist, nb_pt);
3334 }
3335 
3336 void
3337 set_fwd_ports_number(uint16_t nb_pt)
3338 {
3339 	if (nb_pt > nb_cfg_ports) {
3340 		printf("nb fwd ports %u > %u (number of configured "
3341 		       "ports) - ignored\n",
3342 		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
3343 		return;
3344 	}
3345 	nb_fwd_ports = (portid_t) nb_pt;
3346 	printf("Number of forwarding ports set to %u\n",
3347 	       (unsigned int) nb_fwd_ports);
3348 }
3349 
3350 int
3351 port_is_forwarding(portid_t port_id)
3352 {
3353 	unsigned int i;
3354 
3355 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3356 		return -1;
3357 
3358 	for (i = 0; i < nb_fwd_ports; i++) {
3359 		if (fwd_ports_ids[i] == port_id)
3360 			return 1;
3361 	}
3362 
3363 	return 0;
3364 }
3365 
3366 void
3367 set_nb_pkt_per_burst(uint16_t nb)
3368 {
3369 	if (nb > MAX_PKT_BURST) {
3370 		printf("nb pkt per burst: %u > %u (maximum packets per burst)"
3371 		       " - ignored\n",
3372 		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
3373 		return;
3374 	}
3375 	nb_pkt_per_burst = nb;
3376 	printf("Number of packets per burst set to %u\n",
3377 	       (unsigned int) nb_pkt_per_burst);
3378 }
3379 
3380 static const char *
3381 tx_split_get_name(enum tx_pkt_split split)
3382 {
3383 	uint32_t i;
3384 
3385 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3386 		if (tx_split_name[i].split == split)
3387 			return tx_split_name[i].name;
3388 	}
3389 	return NULL;
3390 }
3391 
3392 void
3393 set_tx_pkt_split(const char *name)
3394 {
3395 	uint32_t i;
3396 
3397 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3398 		if (strcmp(tx_split_name[i].name, name) == 0) {
3399 			tx_pkt_split = tx_split_name[i].split;
3400 			return;
3401 		}
3402 	}
3403 	printf("unknown value: \"%s\"\n", name);
3404 }
3405 
3406 int
3407 parse_fec_mode(const char *name, uint32_t *mode)
3408 {
3409 	uint8_t i;
3410 
3411 	for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
3412 		if (strcmp(fec_mode_name[i].name, name) == 0) {
3413 			*mode = RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
3414 			return 0;
3415 		}
3416 	}
3417 	return -1;
3418 }
3419 
3420 void
3421 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
3422 {
3423 	unsigned int i, j;
3424 
3425 	printf("FEC capabilities:\n");
3426 
3427 	for (i = 0; i < num; i++) {
3428 		printf("%s : ",
3429 			rte_eth_link_speed_to_str(speed_fec_capa[i].speed));
3430 
3431 		for (j = 0; j < RTE_DIM(fec_mode_name); j++) {
3432 			if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
3433 						speed_fec_capa[i].capa)
3434 				printf("%s ", fec_mode_name[j].name);
3435 		}
3436 		printf("\n");
3437 	}
3438 }
3439 
3440 void
3441 show_rx_pkt_offsets(void)
3442 {
3443 	uint32_t i, n;
3444 
3445 	n = rx_pkt_nb_offs;
3446 	printf("Number of offsets: %u\n", n);
3447 	if (n) {
3448 		printf("Segment offsets: ");
3449 		for (i = 0; i != n - 1; i++)
3450 			printf("%hu,", rx_pkt_seg_offsets[i]);
3451 		printf("%hu\n", rx_pkt_seg_lengths[i]);
3452 	}
3453 }
3454 
3455 void
3456 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
3457 {
3458 	unsigned int i;
3459 
3460 	if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
3461 		printf("nb segments per RX packet=%u >= "
3462 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
3463 		return;
3464 	}
3465 
3466 	/*
3467 	 * No extra check here, the segment length will be checked by PMD
3468 	 * in the extended queue setup.
3469 	 */
3470 	for (i = 0; i < nb_offs; i++) {
3471 		if (seg_offsets[i] >= UINT16_MAX) {
3472 			printf("offset[%u]=%u >= UINT16_MAX - give up\n",
3473 			       i, seg_offsets[i]);
3474 			return;
3475 		}
3476 	}
3477 
3478 	for (i = 0; i < nb_offs; i++)
3479 		rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];
3480 
3481 	rx_pkt_nb_offs = (uint8_t) nb_offs;
3482 }
3483 
3484 void
3485 show_rx_pkt_segments(void)
3486 {
3487 	uint32_t i, n;
3488 
3489 	n = rx_pkt_nb_segs;
3490 	printf("Number of segments: %u\n", n);
3491 	if (n) {
3492 		printf("Segment sizes: ");
3493 		for (i = 0; i != n - 1; i++)
3494 			printf("%hu,", rx_pkt_seg_lengths[i]);
3495 		printf("%hu\n", rx_pkt_seg_lengths[i]);
3496 	}
3497 }
3498 
3499 void
3500 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
3501 {
3502 	unsigned int i;
3503 
3504 	if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
3505 		printf("nb segments per RX packet=%u >= "
3506 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
3507 		return;
3508 	}
3509 
3510 	/*
3511 	 * No extra check here, the segment length will be checked by PMD
3512 	 * in the extended queue setup.
3513 	 */
3514 	for (i = 0; i < nb_segs; i++) {
3515 		if (seg_lengths[i] >= UINT16_MAX) {
3516 			printf("length[%u]=%u >= UINT16_MAX - give up\n",
3517 			       i, seg_lengths[i]);
3518 			return;
3519 		}
3520 	}
3521 
3522 	for (i = 0; i < nb_segs; i++)
3523 		rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
3524 
3525 	rx_pkt_nb_segs = (uint8_t) nb_segs;
3526 }
3527 
3528 void
3529 show_tx_pkt_segments(void)
3530 {
3531 	uint32_t i, n;
3532 	const char *split;
3533 
3534 	n = tx_pkt_nb_segs;
3535 	split = tx_split_get_name(tx_pkt_split);
3536 
3537 	printf("Number of segments: %u\n", n);
3538 	printf("Segment sizes: ");
3539 	for (i = 0; i != n - 1; i++)
3540 		printf("%hu,", tx_pkt_seg_lengths[i]);
3541 	printf("%hu\n", tx_pkt_seg_lengths[i]);
3542 	printf("Split packet: %s\n", split);
3543 }
3544 
3545 static bool
3546 nb_segs_is_invalid(unsigned int nb_segs)
3547 {
3548 	uint16_t ring_size;
3549 	uint16_t queue_id;
3550 	uint16_t port_id;
3551 	int ret;
3552 
3553 	RTE_ETH_FOREACH_DEV(port_id) {
3554 		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
3555 			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
3556 
3557 			if (ret)
3558 				return true;
3559 
3560 			if (ring_size < nb_segs) {
3561 				printf("nb segments per TX packet=%u > "
3562 				       "TX queue(%u) ring_size=%u - ignored\n",
3563 				       nb_segs, queue_id, ring_size);
3564 				return true;
3565 			}
3566 		}
3567 	}
3568 
3569 	return false;
3570 }
3571 
3572 void
3573 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
3574 {
3575 	uint16_t tx_pkt_len;
3576 	unsigned int i;
3577 
3578 	if (nb_segs_is_invalid(nb_segs))
3579 		return;
3580 
3581 	/*
3582 	 * Check that each segment length is less than or equal to
3583 	 * the mbuf data size.
3584 	 * Check also that the total packet length is greater than or equal to
3585 	 * the size of an empty UDP/IPv4 packet (sizeof(struct rte_ether_hdr) +
3586 	 * 20 + 8).
3587 	 */
3588 	tx_pkt_len = 0;
3589 	for (i = 0; i < nb_segs; i++) {
3590 		if (seg_lengths[i] > mbuf_data_size[0]) {
3591 			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
3592 			       i, seg_lengths[i], mbuf_data_size[0]);
3593 			return;
3594 		}
3595 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
3596 	}
3597 	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
3598 		printf("total packet length=%u < %d - give up\n",
3599 				(unsigned) tx_pkt_len,
3600 				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
3601 		return;
3602 	}
3603 
3604 	for (i = 0; i < nb_segs; i++)
3605 		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
3606 
3607 	tx_pkt_length  = tx_pkt_len;
3608 	tx_pkt_nb_segs = (uint8_t) nb_segs;
3609 }
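
/*
 * Worked minimum for the total-length check above:
 * sizeof(struct rte_ether_hdr) is 14 bytes, plus a 20-byte IPv4 header
 * and an 8-byte UDP header, so tx_pkt_len must be at least 42 bytes.
 */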
3610 
3611 void
3612 show_tx_pkt_times(void)
3613 {
3614 	printf("Interburst gap: %u\n", tx_pkt_times_inter);
3615 	printf("Intraburst gap: %u\n", tx_pkt_times_intra);
3616 }
3617 
3618 void
3619 set_tx_pkt_times(unsigned int *tx_times)
3620 {
3621 	tx_pkt_times_inter = tx_times[0];
3622 	tx_pkt_times_intra = tx_times[1];
3623 }
3624 
3625 void
3626 setup_gro(const char *onoff, portid_t port_id)
3627 {
3628 	if (!rte_eth_dev_is_valid_port(port_id)) {
3629 		printf("invalid port id %u\n", port_id);
3630 		return;
3631 	}
3632 	if (test_done == 0) {
3633 		printf("Before enabling/disabling GRO,"
3634 				" please stop forwarding first\n");
3635 		return;
3636 	}
3637 	if (strcmp(onoff, "on") == 0) {
3638 		if (gro_ports[port_id].enable != 0) {
3639 			printf("Port %u has already enabled GRO. Please"
3640 					" disable GRO first\n", port_id);
3641 			return;
3642 		}
3643 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3644 			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
3645 			gro_ports[port_id].param.max_flow_num =
3646 				GRO_DEFAULT_FLOW_NUM;
3647 			gro_ports[port_id].param.max_item_per_flow =
3648 				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
3649 		}
3650 		gro_ports[port_id].enable = 1;
3651 	} else {
3652 		if (gro_ports[port_id].enable == 0) {
3653 			printf("Port %u has already disabled GRO\n", port_id);
3654 			return;
3655 		}
3656 		gro_ports[port_id].enable = 0;
3657 	}
3658 }
3659 
3660 void
3661 setup_gro_flush_cycles(uint8_t cycles)
3662 {
3663 	if (test_done == 0) {
3664 		printf("Before changing the GRO flush interval,"
3665 				" please stop forwarding first.\n");
3666 		return;
3667 	}
3668 
3669 	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
3670 			GRO_DEFAULT_FLUSH_CYCLES) {
3671 		printf("The flushing cycle must be in the range"
3672 				" of 1 to %u. Reverting to the default"
3673 				" value %u.\n",
3674 				GRO_MAX_FLUSH_CYCLES,
3675 				GRO_DEFAULT_FLUSH_CYCLES);
3676 		cycles = GRO_DEFAULT_FLUSH_CYCLES;
3677 	}
3678 
3679 	gro_flush_cycles = cycles;
3680 }
3681 
3682 void
3683 show_gro(portid_t port_id)
3684 {
3685 	struct rte_gro_param *param;
3686 	uint32_t max_pkts_num;
3687 
3688 	param = &gro_ports[port_id].param;
3689 
3690 	if (!rte_eth_dev_is_valid_port(port_id)) {
3691 		printf("Invalid port id %u.\n", port_id);
3692 		return;
3693 	}
3694 	if (gro_ports[port_id].enable) {
3695 		printf("GRO type: TCP/IPv4\n");
3696 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
3697 			max_pkts_num = param->max_flow_num *
3698 				param->max_item_per_flow;
3699 		} else
3700 			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
3701 		printf("Max number of packets to perform GRO: %u\n",
3702 				max_pkts_num);
3703 		printf("Flushing cycles: %u\n", gro_flush_cycles);
3704 	} else
3705 		printf("GRO is not enabled on port %u.\n", port_id);
3706 }
3707 
3708 void
3709 setup_gso(const char *mode, portid_t port_id)
3710 {
3711 	if (!rte_eth_dev_is_valid_port(port_id)) {
3712 		printf("Invalid port id %u\n", port_id);
3713 		return;
3714 	}
3715 	if (strcmp(mode, "on") == 0) {
3716 		if (test_done == 0) {
3717 			printf("Before enabling GSO,"
3718 					" please stop forwarding first\n");
3719 			return;
3720 		}
3721 		gso_ports[port_id].enable = 1;
3722 	} else if (strcmp(mode, "off") == 0) {
3723 		if (test_done == 0) {
3724 			printf("Before disabling GSO,"
3725 					" please stop forwarding first\n");
3726 			return;
3727 		}
3728 		gso_ports[port_id].enable = 0;
3729 	}
3730 }
3731 
3732 char*
3733 list_pkt_forwarding_modes(void)
3734 {
3735 	static char fwd_modes[128] = "";
3736 	const char *separator = "|";
3737 	struct fwd_engine *fwd_eng;
3738 	unsigned i = 0;
3739 
3740 	if (strlen(fwd_modes) == 0) {
3741 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
3742 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
3743 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3744 			strncat(fwd_modes, separator,
3745 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
3746 		}
3747 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3748 	}
3749 
3750 	return fwd_modes;
3751 }
3752 
3753 char*
3754 list_pkt_forwarding_retry_modes(void)
3755 {
3756 	static char fwd_modes[128] = "";
3757 	const char *separator = "|";
3758 	struct fwd_engine *fwd_eng;
3759 	unsigned i = 0;
3760 
3761 	if (strlen(fwd_modes) == 0) {
3762 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
3763 			if (fwd_eng == &rx_only_engine)
3764 				continue;
3765 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
3766 					sizeof(fwd_modes) -
3767 					strlen(fwd_modes) - 1);
3768 			strncat(fwd_modes, separator,
3769 					sizeof(fwd_modes) -
3770 					strlen(fwd_modes) - 1);
3771 		}
3772 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
3773 	}
3774 
3775 	return fwd_modes;
3776 }
3777 
3778 void
3779 set_pkt_forwarding_mode(const char *fwd_mode_name)
3780 {
3781 	struct fwd_engine *fwd_eng;
3782 	unsigned i;
3783 
3784 	i = 0;
3785 	while ((fwd_eng = fwd_engines[i]) != NULL) {
3786 		if (strcmp(fwd_eng->fwd_mode_name, fwd_mode_name) == 0) {
3787 			printf("Set %s packet forwarding mode%s\n",
3788 			       fwd_mode_name,
3789 			       retry_enabled == 0 ? "" : " with retry");
3790 			cur_fwd_eng = fwd_eng;
3791 			return;
3792 		}
3793 		i++;
3794 	}
3795 	printf("Invalid packet forwarding mode: %s\n", fwd_mode_name);
3796 }
3797 
3798 void
3799 add_rx_dump_callbacks(portid_t portid)
3800 {
3801 	struct rte_eth_dev_info dev_info;
3802 	uint16_t queue;
3803 	int ret;
3804 
3805 	if (port_id_is_invalid(portid, ENABLED_WARN))
3806 		return;
3807 
3808 	ret = eth_dev_info_get_print_err(portid, &dev_info);
3809 	if (ret != 0)
3810 		return;
3811 
3812 	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3813 		if (!ports[portid].rx_dump_cb[queue])
3814 			ports[portid].rx_dump_cb[queue] =
3815 				rte_eth_add_rx_callback(portid, queue,
3816 					dump_rx_pkts, NULL);
3817 }
3818 
3819 void
3820 add_tx_dump_callbacks(portid_t portid)
3821 {
3822 	struct rte_eth_dev_info dev_info;
3823 	uint16_t queue;
3824 	int ret;
3825 
3826 	if (port_id_is_invalid(portid, ENABLED_WARN))
3827 		return;
3828 
3829 	ret = eth_dev_info_get_print_err(portid, &dev_info);
3830 	if (ret != 0)
3831 		return;
3832 
3833 	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3834 		if (!ports[portid].tx_dump_cb[queue])
3835 			ports[portid].tx_dump_cb[queue] =
3836 				rte_eth_add_tx_callback(portid, queue,
3837 							dump_tx_pkts, NULL);
3838 }
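
/*
 * Editor's note: for reference, a callback compatible with
 * rte_eth_add_rx_callback() as used above has the shape sketched below
 * (dump_rx_pkts is testpmd's real implementation); example_count_cb and
 * its counter are hypothetical.
 */
#if 0
static uint16_t
example_count_cb(uint16_t port_id, uint16_t queue,
		 struct rte_mbuf *pkts[], uint16_t nb_pkts,
		 uint16_t max_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	RTE_SET_USED(port_id);
	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);
	*counter += nb_pkts;
	return nb_pkts;	/* number of packets passed on to the application */
}
#endif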
3839 
3840 void
3841 remove_rx_dump_callbacks(portid_t portid)
3842 {
3843 	struct rte_eth_dev_info dev_info;
3844 	uint16_t queue;
3845 	int ret;
3846 
3847 	if (port_id_is_invalid(portid, ENABLED_WARN))
3848 		return;
3849 
3850 	ret = eth_dev_info_get_print_err(portid, &dev_info);
3851 	if (ret != 0)
3852 		return;
3853 
3854 	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
3855 		if (ports[portid].rx_dump_cb[queue]) {
3856 			rte_eth_remove_rx_callback(portid, queue,
3857 				ports[portid].rx_dump_cb[queue]);
3858 			ports[portid].rx_dump_cb[queue] = NULL;
3859 		}
3860 }
3861 
3862 void
3863 remove_tx_dump_callbacks(portid_t portid)
3864 {
3865 	struct rte_eth_dev_info dev_info;
3866 	uint16_t queue;
3867 	int ret;
3868 
3869 	if (port_id_is_invalid(portid, ENABLED_WARN))
3870 		return;
3871 
3872 	ret = eth_dev_info_get_print_err(portid, &dev_info);
3873 	if (ret != 0)
3874 		return;
3875 
3876 	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
3877 		if (ports[portid].tx_dump_cb[queue]) {
3878 			rte_eth_remove_tx_callback(portid, queue,
3879 				ports[portid].tx_dump_cb[queue]);
3880 			ports[portid].tx_dump_cb[queue] = NULL;
3881 		}
3882 }
3883 
3884 void
3885 configure_rxtx_dump_callbacks(uint16_t verbose)
3886 {
3887 	portid_t portid;
3888 
3889 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3890 	TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
3891 	return;
3892 #endif
3893 
3894 	RTE_ETH_FOREACH_DEV(portid) {
3896 		if (verbose == 1 || verbose > 2)
3897 			add_rx_dump_callbacks(portid);
3898 		else
3899 			remove_rx_dump_callbacks(portid);
3900 		if (verbose >= 2)
3901 			add_tx_dump_callbacks(portid);
3902 		else
3903 			remove_tx_dump_callbacks(portid);
3904 	}
3905 }
3906 
3907 void
3908 set_verbose_level(uint16_t vb_level)
3909 {
3910 	printf("Change verbose level from %u to %u\n",
3911 	       (unsigned int) verbose_level, (unsigned int) vb_level);
3912 	verbose_level = vb_level;
3913 	configure_rxtx_dump_callbacks(verbose_level);
3914 }
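
/*
 * Editor's note: an illustration of the verbosity mapping implemented by
 * configure_rxtx_dump_callbacks() above: 0 = no dump callbacks, 1 = RX
 * only, 2 = TX only, 3 and up = both directions.
 */
#if 0
static void
example_dump_both_directions(void)
{
	set_verbose_level(3);	/* installs RX and TX dump callbacks */
	set_verbose_level(0);	/* removes them again */
}
#endif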
3915 
3916 void
3917 vlan_extend_set(portid_t port_id, int on)
3918 {
3919 	int diag;
3920 	int vlan_offload;
3921 	uint64_t port_rx_offloads;
3922 
3923 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3924 		return;
3925 	port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3926 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3927 
3928 	if (on) {
3929 		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
3930 		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3931 	} else {
3932 		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
3933 		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3934 	}
3935 
3936 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3937 	if (diag < 0) {
3938 		printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
3939 		       "diag=%d\n", port_id, on, diag);
3940 		return;
3941 	}
3942 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3943 }
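
/*
 * Editor's note: the VLAN offload helpers above and below all share the
 * same read-modify-write pattern on the ethdev VLAN offload mask; a
 * condensed sketch (the helper name is hypothetical):
 */
#if 0
static int
example_toggle_vlan_offload(portid_t pid, int mask_bit, int on)
{
	int mask = rte_eth_dev_get_vlan_offload(pid);

	if (on)
		mask |= mask_bit;	/* e.g. ETH_VLAN_STRIP_OFFLOAD */
	else
		mask &= ~mask_bit;
	return rte_eth_dev_set_vlan_offload(pid, mask);
}
#endif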
3944 
3945 void
3946 rx_vlan_strip_set(portid_t port_id, int on)
3947 {
3948 	int diag;
3949 	int vlan_offload;
3950 	uint64_t port_rx_offloads;
3951 
3952 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3953 		return;
3954 	port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3955 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3956 
3957 	if (on) {
3958 		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
3959 		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3960 	} else {
3961 		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
3962 		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3963 	}
3964 
3965 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
3966 	if (diag < 0) {
3967 		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
3968 		       "diag=%d\n", port_id, on, diag);
3969 		return;
3970 	}
3971 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
3972 }
3973 
3974 void
3975 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
3976 {
3977 	int diag;
3978 
3979 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3980 		return;
3981 
3982 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
3983 	if (diag < 0)
3984 		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
3985 		       "diag=%d\n", port_id, queue_id, on, diag);
3986 }
3987 
3988 void
3989 rx_vlan_filter_set(portid_t port_id, int on)
3990 {
3991 	int diag;
3992 	int vlan_offload;
3993 	uint64_t port_rx_offloads;
3994 
3995 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3996 		return;
3997 	port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
3998 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
3999 
4000 	if (on) {
4001 		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
4002 		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
4003 	} else {
4004 		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
4005 		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
4006 	}
4007 
4008 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4009 	if (diag < 0) {
4010 		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
4011 		       "diag=%d\n", port_id, on, diag);
4012 		return;
4013 	}
4014 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4015 }
4016 
4017 void
4018 rx_vlan_qinq_strip_set(portid_t port_id, int on)
4019 {
4020 	int diag;
4021 	int vlan_offload;
4022 	uint64_t port_rx_offloads;
4023 
4024 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4025 		return;
4026 	port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4027 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4028 
4029 	if (on) {
4030 		vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
4031 		port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
4032 	} else {
4033 		vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
4034 		port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
4035 	}
4036 
4037 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4038 	if (diag < 0) {
4039 		printf("%s(port_id=%d, on=%d) failed "
4040 		       "diag=%d\n", __func__, port_id, on, diag);
4041 		return;
4042 	}
4043 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4044 }
4045 
4046 int
4047 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
4048 {
4049 	int diag;
4050 
4051 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4052 		return 1;
4053 	if (vlan_id_is_invalid(vlan_id))
4054 		return 1;
4055 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
4056 	if (diag == 0)
4057 		return 0;
4058 	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
4059 	       "diag=%d\n",
4060 	       port_id, vlan_id, on, diag);
4061 	return -1;
4062 }
4063 
4064 void
4065 rx_vlan_all_filter_set(portid_t port_id, int on)
4066 {
4067 	uint16_t vlan_id;
4068 
4069 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4070 		return;
4071 	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
4072 		if (rx_vft_set(port_id, vlan_id, on))
4073 			break;
4074 	}
4075 }
4076 
4077 void
4078 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
4079 {
4080 	int diag;
4081 
4082 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4083 		return;
4084 
4085 	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
4086 	if (diag == 0)
4087 		return;
4088 
4089 	printf("vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
4090 	       "diag=%d\n",
4091 	       port_id, vlan_type, tp_id, diag);
4092 }
4093 
4094 void
4095 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
4096 {
4097 	struct rte_eth_dev_info dev_info;
4098 	int ret;
4099 
4100 	if (vlan_id_is_invalid(vlan_id))
4101 		return;
4102 
4103 	if (ports[port_id].dev_conf.txmode.offloads &
4104 	    DEV_TX_OFFLOAD_QINQ_INSERT) {
4105 		printf("Error: QinQ insertion is enabled. Disable it first.\n");
4106 		return;
4107 	}
4108 
4109 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
4110 	if (ret != 0)
4111 		return;
4112 
4113 	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
4114 		printf("Error: vlan insert is not supported by port %d\n",
4115 			port_id);
4116 		return;
4117 	}
4118 
4119 	tx_vlan_reset(port_id);
4120 	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
4121 	ports[port_id].tx_vlan_id = vlan_id;
4122 }
4123 
4124 void
4125 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
4126 {
4127 	struct rte_eth_dev_info dev_info;
4128 	int ret;
4129 
4130 	if (vlan_id_is_invalid(vlan_id))
4131 		return;
4132 	if (vlan_id_is_invalid(vlan_id_outer))
4133 		return;
4134 
4135 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
4136 	if (ret != 0)
4137 		return;
4138 
4139 	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
4140 		printf("Error: qinq insert not supported by port %d\n",
4141 			port_id);
4142 		return;
4143 	}
4144 
4145 	tx_vlan_reset(port_id);
4146 	ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
4147 						    DEV_TX_OFFLOAD_QINQ_INSERT);
4148 	ports[port_id].tx_vlan_id = vlan_id;
4149 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
4150 }
4151 
4152 void
4153 tx_vlan_reset(portid_t port_id)
4154 {
4155 	ports[port_id].dev_conf.txmode.offloads &=
4156 				~(DEV_TX_OFFLOAD_VLAN_INSERT |
4157 				  DEV_TX_OFFLOAD_QINQ_INSERT);
4158 	ports[port_id].tx_vlan_id = 0;
4159 	ports[port_id].tx_vlan_id_outer = 0;
4160 }
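
/*
 * Editor's note: an illustration (not part of testpmd) of the TX VLAN
 * insertion lifecycle implemented above; the VLAN IDs are arbitrary.
 */
#if 0
static void
example_tx_vlan_lifecycle(portid_t pid)
{
	tx_vlan_set(pid, 100);		/* single tag: VLAN ID 100 */
	tx_qinq_set(pid, 100, 200);	/* replaces it: inner 100, outer 200 */
	tx_vlan_reset(pid);		/* back to no insertion at all */
}
#endif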
4161 
4162 void
4163 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
4164 {
4165 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4166 		return;
4167 
4168 	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
4169 }
4170 
4171 void
4172 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
4173 {
4174 	int ret;
4175 
4176 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4177 		return;
4178 
4179 	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
4180 		return;
4181 
4182 	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
4183 		printf("map_value not in required range 0..%d\n",
4184 		       RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
4185 		return;
4186 	}
4187 
4188 	if (!is_rx) { /* tx */
4189 		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
4190 							     map_value);
4191 		if (ret) {
4192 			printf("failed to set tx queue stats mapping.\n");
4193 			return;
4194 		}
4195 	} else { /* rx */
4196 		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
4197 							     map_value);
4198 		if (ret) {
4199 			printf("failed to set rx queue stats mapping.\n");
4200 			return;
4201 		}
4202 	}
4203 }
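
/*
 * Editor's note: a sketch of what the mapping above provides: once an RX
 * queue is mapped to a stats counter, its packet count appears in
 * rte_eth_stats.q_ipackets[] at that index. Queue and counter numbers are
 * arbitrary and assume the port has at least six RX queues.
 */
#if 0
static void
example_read_mapped_queue_stats(portid_t pid)
{
	struct rte_eth_stats stats;

	set_qmap(pid, 1 /* is_rx */, 5 /* queue */, 0 /* counter */);
	if (rte_eth_stats_get(pid, &stats) == 0)
		printf("rx queue 5 packets: %"PRIu64"\n",
		       stats.q_ipackets[0]);
}
#endif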
4204 
4205 void
4206 set_xstats_hide_zero(uint8_t on_off)
4207 {
4208 	xstats_hide_zero = on_off;
4209 }
4210 
4211 void
4212 set_record_core_cycles(uint8_t on_off)
4213 {
4214 	record_core_cycles = on_off;
4215 }
4216 
4217 void
4218 set_record_burst_stats(uint8_t on_off)
4219 {
4220 	record_burst_stats = on_off;
4221 }
4222 
4223 static inline void
4224 print_fdir_mask(struct rte_eth_fdir_masks *mask)
4225 {
4226 	printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
4227 
4228 	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4229 		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
4230 			" tunnel_id: 0x%08x",
4231 			mask->mac_addr_byte_mask, mask->tunnel_type_mask,
4232 			rte_be_to_cpu_32(mask->tunnel_id_mask));
4233 	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
4234 		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
4235 			rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
4236 			rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
4237 
4238 		printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
4239 			rte_be_to_cpu_16(mask->src_port_mask),
4240 			rte_be_to_cpu_16(mask->dst_port_mask));
4241 
4242 		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4243 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
4244 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
4245 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
4246 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
4247 
4248 		printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4249 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
4250 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
4251 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
4252 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
4253 	}
4254 
4255 	printf("\n");
4256 }
4257 
4258 static inline void
4259 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4260 {
4261 	struct rte_eth_flex_payload_cfg *cfg;
4262 	uint32_t i, j;
4263 
4264 	for (i = 0; i < flex_conf->nb_payloads; i++) {
4265 		cfg = &flex_conf->flex_set[i];
4266 		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
4267 			printf("\n    RAW:  ");
4268 		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
4269 			printf("\n    L2_PAYLOAD:  ");
4270 		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
4271 			printf("\n    L3_PAYLOAD:  ");
4272 		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
4273 			printf("\n    L4_PAYLOAD:  ");
4274 		else
4275 			printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
4276 		for (j = 0; j < num; j++)
4277 			printf("  %-5u", cfg->src_offset[j]);
4278 	}
4279 	printf("\n");
4280 }
4281 
4282 static char *
4283 flowtype_to_str(uint16_t flow_type)
4284 {
4285 	struct flow_type_info {
4286 		char str[32];
4287 		uint16_t ftype;
4288 	};
4289 
4290 	uint8_t i;
4291 	static struct flow_type_info flowtype_str_table[] = {
4292 		{"raw", RTE_ETH_FLOW_RAW},
4293 		{"ipv4", RTE_ETH_FLOW_IPV4},
4294 		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
4295 		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
4296 		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
4297 		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
4298 		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
4299 		{"ipv6", RTE_ETH_FLOW_IPV6},
4300 		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
4301 		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
4302 		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
4303 		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
4304 		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
4305 		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
4306 		{"port", RTE_ETH_FLOW_PORT},
4307 		{"vxlan", RTE_ETH_FLOW_VXLAN},
4308 		{"geneve", RTE_ETH_FLOW_GENEVE},
4309 		{"nvgre", RTE_ETH_FLOW_NVGRE},
4310 		{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
4311 	};
4312 
4313 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
4314 		if (flowtype_str_table[i].ftype == flow_type)
4315 			return flowtype_str_table[i].str;
4316 	}
4317 
4318 	return NULL;
4319 }
4320 
4321 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
4322 
4323 static inline void
4324 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4325 {
4326 	struct rte_eth_fdir_flex_mask *mask;
4327 	uint32_t i, j;
4328 	char *p;
4329 
4330 	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
4331 		mask = &flex_conf->flex_mask[i];
4332 		p = flowtype_to_str(mask->flow_type);
4333 		printf("\n    %s:\t", p ? p : "unknown");
4334 		for (j = 0; j < num; j++)
4335 			printf(" %02x", mask->mask[j]);
4336 	}
4337 	printf("\n");
4338 }
4339 
4340 static inline void
4341 print_fdir_flow_type(uint32_t flow_types_mask)
4342 {
4343 	int i;
4344 	char *p;
4345 
4346 	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
4347 		if (!(flow_types_mask & (1 << i)))
4348 			continue;
4349 		p = flowtype_to_str(i);
4350 		if (p)
4351 			printf(" %s", p);
4352 		else
4353 			printf(" unknown");
4354 	}
4355 	printf("\n");
4356 }
4357 
4358 static int
4359 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
4360 		    struct rte_eth_fdir_stats *fdir_stat)
4361 {
4362 	int ret = -ENOTSUP;
4363 
4364 #ifdef RTE_NET_I40E
4365 	if (ret == -ENOTSUP) {
4366 		ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
4367 		if (!ret)
4368 			ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
4369 	}
4370 #endif
4371 #ifdef RTE_NET_IXGBE
4372 	if (ret == -ENOTSUP) {
4373 		ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
4374 		if (!ret)
4375 			ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
4376 	}
4377 #endif
4378 	switch (ret) {
4379 	case 0:
4380 		break;
4381 	case -ENOTSUP:
4382 		printf("\n FDIR is not supported on port %-2d\n",
4383 			port_id);
4384 		break;
4385 	default:
4386 		printf("programming error: (%s)\n", strerror(-ret));
4387 		break;
4388 	}
4389 	return ret;
4390 }
4391 
4392 void
4393 fdir_get_infos(portid_t port_id)
4394 {
4395 	struct rte_eth_fdir_stats fdir_stat;
4396 	struct rte_eth_fdir_info fdir_info;
4397 
4398 	static const char *fdir_stats_border = "########################";
4399 
4400 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4401 		return;
4402 
4403 	memset(&fdir_info, 0, sizeof(fdir_info));
4404 	memset(&fdir_stat, 0, sizeof(fdir_stat));
4405 	if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
4406 		return;
4407 
4408 	printf("\n  %s FDIR infos for port %-2d     %s\n",
4409 	       fdir_stats_border, port_id, fdir_stats_border);
4410 	printf("  MODE: ");
4411 	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
4412 		printf("  PERFECT\n");
4413 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
4414 		printf("  PERFECT-MAC-VLAN\n");
4415 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4416 		printf("  PERFECT-TUNNEL\n");
4417 	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
4418 		printf("  SIGNATURE\n");
4419 	else
4420 		printf("  DISABLE\n");
4421 	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
4422 		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
4423 		printf("  SUPPORTED FLOW TYPE: ");
4424 		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
4425 	}
4426 	printf("  FLEX PAYLOAD INFO:\n");
4427 	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
4428 	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
4429 	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
4430 		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
4431 		fdir_info.flex_payload_unit,
4432 		fdir_info.max_flex_payload_segment_num,
4433 		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
4434 	printf("  MASK: ");
4435 	print_fdir_mask(&fdir_info.mask);
4436 	if (fdir_info.flex_conf.nb_payloads > 0) {
4437 		printf("  FLEX PAYLOAD SRC OFFSET:");
4438 		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4439 	}
4440 	if (fdir_info.flex_conf.nb_flexmasks > 0) {
4441 		printf("  FLEX MASK CFG:");
4442 		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
4443 	}
4444 	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
4445 	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
4446 	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
4447 	       fdir_info.guarant_spc, fdir_info.best_spc);
4448 	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
4449 	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
4450 	       "  add:	         %-10"PRIu64"  remove:        %"PRIu64"\n"
4451 	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
4452 	       fdir_stat.collision, fdir_stat.free,
4453 	       fdir_stat.maxhash, fdir_stat.maxlen,
4454 	       fdir_stat.add, fdir_stat.remove,
4455 	       fdir_stat.f_add, fdir_stat.f_remove);
4456 	printf("  %s############################%s\n",
4457 	       fdir_stats_border, fdir_stats_border);
4458 }
4459 
4460 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */
4461 
4462 void
4463 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
4464 {
4465 	struct rte_port *port;
4466 	struct rte_eth_fdir_flex_conf *flex_conf;
4467 	int i, idx = 0;
4468 
4469 	port = &ports[port_id];
4470 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4471 	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
4472 		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
4473 			idx = i;
4474 			break;
4475 		}
4476 	}
4477 	if (i >= RTE_ETH_FLOW_MAX) {
4478 		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
4479 			idx = flex_conf->nb_flexmasks;
4480 			flex_conf->nb_flexmasks++;
4481 		} else {
4482 			printf("The flex mask table is full. Cannot set flex"
4483 				" mask for flow_type(%u).\n", cfg->flow_type);
4484 			return;
4485 		}
4486 	}
4487 	rte_memcpy(&flex_conf->flex_mask[idx],
4488 			 cfg,
4489 			 sizeof(struct rte_eth_fdir_flex_mask));
4490 }
4491 
4492 void
4493 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
4494 {
4495 	struct rte_port *port;
4496 	struct rte_eth_fdir_flex_conf *flex_conf;
4497 	int i, idx = 0;
4498 
4499 	port = &ports[port_id];
4500 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
4501 	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
4502 		if (cfg->type == flex_conf->flex_set[i].type) {
4503 			idx = i;
4504 			break;
4505 		}
4506 	}
4507 	if (i >= RTE_ETH_PAYLOAD_MAX) {
4508 		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
4509 			idx = flex_conf->nb_payloads;
4510 			flex_conf->nb_payloads++;
4511 		} else {
4512 			printf("The flex payload table is full. Cannot set"
4513 				" flex payload for type(%u).\n", cfg->type);
4514 			return;
4515 		}
4516 	}
4517 	rte_memcpy(&flex_conf->flex_set[idx],
4518 			 cfg,
4519 			 sizeof(struct rte_eth_flex_payload_cfg));
4521 }
4522 
4523 void
4524 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
4525 {
4526 #ifdef RTE_NET_IXGBE
4527 	int diag;
4528 
4529 	if (is_rx)
4530 		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
4531 	else
4532 		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
4533 
4534 	if (diag == 0)
4535 		return;
4536 	printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
4537 			is_rx ? "rx" : "tx", port_id, diag);
4538 	return;
4539 #endif
4540 	printf("VF %s setting not supported for port %d\n",
4541 			is_rx ? "Rx" : "Tx", port_id);
4542 	RTE_SET_USED(vf);
4543 	RTE_SET_USED(on);
4544 }
4545 
4546 int
4547 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
4548 {
4549 	int diag;
4550 	struct rte_eth_link link;
4551 	int ret;
4552 
4553 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4554 		return 1;
4555 	ret = eth_link_get_nowait_print_err(port_id, &link);
4556 	if (ret < 0)
4557 		return 1;
4558 	if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
4559 	    rate > link.link_speed) {
4560 		printf("Invalid rate value %u: bigger than link speed %u\n",
4561 			rate, link.link_speed);
4562 		return 1;
4563 	}
4564 	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
4565 	if (diag == 0)
4566 		return diag;
4567 	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
4568 		port_id, diag);
4569 	return diag;
4570 }
4571 
4572 int
4573 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
4574 {
4575 	int diag = -ENOTSUP;
4576 
4577 	RTE_SET_USED(vf);
4578 	RTE_SET_USED(rate);
4579 	RTE_SET_USED(q_msk);
4580 
4581 #ifdef RTE_NET_IXGBE
4582 	if (diag == -ENOTSUP)
4583 		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
4584 						       q_msk);
4585 #endif
4586 #ifdef RTE_NET_BNXT
4587 	if (diag == -ENOTSUP)
4588 		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
4589 #endif
4590 	if (diag == 0)
4591 		return diag;
4592 
4593 	printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
4594 		port_id, diag);
4595 	return diag;
4596 }
4597 
4598 /*
4599  * Functions to manage the set of filtered Multicast MAC addresses.
4600  *
4601  * A pool of filtered multicast MAC addresses is associated with each port.
4602  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
4603  * The address of the pool and the number of valid multicast MAC addresses
4604  * recorded in the pool are stored in the fields "mc_addr_pool" and
4605  * "mc_addr_nb" of the "rte_port" data structure.
4606  *
4607  * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
4608  * to be supplied a contiguous array of multicast MAC addresses.
4609  * To comply with this constraint, the set of multicast addresses recorded
4610  * into the pool are systematically compacted at the beginning of the pool.
4611  * Hence, when a multicast address is removed from the pool, all following
4612  * addresses, if any, are copied back to keep the set contiguous.
4613  */
4614 #define MCAST_POOL_INC 32
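
/*
 * Editor's note: a worked example of the growth rule described above.
 * The pool is reallocated only when the address count crosses a multiple
 * of MCAST_POOL_INC, so the capacity held for mc_addr_nb addresses is the
 * count rounded up to the next chunk (example_pool_capacity is
 * hypothetical).
 */
#if 0
static size_t
example_pool_capacity(uint32_t mc_addr_nb)
{
	uint32_t chunks = (mc_addr_nb + MCAST_POOL_INC - 1) / MCAST_POOL_INC;

	return chunks * MCAST_POOL_INC * sizeof(struct rte_ether_addr);
}
#endif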
4615 
4616 static int
4617 mcast_addr_pool_extend(struct rte_port *port)
4618 {
4619 	struct rte_ether_addr *mc_pool;
4620 	size_t mc_pool_size;
4621 
4622 	/*
4623 	 * If a free entry is available at the end of the pool, just
4624 	 * increment the number of recorded multicast addresses.
4625 	 */
4626 	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
4627 		port->mc_addr_nb++;
4628 		return 0;
4629 	}
4630 
4631 	/*
4632 	 * [re]allocate a pool with MCAST_POOL_INC more entries.
4633 	 * The previous test guarantees that port->mc_addr_nb is a multiple
4634 	 * of MCAST_POOL_INC.
4635 	 */
4636 	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
4637 						    MCAST_POOL_INC);
4638 	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
4639 						mc_pool_size);
4640 	if (mc_pool == NULL) {
4641 		printf("allocation of pool of %u multicast addresses failed\n",
4642 		       port->mc_addr_nb + MCAST_POOL_INC);
4643 		return -ENOMEM;
4644 	}
4645 
4646 	port->mc_addr_pool = mc_pool;
4647 	port->mc_addr_nb++;
4648 	return 0;
4649 
4650 }
4651 
4652 static void
4653 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
4654 {
4655 	if (mcast_addr_pool_extend(port) != 0)
4656 		return;
4657 	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
4658 }
4659 
4660 static void
4661 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
4662 {
4663 	port->mc_addr_nb--;
4664 	if (addr_idx == port->mc_addr_nb) {
4665 		/* No need to recompact the set of multicast addresses. */
4666 		if (port->mc_addr_nb == 0) {
4667 			/* free the pool of multicast addresses. */
4668 			free(port->mc_addr_pool);
4669 			port->mc_addr_pool = NULL;
4670 		}
4671 		return;
4672 	}
4673 	memmove(&port->mc_addr_pool[addr_idx],
4674 		&port->mc_addr_pool[addr_idx + 1],
4675 		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
4676 }
4677 
4678 static int
4679 eth_port_multicast_addr_list_set(portid_t port_id)
4680 {
4681 	struct rte_port *port;
4682 	int diag;
4683 
4684 	port = &ports[port_id];
4685 	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
4686 					    port->mc_addr_nb);
4687 	if (diag < 0)
4688 		printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
4689 			port_id, port->mc_addr_nb, diag);
4690 
4691 	return diag;
4692 }
4693 
4694 void
4695 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
4696 {
4697 	struct rte_port *port;
4698 	uint32_t i;
4699 
4700 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4701 		return;
4702 
4703 	port = &ports[port_id];
4704 
4705 	/*
4706 	 * Check that the added multicast MAC address is not already recorded
4707 	 * in the pool of multicast addresses.
4708 	 */
4709 	for (i = 0; i < port->mc_addr_nb; i++) {
4710 		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
4711 			printf("multicast address already filtered by port\n");
4712 			return;
4713 		}
4714 	}
4715 
4716 	mcast_addr_pool_append(port, mc_addr);
4717 	if (eth_port_multicast_addr_list_set(port_id) < 0)
4718 		/* Rollback on failure, remove the address from the pool */
4719 		mcast_addr_pool_remove(port, i);
4720 }
4721 
4722 void
4723 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
4724 {
4725 	struct rte_port *port;
4726 	uint32_t i;
4727 
4728 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4729 		return;
4730 
4731 	port = &ports[port_id];
4732 
4733 	/*
4734 	 * Search the pool of multicast MAC addresses for the removed address.
4735 	 */
4736 	for (i = 0; i < port->mc_addr_nb; i++) {
4737 		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
4738 			break;
4739 	}
4740 	if (i == port->mc_addr_nb) {
4741 		printf("multicast address not filtered by port %d\n", port_id);
4742 		return;
4743 	}
4744 
4745 	mcast_addr_pool_remove(port, i);
4746 	if (eth_port_multicast_addr_list_set(port_id) < 0)
4747 		/* Rollback on failure, add the address back into the pool */
4748 		mcast_addr_pool_append(port, mc_addr);
4749 }
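
/*
 * Editor's note: an illustration of the add/remove round trip above; the
 * address (the all-hosts IPv4 multicast group MAC) and the helper name
 * are examples only.
 */
#if 0
static void
example_mcast_round_trip(portid_t pid)
{
	struct rte_ether_addr grp = {	/* 01:00:5e:00:00:01 */
		.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }
	};

	mcast_addr_add(pid, &grp);	/* pool grown and re-synced to HW */
	mcast_addr_remove(pid, &grp);	/* pool compacted and re-synced */
}
#endif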
4750 
4751 void
4752 port_dcb_info_display(portid_t port_id)
4753 {
4754 	struct rte_eth_dcb_info dcb_info;
4755 	uint16_t i;
4756 	int ret;
4757 	static const char *border = "================";
4758 
4759 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4760 		return;
4761 
4762 	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
4763 	if (ret) {
4764 		printf("\n Failed to get DCB info on port %-2d\n",
4765 			port_id);
4766 		return;
4767 	}
4768 	printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
4769 	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
4770 	printf("\n  TC :        ");
4771 	for (i = 0; i < dcb_info.nb_tcs; i++)
4772 		printf("\t%4d", i);
4773 	printf("\n  Priority :  ");
4774 	for (i = 0; i < dcb_info.nb_tcs; i++)
4775 		printf("\t%4d", dcb_info.prio_tc[i]);
4776 	printf("\n  BW percent :");
4777 	for (i = 0; i < dcb_info.nb_tcs; i++)
4778 		printf("\t%4d%%", dcb_info.tc_bws[i]);
4779 	printf("\n  RXQ base :  ");
4780 	for (i = 0; i < dcb_info.nb_tcs; i++)
4781 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
4782 	printf("\n  RXQ number :");
4783 	for (i = 0; i < dcb_info.nb_tcs; i++)
4784 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
4785 	printf("\n  TXQ base :  ");
4786 	for (i = 0; i < dcb_info.nb_tcs; i++)
4787 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
4788 	printf("\n  TXQ number :");
4789 	for (i = 0; i < dcb_info.nb_tcs; i++)
4790 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
4791 	printf("\n");
4792 }
4793 
4794 uint8_t *
4795 open_file(const char *file_path, uint32_t *size)
4796 {
4797 	int fd = open(file_path, O_RDONLY);
4798 	off_t pkg_size;
4799 	uint8_t *buf = NULL;
4800 	int ret = 0;
4801 	struct stat st_buf;
4802 
4803 	if (size)
4804 		*size = 0;
4805 
4806 	if (fd == -1) {
4807 		printf("%s: Failed to open %s\n", __func__, file_path);
4808 		return buf;
4809 	}
4810 
4811 	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
4812 		close(fd);
4813 		printf("%s: File operations failed\n", __func__);
4814 		return buf;
4815 	}
4816 
4817 	pkg_size = st_buf.st_size;
4818 	if (pkg_size < 0) {
4819 		close(fd);
4820 		printf("%s: File operations failed\n", __func__);
4821 		return buf;
4822 	}
4823 
4824 	buf = (uint8_t *)malloc(pkg_size);
4825 	if (!buf) {
4826 		close(fd);
4827 		printf("%s: Failed to allocate memory\n", __func__);
4828 		return buf;
4829 	}
4830 
4831 	ret = read(fd, buf, pkg_size);
4832 	if (ret != pkg_size) { /* error or short read */
4833 		close(fd);
4834 		printf("%s: File read operation failed\n", __func__);
4835 		close_file(buf);
4836 		return NULL;
4837 	}
4838 
4839 	if (size)
4840 		*size = pkg_size;
4841 
4842 	close(fd);
4843 
4844 	return buf;
4845 }
4846 
4847 int
4848 save_file(const char *file_path, uint8_t *buf, uint32_t size)
4849 {
4850 	FILE *fh = fopen(file_path, "wb");
4851 
4852 	if (fh == NULL) {
4853 		printf("%s: Failed to open %s\n", __func__, file_path);
4854 		return -1;
4855 	}
4856 
4857 	if (fwrite(buf, 1, size, fh) != size) {
4858 		fclose(fh);
4859 		printf("%s: File write operation failed\n", __func__);
4860 		return -1;
4861 	}
4862 
4863 	fclose(fh);
4864 
4865 	return 0;
4866 }
4867 
4868 int
4869 close_file(uint8_t *buf)
4870 {
4871 	if (buf) {
4872 		free((void *)buf);
4873 		return 0;
4874 	}
4875 
4876 	return -1;
4877 }
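
/*
 * Editor's note: a round-trip sketch tying the three file helpers above
 * together; example_copy_file and its paths are hypothetical.
 */
#if 0
static int
example_copy_file(const char *src, const char *dst)
{
	uint32_t size = 0;
	uint8_t *buf = open_file(src, &size);
	int ret;

	if (buf == NULL)
		return -1;
	ret = save_file(dst, buf, size);
	close_file(buf);	/* frees the buffer allocated by open_file() */
	return ret;
}
#endif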
4878 
4879 void
4880 port_queue_region_info_display(portid_t port_id, void *buf)
4881 {
4882 #ifdef RTE_NET_I40E
4883 	uint16_t i, j;
4884 	struct rte_pmd_i40e_queue_regions *info =
4885 		(struct rte_pmd_i40e_queue_regions *)buf;
4886 	static const char *queue_region_info_stats_border = "-------";
4887 
4888 	if (!info->queue_region_number)
4889 		printf("no queue region has been set on this port");
4890 
4891 	printf("\n	%s All queue region info for port=%2d %s",
4892 			queue_region_info_stats_border, port_id,
4893 			queue_region_info_stats_border);
4894 	printf("\n	queue_region_number: %-14u \n",
4895 			info->queue_region_number);
4896 
4897 	for (i = 0; i < info->queue_region_number; i++) {
4898 		printf("\n	region_id: %-14u queue_number: %-14u "
4899 			"queue_start_index: %-14u \n",
4900 			info->region[i].region_id,
4901 			info->region[i].queue_num,
4902 			info->region[i].queue_start_index);
4903 
4904 		printf("  user_priority_num is	%-14u :",
4905 					info->region[i].user_priority_num);
4906 		for (j = 0; j < info->region[i].user_priority_num; j++)
4907 			printf(" %-14u ", info->region[i].user_priority[j]);
4908 
4909 		printf("\n	flowtype_num is  %-14u :",
4910 				info->region[i].flowtype_num);
4911 		for (j = 0; j < info->region[i].flowtype_num; j++)
4912 			printf(" %-14u ", info->region[i].hw_flowtype[j]);
4913 	}
4914 #else
4915 	RTE_SET_USED(port_id);
4916 	RTE_SET_USED(buf);
4917 #endif
4918 
4919 	printf("\n\n");
4920 }
4921 
4922 void
4923 show_macs(portid_t port_id)
4924 {
4925 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
4926 	struct rte_eth_dev_info dev_info;
4927 	struct rte_ether_addr *addr;
4928 	uint32_t i, num_macs = 0;
4929 	struct rte_eth_dev *dev;
4930 
4931 	if (eth_dev_info_get_print_err(port_id, &dev_info))
4932 		return;
4933 
4934 	dev = &rte_eth_devices[port_id];
4935 
4936 	for (i = 0; i < dev_info.max_mac_addrs; i++) {
4937 		addr = &dev->data->mac_addrs[i];
4938 
4939 		/* skip zero address */
4940 		if (rte_is_zero_ether_addr(addr))
4941 			continue;
4942 
4943 		num_macs++;
4944 	}
4945 
4946 	printf("Number of MAC addresses added: %u\n", num_macs);
4947 
4948 	for (i = 0; i < dev_info.max_mac_addrs; i++) {
4949 		addr = &dev->data->mac_addrs[i];
4950 
4951 		/* skip zero address */
4952 		if (rte_is_zero_ether_addr(addr))
4953 			continue;
4954 
4955 		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4956 		printf("  %s\n", buf);
4957 	}
4958 }
4959 
4960 void
4961 show_mcast_macs(portid_t port_id)
4962 {
4963 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
4964 	struct rte_ether_addr *addr;
4965 	struct rte_port *port;
4966 	uint32_t i;
4967 
4968 	port = &ports[port_id];
4969 
4970 	printf("Number of multicast MAC addresses added: %u\n", port->mc_addr_nb);
4971 
4972 	for (i = 0; i < port->mc_addr_nb; i++) {
4973 		addr = &port->mc_addr_pool[i];
4974 
4975 		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
4976 		printf("  %s\n", buf);
4977 	}
4978 }
4979