xref: /dpdk/app/test-pmd/config.c (revision 41fc015197b9c135b81e565dba5f1e6da83545f4)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_I40E_PMD
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
#include <cmdline_parse_etheraddr.h>

#include "testpmd.h"

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

struct rss_type_info {
	char str[32];
	uint64_t rss_type;
};

static const struct rss_type_info rss_type_table[] = {
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
};

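/* Print an Ethernet address preceded by the given label. */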
static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

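/*
 * Display the basic statistics of a port (packets, bytes, errors) and the
 * RX/TX throughput measured since the previous call, either as aggregate
 * counters or per statistics register when queue-stats mapping is enabled.
 */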
void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
	uint64_t mpps_rx, mpps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;
	portid_t pid;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf("  RX-nombuf:  %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
		       "%-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}
	else {
		printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
		       "    RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf("  RX-errors:  %10"PRIu64"\n", stats.ierrors);
		printf("  RX-nombuf:               %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
		       "    TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets: %10"PRIu64
			       "    RX-errors: %10"PRIu64
			       "    RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets: %10"PRIu64
			       "                             TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

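	/*
	 * Compute packets per second from the TSC delta since the previous
	 * call: pps = diff_pkts * tsc_hz / diff_cycles. On the first call
	 * prev_cycles[] is still 0, so diff_cycles stays 0 and the rates
	 * below are reported as 0 instead of a bogus value.
	 */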
	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	if (diff_cycles > 0)
		diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
	printf("\n  Throughput (since last show)\n");
	printf("  Rx-pps: %12"PRIu64"\n  Tx-pps: %12"PRIu64"\n",
			mpps_rx, mpps_tx);

	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

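/* Reset the basic statistics of a port to zero. */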
void
nic_stats_clear(portid_t port_id)
{
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}

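/*
 * Display the extended statistics (xstats) of a port. The names and the
 * values are retrieved separately: a first call sizes the arrays, then the
 * id-name lookup table and the counters themselves are fetched.
 */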
void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
			xstats_names[idx_xstat].name,
			xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

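/* Reset the extended statistics of a port. */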
void
nic_xstats_clear(portid_t port_id)
{
	rte_eth_xstats_reset(port_id);
}

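/*
 * Display the RX/TX queue to statistics-register mappings configured for a
 * port, when the port supports queue-stats mapping.
 */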
void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;
	portid_t pid;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf("  %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}

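/* Display the configuration and status of an RX queue. */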
void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
			"RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
		(qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
		(qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
		(qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
	printf("\n");
}

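/* Display the configuration and status of a TX queue. */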
void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
			"TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
		(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\n");
}

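/*
 * Display general information about a port: MAC address, driver, link
 * status and speed, MTU, VLAN offloads, supported RSS flow types, and the
 * RX/TX queue and descriptor limits reported by the driver.
 */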
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	portid_t pid;
	uint16_t mtu;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	port = &ports[port_id];
	rte_eth_link_get_nowait(port_id, &link);
	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	printf("\nDriver name: %s", dev_info.driver_name);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
							port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload:\n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on\n");
		else
			printf("  strip off\n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("  filter on\n");
		else
			printf("  filter off\n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("  qinq(extend) on\n");
		else
			printf("  qinq(extend) off\n");
	}

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
		     i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			if (p)
				printf("  %s\n", p);
			else
				printf("  user defined %d\n", i);
		}
	}

	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
	printf("Maximum configurable length of RX packet: %u\n",
		dev_info.max_rx_pktlen);
	if (dev_info.max_vfs)
		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
	if (dev_info.max_vmdq_pools)
		printf("Maximum number of VMDq pools: %u\n",
			dev_info.max_vmdq_pools);

	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
}

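/*
 * Display which RX/TX offload capabilities a port supports, and whether
 * each supported offload is currently enabled in the port configuration.
 */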
void
port_offload_cap_display(portid_t port_id)
{
	struct rte_eth_dev_info dev_info;
	static const char *info_border = "************";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_info_get(port_id, &dev_info);

	printf("\n%s Port %d supported offload features: %s\n",
		info_border, port_id, info_border);

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
		printf("VLAN stripped:                 ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
		printf("Double VLANs stripped:         ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_EXTEND)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		printf("RX IPv4 checksum:              ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
		printf("RX UDP checksum:               ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
		printf("RX TCP checksum:               ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("RX Outer IPv4 checksum:        ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
		printf("Large receive offload:         ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TCP_LRO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
		printf("VLAN insert:                   ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VLAN_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
		printf("HW timestamp:                  ");
		if (ports[port_id].dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_TIMESTAMP)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Double VLANs insert:           ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_QINQ_INSERT)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
		printf("TX IPv4 checksum:              ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
		printf("TX UDP checksum:               ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
		printf("TX TCP checksum:               ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
		printf("TX SCTP checksum:              ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("TX Outer IPv4 checksum:        ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
		printf("TX TCP segmentation:           ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_TCP_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
		printf("TX UDP segmentation:           ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_UDP_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
		printf("TSO for VXLAN tunnel packet:   ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
		printf("TSO for GRE tunnel packet:     ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_GRE_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
		printf("TSO for IPIP tunnel packet:    ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_IPIP_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
		printf("TSO for GENEVE tunnel packet:  ");
		if (ports[port_id].dev_conf.txmode.offloads &
		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
			printf("on\n");
		else
			printf("off\n");
	}
}

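/*
 * Return 0 when the port identifier refers to an attached device (or is
 * RTE_PORT_ALL), 1 otherwise, optionally printing a warning.
 */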
int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}
	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

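	/*
	 * Extract bits [l_bit, h_bit] of the register: shift the low bit
	 * down to position 0, then mask off everything above the field.
	 * For example, bits [4, 7] of 0x000000F0 yield 0xF.
	 */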
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

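	/*
	 * Largest value that fits in the field: 2^width - 1, where the
	 * width is h_bit - l_bit + 1. A full 32-bit field would overflow
	 * the shift, hence the 0xFFFFFFFF special case.
	 */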
	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
				(unsigned)value, (unsigned)value,
				(unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

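/* Change the MTU of a port. */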
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}

/* Generic flow management functions. */

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items. */
static const struct {
	const char *name;
	size_t size;
} flow_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
};

/** Compute storage space needed by item specification. */
static void
flow_item_spec_size(const struct rte_flow_item *item,
		    size_t *size, size_t *pad)
{
	if (!item->spec) {
		*size = 0;
		goto empty;
	}
	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		*size = offsetof(struct rte_flow_item_raw, pattern) +
			spec.raw->length * sizeof(*spec.raw->pattern);
		break;
	default:
		*size = flow_item[item->type].size;
		break;
	}
empty:
	*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct {
	const char *name;
	size_t size;
} flow_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, 0),
	MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
};

/** Compute storage space needed by action configuration. */
static void
flow_action_conf_size(const struct rte_flow_action *action,
		      size_t *size, size_t *pad)
{
	if (!action->conf) {
		*size = 0;
		goto empty;
	}
	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
		} conf;

	case RTE_FLOW_ACTION_TYPE_RSS:
		conf.rss = action->conf;
		*size = offsetof(struct rte_flow_action_rss, queue) +
			conf.rss->num * sizeof(*conf.rss->queue);
		break;
	default:
		*size = flow_action[action->type].size;
		break;
	}
empty:
	*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	struct port_flow *pf = NULL;
	size_t tmp;
	size_t pad;
	size_t off1 = 0;
	size_t off2 = 0;
	int err = ENOTSUP;

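	/*
	 * Two-pass scheme: the first pass (pf == NULL) only computes the
	 * space needed for the copied pattern/actions (off1) and their
	 * spec/conf payloads (off2); once pf is allocated below, execution
	 * jumps back here and the same loops perform the actual copies.
	 */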
store:
	item = pattern;
	if (pf)
		pf->pattern = (void *)&pf->data[off1];
	do {
		struct rte_flow_item *dst = NULL;

		if ((unsigned int)item->type >= RTE_DIM(flow_item) ||
		    !flow_item[item->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, item, sizeof(*item));
		off1 += sizeof(*item);
		flow_item_spec_size(item, &tmp, &pad);
		if (item->spec) {
			if (pf)
				dst->spec = memcpy(pf->data + off2,
						   item->spec, tmp);
			off2 += tmp + pad;
		}
		if (item->last) {
			if (pf)
				dst->last = memcpy(pf->data + off2,
						   item->last, tmp);
			off2 += tmp + pad;
		}
		if (item->mask) {
			if (pf)
				dst->mask = memcpy(pf->data + off2,
						   item->mask, tmp);
			off2 += tmp + pad;
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	action = actions;
	if (pf)
		pf->actions = (void *)&pf->data[off1];
	do {
		struct rte_flow_action *dst = NULL;

		if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
		    !flow_action[action->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, action, sizeof(*action));
		off1 += sizeof(*action);
		flow_action_conf_size(action, &tmp, &pad);
		if (action->conf) {
			if (pf)
				dst->conf = memcpy(pf->data + off2,
						   action->conf, tmp);
			off2 += tmp + pad;
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	if (pf != NULL)
		return pf;
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double));
	pf = calloc(1, tmp + off1 + off2);
	if (pf == NULL)
		err = errno;
	else {
		*pf = (const struct port_flow){
			.size = tmp + off1 + off2,
			.attr = *attr,
		};
		tmp -= offsetof(struct port_flow, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
notsup:
	rte_errno = err;
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("Caught error type %d (%s): %s%s\n",
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)");
	return -err;
}

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
	return 0;
}

/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id;
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow)
		return port_flow_complain(&error);
	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first");
			rte_flow_destroy(port_id, flow, NULL);
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	} else
		id = 0;
	pf = port_flow_new(attr, pattern, actions);
	if (!pf) {
		int err = rte_errno;

		printf("Cannot allocate flow: %s\n", rte_strerror(err));
		rte_flow_destroy(port_id, flow, NULL);
		return -err;
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		ret = port_flow_complain(&error);
		if (port_id_is_invalid(port_id, DISABLED_WARN) ||
		    port_id == (portid_t)RTE_PORT_ALL)
			return ret;
	}
	port = &ports[port_id];
	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		enum rte_flow_action_type action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
	} query;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	if ((unsigned int)action >= RTE_DIM(flow_action) ||
	    !flow_action[action].name)
		name = "unknown";
	else
		name = flow_action[action].name;
	switch (action) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		printf("Cannot query action type %d (%s)\n", action, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action, name);
		break;
	}
	return 0;
}

/** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (pf->attr.group == group[i])
					break;
			if (i == n)
				continue;
		}
		tmp = &list;
		while (*tmp &&
		       (pf->attr.group > (*tmp)->attr.group ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority > (*tmp)->attr.priority) ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority == (*tmp)->attr.priority &&
			 pf->id > (*tmp)->id)))
			tmp = &(*tmp)->tmp;
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->pattern;
		const struct rte_flow_action *action = pf->actions;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t",
		       pf->id,
		       pf->attr.group,
		       pf->attr.priority,
		       pf->attr.ingress ? 'i' : '-',
		       pf->attr.egress ? 'e' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", flow_item[item->type].name);
			++item;
		}
		printf("=>");
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", flow_action[action->type].name);
			++action;
		}
		printf("\n");
	}
}

/** Restrict ingress traffic to the defined flow rules. */
int
port_flow_isolate(portid_t port_id, int set)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(port_id, set, &error))
		return port_flow_complain(&error);
	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
	       port_id,
	       set ? "now restricted" : "not restricted anymore");
	return 0;
}

/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}

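/*
 * Look up the memzone holding the HW descriptor ring of a queue. The zone
 * name is rebuilt from the driver name, ring name, port and queue ids,
 * presumably matching the name the PMD used when creating the ring.
 */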
static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
					(unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
			(unsigned)txd.lo_dword.words.lo,
			(unsigned)txd.lo_dword.words.hi,
			(unsigned)txd.hi_dword.words.lo,
			(unsigned)txd.hi_dword.words.hi);
}

void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}

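/* Display the list of lcore ids used to forward packets. */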
1654 fwd_lcores_config_display(void)
1655 {
1656 	lcoreid_t lc_id;
1657 
1658 	printf("List of forwarding lcores:");
1659 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
1660 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
1661 	printf("\n");
1662 }
void
rxtx_config_display(void)
{
	portid_t pid;

	printf("  %s packet forwarding%s packets/burst=%d\n",
	       cur_fwd_eng->fwd_mode_name,
	       retry_enabled == 0 ? "" : " with retry",
	       nb_pkt_per_burst);

	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
		printf("  packet len=%u - nb packet segments=%d\n",
				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);

	RTE_ETH_FOREACH_DEV(pid) {
		struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf;
		struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf;

		printf("  port %d:\n", (unsigned int)pid);
		printf("  CRC stripping %s\n",
				(ports[pid].dev_conf.rxmode.offloads &
				 DEV_RX_OFFLOAD_CRC_STRIP) ?
				"enabled" : "disabled");
		printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
				nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
		printf("  RX threshold registers: pthresh=%d hthresh=%d "
		       " wthresh=%d\n",
				rx_conf->rx_thresh.pthresh,
				rx_conf->rx_thresh.hthresh,
				rx_conf->rx_thresh.wthresh);
		printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
				nb_txq, nb_txd, tx_conf->tx_free_thresh);
		printf("  TX threshold registers: pthresh=%d hthresh=%d "
		       " wthresh=%d\n",
				tx_conf->tx_thresh.pthresh,
				tx_conf->tx_thresh.hthresh,
				tx_conf->tx_thresh.wthresh);
		printf("  TX RS bit threshold=%d - TXQ offloads=0x%"PRIx64"\n",
				tx_conf->tx_rs_thresh, tx_conf->offloads);
	}
}

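/*
 * Display the entries of the RSS redirection table (RETA) of a port that
 * are selected by the mask in reta_conf.
 */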
void
port_rss_reta_info(portid_t port_id,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t nb_entries)
{
	uint16_t i, idx, shift;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
	if (ret != 0) {
		printf("Failed to get RSS RETA info, return code = %d\n", ret);
		return;
	}

	for (i = 0; i < nb_entries; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;
		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
					i, reta_conf[idx].reta[shift]);
	}
}

/*
 * Display the RSS hash functions of a port and, optionally, the RSS hash
 * key of the port.
 */
void
port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
{
	struct rte_eth_rss_conf rss_conf;
	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
	uint64_t rss_hf;
	uint8_t i;
	int diag;
	struct rte_eth_dev_info dev_info;
	uint8_t hash_key_size;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.hash_key_size > 0 &&
			dev_info.hash_key_size <= sizeof(rss_key))
		hash_key_size = dev_info.hash_key_size;
	else {
		printf("dev_info did not provide a valid hash key size\n");
		return;
	}

	rss_conf.rss_hf = 0;
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (!strcmp(rss_info, rss_type_table[i].str))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}

	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	rss_conf.rss_key_len = hash_key_size;
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag != 0) {
		switch (diag) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		default:
			printf("operation failed - diag=%d\n", diag);
			break;
		}
		return;
	}
	rss_hf = rss_conf.rss_hf;
	if (rss_hf == 0) {
		printf("RSS disabled\n");
		return;
	}
	printf("RSS functions:\n ");
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (rss_hf & rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	}
	printf("\n");
	if (!show_rss_key)
		return;
	printf("RSS key:\n");
	for (i = 0; i < hash_key_size; i++)
		printf("%02X", rss_key[i]);
	printf("\n");
}

1806 void
1807 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
1808 			 uint hash_key_len)
1809 {
1810 	struct rte_eth_rss_conf rss_conf;
1811 	int diag;
1812 	unsigned int i;
1813 
1814 	rss_conf.rss_key = NULL;
1815 	rss_conf.rss_key_len = hash_key_len;
1816 	rss_conf.rss_hf = 0;
1817 	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1818 		if (!strcmp(rss_type_table[i].str, rss_type))
1819 			rss_conf.rss_hf = rss_type_table[i].rss_type;
1820 	}
1821 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1822 	if (diag == 0) {
1823 		rss_conf.rss_key = hash_key;
1824 		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
1825 	}
1826 	if (diag == 0)
1827 		return;
1828 
1829 	switch (diag) {
1830 	case -ENODEV:
1831 		printf("port index %d invalid\n", port_id);
1832 		break;
1833 	case -ENOTSUP:
1834 		printf("operation not supported by device\n");
1835 		break;
1836 	default:
1837 		printf("operation failed - diag=%d\n", diag);
1838 		break;
1839 	}
1840 }
1841 
1842 /*
 * Set up the forwarding configuration for each logical core.
1844  */
1845 static void
1846 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
1847 {
1848 	streamid_t nb_fs_per_lcore;
1849 	streamid_t nb_fs;
1850 	streamid_t sm_id;
1851 	lcoreid_t  nb_extra;
1852 	lcoreid_t  nb_fc;
1853 	lcoreid_t  nb_lc;
1854 	lcoreid_t  lc_id;
1855 
1856 	nb_fs = cfg->nb_fwd_streams;
1857 	nb_fc = cfg->nb_fwd_lcores;
1858 	if (nb_fs <= nb_fc) {
1859 		nb_fs_per_lcore = 1;
1860 		nb_extra = 0;
1861 	} else {
1862 		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
1863 		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
1864 	}
1865 
1866 	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
1867 	sm_id = 0;
1868 	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
1869 		fwd_lcores[lc_id]->stream_idx = sm_id;
1870 		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
1871 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1872 	}
1873 
1874 	/*
1875 	 * Assign extra remaining streams, if any.
1876 	 */
1877 	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
1878 	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
1879 		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
1880 		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
1881 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1882 	}
1883 }
1884 
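/*
 * For example, with 4 forwarding ports: the "paired" topology maps
 * 0<->1 and 2<->3, the "chained" topology maps 0->1->2->3->0, and the
 * "loop" topology makes every port transmit on itself.
 */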
1885 static portid_t
1886 fwd_topology_tx_port_get(portid_t rxp)
1887 {
1888 	static int warning_once = 1;
1889 
1890 	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
1891 
1892 	switch (port_topology) {
1893 	default:
1894 	case PORT_TOPOLOGY_PAIRED:
1895 		if ((rxp & 0x1) == 0) {
1896 			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
1897 				return rxp + 1;
1898 			if (warning_once) {
1899 				printf("\nWarning! port-topology=paired"
1900 				       " and odd forward ports number,"
1901 				       " the last port will pair with"
1902 				       " itself.\n\n");
1903 				warning_once = 0;
1904 			}
1905 			return rxp;
1906 		}
1907 		return rxp - 1;
1908 	case PORT_TOPOLOGY_CHAINED:
1909 		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
1910 	case PORT_TOPOLOGY_LOOP:
1911 		return rxp;
1912 	}
1913 }
1914 
1915 static void
1916 simple_fwd_config_setup(void)
1917 {
1918 	portid_t i;
1919 
1920 	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
1921 	cur_fwd_config.nb_fwd_streams =
1922 		(streamid_t) cur_fwd_config.nb_fwd_ports;
1923 
1924 	/* reinitialize forwarding streams */
1925 	init_fwd_streams();
1926 
1927 	/*
1928 	 * In the simple forwarding test, the number of forwarding cores
1929 	 * must be lower or equal to the number of forwarding ports.
1930 	 */
1931 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1932 	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
1933 		cur_fwd_config.nb_fwd_lcores =
1934 			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
1935 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
1936 
1937 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1938 		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
1939 		fwd_streams[i]->rx_queue  = 0;
1940 		fwd_streams[i]->tx_port   =
1941 				fwd_ports_ids[fwd_topology_tx_port_get(i)];
1942 		fwd_streams[i]->tx_queue  = 0;
1943 		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
1944 		fwd_streams[i]->retry_enabled = retry_enabled;
1945 	}
1946 }
1947 
1948 /**
1949  * For the RSS forwarding test all streams distributed over lcores. Each stream
1950  * being composed of a RX queue to poll on a RX port for input messages,
1951  * associated with a TX queue of a TX port where to send forwarded packets.
1952  */
1953 static void
1954 rss_fwd_config_setup(void)
1955 {
1956 	portid_t   rxp;
1957 	portid_t   txp;
1958 	queueid_t  rxq;
1959 	queueid_t  nb_q;
1960 	streamid_t  sm_id;
1961 
1962 	nb_q = nb_rxq;
1963 	if (nb_q > nb_txq)
1964 		nb_q = nb_txq;
1965 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1966 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1967 	cur_fwd_config.nb_fwd_streams =
1968 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
1969 
1970 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
1971 		cur_fwd_config.nb_fwd_lcores =
1972 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
1973 
1974 	/* reinitialize forwarding streams */
1975 	init_fwd_streams();
1976 
1977 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
1978 	rxp = 0; rxq = 0;
1979 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1980 		struct fwd_stream *fs;
1981 
1982 		fs = fwd_streams[sm_id];
1983 		txp = fwd_topology_tx_port_get(rxp);
1984 		fs->rx_port = fwd_ports_ids[rxp];
1985 		fs->rx_queue = rxq;
1986 		fs->tx_port = fwd_ports_ids[txp];
1987 		fs->tx_queue = rxq;
1988 		fs->peer_addr = fs->tx_port;
1989 		fs->retry_enabled = retry_enabled;
1990 		rxq = (queueid_t) (rxq + 1);
1991 		if (rxq < nb_q)
1992 			continue;
1993 		/*
1994 		 * rxq == nb_q
1995 		 * Restart from RX queue 0 on next RX port
1996 		 */
1997 		rxq = 0;
1998 		rxp++;
1999 	}
2000 }
2001 
2002 /**
2003  * For the DCB forwarding test, each core is assigned on each traffic class.
2004  *
2005  * Each core is assigned a multi-stream, each stream being composed of
2006  * a RX queue to poll on a RX port for input messages, associated with
2007  * a TX queue of a TX port where to send forwarded packets. All RX and
2008  * TX queues are mapping to the same traffic class.
2009  * If VMDQ and DCB co-exist, each traffic class on different POOLs share
2010  * the same core
2011  */
2012 static void
2013 dcb_fwd_config_setup(void)
2014 {
2015 	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2016 	portid_t txp, rxp = 0;
2017 	queueid_t txq, rxq = 0;
2018 	lcoreid_t  lc_id;
2019 	uint16_t nb_rx_queue, nb_tx_queue;
2020 	uint16_t i, j, k, sm_id = 0;
2021 	uint8_t tc = 0;
2022 
2023 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2024 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2025 	cur_fwd_config.nb_fwd_streams =
2026 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2027 
2028 	/* reinitialize forwarding streams */
2029 	init_fwd_streams();
2030 	sm_id = 0;
2031 	txp = 1;
2032 	/* get the dcb info on the first RX and TX ports */
2033 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2034 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2035 
2036 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2037 		fwd_lcores[lc_id]->stream_nb = 0;
2038 		fwd_lcores[lc_id]->stream_idx = sm_id;
2039 		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
			/* If nb_queue is zero, this TC is not enabled
			 * on the pool.
			 */
2043 			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2044 				break;
2045 			k = fwd_lcores[lc_id]->stream_nb +
2046 				fwd_lcores[lc_id]->stream_idx;
2047 			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2048 			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			nb_rx_queue = rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2050 			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2051 			for (j = 0; j < nb_rx_queue; j++) {
2052 				struct fwd_stream *fs;
2053 
2054 				fs = fwd_streams[k + j];
2055 				fs->rx_port = fwd_ports_ids[rxp];
2056 				fs->rx_queue = rxq + j;
2057 				fs->tx_port = fwd_ports_ids[txp];
2058 				fs->tx_queue = txq + j % nb_tx_queue;
2059 				fs->peer_addr = fs->tx_port;
2060 				fs->retry_enabled = retry_enabled;
2061 			}
2062 			fwd_lcores[lc_id]->stream_nb +=
2063 				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2064 		}
2065 		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2066 
2067 		tc++;
2068 		if (tc < rxp_dcb_info.nb_tcs)
2069 			continue;
2070 		/* Restart from TC 0 on next RX port */
2071 		tc = 0;
2072 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2073 			rxp = (portid_t)
2074 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
2075 		else
2076 			rxp++;
2077 		if (rxp >= nb_fwd_ports)
2078 			return;
2079 		/* get the dcb information on next RX and TX ports */
2080 		if ((rxp & 0x1) == 0)
2081 			txp = (portid_t) (rxp + 1);
2082 		else
2083 			txp = (portid_t) (rxp - 1);
2084 		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2085 		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2086 	}
2087 }
2088 
2089 static void
2090 icmp_echo_config_setup(void)
2091 {
2092 	portid_t  rxp;
2093 	queueid_t rxq;
2094 	lcoreid_t lc_id;
2095 	uint16_t  sm_id;
2096 
2097 	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2098 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2099 			(nb_txq * nb_fwd_ports);
2100 	else
2101 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2102 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2103 	cur_fwd_config.nb_fwd_streams =
2104 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2105 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2106 		cur_fwd_config.nb_fwd_lcores =
2107 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
2108 	if (verbose_level > 0) {
2109 		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __func__,
2111 		       cur_fwd_config.nb_fwd_lcores,
2112 		       cur_fwd_config.nb_fwd_ports,
2113 		       cur_fwd_config.nb_fwd_streams);
2114 	}
2115 
2116 	/* reinitialize forwarding streams */
2117 	init_fwd_streams();
2118 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
2119 	rxp = 0; rxq = 0;
2120 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2121 		if (verbose_level > 0)
2122 			printf("  core=%d: \n", lc_id);
2123 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2124 			struct fwd_stream *fs;
2125 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2126 			fs->rx_port = fwd_ports_ids[rxp];
2127 			fs->rx_queue = rxq;
2128 			fs->tx_port = fs->rx_port;
2129 			fs->tx_queue = rxq;
2130 			fs->peer_addr = fs->tx_port;
2131 			fs->retry_enabled = retry_enabled;
2132 			if (verbose_level > 0)
2133 				printf("  stream=%d port=%d rxq=%d txq=%d\n",
2134 				       sm_id, fs->rx_port, fs->rx_queue,
2135 				       fs->tx_queue);
2136 			rxq = (queueid_t) (rxq + 1);
2137 			if (rxq == nb_rxq) {
2138 				rxq = 0;
2139 				rxp = (portid_t) (rxp + 1);
2140 			}
2141 		}
2142 	}
2143 }
2144 
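/*
 * Pick the configuration routine matching the current forwarding mode:
 * icmpecho has its own setup, the DCB and RSS setups need multiple RX and
 * TX queues, and the simple one-stream-per-port setup covers the rest.
 */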
2145 void
2146 fwd_config_setup(void)
2147 {
2148 	cur_fwd_config.fwd_eng = cur_fwd_eng;
2149 	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2150 		icmp_echo_config_setup();
2151 		return;
2152 	}
2153 	if ((nb_rxq > 1) && (nb_txq > 1)){
2154 		if (dcb_config)
2155 			dcb_fwd_config_setup();
2156 		else
2157 			rss_fwd_config_setup();
2158 	}
2159 	else
2160 		simple_fwd_config_setup();
2161 }
2162 
2163 void
2164 pkt_fwd_config_display(struct fwd_config *cfg)
2165 {
2166 	struct fwd_stream *fs;
2167 	lcoreid_t  lc_id;
2168 	streamid_t sm_id;
2169 
2170 	printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2171 		"NUMA support %s, MP over anonymous pages %s\n",
2172 		cfg->fwd_eng->fwd_mode_name,
2173 		retry_enabled == 0 ? "" : " with retry",
2174 		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2175 		numa_support == 1 ? "enabled" : "disabled",
2176 		mp_anon != 0 ? "enabled" : "disabled");
2177 
2178 	if (retry_enabled)
2179 		printf("TX retry num: %u, delay between TX retries: %uus\n",
2180 			burst_tx_retry_num, burst_tx_delay_time);
2181 	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2182 		printf("Logical Core %u (socket %u) forwards packets on "
2183 		       "%d streams:",
2184 		       fwd_lcores_cpuids[lc_id],
2185 		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2186 		       fwd_lcores[lc_id]->stream_nb);
2187 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2188 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2189 			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
2190 			       "P=%d/Q=%d (socket %u) ",
2191 			       fs->rx_port, fs->rx_queue,
2192 			       ports[fs->rx_port].socket_id,
2193 			       fs->tx_port, fs->tx_queue,
2194 			       ports[fs->tx_port].socket_id);
2195 			print_ethaddr("peer=",
2196 				      &peer_eth_addrs[fs->peer_addr]);
2197 		}
2198 		printf("\n");
2199 	}
2200 	printf("\n");
2201 }
2202 
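/*
 * Record the peer MAC address used when forwarding on a port. Typically
 * driven from the testpmd command line, e.g.
 * "set eth-peer 0 00:11:22:33:44:55" (command shown for illustration).
 */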
2203 void
2204 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
2205 {
2206 	uint8_t c, new_peer_addr[6];
2207 	if (!rte_eth_dev_is_valid_port(port_id)) {
2208 		printf("Error: Invalid port number %i\n", port_id);
2209 		return;
2210 	}
2211 	if (cmdline_parse_etheraddr(NULL, peer_addr, &new_peer_addr,
2212 					sizeof(new_peer_addr)) < 0) {
2213 		printf("Error: Invalid ethernet address: %s\n", peer_addr);
2214 		return;
2215 	}
2216 	for (c = 0; c < 6; c++)
2217 		peer_eth_addrs[port_id].addr_bytes[c] =
2218 			new_peer_addr[c];
2219 }
2220 
2221 int
2222 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2223 {
2224 	unsigned int i;
2225 	unsigned int lcore_cpuid;
2226 	int record_now;
2227 
2228 	record_now = 0;
2229  again:
2230 	for (i = 0; i < nb_lc; i++) {
2231 		lcore_cpuid = lcorelist[i];
		if (!rte_lcore_is_enabled(lcore_cpuid)) {
2233 			printf("lcore %u not enabled\n", lcore_cpuid);
2234 			return -1;
2235 		}
2236 		if (lcore_cpuid == rte_get_master_lcore()) {
2237 			printf("lcore %u cannot be masked on for running "
2238 			       "packet forwarding, which is the master lcore "
2239 			       "and reserved for command line parsing only\n",
2240 			       lcore_cpuid);
2241 			return -1;
2242 		}
2243 		if (record_now)
2244 			fwd_lcores_cpuids[i] = lcore_cpuid;
2245 	}
2246 	if (record_now == 0) {
2247 		record_now = 1;
2248 		goto again;
2249 	}
2250 	nb_cfg_lcores = (lcoreid_t) nb_lc;
2251 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2252 		printf("previous number of forwarding cores %u - changed to "
2253 		       "number of configured cores %u\n",
2254 		       (unsigned int) nb_fwd_lcores, nb_lc);
2255 		nb_fwd_lcores = (lcoreid_t) nb_lc;
2256 	}
2257 
2258 	return 0;
2259 }
2260 
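/*
 * Convert a bit mask of lcores into an lcore list; for example, a mask of
 * 0x6 selects lcores 1 and 2. Only the first 64 lcore ids can be selected
 * this way.
 */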
2261 int
2262 set_fwd_lcores_mask(uint64_t lcoremask)
2263 {
2264 	unsigned int lcorelist[64];
2265 	unsigned int nb_lc;
2266 	unsigned int i;
2267 
2268 	if (lcoremask == 0) {
2269 		printf("Invalid NULL mask of cores\n");
2270 		return -1;
2271 	}
2272 	nb_lc = 0;
2273 	for (i = 0; i < 64; i++) {
2274 		if (! ((uint64_t)(1ULL << i) & lcoremask))
2275 			continue;
2276 		lcorelist[nb_lc++] = i;
2277 	}
2278 	return set_fwd_lcores_list(lcorelist, nb_lc);
2279 }
2280 
2281 void
2282 set_fwd_lcores_number(uint16_t nb_lc)
2283 {
2284 	if (nb_lc > nb_cfg_lcores) {
2285 		printf("nb fwd cores %u > %u (max. number of configured "
2286 		       "lcores) - ignored\n",
2287 		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2288 		return;
2289 	}
2290 	nb_fwd_lcores = (lcoreid_t) nb_lc;
2291 	printf("Number of forwarding cores set to %u\n",
2292 	       (unsigned int) nb_fwd_lcores);
2293 }
2294 
2295 void
2296 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2297 {
2298 	unsigned int i;
2299 	portid_t port_id;
2300 	int record_now;
2301 
2302 	record_now = 0;
2303  again:
2304 	for (i = 0; i < nb_pt; i++) {
2305 		port_id = (portid_t) portlist[i];
2306 		if (port_id_is_invalid(port_id, ENABLED_WARN))
2307 			return;
2308 		if (record_now)
2309 			fwd_ports_ids[i] = port_id;
2310 	}
2311 	if (record_now == 0) {
2312 		record_now = 1;
2313 		goto again;
2314 	}
2315 	nb_cfg_ports = (portid_t) nb_pt;
2316 	if (nb_fwd_ports != (portid_t) nb_pt) {
2317 		printf("previous number of forwarding ports %u - changed to "
2318 		       "number of configured ports %u\n",
2319 		       (unsigned int) nb_fwd_ports, nb_pt);
2320 		nb_fwd_ports = (portid_t) nb_pt;
2321 	}
2322 }
2323 
2324 void
2325 set_fwd_ports_mask(uint64_t portmask)
2326 {
2327 	unsigned int portlist[64];
2328 	unsigned int nb_pt;
2329 	unsigned int i;
2330 
2331 	if (portmask == 0) {
2332 		printf("Invalid NULL mask of ports\n");
2333 		return;
2334 	}
2335 	nb_pt = 0;
2336 	RTE_ETH_FOREACH_DEV(i) {
2337 		if (! ((uint64_t)(1ULL << i) & portmask))
2338 			continue;
2339 		portlist[nb_pt++] = i;
2340 	}
2341 	set_fwd_ports_list(portlist, nb_pt);
2342 }
2343 
2344 void
2345 set_fwd_ports_number(uint16_t nb_pt)
2346 {
2347 	if (nb_pt > nb_cfg_ports) {
2348 		printf("nb fwd ports %u > %u (number of configured "
2349 		       "ports) - ignored\n",
2350 		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2351 		return;
2352 	}
2353 	nb_fwd_ports = (portid_t) nb_pt;
2354 	printf("Number of forwarding ports set to %u\n",
2355 	       (unsigned int) nb_fwd_ports);
2356 }
2357 
2358 int
2359 port_is_forwarding(portid_t port_id)
2360 {
2361 	unsigned int i;
2362 
2363 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2364 		return -1;
2365 
2366 	for (i = 0; i < nb_fwd_ports; i++) {
2367 		if (fwd_ports_ids[i] == port_id)
2368 			return 1;
2369 	}
2370 
2371 	return 0;
2372 }
2373 
2374 void
2375 set_nb_pkt_per_burst(uint16_t nb)
2376 {
2377 	if (nb > MAX_PKT_BURST) {
2378 		printf("nb pkt per burst: %u > %u (maximum packet per burst) "
2379 		       " ignored\n",
2380 		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2381 		return;
2382 	}
2383 	nb_pkt_per_burst = nb;
2384 	printf("Number of packets per burst set to %u\n",
2385 	       (unsigned int) nb_pkt_per_burst);
2386 }
2387 
2388 static const char *
2389 tx_split_get_name(enum tx_pkt_split split)
2390 {
2391 	uint32_t i;
2392 
2393 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2394 		if (tx_split_name[i].split == split)
2395 			return tx_split_name[i].name;
2396 	}
2397 	return NULL;
2398 }
2399 
2400 void
2401 set_tx_pkt_split(const char *name)
2402 {
2403 	uint32_t i;
2404 
2405 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2406 		if (strcmp(tx_split_name[i].name, name) == 0) {
2407 			tx_pkt_split = tx_split_name[i].split;
2408 			return;
2409 		}
2410 	}
2411 	printf("unknown value: \"%s\"\n", name);
2412 }
2413 
2414 void
2415 show_tx_pkt_segments(void)
2416 {
2417 	uint32_t i, n;
2418 	const char *split;
2419 
2420 	n = tx_pkt_nb_segs;
2421 	split = tx_split_get_name(tx_pkt_split);
2422 
2423 	printf("Number of segments: %u\n", n);
2424 	printf("Segment sizes: ");
2425 	for (i = 0; i != n - 1; i++)
2426 		printf("%hu,", tx_pkt_seg_lengths[i]);
2427 	printf("%hu\n", tx_pkt_seg_lengths[i]);
2428 	printf("Split packet: %s\n", split);
2429 }
2430 
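/*
 * A worked example (testpmd command shown for illustration): "set txpkts
 * 64,128" requests two segments with a total length of 192 bytes, which
 * passes the check below against the empty UDP/IP packet size
 * (sizeof(struct ether_hdr) + 20 + 8 = 42 bytes) as long as each segment
 * fits in the mbuf data size.
 */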
2431 void
2432 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2433 {
2434 	uint16_t tx_pkt_len;
2435 	unsigned i;
2436 
2437 	if (nb_segs >= (unsigned) nb_txd) {
2438 		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
2439 		       nb_segs, (unsigned int) nb_txd);
2440 		return;
2441 	}
2442 
2443 	/*
2444 	 * Check that each segment length is greater or equal than
2445 	 * the mbuf data sise.
2446 	 * Check also that the total packet length is greater or equal than the
2447 	 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
2448 	 */
2449 	tx_pkt_len = 0;
2450 	for (i = 0; i < nb_segs; i++) {
2451 		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2452 			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2453 			       i, seg_lengths[i], (unsigned) mbuf_data_size);
2454 			return;
2455 		}
2456 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2457 	}
2458 	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
2459 		printf("total packet length=%u < %d - give up\n",
2460 				(unsigned) tx_pkt_len,
2461 				(int)(sizeof(struct ether_hdr) + 20 + 8));
2462 		return;
2463 	}
2464 
2465 	for (i = 0; i < nb_segs; i++)
2466 		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2467 
2468 	tx_pkt_length  = tx_pkt_len;
2469 	tx_pkt_nb_segs = (uint8_t) nb_segs;
2470 }
2471 
2472 void
2473 setup_gro(const char *onoff, portid_t port_id)
2474 {
2475 	if (!rte_eth_dev_is_valid_port(port_id)) {
2476 		printf("invalid port id %u\n", port_id);
2477 		return;
2478 	}
2479 	if (test_done == 0) {
2480 		printf("Before enable/disable GRO,"
2481 				" please stop forwarding first\n");
2482 		return;
2483 	}
2484 	if (strcmp(onoff, "on") == 0) {
2485 		if (gro_ports[port_id].enable != 0) {
2486 			printf("Port %u has enabled GRO. Please"
2487 					" disable GRO first\n", port_id);
2488 			return;
2489 		}
2490 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2491 			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2492 			gro_ports[port_id].param.max_flow_num =
2493 				GRO_DEFAULT_FLOW_NUM;
2494 			gro_ports[port_id].param.max_item_per_flow =
2495 				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2496 		}
2497 		gro_ports[port_id].enable = 1;
2498 	} else {
2499 		if (gro_ports[port_id].enable == 0) {
2500 			printf("Port %u has disabled GRO\n", port_id);
2501 			return;
2502 		}
2503 		gro_ports[port_id].enable = 0;
2504 	}
2505 }
2506 
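/*
 * The flush cycle controls how many forwarding rounds GRO may buffer
 * packets before the reassembled ones are flushed; values outside
 * [GRO_DEFAULT_FLUSH_CYCLES, GRO_MAX_FLUSH_CYCLES] are reverted to the
 * default below.
 */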
2507 void
2508 setup_gro_flush_cycles(uint8_t cycles)
2509 {
2510 	if (test_done == 0) {
2511 		printf("Before change flush interval for GRO,"
2512 				" please stop forwarding first.\n");
2513 		return;
2514 	}
2515 
2516 	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
2517 			GRO_DEFAULT_FLUSH_CYCLES) {
2518 		printf("The flushing cycle be in the range"
2519 				" of 1 to %u. Revert to the default"
2520 				" value %u.\n",
2521 				GRO_MAX_FLUSH_CYCLES,
2522 				GRO_DEFAULT_FLUSH_CYCLES);
2523 		cycles = GRO_DEFAULT_FLUSH_CYCLES;
2524 	}
2525 
2526 	gro_flush_cycles = cycles;
2527 }
2528 
2529 void
2530 show_gro(portid_t port_id)
2531 {
2532 	struct rte_gro_param *param;
2533 	uint32_t max_pkts_num;
2534 
2535 	param = &gro_ports[port_id].param;
2536 
2537 	if (!rte_eth_dev_is_valid_port(port_id)) {
2538 		printf("Invalid port id %u.\n", port_id);
2539 		return;
2540 	}
2541 	if (gro_ports[port_id].enable) {
2542 		printf("GRO type: TCP/IPv4\n");
2543 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2544 			max_pkts_num = param->max_flow_num *
2545 				param->max_item_per_flow;
		} else {
			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
		}
2548 		printf("Max number of packets to perform GRO: %u\n",
2549 				max_pkts_num);
2550 		printf("Flushing cycles: %u\n", gro_flush_cycles);
	} else {
		printf("GRO is not enabled on port %u.\n", port_id);
	}
2553 }
2554 
2555 void
2556 setup_gso(const char *mode, portid_t port_id)
2557 {
2558 	if (!rte_eth_dev_is_valid_port(port_id)) {
2559 		printf("invalid port id %u\n", port_id);
2560 		return;
2561 	}
2562 	if (strcmp(mode, "on") == 0) {
2563 		if (test_done == 0) {
2564 			printf("before enabling GSO,"
2565 					" please stop forwarding first\n");
2566 			return;
2567 		}
2568 		gso_ports[port_id].enable = 1;
2569 	} else if (strcmp(mode, "off") == 0) {
2570 		if (test_done == 0) {
2571 			printf("before disabling GSO,"
2572 					" please stop forwarding first\n");
2573 			return;
2574 		}
2575 		gso_ports[port_id].enable = 0;
2576 	}
2577 }
2578 
2579 char*
2580 list_pkt_forwarding_modes(void)
2581 {
2582 	static char fwd_modes[128] = "";
2583 	const char *separator = "|";
2584 	struct fwd_engine *fwd_eng;
2585 	unsigned i = 0;
2586 
	if (strlen(fwd_modes) == 0) {
2588 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
2589 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
2590 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2591 			strncat(fwd_modes, separator,
2592 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2593 		}
2594 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2595 	}
2596 
2597 	return fwd_modes;
2598 }
2599 
2600 char*
2601 list_pkt_forwarding_retry_modes(void)
2602 {
2603 	static char fwd_modes[128] = "";
2604 	const char *separator = "|";
2605 	struct fwd_engine *fwd_eng;
2606 	unsigned i = 0;
2607 
2608 	if (strlen(fwd_modes) == 0) {
2609 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
2610 			if (fwd_eng == &rx_only_engine)
2611 				continue;
2612 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
2613 					sizeof(fwd_modes) -
2614 					strlen(fwd_modes) - 1);
2615 			strncat(fwd_modes, separator,
2616 					sizeof(fwd_modes) -
2617 					strlen(fwd_modes) - 1);
2618 		}
2619 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2620 	}
2621 
2622 	return fwd_modes;
2623 }
2624 
2625 void
2626 set_pkt_forwarding_mode(const char *fwd_mode_name)
2627 {
2628 	struct fwd_engine *fwd_eng;
2629 	unsigned i;
2630 
2631 	i = 0;
2632 	while ((fwd_eng = fwd_engines[i]) != NULL) {
		if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
2634 			printf("Set %s packet forwarding mode%s\n",
2635 			       fwd_mode_name,
2636 			       retry_enabled == 0 ? "" : " with retry");
2637 			cur_fwd_eng = fwd_eng;
2638 			return;
2639 		}
2640 		i++;
2641 	}
2642 	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2643 }
2644 
2645 void
2646 set_verbose_level(uint16_t vb_level)
2647 {
2648 	printf("Change verbose level from %u to %u\n",
2649 	       (unsigned int) verbose_level, (unsigned int) vb_level);
2650 	verbose_level = vb_level;
2651 }
2652 
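/*
 * vlan_extend_set(), rx_vlan_strip_set() and rx_vlan_filter_set() below
 * toggle a VLAN offload both in the device, through
 * rte_eth_dev_set_vlan_offload(), and in the cached per-port RX offload
 * configuration, so the setting is kept across a later port
 * reconfiguration.
 */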
2653 void
2654 vlan_extend_set(portid_t port_id, int on)
2655 {
2656 	int diag;
2657 	int vlan_offload;
2658 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2659 
2660 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2661 		return;
2662 
2663 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2664 
2665 	if (on) {
2666 		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
2667 		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2668 	} else {
2669 		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
2670 		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2671 	}
2672 
2673 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2674 	if (diag < 0)
2675 		printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
2676 	       "diag=%d\n", port_id, on, diag);
2677 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2678 }
2679 
2680 void
2681 rx_vlan_strip_set(portid_t port_id, int on)
2682 {
2683 	int diag;
2684 	int vlan_offload;
2685 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2686 
2687 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2688 		return;
2689 
2690 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2691 
2692 	if (on) {
2693 		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
2694 		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2695 	} else {
2696 		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
2697 		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2698 	}
2699 
2700 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2701 	if (diag < 0)
2702 		printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
2703 	       "diag=%d\n", port_id, on, diag);
2704 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2705 }
2706 
2707 void
2708 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
2709 {
2710 	int diag;
2711 
2712 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2713 		return;
2714 
2715 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
2716 	if (diag < 0)
2717 		printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
2718 	       "diag=%d\n", port_id, queue_id, on, diag);
2719 }
2720 
2721 void
2722 rx_vlan_filter_set(portid_t port_id, int on)
2723 {
2724 	int diag;
2725 	int vlan_offload;
2726 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
2727 
2728 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2729 		return;
2730 
2731 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2732 
2733 	if (on) {
2734 		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
2735 		port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2736 	} else {
2737 		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
2738 		port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
2739 	}
2740 
2741 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2742 	if (diag < 0)
2743 		printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
2744 	       "diag=%d\n", port_id, on, diag);
2745 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
2746 }
2747 
2748 int
2749 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
2750 {
2751 	int diag;
2752 
2753 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2754 		return 1;
2755 	if (vlan_id_is_invalid(vlan_id))
2756 		return 1;
2757 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2758 	if (diag == 0)
2759 		return 0;
2760 	printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
2761 	       "diag=%d\n",
2762 	       port_id, vlan_id, on, diag);
2763 	return -1;
2764 }
2765 
2766 void
2767 rx_vlan_all_filter_set(portid_t port_id, int on)
2768 {
2769 	uint16_t vlan_id;
2770 
2771 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2772 		return;
2773 	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
2774 		if (rx_vft_set(port_id, vlan_id, on))
2775 			break;
2776 	}
2777 }
2778 
2779 void
2780 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
2781 {
2782 	int diag;
2783 
2784 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2785 		return;
2786 
2787 	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
2788 	if (diag == 0)
2789 		return;
2790 
2791 	printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
2792 	       "diag=%d\n",
2793 	       port_id, vlan_type, tp_id, diag);
2794 }
2795 
2796 void
2797 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
2798 {
2799 	int vlan_offload;
2800 	struct rte_eth_dev_info dev_info;
2801 
2802 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2803 		return;
2804 	if (vlan_id_is_invalid(vlan_id))
2805 		return;
2806 
2807 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2808 	if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
2809 		printf("Error, as QinQ has been enabled.\n");
2810 		return;
2811 	}
2812 	rte_eth_dev_info_get(port_id, &dev_info);
2813 	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
2814 		printf("Error: vlan insert is not supported by port %d\n",
2815 			port_id);
2816 		return;
2817 	}
2818 
2819 	tx_vlan_reset(port_id);
2820 	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
2821 	ports[port_id].tx_vlan_id = vlan_id;
2822 }
2823 
2824 void
2825 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
2826 {
2827 	int vlan_offload;
2828 	struct rte_eth_dev_info dev_info;
2829 
2830 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2831 		return;
2832 	if (vlan_id_is_invalid(vlan_id))
2833 		return;
2834 	if (vlan_id_is_invalid(vlan_id_outer))
2835 		return;
2836 
2837 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2838 	if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
2839 		printf("Error, as QinQ hasn't been enabled.\n");
2840 		return;
2841 	}
2842 	rte_eth_dev_info_get(port_id, &dev_info);
2843 	if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
2844 		printf("Error: qinq insert not supported by port %d\n",
2845 			port_id);
2846 		return;
2847 	}
2848 
2849 	tx_vlan_reset(port_id);
2850 	ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT;
2851 	ports[port_id].tx_vlan_id = vlan_id;
2852 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
2853 }
2854 
2855 void
2856 tx_vlan_reset(portid_t port_id)
2857 {
2858 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2859 		return;
2860 	ports[port_id].dev_conf.txmode.offloads &=
2861 				~(DEV_TX_OFFLOAD_VLAN_INSERT |
2862 				  DEV_TX_OFFLOAD_QINQ_INSERT);
2863 	ports[port_id].tx_vlan_id = 0;
2864 	ports[port_id].tx_vlan_id_outer = 0;
2865 }
2866 
2867 void
2868 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
2869 {
2870 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2871 		return;
2872 
2873 	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
2874 }
2875 
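/*
 * Map an RX or TX queue of a port to one of the
 * RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue statistics counters. Typically
 * driven by the testpmd command "set stat_qmap (tx|rx) <port> <queue>
 * <map>" (shown for illustration).
 */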
2876 void
2877 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
2878 {
2879 	uint16_t i;
2880 	uint8_t existing_mapping_found = 0;
2881 
2882 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2883 		return;
2884 
2885 	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
2886 		return;
2887 
2888 	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
2889 		printf("map_value not in required range 0..%d\n",
2890 				RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
2891 		return;
2892 	}
2893 
	if (!is_rx) { /* then TX */
2895 		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2896 			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2897 			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
2898 				tx_queue_stats_mappings[i].stats_counter_id = map_value;
2899 				existing_mapping_found = 1;
2900 				break;
2901 			}
2902 		}
2903 		if (!existing_mapping_found) { /* A new additional mapping... */
2904 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
2905 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
2906 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
2907 			nb_tx_queue_stats_mappings++;
2908 		}
2909 	}
2910 	else { /*rx*/
2911 		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2912 			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2913 			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
2914 				rx_queue_stats_mappings[i].stats_counter_id = map_value;
2915 				existing_mapping_found = 1;
2916 				break;
2917 			}
2918 		}
2919 		if (!existing_mapping_found) { /* A new additional mapping... */
2920 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
2921 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
2922 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
2923 			nb_rx_queue_stats_mappings++;
2924 		}
2925 	}
2926 }
2927 
2928 void
2929 set_xstats_hide_zero(uint8_t on_off)
2930 {
2931 	xstats_hide_zero = on_off;
2932 }
2933 
2934 static inline void
2935 print_fdir_mask(struct rte_eth_fdir_masks *mask)
2936 {
2937 	printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
2938 
2939 	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2940 		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
2941 			" tunnel_id: 0x%08x",
2942 			mask->mac_addr_byte_mask, mask->tunnel_type_mask,
2943 			rte_be_to_cpu_32(mask->tunnel_id_mask));
2944 	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2945 		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
2946 			rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
2947 			rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
2948 
2949 		printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
2950 			rte_be_to_cpu_16(mask->src_port_mask),
2951 			rte_be_to_cpu_16(mask->dst_port_mask));
2952 
2953 		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
2954 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
2955 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
2956 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
2957 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
2958 
2959 		printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
2960 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
2961 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
2962 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
2963 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
2964 	}
2965 
2966 	printf("\n");
2967 }
2968 
2969 static inline void
2970 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
2971 {
2972 	struct rte_eth_flex_payload_cfg *cfg;
2973 	uint32_t i, j;
2974 
2975 	for (i = 0; i < flex_conf->nb_payloads; i++) {
2976 		cfg = &flex_conf->flex_set[i];
2977 		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
2978 			printf("\n    RAW:  ");
2979 		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
2980 			printf("\n    L2_PAYLOAD:  ");
2981 		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
2982 			printf("\n    L3_PAYLOAD:  ");
2983 		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
2984 			printf("\n    L4_PAYLOAD:  ");
2985 		else
2986 			printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
2987 		for (j = 0; j < num; j++)
2988 			printf("  %-5u", cfg->src_offset[j]);
2989 	}
2990 	printf("\n");
2991 }
2992 
2993 static char *
2994 flowtype_to_str(uint16_t flow_type)
2995 {
2996 	struct flow_type_info {
2997 		char str[32];
2998 		uint16_t ftype;
2999 	};
3000 
3001 	uint8_t i;
3002 	static struct flow_type_info flowtype_str_table[] = {
3003 		{"raw", RTE_ETH_FLOW_RAW},
3004 		{"ipv4", RTE_ETH_FLOW_IPV4},
3005 		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
3006 		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
3007 		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
3008 		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
3009 		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
3010 		{"ipv6", RTE_ETH_FLOW_IPV6},
3011 		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
3012 		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
3013 		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
3014 		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
3015 		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
3016 		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
3017 		{"port", RTE_ETH_FLOW_PORT},
3018 		{"vxlan", RTE_ETH_FLOW_VXLAN},
3019 		{"geneve", RTE_ETH_FLOW_GENEVE},
3020 		{"nvgre", RTE_ETH_FLOW_NVGRE},
3021 	};
3022 
3023 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
3024 		if (flowtype_str_table[i].ftype == flow_type)
3025 			return flowtype_str_table[i].str;
3026 	}
3027 
3028 	return NULL;
3029 }
3030 
3031 static inline void
3032 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
3033 {
3034 	struct rte_eth_fdir_flex_mask *mask;
3035 	uint32_t i, j;
3036 	char *p;
3037 
3038 	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
3039 		mask = &flex_conf->flex_mask[i];
3040 		p = flowtype_to_str(mask->flow_type);
3041 		printf("\n    %s:\t", p ? p : "unknown");
3042 		for (j = 0; j < num; j++)
3043 			printf(" %02x", mask->mask[j]);
3044 	}
3045 	printf("\n");
3046 }
3047 
3048 static inline void
3049 print_fdir_flow_type(uint32_t flow_types_mask)
3050 {
3051 	int i;
3052 	char *p;
3053 
3054 	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3055 		if (!(flow_types_mask & (1 << i)))
3056 			continue;
3057 		p = flowtype_to_str(i);
3058 		if (p)
3059 			printf(" %s", p);
3060 		else
3061 			printf(" unknown");
3062 	}
3063 	printf("\n");
3064 }
3065 
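/*
 * Dump the flow director configuration and statistics of a port,
 * typically through the testpmd command "show port fdir <port_id>"
 * (shown for illustration).
 */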
3066 void
3067 fdir_get_infos(portid_t port_id)
3068 {
3069 	struct rte_eth_fdir_stats fdir_stat;
3070 	struct rte_eth_fdir_info fdir_info;
3071 	int ret;
3072 
3073 	static const char *fdir_stats_border = "########################";
3074 
3075 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3076 		return;
3077 	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
3078 	if (ret < 0) {
3079 		printf("\n FDIR is not supported on port %-2d\n",
3080 			port_id);
3081 		return;
3082 	}
3083 
3084 	memset(&fdir_info, 0, sizeof(fdir_info));
3085 	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3086 			       RTE_ETH_FILTER_INFO, &fdir_info);
3087 	memset(&fdir_stat, 0, sizeof(fdir_stat));
3088 	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
3089 			       RTE_ETH_FILTER_STATS, &fdir_stat);
3090 	printf("\n  %s FDIR infos for port %-2d     %s\n",
3091 	       fdir_stats_border, port_id, fdir_stats_border);
3092 	printf("  MODE: ");
3093 	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
3094 		printf("  PERFECT\n");
3095 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
3096 		printf("  PERFECT-MAC-VLAN\n");
3097 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
3098 		printf("  PERFECT-TUNNEL\n");
3099 	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
3100 		printf("  SIGNATURE\n");
3101 	else
3102 		printf("  DISABLE\n");
3103 	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
3104 		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
3105 		printf("  SUPPORTED FLOW TYPE: ");
3106 		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
3107 	}
3108 	printf("  FLEX PAYLOAD INFO:\n");
3109 	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
3110 	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
3111 	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
3112 		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
3113 		fdir_info.flex_payload_unit,
3114 		fdir_info.max_flex_payload_segment_num,
3115 		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
3116 	printf("  MASK: ");
3117 	print_fdir_mask(&fdir_info.mask);
3118 	if (fdir_info.flex_conf.nb_payloads > 0) {
3119 		printf("  FLEX PAYLOAD SRC OFFSET:");
3120 		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3121 	}
3122 	if (fdir_info.flex_conf.nb_flexmasks > 0) {
3123 		printf("  FLEX MASK CFG:");
3124 		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
3125 	}
3126 	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
3127 	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
3128 	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
3129 	       fdir_info.guarant_spc, fdir_info.best_spc);
3130 	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
3131 	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
3132 	       "  add:	         %-10"PRIu64"  remove:        %"PRIu64"\n"
3133 	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
3134 	       fdir_stat.collision, fdir_stat.free,
3135 	       fdir_stat.maxhash, fdir_stat.maxlen,
3136 	       fdir_stat.add, fdir_stat.remove,
3137 	       fdir_stat.f_add, fdir_stat.f_remove);
3138 	printf("  %s############################%s\n",
3139 	       fdir_stats_border, fdir_stats_border);
3140 }
3141 
3142 void
3143 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
3144 {
3145 	struct rte_port *port;
3146 	struct rte_eth_fdir_flex_conf *flex_conf;
3147 	int i, idx = 0;
3148 
3149 	port = &ports[port_id];
3150 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3151 	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3152 		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3153 			idx = i;
3154 			break;
3155 		}
3156 	}
3157 	if (i >= RTE_ETH_FLOW_MAX) {
3158 		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3159 			idx = flex_conf->nb_flexmasks;
3160 			flex_conf->nb_flexmasks++;
3161 		} else {
3162 			printf("The flex mask table is full. Can not set flex"
3163 				" mask for flow_type(%u).", cfg->flow_type);
3164 			return;
3165 		}
3166 	}
3167 	rte_memcpy(&flex_conf->flex_mask[idx],
3168 			 cfg,
3169 			 sizeof(struct rte_eth_fdir_flex_mask));
3170 }
3171 
3172 void
3173 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3174 {
3175 	struct rte_port *port;
3176 	struct rte_eth_fdir_flex_conf *flex_conf;
3177 	int i, idx = 0;
3178 
3179 	port = &ports[port_id];
3180 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3181 	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3182 		if (cfg->type == flex_conf->flex_set[i].type) {
3183 			idx = i;
3184 			break;
3185 		}
3186 	}
3187 	if (i >= RTE_ETH_PAYLOAD_MAX) {
3188 		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3189 			idx = flex_conf->nb_payloads;
3190 			flex_conf->nb_payloads++;
3191 		} else {
3192 			printf("The flex payload table is full. Can not set"
3193 				" flex payload for type(%u).", cfg->type);
3194 			return;
3195 		}
3196 	}
3197 	rte_memcpy(&flex_conf->flex_set[idx],
3198 			 cfg,
			 sizeof(struct rte_eth_flex_payload_cfg));
}
3202 
3203 void
3204 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3205 {
3206 #ifdef RTE_LIBRTE_IXGBE_PMD
3207 	int diag;
3208 
3209 	if (is_rx)
3210 		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3211 	else
3212 		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3213 
3214 	if (diag == 0)
3215 		return;
3216 	printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3217 			is_rx ? "rx" : "tx", port_id, diag);
3218 	return;
3219 #endif
3220 	printf("VF %s setting not supported for port %d\n",
3221 			is_rx ? "Rx" : "Tx", port_id);
3222 	RTE_SET_USED(vf);
3223 	RTE_SET_USED(on);
3224 }
3225 
3226 int
3227 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3228 {
3229 	int diag;
3230 	struct rte_eth_link link;
3231 
3232 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3233 		return 1;
3234 	rte_eth_link_get_nowait(port_id, &link);
3235 	if (rate > link.link_speed) {
3236 		printf("Invalid rate value:%u bigger than link speed: %u\n",
3237 			rate, link.link_speed);
3238 		return 1;
3239 	}
3240 	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3241 	if (diag == 0)
3242 		return diag;
3243 	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3244 		port_id, diag);
3245 	return diag;
3246 }
3247 
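/*
 * Try each PMD-specific VF rate-limit API in turn: a driver that does not
 * own the port returns -ENOTSUP, letting the next candidate run, and the
 * initial -ENOTSUP is reported when no PMD handled the request.
 */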
3248 int
3249 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3250 {
3251 	int diag = -ENOTSUP;
3252 
3253 	RTE_SET_USED(vf);
3254 	RTE_SET_USED(rate);
3255 	RTE_SET_USED(q_msk);
3256 
3257 #ifdef RTE_LIBRTE_IXGBE_PMD
3258 	if (diag == -ENOTSUP)
3259 		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3260 						       q_msk);
3261 #endif
3262 #ifdef RTE_LIBRTE_BNXT_PMD
3263 	if (diag == -ENOTSUP)
3264 		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3265 #endif
3266 	if (diag == 0)
3267 		return diag;
3268 
3269 	printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3270 		port_id, diag);
3271 	return diag;
3272 }
3273 
3274 /*
3275  * Functions to manage the set of filtered Multicast MAC addresses.
3276  *
3277  * A pool of filtered multicast MAC addresses is associated with each port.
3278  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3279  * The address of the pool and the number of valid multicast MAC addresses
3280  * recorded in the pool are stored in the fields "mc_addr_pool" and
3281  * "mc_addr_nb" of the "rte_port" data structure.
3282  *
3283  * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
3284  * to be supplied a contiguous array of multicast MAC addresses.
3285  * To comply with this constraint, the set of multicast addresses recorded
3286  * into the pool are systematically compacted at the beginning of the pool.
3287  * Hence, when a multicast address is removed from the pool, all following
3288  * addresses, if any, are copied back to keep the set contiguous.
3289  */
3290 #define MCAST_POOL_INC 32
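/*
 * A worked example: with MCAST_POOL_INC = 32, recording the first address
 * allocates a pool of 32 entries, addresses 2 to 32 reuse the free
 * entries, and recording the 33rd address reallocates the pool to 64
 * entries.
 */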
3291 
3292 static int
3293 mcast_addr_pool_extend(struct rte_port *port)
3294 {
3295 	struct ether_addr *mc_pool;
3296 	size_t mc_pool_size;
3297 
3298 	/*
3299 	 * If a free entry is available at the end of the pool, just
3300 	 * increment the number of recorded multicast addresses.
3301 	 */
3302 	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3303 		port->mc_addr_nb++;
3304 		return 0;
3305 	}
3306 
3307 	/*
3308 	 * [re]allocate a pool with MCAST_POOL_INC more entries.
3309 	 * The previous test guarantees that port->mc_addr_nb is a multiple
3310 	 * of MCAST_POOL_INC.
3311 	 */
3312 	mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
3313 						    MCAST_POOL_INC);
3314 	mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
3315 						mc_pool_size);
3316 	if (mc_pool == NULL) {
3317 		printf("allocation of pool of %u multicast addresses failed\n",
3318 		       port->mc_addr_nb + MCAST_POOL_INC);
3319 		return -ENOMEM;
3320 	}
3321 
3322 	port->mc_addr_pool = mc_pool;
3323 	port->mc_addr_nb++;
	return 0;
}
3327 
3328 static void
3329 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3330 {
3331 	port->mc_addr_nb--;
3332 	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
3334 		if (port->mc_addr_nb == 0) {
3335 			/* free the pool of multicast addresses. */
3336 			free(port->mc_addr_pool);
3337 			port->mc_addr_pool = NULL;
3338 		}
3339 		return;
3340 	}
3341 	memmove(&port->mc_addr_pool[addr_idx],
3342 		&port->mc_addr_pool[addr_idx + 1],
3343 		sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
3344 }
3345 
3346 static void
3347 eth_port_multicast_addr_list_set(portid_t port_id)
3348 {
3349 	struct rte_port *port;
3350 	int diag;
3351 
3352 	port = &ports[port_id];
3353 	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3354 					    port->mc_addr_nb);
3355 	if (diag == 0)
3356 		return;
3357 	printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
	       port_id, port->mc_addr_nb, -diag);
3359 }
3360 
3361 void
3362 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr)
3363 {
3364 	struct rte_port *port;
3365 	uint32_t i;
3366 
3367 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3368 		return;
3369 
3370 	port = &ports[port_id];
3371 
3372 	/*
3373 	 * Check that the added multicast MAC address is not already recorded
3374 	 * in the pool of multicast addresses.
3375 	 */
3376 	for (i = 0; i < port->mc_addr_nb; i++) {
3377 		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3378 			printf("multicast address already filtered by port\n");
3379 			return;
3380 		}
3381 	}
3382 
3383 	if (mcast_addr_pool_extend(port) != 0)
3384 		return;
3385 	ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3386 	eth_port_multicast_addr_list_set(port_id);
3387 }
3388 
3389 void
3390 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr)
3391 {
3392 	struct rte_port *port;
3393 	uint32_t i;
3394 
3395 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3396 		return;
3397 
3398 	port = &ports[port_id];
3399 
3400 	/*
3401 	 * Search the pool of multicast MAC addresses for the removed address.
3402 	 */
3403 	for (i = 0; i < port->mc_addr_nb; i++) {
3404 		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3405 			break;
3406 	}
3407 	if (i == port->mc_addr_nb) {
3408 		printf("multicast address not filtered by port %d\n", port_id);
3409 		return;
3410 	}
3411 
3412 	mcast_addr_pool_remove(port, i);
3413 	eth_port_multicast_addr_list_set(port_id);
3414 }
3415 
3416 void
3417 port_dcb_info_display(portid_t port_id)
3418 {
3419 	struct rte_eth_dcb_info dcb_info;
3420 	uint16_t i;
3421 	int ret;
3422 	static const char *border = "================";
3423 
3424 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3425 		return;
3426 
3427 	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3428 	if (ret) {
3429 		printf("\n Failed to get dcb infos on port %-2d\n",
3430 			port_id);
3431 		return;
3432 	}
3433 	printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
3434 	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
3435 	printf("\n  TC :        ");
3436 	for (i = 0; i < dcb_info.nb_tcs; i++)
3437 		printf("\t%4d", i);
3438 	printf("\n  Priority :  ");
3439 	for (i = 0; i < dcb_info.nb_tcs; i++)
3440 		printf("\t%4d", dcb_info.prio_tc[i]);
3441 	printf("\n  BW percent :");
3442 	for (i = 0; i < dcb_info.nb_tcs; i++)
3443 		printf("\t%4d%%", dcb_info.tc_bws[i]);
3444 	printf("\n  RXQ base :  ");
3445 	for (i = 0; i < dcb_info.nb_tcs; i++)
3446 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
3447 	printf("\n  RXQ number :");
3448 	for (i = 0; i < dcb_info.nb_tcs; i++)
3449 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
3450 	printf("\n  TXQ base :  ");
3451 	for (i = 0; i < dcb_info.nb_tcs; i++)
3452 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
3453 	printf("\n  TXQ number :");
3454 	for (i = 0; i < dcb_info.nb_tcs; i++)
3455 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
3456 	printf("\n");
3457 }
3458 
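/*
 * Read a whole regular file into a heap-allocated buffer. On success the
 * caller releases the buffer with close_file(); on failure NULL is
 * returned and *size, when provided, is left at 0.
 */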
3459 uint8_t *
3460 open_file(const char *file_path, uint32_t *size)
3461 {
3462 	int fd = open(file_path, O_RDONLY);
3463 	off_t pkg_size;
3464 	uint8_t *buf = NULL;
3465 	int ret = 0;
3466 	struct stat st_buf;
3467 
3468 	if (size)
3469 		*size = 0;
3470 
3471 	if (fd == -1) {
3472 		printf("%s: Failed to open %s\n", __func__, file_path);
3473 		return buf;
3474 	}
3475 
3476 	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
3477 		close(fd);
3478 		printf("%s: File operations failed\n", __func__);
3479 		return buf;
3480 	}
3481 
3482 	pkg_size = st_buf.st_size;
3483 	if (pkg_size < 0) {
3484 		close(fd);
3485 		printf("%s: File operations failed\n", __func__);
3486 		return buf;
3487 	}
3488 
3489 	buf = (uint8_t *)malloc(pkg_size);
3490 	if (!buf) {
3491 		close(fd);
3492 		printf("%s: Failed to malloc memory\n",	__func__);
3493 		return buf;
3494 	}
3495 
3496 	ret = read(fd, buf, pkg_size);
3497 	if (ret < 0) {
3498 		close(fd);
3499 		printf("%s: File read operation failed\n", __func__);
3500 		close_file(buf);
3501 		return NULL;
3502 	}
3503 
3504 	if (size)
3505 		*size = pkg_size;
3506 
3507 	close(fd);
3508 
3509 	return buf;
3510 }
3511 
3512 int
3513 save_file(const char *file_path, uint8_t *buf, uint32_t size)
3514 {
3515 	FILE *fh = fopen(file_path, "wb");
3516 
3517 	if (fh == NULL) {
3518 		printf("%s: Failed to open %s\n", __func__, file_path);
3519 		return -1;
3520 	}
3521 
3522 	if (fwrite(buf, 1, size, fh) != size) {
3523 		fclose(fh);
3524 		printf("%s: File write operation failed\n", __func__);
3525 		return -1;
3526 	}
3527 
3528 	fclose(fh);
3529 
3530 	return 0;
3531 }
3532 
3533 int
3534 close_file(uint8_t *buf)
3535 {
3536 	if (buf) {
3537 		free((void *)buf);
3538 		return 0;
3539 	}
3540 
3541 	return -1;
3542 }
3543 
3544 void
3545 port_queue_region_info_display(portid_t port_id, void *buf)
3546 {
3547 #ifdef RTE_LIBRTE_I40E_PMD
3548 	uint16_t i, j;
3549 	struct rte_pmd_i40e_queue_regions *info =
3550 		(struct rte_pmd_i40e_queue_regions *)buf;
3551 	static const char *queue_region_info_stats_border = "-------";
3552 
3553 	if (!info->queue_region_number)
3554 		printf("there is no region has been set before");
3555 
3556 	printf("\n	%s All queue region info for port=%2d %s",
3557 			queue_region_info_stats_border, port_id,
3558 			queue_region_info_stats_border);
3559 	printf("\n	queue_region_number: %-14u \n",
3560 			info->queue_region_number);
3561 
3562 	for (i = 0; i < info->queue_region_number; i++) {
3563 		printf("\n	region_id: %-14u queue_number: %-14u "
3564 			"queue_start_index: %-14u \n",
3565 			info->region[i].region_id,
3566 			info->region[i].queue_num,
3567 			info->region[i].queue_start_index);
3568 
3569 		printf("  user_priority_num is	%-14u :",
3570 					info->region[i].user_priority_num);
3571 		for (j = 0; j < info->region[i].user_priority_num; j++)
3572 			printf(" %-14u ", info->region[i].user_priority[j]);
3573 
3574 		printf("\n	flowtype_num is  %-14u :",
3575 				info->region[i].flowtype_num);
3576 		for (j = 0; j < info->region[i].flowtype_num; j++)
3577 			printf(" %-14u ", info->region[i].hw_flowtype[j]);
3578 	}
3579 #else
3580 	RTE_SET_USED(port_id);
3581 	RTE_SET_USED(buf);
3582 #endif
3583 
3584 	printf("\n\n");
3585 }
3586