/* dpdk/app/test-pmd/config.c (revision a103a97e7191179ad6a451ce85182df2ecb10c26) */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   Copyright 2013-2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>

#include "testpmd.h"

static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

struct rss_type_info {
	char str[32];
	uint64_t rss_type;
};

static const struct rss_type_info rss_type_table[] = {
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
};
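
/*
 * Editorial note: rss_type_table maps the RSS type names accepted on the
 * testpmd command line to ETH_RSS_* bit flags; port_rss_hash_conf_show()
 * and port_rss_hash_key_update() below resolve a user-supplied token with
 * a simple strcmp() scan over this table.
 */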

static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];
	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
	uint64_t mpps_rx, mpps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;
	portid_t pid;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf("  RX-nombuf:  %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
		       "%-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	} else {
		printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
		       "    RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf("  RX-errors:  %10"PRIu64"\n", stats.ierrors);
		printf("  RX-nombuf:               %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
		       "    TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets: %10"PRIu64
			       "    RX-errors: %10"PRIu64
			       "    RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets: %10"PRIu64
			       "                             TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

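	/*
	 * Editorial note: the rates below are plain packets-per-second
	 * computed from the TSC: pps = delta_pkts * rte_get_tsc_hz() /
	 * delta_cycles. For example, 5,000,000 packets received over one
	 * second's worth of cycles prints "Rx-pps: 5000000". The very first
	 * call prints 0 because prev_cycles[] starts at 0.
	 */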
	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	if (diff_cycles > 0)
		diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
	printf("\n  Throughput (since last show)\n");
	printf("  Rx-pps: %12"PRIu64"\n  Tx-pps: %12"PRIu64"\n",
			mpps_rx, mpps_tx);

	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

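	/*
	 * Editorial note: calling rte_eth_xstats_get_names() with a NULL
	 * buffer is the standard ethdev size query; it returns the number of
	 * xstats so the lookup table and value array can be sized before the
	 * real calls below.
	 */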
	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++)
		printf("%s: %"PRIu64"\n",
			xstats_names[idx_xstat].name,
			xstats[idx_xstat].value);
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	rte_eth_xstats_reset(port_id);
}

void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;
	portid_t pid;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf("  %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}

void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
			"RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
		(qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
		(qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
		(qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %u, "
			"TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX flags: %#x", qinfo.conf.txq_flags);
	printf("\nTX deferred start: %s",
		(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\n");
}

void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	portid_t pid;
	uint16_t mtu;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		RTE_ETH_FOREACH_DEV(pid)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	port = &ports[port_id];
	rte_eth_link_get_nowait(port_id, &link);
	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	printf("\nDriver name: %s", dev_info.driver_name);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
							port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload: \n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on \n");
		else
			printf("  strip off \n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("  filter on \n");
		else
			printf("  filter off \n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("  qinq(extend) on \n");
		else
			printf("  qinq(extend) off \n");
	}

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
		     i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			if (p)
				printf("  %s\n", p);
			else
				printf("  user defined %d\n", i);
		}
	}

	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
}

void
port_offload_cap_display(portid_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	static const char *info_border = "************";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);

	printf("\n%s Port %d supported offload features: %s\n",
		info_border, port_id, info_border);

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
		printf("VLAN stripped:                 ");
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
		printf("Double VLANs stripped:         ");
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		printf("RX IPv4 checksum:              ");
		if (dev->data->dev_conf.rxmode.hw_ip_checksum)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
		printf("RX UDP checksum:               ");
		if (dev->data->dev_conf.rxmode.hw_ip_checksum)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
		printf("RX TCP checksum:               ");
		if (dev->data->dev_conf.rxmode.hw_ip_checksum)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
		printf("RX Outer IPv4 checksum:        on\n");

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
		printf("Large receive offload:         ");
		if (dev->data->dev_conf.rxmode.enable_lro)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
		printf("VLAN insert:                   ");
		if (ports[port_id].tx_ol_flags &
		    TESTPMD_TX_OFFLOAD_INSERT_VLAN)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Double VLANs insert:           ");
		if (ports[port_id].tx_ol_flags &
		    TESTPMD_TX_OFFLOAD_INSERT_QINQ)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
		printf("TX IPv4 checksum:              ");
		if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
		printf("TX UDP checksum:               ");
		if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
		printf("TX TCP checksum:               ");
		if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
		printf("TX SCTP checksum:              ");
		if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("TX Outer IPv4 checksum:        ");
		if (ports[port_id].tx_ol_flags &
		    TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
		printf("TX TCP segmentation:           ");
		if (ports[port_id].tso_segsz != 0)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
		printf("TX UDP segmentation:           ");
		if (ports[port_id].tso_segsz != 0)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
		printf("TSO for VXLAN tunnel packet:   ");
		if (ports[port_id].tunnel_tso_segsz)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
		printf("TSO for GRE tunnel packet:     ");
		if (ports[port_id].tunnel_tso_segsz)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
		printf("TSO for IPIP tunnel packet:    ");
		if (ports[port_id].tunnel_tso_segsz)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
		printf("TSO for GENEVE tunnel packet:  ");
		if (ports[port_id].tunnel_tso_segsz)
			printf("on\n");
		else
			printf("off\n");
	}
}

int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	if (rte_eth_dev_is_valid_port(port_id))
		return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}
	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}

void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}

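/*
 * Worked example (editorial, illustrative values):
 * port_reg_bit_field_display(0, 0x8, 4, 7) reads the 32-bit register at
 * offset 0x8 and extracts bits [4, 7]; for a register value of 0xA5 the
 * output is "bits[4, 7]=0xA (10)".
 */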
void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
				(unsigned)value, (unsigned)value,
				(unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}

/* Generic flow management functions. */

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

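/*
 * Editorial note: e.g. MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth))
 * expands to the designated initializer
 *   [RTE_FLOW_ITEM_TYPE_ETH] = { .name = "ETH",
 *                                .size = sizeof(struct rte_flow_item_eth) },
 * so flow_item[] below can be indexed directly by item type.
 */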
/** Information about known flow pattern items. */
static const struct {
	const char *name;
	size_t size;
} flow_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
};

/** Compute storage space needed by item specification. */
static void
flow_item_spec_size(const struct rte_flow_item *item,
		    size_t *size, size_t *pad)
{
	if (!item->spec) {
		*size = 0;
		goto empty;
	}
	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		*size = offsetof(struct rte_flow_item_raw, pattern) +
			spec.raw->length * sizeof(*spec.raw->pattern);
		break;
	default:
		*size = flow_item[item->type].size;
		break;
	}
empty:
	*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}

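/*
 * Editorial example: a 4-byte (at this revision) rte_flow_item_vlan spec
 * yields *size = 4 and *pad = 4, so callers advancing by size + pad keep
 * every copied spec aligned to sizeof(double), as port_flow_new() below
 * relies on.
 */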
/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct {
	const char *name;
	size_t size;
} flow_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, 0),
	MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};

/** Compute storage space needed by action configuration. */
static void
flow_action_conf_size(const struct rte_flow_action *action,
		      size_t *size, size_t *pad)
{
	if (!action->conf) {
		*size = 0;
		goto empty;
	}
	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
		} conf;

	case RTE_FLOW_ACTION_TYPE_RSS:
		conf.rss = action->conf;
		*size = offsetof(struct rte_flow_action_rss, queue) +
			conf.rss->num * sizeof(*conf.rss->queue);
		break;
	default:
		*size = flow_action[action->type].size;
		break;
	}
empty:
	*pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
}

/** Generate a port_flow entry from attributes/pattern/actions. */
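/*
 * Editorial note on the control flow below: the loops run twice through the
 * "store:" label. On the first pass pf is NULL, so only off1 (item/action
 * headers) and off2 (spec/last/mask/conf payloads) are accumulated to size
 * the allocation; calloc() then reserves a single buffer and the goto
 * re-enters the loops to copy everything into pf->data. Both offsets stay
 * 8-byte aligned via RTE_ALIGN_CEIL(..., sizeof(double)).
 */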
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	struct port_flow *pf = NULL;
	size_t tmp;
	size_t pad;
	size_t off1 = 0;
	size_t off2 = 0;
	int err = ENOTSUP;

store:
	item = pattern;
	if (pf)
		pf->pattern = (void *)&pf->data[off1];
	do {
		struct rte_flow_item *dst = NULL;

		if ((unsigned int)item->type >= RTE_DIM(flow_item) ||
		    !flow_item[item->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, item, sizeof(*item));
		off1 += sizeof(*item);
		flow_item_spec_size(item, &tmp, &pad);
		if (item->spec) {
			if (pf)
				dst->spec = memcpy(pf->data + off2,
						   item->spec, tmp);
			off2 += tmp + pad;
		}
		if (item->last) {
			if (pf)
				dst->last = memcpy(pf->data + off2,
						   item->last, tmp);
			off2 += tmp + pad;
		}
		if (item->mask) {
			if (pf)
				dst->mask = memcpy(pf->data + off2,
						   item->mask, tmp);
			off2 += tmp + pad;
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	action = actions;
	if (pf)
		pf->actions = (void *)&pf->data[off1];
	do {
		struct rte_flow_action *dst = NULL;

		if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
		    !flow_action[action->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, action, sizeof(*action));
		off1 += sizeof(*action);
		flow_action_conf_size(action, &tmp, &pad);
		if (action->conf) {
			if (pf)
				dst->conf = memcpy(pf->data + off2,
						   action->conf, tmp);
			off2 += tmp + pad;
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	if (pf != NULL)
		return pf;
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double));
	pf = calloc(1, tmp + off1 + off2);
	if (pf == NULL)
		err = errno;
	else {
		*pf = (const struct port_flow){
			.size = tmp + off1 + off2,
			.attr = *attr,
		};
		tmp -= offsetof(struct port_flow, data);
		off2 = tmp + off1;
		off1 = tmp;
		goto store;
	}
notsup:
	rte_errno = err;
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	printf("Caught error type %d (%s): %s%s\n",
	       error->type, errstr,
	       error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					error->cause), buf) : "",
	       error->message ? error->message : "(no stated reason)");
	return -err;
}
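
/*
 * Example output (editorial, illustrative values): a PMD rejecting an
 * unsupported pattern item typically prints something like
 *   Caught error type 9 (specific pattern item): cause: 0x7ffc..., <message>
 * and the negative errno set by the PMD is propagated back to the caller.
 */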

/** Validate flow rule. */
int
port_flow_validate(portid_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item *pattern,
		   const struct rte_flow_action *actions)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x11, sizeof(error));
	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
		return port_flow_complain(&error);
	printf("Flow rule validated\n");
	return 0;
}

/** Create flow rule. */
int
port_flow_create(portid_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions)
{
	struct rte_flow *flow;
	struct rte_port *port;
	struct port_flow *pf;
	uint32_t id;
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x22, sizeof(error));
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow)
		return port_flow_complain(&error);
	port = &ports[port_id];
	if (port->flow_list) {
		if (port->flow_list->id == UINT32_MAX) {
			printf("Highest rule ID is already assigned, delete"
			       " it first");
			rte_flow_destroy(port_id, flow, NULL);
			return -ENOMEM;
		}
		id = port->flow_list->id + 1;
	} else
		id = 0;
	pf = port_flow_new(attr, pattern, actions);
	if (!pf) {
		int err = rte_errno;

		printf("Cannot allocate flow: %s\n", rte_strerror(err));
		rte_flow_destroy(port_id, flow, NULL);
		return -err;
	}
	pf->next = port->flow_list;
	pf->id = id;
	pf->flow = flow;
	port->flow_list = pf;
	printf("Flow rule #%u created\n", pf->id);
	return 0;
}

/** Destroy a number of flow rules. */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			*tmp = pf->next;
			free(pf);
			break;
		}
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}

/** Remove all flow rules. */
int
port_flow_flush(portid_t port_id)
{
	struct rte_flow_error error;
	struct rte_port *port;
	int ret = 0;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x44, sizeof(error));
	if (rte_flow_flush(port_id, &error)) {
		ret = port_flow_complain(&error);
		if (port_id_is_invalid(port_id, DISABLED_WARN) ||
		    port_id == (portid_t)RTE_PORT_ALL)
			return ret;
	}
	port = &ports[port_id];
	while (port->flow_list) {
		struct port_flow *pf = port->flow_list->next;

		free(port->flow_list);
		port->flow_list = pf;
	}
	return ret;
}

/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
		enum rte_flow_action_type action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
	} query;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	if ((unsigned int)action >= RTE_DIM(flow_action) ||
	    !flow_action[action].name)
		name = "unknown";
	else
		name = flow_action[action].name;
	switch (action) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		printf("Cannot query action type %d (%s)\n", action, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	switch (action) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action, name);
		break;
	}
	return 0;
}

/** List flow rules. */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
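	/*
	 * Editorial note: this is an insertion sort into "list" through the
	 * pf->tmp links, ascending by (group, priority, id); e.g. two rules
	 * in group 0 with equal priority are listed in creation order (lower
	 * ID first) even though flow_list itself is kept newest-first.
	 */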
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (pf->attr.group == group[i])
					break;
			if (i == n)
				continue;
		}
		tmp = &list;
		while (*tmp &&
		       (pf->attr.group > (*tmp)->attr.group ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority > (*tmp)->attr.priority) ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority == (*tmp)->attr.priority &&
			 pf->id > (*tmp)->id)))
			tmp = &(*tmp)->tmp;
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->pattern;
		const struct rte_flow_action *action = pf->actions;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t",
		       pf->id,
		       pf->attr.group,
		       pf->attr.priority,
		       pf->attr.ingress ? 'i' : '-',
		       pf->attr.egress ? 'e' : '-');
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", flow_item[item->type].name);
			++item;
		}
		printf("=>");
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", flow_action[action->type].name);
			++action;
		}
		printf("\n");
	}
}

/** Restrict ingress traffic to the defined flow rules. */
int
port_flow_isolate(portid_t port_id, int set)
{
	struct rte_flow_error error;

	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x66, sizeof(error));
	if (rte_flow_isolate(port_id, set, &error))
		return port_flow_complain(&error);
	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
	       port_id,
	       set ? "now restricted" : "not restricted anymore");
	return 0;
}

/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}

static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}
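
/*
 * Editorial example (illustrative values): for an igb port 0, RX queue 1,
 * with driver name "net_e1000_igb", the zone name built above would be
 * "net_e1000_igb_rx_ring_0_1"; the exact driver string depends on the PMD.
 */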

union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
					(unsigned)dword.words.hi);
}

static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   uint8_t port_id,
#else
			   __rte_unused uint8_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
			(unsigned)txd.lo_dword.words.lo,
			(unsigned)txd.lo_dword.words.hi,
			(unsigned)txd.hi_dword.words.lo,
			(unsigned)txd.hi_dword.words.hi);
}

void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}

void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}

void
rxtx_config_display(void)
{
	printf("  %s packet forwarding%s - CRC stripping %s - "
	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
	       retry_enabled == 0 ? "" : " with retry",
	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
	       nb_pkt_per_burst);

	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
		printf("  packet len=%u - nb packet segments=%d\n",
				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf;
	struct rte_eth_txconf *tx_conf = &ports[0].tx_conf;

	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);
	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
	       nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh,
	       rx_conf->rx_thresh.wthresh);
	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
	       nb_txq, nb_txd, tx_conf->tx_free_thresh);
	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh,
	       tx_conf->tx_thresh.wthresh);
	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
	       tx_conf->tx_rs_thresh, tx_conf->txq_flags);
}

void
port_rss_reta_info(portid_t port_id,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t nb_entries)
{
	uint16_t i, idx, shift;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
	if (ret != 0) {
		printf("Failed to get RSS RETA info, return code = %d\n", ret);
		return;
	}

	for (i = 0; i < nb_entries; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;
		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
					i, reta_conf[idx].reta[shift]);
	}
}

/*
 * Display the RSS hash functions of a port and, optionally, the RSS hash
 * key of the port.
 */
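/*
 * Editorial note: this backs the "show port <id> rss-hash [key]" CLI
 * command; show_rss_key selects whether the raw key bytes are dumped in
 * addition to the active hash functions.
 */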
1700 void
1701 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
1702 {
1703 	struct rte_eth_rss_conf rss_conf;
1704 	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
1705 	uint64_t rss_hf;
1706 	uint8_t i;
1707 	int diag;
1708 	struct rte_eth_dev_info dev_info;
1709 	uint8_t hash_key_size;
1710 
1711 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1712 		return;
1713 
1714 	memset(&dev_info, 0, sizeof(dev_info));
1715 	rte_eth_dev_info_get(port_id, &dev_info);
1716 	if (dev_info.hash_key_size > 0 &&
1717 			dev_info.hash_key_size <= sizeof(rss_key))
1718 		hash_key_size = dev_info.hash_key_size;
1719 	else {
1720 		printf("dev_info did not provide a valid hash key size\n");
1721 		return;
1722 	}
1723 
1724 	rss_conf.rss_hf = 0;
1725 	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1726 		if (!strcmp(rss_info, rss_type_table[i].str))
1727 			rss_conf.rss_hf = rss_type_table[i].rss_type;
1728 	}
1729 
1730 	/* Get RSS hash key if asked to display it */
1731 	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
1732 	rss_conf.rss_key_len = hash_key_size;
1733 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1734 	if (diag != 0) {
1735 		switch (diag) {
1736 		case -ENODEV:
1737 			printf("port index %d invalid\n", port_id);
1738 			break;
1739 		case -ENOTSUP:
1740 			printf("operation not supported by device\n");
1741 			break;
1742 		default:
1743 			printf("operation failed - diag=%d\n", diag);
1744 			break;
1745 		}
1746 		return;
1747 	}
1748 	rss_hf = rss_conf.rss_hf;
1749 	if (rss_hf == 0) {
1750 		printf("RSS disabled\n");
1751 		return;
1752 	}
1753 	printf("RSS functions:\n ");
1754 	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1755 		if (rss_hf & rss_type_table[i].rss_type)
1756 			printf("%s ", rss_type_table[i].str);
1757 	}
1758 	printf("\n");
1759 	if (!show_rss_key)
1760 		return;
1761 	printf("RSS key:\n");
1762 	for (i = 0; i < hash_key_size; i++)
1763 		printf("%02X", rss_key[i]);
1764 	printf("\n");
1765 }
1766 
1767 void
1768 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
1769 			 uint hash_key_len)
1770 {
1771 	struct rte_eth_rss_conf rss_conf;
1772 	int diag;
1773 	unsigned int i;
1774 
1775 	rss_conf.rss_key = NULL;
1776 	rss_conf.rss_key_len = hash_key_len;
1777 	rss_conf.rss_hf = 0;
1778 	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1779 		if (!strcmp(rss_type_table[i].str, rss_type))
1780 			rss_conf.rss_hf = rss_type_table[i].rss_type;
1781 	}
1782 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1783 	if (diag == 0) {
1784 		rss_conf.rss_key = hash_key;
1785 		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
1786 	}
1787 	if (diag == 0)
1788 		return;
1789 
1790 	switch (diag) {
1791 	case -ENODEV:
1792 		printf("port index %d invalid\n", port_id);
1793 		break;
1794 	case -ENOTSUP:
1795 		printf("operation not supported by device\n");
1796 		break;
1797 	default:
1798 		printf("operation failed - diag=%d\n", diag);
1799 		break;
1800 	}
1801 }
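
/*
 * Usage sketch (illustrative only, not part of testpmd): update the RSS
 * key used for the "ipv4-tcp" flow type on port 0. The 40-byte key size
 * is an assumption; it must match the hash_key_size reported by the
 * device in its rte_eth_dev_info.
 *
 *	uint8_t key[40] = { 0 };	filled with the desired key bytes
 *	port_rss_hash_key_update(0, "ipv4-tcp", key, sizeof(key));
 */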
1802 
/*
 * Set up the forwarding configuration of each logical core.
 */
1806 static void
1807 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
1808 {
1809 	streamid_t nb_fs_per_lcore;
1810 	streamid_t nb_fs;
1811 	streamid_t sm_id;
1812 	lcoreid_t  nb_extra;
1813 	lcoreid_t  nb_fc;
1814 	lcoreid_t  nb_lc;
1815 	lcoreid_t  lc_id;
1816 
1817 	nb_fs = cfg->nb_fwd_streams;
1818 	nb_fc = cfg->nb_fwd_lcores;
1819 	if (nb_fs <= nb_fc) {
1820 		nb_fs_per_lcore = 1;
1821 		nb_extra = 0;
1822 	} else {
1823 		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
1824 		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
1825 	}
1826 
1827 	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
1828 	sm_id = 0;
1829 	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
1830 		fwd_lcores[lc_id]->stream_idx = sm_id;
1831 		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
1832 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1833 	}
1834 
1835 	/*
1836 	 * Assign extra remaining streams, if any.
1837 	 */
1838 	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
1839 	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
1840 		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
1841 		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
1842 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1843 	}
1844 }
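
/*
 * Worked example (illustrative): with nb_fwd_streams = 10 and
 * nb_fwd_lcores = 4, nb_fs_per_lcore = 2 and nb_extra = 2, so the first
 * two lcores get 2 streams each (indexes 0-1 and 2-3) and the last two
 * lcores get 3 streams each (indexes 4-6 and 7-9).
 */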
1845 
1846 static void
1847 simple_fwd_config_setup(void)
1848 {
1849 	portid_t i;
1850 	portid_t j;
1851 	portid_t inc = 2;
1852 
1853 	if (port_topology == PORT_TOPOLOGY_CHAINED ||
1854 	    port_topology == PORT_TOPOLOGY_LOOP) {
1855 		inc = 1;
1856 	} else if (nb_fwd_ports % 2) {
1857 		printf("\nWarning! Cannot handle an odd number of ports "
1858 		       "with the current port topology. Configuration "
1859 		       "must be changed to have an even number of ports, "
1860 		       "or relaunch application with "
1861 		       "--port-topology=chained\n\n");
1862 	}
1863 
1864 	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
1865 	cur_fwd_config.nb_fwd_streams =
1866 		(streamid_t) cur_fwd_config.nb_fwd_ports;
1867 
1868 	/* reinitialize forwarding streams */
1869 	init_fwd_streams();
1870 
1871 	/*
1872 	 * In the simple forwarding test, the number of forwarding cores
1873 	 * must be lower or equal to the number of forwarding ports.
1874 	 */
1875 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1876 	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
1877 		cur_fwd_config.nb_fwd_lcores =
1878 			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
1879 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
1880 
1881 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
1882 		if (port_topology != PORT_TOPOLOGY_LOOP)
1883 			j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
1884 		else
1885 			j = i;
1886 		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
1887 		fwd_streams[i]->rx_queue  = 0;
1888 		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
1889 		fwd_streams[i]->tx_queue  = 0;
1890 		fwd_streams[i]->peer_addr = j;
1891 		fwd_streams[i]->retry_enabled = retry_enabled;
1892 
1893 		if (port_topology == PORT_TOPOLOGY_PAIRED) {
1894 			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
1895 			fwd_streams[j]->rx_queue  = 0;
1896 			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
1897 			fwd_streams[j]->tx_queue  = 0;
1898 			fwd_streams[j]->peer_addr = i;
1899 			fwd_streams[j]->retry_enabled = retry_enabled;
1900 		}
1901 	}
1902 }
1903 
/**
 * For the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream is composed of an RX queue to poll on an RX port for input
 * packets, associated with a TX queue of a TX port to which forwarded
 * packets are sent.
 * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
 * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
 * following rules:
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    - TxQl = RxQj
 */
1914 static void
1915 rss_fwd_config_setup(void)
1916 {
1917 	portid_t   rxp;
1918 	portid_t   txp;
1919 	queueid_t  rxq;
1920 	queueid_t  nb_q;
1921 	streamid_t  sm_id;
1922 
1923 	nb_q = nb_rxq;
1924 	if (nb_q > nb_txq)
1925 		nb_q = nb_txq;
1926 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1927 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1928 	cur_fwd_config.nb_fwd_streams =
1929 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
1930 
1931 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
1932 		cur_fwd_config.nb_fwd_lcores =
1933 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
1934 
1935 	/* reinitialize forwarding streams */
1936 	init_fwd_streams();
1937 
1938 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
1939 	rxp = 0; rxq = 0;
1940 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1941 		struct fwd_stream *fs;
1942 
1943 		fs = fwd_streams[sm_id];
1944 
1945 		if ((rxp & 0x1) == 0)
1946 			txp = (portid_t) (rxp + 1);
1947 		else
1948 			txp = (portid_t) (rxp - 1);
1949 		/*
1950 		 * if we are in loopback, simply send stuff out through the
1951 		 * ingress port
1952 		 */
1953 		if (port_topology == PORT_TOPOLOGY_LOOP)
1954 			txp = rxp;
1955 
1956 		fs->rx_port = fwd_ports_ids[rxp];
1957 		fs->rx_queue = rxq;
1958 		fs->tx_port = fwd_ports_ids[txp];
1959 		fs->tx_queue = rxq;
1960 		fs->peer_addr = fs->tx_port;
1961 		fs->retry_enabled = retry_enabled;
1962 		rxq = (queueid_t) (rxq + 1);
1963 		if (rxq < nb_q)
1964 			continue;
1965 		/*
1966 		 * rxq == nb_q
1967 		 * Restart from RX queue 0 on next RX port
1968 		 */
1969 		rxq = 0;
1970 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
1971 			rxp = (portid_t)
1972 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
1973 		else
1974 			rxp = (portid_t) (rxp + 1);
1975 	}
1976 }
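
/*
 * Worked example (illustrative, assuming the default paired topology):
 * with 2 forwarding ports and 2 queues per port, rss_fwd_config_setup()
 * creates 4 streams:
 *	stream 0: RX P0/Q0 -> TX P1/Q0
 *	stream 1: RX P0/Q1 -> TX P1/Q1
 *	stream 2: RX P1/Q0 -> TX P0/Q0
 *	stream 3: RX P1/Q1 -> TX P0/Q1
 */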
1977 
/**
 * For the DCB forwarding test, each core is assigned to one traffic class.
 *
 * Each core is assigned a multi-stream, each stream being composed of
 * an RX queue to poll on an RX port for input packets, associated with
 * a TX queue of a TX port to which forwarded packets are sent. All RX and
 * TX queues of a stream map to the same traffic class.
 * If VMDQ and DCB co-exist, the traffic classes of the different pools
 * share the same core.
 */
1988 static void
1989 dcb_fwd_config_setup(void)
1990 {
1991 	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
1992 	portid_t txp, rxp = 0;
1993 	queueid_t txq, rxq = 0;
1994 	lcoreid_t  lc_id;
1995 	uint16_t nb_rx_queue, nb_tx_queue;
1996 	uint16_t i, j, k, sm_id = 0;
1997 	uint8_t tc = 0;
1998 
1999 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2000 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2001 	cur_fwd_config.nb_fwd_streams =
2002 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2003 
2004 	/* reinitialize forwarding streams */
2005 	init_fwd_streams();
2006 	sm_id = 0;
2007 	txp = 1;
2008 	/* get the dcb info on the first RX and TX ports */
2009 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2010 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2011 
2012 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2013 		fwd_lcores[lc_id]->stream_nb = 0;
2014 		fwd_lcores[lc_id]->stream_idx = sm_id;
2015 		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
			/* If nb_queue is zero, this TC is not enabled
			 * on the pool.
			 */
2019 			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2020 				break;
2021 			k = fwd_lcores[lc_id]->stream_nb +
2022 				fwd_lcores[lc_id]->stream_idx;
2023 			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2024 			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			nb_rx_queue = rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2026 			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2027 			for (j = 0; j < nb_rx_queue; j++) {
2028 				struct fwd_stream *fs;
2029 
2030 				fs = fwd_streams[k + j];
2031 				fs->rx_port = fwd_ports_ids[rxp];
2032 				fs->rx_queue = rxq + j;
2033 				fs->tx_port = fwd_ports_ids[txp];
2034 				fs->tx_queue = txq + j % nb_tx_queue;
2035 				fs->peer_addr = fs->tx_port;
2036 				fs->retry_enabled = retry_enabled;
2037 			}
2038 			fwd_lcores[lc_id]->stream_nb +=
2039 				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2040 		}
2041 		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2042 
2043 		tc++;
2044 		if (tc < rxp_dcb_info.nb_tcs)
2045 			continue;
2046 		/* Restart from TC 0 on next RX port */
2047 		tc = 0;
2048 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2049 			rxp = (portid_t)
2050 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
2051 		else
2052 			rxp++;
2053 		if (rxp >= nb_fwd_ports)
2054 			return;
2055 		/* get the dcb information on next RX and TX ports */
2056 		if ((rxp & 0x1) == 0)
2057 			txp = (portid_t) (rxp + 1);
2058 		else
2059 			txp = (portid_t) (rxp - 1);
2060 		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2061 		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2062 	}
2063 }
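
/*
 * Worked example (illustrative, assuming one pool and that RX ports are
 * advanced one at a time): with 2 traffic classes, 2 forwarding ports
 * and 4 forwarding cores, dcb_fwd_config_setup() assigns lcore 0 to TC0
 * of port 0, lcore 1 to TC1 of port 0, lcore 2 to TC0 of port 1 and
 * lcore 3 to TC1 of port 1, each lcore polling all RX queues of its
 * traffic class.
 */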
2064 
2065 static void
2066 icmp_echo_config_setup(void)
2067 {
2068 	portid_t  rxp;
2069 	queueid_t rxq;
2070 	lcoreid_t lc_id;
2071 	uint16_t  sm_id;
2072 
2073 	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
2074 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
2075 			(nb_txq * nb_fwd_ports);
2076 	else
2077 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2078 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2079 	cur_fwd_config.nb_fwd_streams =
2080 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2081 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
2082 		cur_fwd_config.nb_fwd_lcores =
2083 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
2084 	if (verbose_level > 0) {
2085 		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __func__,
2087 		       cur_fwd_config.nb_fwd_lcores,
2088 		       cur_fwd_config.nb_fwd_ports,
2089 		       cur_fwd_config.nb_fwd_streams);
2090 	}
2091 
2092 	/* reinitialize forwarding streams */
2093 	init_fwd_streams();
2094 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
2095 	rxp = 0; rxq = 0;
2096 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2097 		if (verbose_level > 0)
			printf("  core=%d:\n", lc_id);
2099 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2100 			struct fwd_stream *fs;
2101 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2102 			fs->rx_port = fwd_ports_ids[rxp];
2103 			fs->rx_queue = rxq;
2104 			fs->tx_port = fs->rx_port;
2105 			fs->tx_queue = rxq;
2106 			fs->peer_addr = fs->tx_port;
2107 			fs->retry_enabled = retry_enabled;
2108 			if (verbose_level > 0)
2109 				printf("  stream=%d port=%d rxq=%d txq=%d\n",
2110 				       sm_id, fs->rx_port, fs->rx_queue,
2111 				       fs->tx_queue);
2112 			rxq = (queueid_t) (rxq + 1);
2113 			if (rxq == nb_rxq) {
2114 				rxq = 0;
2115 				rxp = (portid_t) (rxp + 1);
2116 			}
2117 		}
2118 	}
2119 }
2120 
2121 void
2122 fwd_config_setup(void)
2123 {
2124 	cur_fwd_config.fwd_eng = cur_fwd_eng;
2125 	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2126 		icmp_echo_config_setup();
2127 		return;
2128 	}
	if ((nb_rxq > 1) && (nb_txq > 1)) {
		if (dcb_config)
			dcb_fwd_config_setup();
		else
			rss_fwd_config_setup();
	} else
2136 		simple_fwd_config_setup();
2137 }
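
/*
 * Summary of the dispatch above (illustrative): the "icmpecho" engine
 * always uses its own setup; otherwise, multi-queue configurations
 * (nb_rxq > 1 and nb_txq > 1) use the DCB setup when DCB is configured
 * and the RSS setup when it is not, while single-queue configurations
 * fall back to the simple port-pair setup.
 */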
2138 
2139 void
2140 pkt_fwd_config_display(struct fwd_config *cfg)
2141 {
2142 	struct fwd_stream *fs;
2143 	lcoreid_t  lc_id;
2144 	streamid_t sm_id;
2145 
2146 	printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
2147 		"NUMA support %s, MP over anonymous pages %s\n",
2148 		cfg->fwd_eng->fwd_mode_name,
2149 		retry_enabled == 0 ? "" : " with retry",
2150 		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
2151 		numa_support == 1 ? "enabled" : "disabled",
2152 		mp_anon != 0 ? "enabled" : "disabled");
2153 
2154 	if (retry_enabled)
2155 		printf("TX retry num: %u, delay between TX retries: %uus\n",
2156 			burst_tx_retry_num, burst_tx_delay_time);
2157 	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
2158 		printf("Logical Core %u (socket %u) forwards packets on "
2159 		       "%d streams:",
2160 		       fwd_lcores_cpuids[lc_id],
2161 		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
2162 		       fwd_lcores[lc_id]->stream_nb);
2163 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
2164 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
2165 			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
2166 			       "P=%d/Q=%d (socket %u) ",
2167 			       fs->rx_port, fs->rx_queue,
2168 			       ports[fs->rx_port].socket_id,
2169 			       fs->tx_port, fs->tx_queue,
2170 			       ports[fs->tx_port].socket_id);
2171 			print_ethaddr("peer=",
2172 				      &peer_eth_addrs[fs->peer_addr]);
2173 		}
2174 		printf("\n");
2175 	}
2176 	printf("\n");
2177 }
2178 
2179 int
2180 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2181 {
2182 	unsigned int i;
2183 	unsigned int lcore_cpuid;
2184 	int record_now;
2185 
2186 	record_now = 0;
2187  again:
2188 	for (i = 0; i < nb_lc; i++) {
2189 		lcore_cpuid = lcorelist[i];
		if (!rte_lcore_is_enabled(lcore_cpuid)) {
			printf("lcore %u not enabled\n", lcore_cpuid);
			return -1;
		}
		if (lcore_cpuid == rte_get_master_lcore()) {
			printf("lcore %u cannot be used for packet "
			       "forwarding: it is the master lcore, "
			       "reserved for command line parsing only\n",
			       lcore_cpuid);
2199 			return -1;
2200 		}
2201 		if (record_now)
2202 			fwd_lcores_cpuids[i] = lcore_cpuid;
2203 	}
2204 	if (record_now == 0) {
2205 		record_now = 1;
2206 		goto again;
2207 	}
2208 	nb_cfg_lcores = (lcoreid_t) nb_lc;
2209 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2210 		printf("previous number of forwarding cores %u - changed to "
2211 		       "number of configured cores %u\n",
2212 		       (unsigned int) nb_fwd_lcores, nb_lc);
2213 		nb_fwd_lcores = (lcoreid_t) nb_lc;
2214 	}
2215 
2216 	return 0;
2217 }
2218 
2219 int
2220 set_fwd_lcores_mask(uint64_t lcoremask)
2221 {
2222 	unsigned int lcorelist[64];
2223 	unsigned int nb_lc;
2224 	unsigned int i;
2225 
2226 	if (lcoremask == 0) {
2227 		printf("Invalid NULL mask of cores\n");
2228 		return -1;
2229 	}
2230 	nb_lc = 0;
2231 	for (i = 0; i < 64; i++) {
		if (!((uint64_t)(1ULL << i) & lcoremask))
2233 			continue;
2234 		lcorelist[nb_lc++] = i;
2235 	}
2236 	return set_fwd_lcores_list(lcorelist, nb_lc);
2237 }
2238 
2239 void
2240 set_fwd_lcores_number(uint16_t nb_lc)
2241 {
2242 	if (nb_lc > nb_cfg_lcores) {
2243 		printf("nb fwd cores %u > %u (max. number of configured "
2244 		       "lcores) - ignored\n",
2245 		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2246 		return;
2247 	}
2248 	nb_fwd_lcores = (lcoreid_t) nb_lc;
2249 	printf("Number of forwarding cores set to %u\n",
2250 	       (unsigned int) nb_fwd_lcores);
2251 }
2252 
2253 void
2254 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2255 {
2256 	unsigned int i;
2257 	portid_t port_id;
2258 	int record_now;
2259 
2260 	record_now = 0;
2261  again:
2262 	for (i = 0; i < nb_pt; i++) {
2263 		port_id = (portid_t) portlist[i];
2264 		if (port_id_is_invalid(port_id, ENABLED_WARN))
2265 			return;
2266 		if (record_now)
2267 			fwd_ports_ids[i] = port_id;
2268 	}
2269 	if (record_now == 0) {
2270 		record_now = 1;
2271 		goto again;
2272 	}
2273 	nb_cfg_ports = (portid_t) nb_pt;
2274 	if (nb_fwd_ports != (portid_t) nb_pt) {
2275 		printf("previous number of forwarding ports %u - changed to "
2276 		       "number of configured ports %u\n",
2277 		       (unsigned int) nb_fwd_ports, nb_pt);
2278 		nb_fwd_ports = (portid_t) nb_pt;
2279 	}
2280 }
2281 
2282 void
2283 set_fwd_ports_mask(uint64_t portmask)
2284 {
2285 	unsigned int portlist[64];
2286 	unsigned int nb_pt;
2287 	unsigned int i;
2288 
2289 	if (portmask == 0) {
2290 		printf("Invalid NULL mask of ports\n");
2291 		return;
2292 	}
2293 	nb_pt = 0;
2294 	RTE_ETH_FOREACH_DEV(i) {
		if (!((uint64_t)(1ULL << i) & portmask))
2296 			continue;
2297 		portlist[nb_pt++] = i;
2298 	}
2299 	set_fwd_ports_list(portlist, nb_pt);
2300 }
2301 
2302 void
2303 set_fwd_ports_number(uint16_t nb_pt)
2304 {
2305 	if (nb_pt > nb_cfg_ports) {
2306 		printf("nb fwd ports %u > %u (number of configured "
2307 		       "ports) - ignored\n",
2308 		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2309 		return;
2310 	}
2311 	nb_fwd_ports = (portid_t) nb_pt;
2312 	printf("Number of forwarding ports set to %u\n",
2313 	       (unsigned int) nb_fwd_ports);
2314 }
2315 
2316 int
2317 port_is_forwarding(portid_t port_id)
2318 {
2319 	unsigned int i;
2320 
2321 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2322 		return -1;
2323 
2324 	for (i = 0; i < nb_fwd_ports; i++) {
2325 		if (fwd_ports_ids[i] == port_id)
2326 			return 1;
2327 	}
2328 
2329 	return 0;
2330 }
2331 
2332 void
2333 set_nb_pkt_per_burst(uint16_t nb)
2334 {
2335 	if (nb > MAX_PKT_BURST) {
		printf("nb pkt per burst: %u > %u (maximum number of packets "
		       "per burst) - ignored\n",
2338 		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2339 		return;
2340 	}
2341 	nb_pkt_per_burst = nb;
2342 	printf("Number of packets per burst set to %u\n",
2343 	       (unsigned int) nb_pkt_per_burst);
2344 }
2345 
2346 static const char *
2347 tx_split_get_name(enum tx_pkt_split split)
2348 {
2349 	uint32_t i;
2350 
2351 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2352 		if (tx_split_name[i].split == split)
2353 			return tx_split_name[i].name;
2354 	}
2355 	return NULL;
2356 }
2357 
2358 void
2359 set_tx_pkt_split(const char *name)
2360 {
2361 	uint32_t i;
2362 
2363 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2364 		if (strcmp(tx_split_name[i].name, name) == 0) {
2365 			tx_pkt_split = tx_split_name[i].split;
2366 			return;
2367 		}
2368 	}
2369 	printf("unknown value: \"%s\"\n", name);
2370 }
2371 
2372 void
2373 show_tx_pkt_segments(void)
2374 {
2375 	uint32_t i, n;
2376 	const char *split;
2377 
2378 	n = tx_pkt_nb_segs;
2379 	split = tx_split_get_name(tx_pkt_split);
2380 
2381 	printf("Number of segments: %u\n", n);
2382 	printf("Segment sizes: ");
2383 	for (i = 0; i != n - 1; i++)
2384 		printf("%hu,", tx_pkt_seg_lengths[i]);
2385 	printf("%hu\n", tx_pkt_seg_lengths[i]);
2386 	printf("Split packet: %s\n", split);
2387 }
2388 
2389 void
2390 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2391 {
2392 	uint16_t tx_pkt_len;
2393 	unsigned i;
2394 
2395 	if (nb_segs >= (unsigned) nb_txd) {
2396 		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
2397 		       nb_segs, (unsigned int) nb_txd);
2398 		return;
2399 	}
2400 
	/*
	 * Check that each segment length is greater than or equal to
	 * the mbuf data size.
	 * Also check that the total packet length is greater than or equal
	 * to the size of an empty UDP/IP packet
	 * (sizeof(struct ether_hdr) + 20 + 8).
	 */
2407 	tx_pkt_len = 0;
2408 	for (i = 0; i < nb_segs; i++) {
2409 		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2410 			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2411 			       i, seg_lengths[i], (unsigned) mbuf_data_size);
2412 			return;
2413 		}
2414 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2415 	}
2416 	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
2417 		printf("total packet length=%u < %d - give up\n",
2418 				(unsigned) tx_pkt_len,
2419 				(int)(sizeof(struct ether_hdr) + 20 + 8));
2420 		return;
2421 	}
2422 
2423 	for (i = 0; i < nb_segs; i++)
2424 		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2425 
2426 	tx_pkt_length  = tx_pkt_len;
2427 	tx_pkt_nb_segs = (uint8_t) nb_segs;
2428 }
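
/*
 * Worked example (illustrative): set_tx_pkt_segments() with
 * seg_lengths = {64, 64} accepts the configuration when each segment
 * fits in an mbuf and the 128-byte total is at least the 42-byte
 * minimum (14-byte Ethernet header + 20-byte IP header + 8-byte UDP
 * header), and then sets tx_pkt_length = 128 and tx_pkt_nb_segs = 2.
 */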
2429 
2430 void
2431 setup_gro(const char *mode, uint8_t port_id)
2432 {
2433 	if (!rte_eth_dev_is_valid_port(port_id)) {
2434 		printf("invalid port id %u\n", port_id);
2435 		return;
2436 	}
2437 	if (test_done == 0) {
		printf("Before enabling/disabling GRO,"
				" please stop forwarding first\n");
2440 		return;
2441 	}
2442 	if (strcmp(mode, "on") == 0) {
2443 		if (gro_ports[port_id].enable) {
			printf("GRO is already enabled on port %u\n", port_id);
2445 			return;
2446 		}
2447 		gro_ports[port_id].enable = 1;
2448 		gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2449 
2450 		if (gro_ports[port_id].param.max_flow_num == 0)
2451 			gro_ports[port_id].param.max_flow_num =
2452 				GRO_DEFAULT_FLOW_NUM;
2453 		if (gro_ports[port_id].param.max_item_per_flow == 0)
2454 			gro_ports[port_id].param.max_item_per_flow =
2455 				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2456 	} else {
2457 		if (gro_ports[port_id].enable == 0) {
			printf("GRO is already disabled on port %u\n", port_id);
2459 			return;
2460 		}
2461 		gro_ports[port_id].enable = 0;
2462 	}
2463 }
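
/*
 * Usage sketch (illustrative): setup_gro("on", 0) enables TCP/IPv4 GRO
 * on port 0 with GRO_DEFAULT_FLOW_NUM flows and
 * GRO_DEFAULT_ITEM_NUM_PER_FLOW packets per flow, unless other limits
 * were already configured; setup_gro("off", 0) disables it again. Both
 * calls require forwarding to be stopped first.
 */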
2464 
char *
2466 list_pkt_forwarding_modes(void)
2467 {
2468 	static char fwd_modes[128] = "";
2469 	const char *separator = "|";
2470 	struct fwd_engine *fwd_eng;
2471 	unsigned i = 0;
2472 
	if (strlen(fwd_modes) == 0) {
2474 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
2475 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
2476 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2477 			strncat(fwd_modes, separator,
2478 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2479 		}
2480 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2481 	}
2482 
2483 	return fwd_modes;
2484 }
2485 
char *
2487 list_pkt_forwarding_retry_modes(void)
2488 {
2489 	static char fwd_modes[128] = "";
2490 	const char *separator = "|";
2491 	struct fwd_engine *fwd_eng;
2492 	unsigned i = 0;
2493 
2494 	if (strlen(fwd_modes) == 0) {
2495 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
2496 			if (fwd_eng == &rx_only_engine)
2497 				continue;
2498 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
2499 					sizeof(fwd_modes) -
2500 					strlen(fwd_modes) - 1);
2501 			strncat(fwd_modes, separator,
2502 					sizeof(fwd_modes) -
2503 					strlen(fwd_modes) - 1);
2504 		}
2505 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2506 	}
2507 
2508 	return fwd_modes;
2509 }
2510 
2511 void
2512 set_pkt_forwarding_mode(const char *fwd_mode_name)
2513 {
2514 	struct fwd_engine *fwd_eng;
2515 	unsigned i;
2516 
2517 	i = 0;
2518 	while ((fwd_eng = fwd_engines[i]) != NULL) {
		if (strcmp(fwd_eng->fwd_mode_name, fwd_mode_name) == 0) {
2520 			printf("Set %s packet forwarding mode%s\n",
2521 			       fwd_mode_name,
2522 			       retry_enabled == 0 ? "" : " with retry");
2523 			cur_fwd_eng = fwd_eng;
2524 			return;
2525 		}
2526 		i++;
2527 	}
2528 	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2529 }
2530 
2531 void
2532 set_verbose_level(uint16_t vb_level)
2533 {
2534 	printf("Change verbose level from %u to %u\n",
2535 	       (unsigned int) verbose_level, (unsigned int) vb_level);
2536 	verbose_level = vb_level;
2537 }
2538 
2539 void
2540 vlan_extend_set(portid_t port_id, int on)
2541 {
2542 	int diag;
2543 	int vlan_offload;
2544 
2545 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2546 		return;
2547 
2548 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2549 
2550 	if (on)
2551 		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
2552 	else
2553 		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
2554 
2555 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2556 	if (diag < 0)
		printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
2559 }
2560 
2561 void
2562 rx_vlan_strip_set(portid_t port_id, int on)
2563 {
2564 	int diag;
2565 	int vlan_offload;
2566 
2567 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2568 		return;
2569 
2570 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2571 
2572 	if (on)
2573 		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
2574 	else
2575 		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
2576 
2577 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2578 	if (diag < 0)
		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
2581 }
2582 
2583 void
2584 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
2585 {
2586 	int diag;
2587 
2588 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2589 		return;
2590 
2591 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
2592 	if (diag < 0)
		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, queue_id, on, diag);
2595 }
2596 
2597 void
2598 rx_vlan_filter_set(portid_t port_id, int on)
2599 {
2600 	int diag;
2601 	int vlan_offload;
2602 
2603 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2604 		return;
2605 
2606 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2607 
2608 	if (on)
2609 		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
2610 	else
2611 		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
2612 
2613 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2614 	if (diag < 0)
		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
2617 }
2618 
2619 int
2620 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
2621 {
2622 	int diag;
2623 
2624 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2625 		return 1;
2626 	if (vlan_id_is_invalid(vlan_id))
2627 		return 1;
2628 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2629 	if (diag == 0)
2630 		return 0;
	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
2632 	       "diag=%d\n",
2633 	       port_id, vlan_id, on, diag);
2634 	return -1;
2635 }
2636 
2637 void
2638 rx_vlan_all_filter_set(portid_t port_id, int on)
2639 {
2640 	uint16_t vlan_id;
2641 
2642 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2643 		return;
2644 	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
2645 		if (rx_vft_set(port_id, vlan_id, on))
2646 			break;
2647 	}
2648 }
2649 
2650 void
2651 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
2652 {
2653 	int diag;
2654 
2655 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2656 		return;
2657 
2658 	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
2659 	if (diag == 0)
2660 		return;
2661 
	printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
2663 	       "diag=%d\n",
2664 	       port_id, vlan_type, tp_id, diag);
2665 }
2666 
2667 void
2668 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
2669 {
2670 	int vlan_offload;
2671 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2672 		return;
2673 	if (vlan_id_is_invalid(vlan_id))
2674 		return;
2675 
2676 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2677 	if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
		printf("Error: QinQ has been enabled, single VLAN insertion is not allowed\n");
2679 		return;
2680 	}
2681 
2682 	tx_vlan_reset(port_id);
2683 	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
2684 	ports[port_id].tx_vlan_id = vlan_id;
2685 }
2686 
2687 void
2688 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
2689 {
2690 	int vlan_offload;
2691 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2692 		return;
2693 	if (vlan_id_is_invalid(vlan_id))
2694 		return;
2695 	if (vlan_id_is_invalid(vlan_id_outer))
2696 		return;
2697 
2698 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2699 	if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
		printf("Error: QinQ has not been enabled, QinQ insertion is not allowed\n");
2701 		return;
2702 	}
2703 
2704 	tx_vlan_reset(port_id);
2705 	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ;
2706 	ports[port_id].tx_vlan_id = vlan_id;
2707 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
2708 }
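
/*
 * Usage sketch (illustrative): QinQ insertion on port 0 requires the
 * VLAN extend offload to be enabled first, e.g.:
 *
 *	vlan_extend_set(0, 1);
 *	tx_qinq_set(0, 100, 200);	inner VLAN 100, outer VLAN 200
 *
 * whereas tx_vlan_set(0, 100) requires the extend offload to be off.
 */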
2709 
2710 void
2711 tx_vlan_reset(portid_t port_id)
2712 {
2713 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2714 		return;
2715 	ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN |
2716 				TESTPMD_TX_OFFLOAD_INSERT_QINQ);
2717 	ports[port_id].tx_vlan_id = 0;
2718 	ports[port_id].tx_vlan_id_outer = 0;
2719 }
2720 
2721 void
2722 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
2723 {
2724 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2725 		return;
2726 
2727 	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
2728 }
2729 
2730 void
2731 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
2732 {
2733 	uint16_t i;
2734 	uint8_t existing_mapping_found = 0;
2735 
2736 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2737 		return;
2738 
	if (is_rx ? rx_queue_id_is_invalid(queue_id) :
		    tx_queue_id_is_invalid(queue_id))
		return;
2741 
2742 	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
2743 		printf("map_value not in required range 0..%d\n",
2744 				RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
2745 		return;
2746 	}
2747 
	if (!is_rx) { /* TX queue */
2749 		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2750 			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2751 			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
2752 				tx_queue_stats_mappings[i].stats_counter_id = map_value;
2753 				existing_mapping_found = 1;
2754 				break;
2755 			}
2756 		}
2757 		if (!existing_mapping_found) { /* A new additional mapping... */
2758 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
2759 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
2760 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
2761 			nb_tx_queue_stats_mappings++;
2762 		}
	} else { /* RX queue */
2765 		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2766 			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2767 			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
2768 				rx_queue_stats_mappings[i].stats_counter_id = map_value;
2769 				existing_mapping_found = 1;
2770 				break;
2771 			}
2772 		}
2773 		if (!existing_mapping_found) { /* A new additional mapping... */
2774 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
2775 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
2776 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
2777 			nb_rx_queue_stats_mappings++;
2778 		}
2779 	}
2780 }
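
/*
 * Usage sketch (illustrative): set_qmap(0, 1, 2, 5) maps RX queue 2 of
 * port 0 to statistics counter 5, while set_qmap(0, 0, 2, 5) creates
 * the same mapping for TX queue 2; map_value must stay below
 * RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */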
2781 
2782 static inline void
2783 print_fdir_mask(struct rte_eth_fdir_masks *mask)
2784 {
2785 	printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
2786 
2787 	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2788 		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
2789 			" tunnel_id: 0x%08x",
2790 			mask->mac_addr_byte_mask, mask->tunnel_type_mask,
2791 			rte_be_to_cpu_32(mask->tunnel_id_mask));
2792 	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2793 		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
2794 			rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
2795 			rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
2796 
2797 		printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
2798 			rte_be_to_cpu_16(mask->src_port_mask),
2799 			rte_be_to_cpu_16(mask->dst_port_mask));
2800 
2801 		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
2802 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
2803 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
2804 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
2805 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
2806 
2807 		printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
2808 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
2809 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
2810 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
2811 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
2812 	}
2813 
2814 	printf("\n");
2815 }
2816 
2817 static inline void
2818 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
2819 {
2820 	struct rte_eth_flex_payload_cfg *cfg;
2821 	uint32_t i, j;
2822 
2823 	for (i = 0; i < flex_conf->nb_payloads; i++) {
2824 		cfg = &flex_conf->flex_set[i];
2825 		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
2826 			printf("\n    RAW:  ");
2827 		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
2828 			printf("\n    L2_PAYLOAD:  ");
2829 		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
2830 			printf("\n    L3_PAYLOAD:  ");
2831 		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
2832 			printf("\n    L4_PAYLOAD:  ");
2833 		else
2834 			printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
2835 		for (j = 0; j < num; j++)
2836 			printf("  %-5u", cfg->src_offset[j]);
2837 	}
2838 	printf("\n");
2839 }
2840 
2841 static char *
2842 flowtype_to_str(uint16_t flow_type)
2843 {
2844 	struct flow_type_info {
2845 		char str[32];
2846 		uint16_t ftype;
2847 	};
2848 
2849 	uint8_t i;
2850 	static struct flow_type_info flowtype_str_table[] = {
2851 		{"raw", RTE_ETH_FLOW_RAW},
2852 		{"ipv4", RTE_ETH_FLOW_IPV4},
2853 		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
2854 		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
2855 		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
2856 		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
2857 		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
2858 		{"ipv6", RTE_ETH_FLOW_IPV6},
2859 		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
2860 		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
2861 		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
2862 		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
2863 		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
2864 		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
2865 		{"port", RTE_ETH_FLOW_PORT},
2866 		{"vxlan", RTE_ETH_FLOW_VXLAN},
2867 		{"geneve", RTE_ETH_FLOW_GENEVE},
2868 		{"nvgre", RTE_ETH_FLOW_NVGRE},
2869 	};
2870 
2871 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
2872 		if (flowtype_str_table[i].ftype == flow_type)
2873 			return flowtype_str_table[i].str;
2874 	}
2875 
2876 	return NULL;
2877 }
2878 
2879 static inline void
2880 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
2881 {
2882 	struct rte_eth_fdir_flex_mask *mask;
2883 	uint32_t i, j;
2884 	char *p;
2885 
2886 	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
2887 		mask = &flex_conf->flex_mask[i];
2888 		p = flowtype_to_str(mask->flow_type);
2889 		printf("\n    %s:\t", p ? p : "unknown");
2890 		for (j = 0; j < num; j++)
2891 			printf(" %02x", mask->mask[j]);
2892 	}
2893 	printf("\n");
2894 }
2895 
2896 static inline void
2897 print_fdir_flow_type(uint32_t flow_types_mask)
2898 {
2899 	int i;
2900 	char *p;
2901 
2902 	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
2903 		if (!(flow_types_mask & (1 << i)))
2904 			continue;
2905 		p = flowtype_to_str(i);
2906 		if (p)
2907 			printf(" %s", p);
2908 		else
2909 			printf(" unknown");
2910 	}
2911 	printf("\n");
2912 }
2913 
2914 void
2915 fdir_get_infos(portid_t port_id)
2916 {
2917 	struct rte_eth_fdir_stats fdir_stat;
2918 	struct rte_eth_fdir_info fdir_info;
2919 	int ret;
2920 
2921 	static const char *fdir_stats_border = "########################";
2922 
2923 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2924 		return;
2925 	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
2926 	if (ret < 0) {
2927 		printf("\n FDIR is not supported on port %-2d\n",
2928 			port_id);
2929 		return;
2930 	}
2931 
2932 	memset(&fdir_info, 0, sizeof(fdir_info));
2933 	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
2934 			       RTE_ETH_FILTER_INFO, &fdir_info);
2935 	memset(&fdir_stat, 0, sizeof(fdir_stat));
2936 	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
2937 			       RTE_ETH_FILTER_STATS, &fdir_stat);
2938 	printf("\n  %s FDIR infos for port %-2d     %s\n",
2939 	       fdir_stats_border, port_id, fdir_stats_border);
2940 	printf("  MODE: ");
2941 	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
2942 		printf("  PERFECT\n");
2943 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
2944 		printf("  PERFECT-MAC-VLAN\n");
2945 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2946 		printf("  PERFECT-TUNNEL\n");
2947 	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
2948 		printf("  SIGNATURE\n");
2949 	else
2950 		printf("  DISABLE\n");
2951 	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
2952 		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
2953 		printf("  SUPPORTED FLOW TYPE: ");
2954 		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
2955 	}
2956 	printf("  FLEX PAYLOAD INFO:\n");
2957 	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
2958 	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
2959 	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
2960 		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
2961 		fdir_info.flex_payload_unit,
2962 		fdir_info.max_flex_payload_segment_num,
2963 		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
2964 	printf("  MASK: ");
2965 	print_fdir_mask(&fdir_info.mask);
2966 	if (fdir_info.flex_conf.nb_payloads > 0) {
2967 		printf("  FLEX PAYLOAD SRC OFFSET:");
2968 		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
2969 	}
2970 	if (fdir_info.flex_conf.nb_flexmasks > 0) {
2971 		printf("  FLEX MASK CFG:");
2972 		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
2973 	}
2974 	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
2975 	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
2976 	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
2977 	       fdir_info.guarant_spc, fdir_info.best_spc);
2978 	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
2979 	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
	       "  add:           %-10"PRIu64"  remove:        %"PRIu64"\n"
2981 	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
2982 	       fdir_stat.collision, fdir_stat.free,
2983 	       fdir_stat.maxhash, fdir_stat.maxlen,
2984 	       fdir_stat.add, fdir_stat.remove,
2985 	       fdir_stat.f_add, fdir_stat.f_remove);
2986 	printf("  %s############################%s\n",
2987 	       fdir_stats_border, fdir_stats_border);
2988 }
2989 
2990 void
2991 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
2992 {
2993 	struct rte_port *port;
2994 	struct rte_eth_fdir_flex_conf *flex_conf;
2995 	int i, idx = 0;
2996 
2997 	port = &ports[port_id];
2998 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
2999 	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
3000 		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
3001 			idx = i;
3002 			break;
3003 		}
3004 	}
3005 	if (i >= RTE_ETH_FLOW_MAX) {
3006 		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
3007 			idx = flex_conf->nb_flexmasks;
3008 			flex_conf->nb_flexmasks++;
3009 		} else {
			printf("The flex mask table is full. Cannot set flex"
				" mask for flow_type(%u).\n", cfg->flow_type);
3012 			return;
3013 		}
3014 	}
3015 	rte_memcpy(&flex_conf->flex_mask[idx],
3016 			 cfg,
3017 			 sizeof(struct rte_eth_fdir_flex_mask));
3018 }
3019 
3020 void
3021 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
3022 {
3023 	struct rte_port *port;
3024 	struct rte_eth_fdir_flex_conf *flex_conf;
3025 	int i, idx = 0;
3026 
3027 	port = &ports[port_id];
3028 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
3029 	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
3030 		if (cfg->type == flex_conf->flex_set[i].type) {
3031 			idx = i;
3032 			break;
3033 		}
3034 	}
3035 	if (i >= RTE_ETH_PAYLOAD_MAX) {
3036 		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
3037 			idx = flex_conf->nb_payloads;
3038 			flex_conf->nb_payloads++;
3039 		} else {
			printf("The flex payload table is full. Cannot set"
				" flex payload for type(%u).\n", cfg->type);
3042 			return;
3043 		}
3044 	}
3045 	rte_memcpy(&flex_conf->flex_set[idx],
3046 			 cfg,
3047 			 sizeof(struct rte_eth_flex_payload_cfg));
3049 }
3050 
3051 void
3052 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
3053 {
3054 #ifdef RTE_LIBRTE_IXGBE_PMD
3055 	int diag;
3056 
3057 	if (is_rx)
3058 		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
3059 	else
3060 		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
3061 
3062 	if (diag == 0)
3063 		return;
3064 	printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
3065 			is_rx ? "rx" : "tx", port_id, diag);
3066 	return;
3067 #endif
3068 	printf("VF %s setting not supported for port %d\n",
3069 			is_rx ? "Rx" : "Tx", port_id);
3070 	RTE_SET_USED(vf);
3071 	RTE_SET_USED(on);
3072 }
3073 
3074 int
3075 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3076 {
3077 	int diag;
3078 	struct rte_eth_link link;
3079 
3080 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3081 		return 1;
3082 	rte_eth_link_get_nowait(port_id, &link);
3083 	if (rate > link.link_speed) {
		printf("Invalid rate value: %u, bigger than link speed: %u\n",
3085 			rate, link.link_speed);
3086 		return 1;
3087 	}
3088 	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3089 	if (diag == 0)
3090 		return diag;
3091 	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3092 		port_id, diag);
3093 	return diag;
3094 }
3095 
3096 int
3097 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
3098 {
3099 	int diag = -ENOTSUP;
3100 
3101 #ifdef RTE_LIBRTE_IXGBE_PMD
3102 	if (diag == -ENOTSUP)
3103 		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
3104 						       q_msk);
3105 #endif
3106 #ifdef RTE_LIBRTE_BNXT_PMD
3107 	if (diag == -ENOTSUP)
3108 		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
3109 #endif
3110 	if (diag == 0)
3111 		return diag;
3112 
3113 	printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
3114 		port_id, diag);
3115 	return diag;
3116 }
3117 
3118 /*
3119  * Functions to manage the set of filtered Multicast MAC addresses.
3120  *
3121  * A pool of filtered multicast MAC addresses is associated with each port.
3122  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3123  * The address of the pool and the number of valid multicast MAC addresses
3124  * recorded in the pool are stored in the fields "mc_addr_pool" and
3125  * "mc_addr_nb" of the "rte_port" data structure.
3126  *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
 * a contiguous array of multicast MAC addresses to be supplied.
 * To comply with this constraint, the set of multicast addresses recorded
 * in the pool is kept compacted at the beginning of the pool.
 * Hence, when a multicast address is removed from the pool, all following
 * addresses, if any, are copied back to keep the set contiguous.
3133  */
3134 #define MCAST_POOL_INC 32
3135 
3136 static int
3137 mcast_addr_pool_extend(struct rte_port *port)
3138 {
3139 	struct ether_addr *mc_pool;
3140 	size_t mc_pool_size;
3141 
3142 	/*
3143 	 * If a free entry is available at the end of the pool, just
3144 	 * increment the number of recorded multicast addresses.
3145 	 */
3146 	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3147 		port->mc_addr_nb++;
3148 		return 0;
3149 	}
3150 
3151 	/*
3152 	 * [re]allocate a pool with MCAST_POOL_INC more entries.
3153 	 * The previous test guarantees that port->mc_addr_nb is a multiple
3154 	 * of MCAST_POOL_INC.
3155 	 */
3156 	mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
3157 						    MCAST_POOL_INC);
3158 	mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
3159 						mc_pool_size);
3160 	if (mc_pool == NULL) {
3161 		printf("allocation of pool of %u multicast addresses failed\n",
3162 		       port->mc_addr_nb + MCAST_POOL_INC);
3163 		return -ENOMEM;
3164 	}
3165 
3166 	port->mc_addr_pool = mc_pool;
3167 	port->mc_addr_nb++;
3168 	return 0;
3170 }
3171 
3172 static void
3173 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3174 {
3175 	port->mc_addr_nb--;
3176 	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
3178 		if (port->mc_addr_nb == 0) {
3179 			/* free the pool of multicast addresses. */
3180 			free(port->mc_addr_pool);
3181 			port->mc_addr_pool = NULL;
3182 		}
3183 		return;
3184 	}
3185 	memmove(&port->mc_addr_pool[addr_idx],
3186 		&port->mc_addr_pool[addr_idx + 1],
3187 		sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
3188 }
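
/*
 * Worked example (illustrative): with a pool containing the addresses
 * {A, B, C, D} (mc_addr_nb = 4), mcast_addr_pool_remove(port, 1)
 * removes B by copying {C, D} one slot back, leaving {A, C, D} and
 * mc_addr_nb = 3, so the array stays contiguous for
 * rte_eth_dev_set_mc_addr_list().
 */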
3189 
3190 static void
3191 eth_port_multicast_addr_list_set(uint8_t port_id)
3192 {
3193 	struct rte_port *port;
3194 	int diag;
3195 
3196 	port = &ports[port_id];
3197 	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3198 					    port->mc_addr_nb);
3199 	if (diag == 0)
3200 		return;
	printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
	       port_id, port->mc_addr_nb, -diag);
3203 }
3204 
3205 void
3206 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr)
3207 {
3208 	struct rte_port *port;
3209 	uint32_t i;
3210 
3211 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3212 		return;
3213 
3214 	port = &ports[port_id];
3215 
3216 	/*
3217 	 * Check that the added multicast MAC address is not already recorded
3218 	 * in the pool of multicast addresses.
3219 	 */
3220 	for (i = 0; i < port->mc_addr_nb; i++) {
3221 		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3222 			printf("multicast address already filtered by port\n");
3223 			return;
3224 		}
3225 	}
3226 
3227 	if (mcast_addr_pool_extend(port) != 0)
3228 		return;
3229 	ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3230 	eth_port_multicast_addr_list_set(port_id);
3231 }
3232 
3233 void
3234 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr)
3235 {
3236 	struct rte_port *port;
3237 	uint32_t i;
3238 
3239 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3240 		return;
3241 
3242 	port = &ports[port_id];
3243 
3244 	/*
3245 	 * Search the pool of multicast MAC addresses for the removed address.
3246 	 */
3247 	for (i = 0; i < port->mc_addr_nb; i++) {
3248 		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3249 			break;
3250 	}
3251 	if (i == port->mc_addr_nb) {
3252 		printf("multicast address not filtered by port %d\n", port_id);
3253 		return;
3254 	}
3255 
3256 	mcast_addr_pool_remove(port, i);
3257 	eth_port_multicast_addr_list_set(port_id);
3258 }
3259 
3260 void
3261 port_dcb_info_display(uint8_t port_id)
3262 {
3263 	struct rte_eth_dcb_info dcb_info;
3264 	uint16_t i;
3265 	int ret;
3266 	static const char *border = "================";
3267 
3268 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3269 		return;
3270 
3271 	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3272 	if (ret) {
3273 		printf("\n Failed to get dcb infos on port %-2d\n",
3274 			port_id);
3275 		return;
3276 	}
3277 	printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
3278 	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
3279 	printf("\n  TC :        ");
3280 	for (i = 0; i < dcb_info.nb_tcs; i++)
3281 		printf("\t%4d", i);
3282 	printf("\n  Priority :  ");
3283 	for (i = 0; i < dcb_info.nb_tcs; i++)
3284 		printf("\t%4d", dcb_info.prio_tc[i]);
3285 	printf("\n  BW percent :");
3286 	for (i = 0; i < dcb_info.nb_tcs; i++)
3287 		printf("\t%4d%%", dcb_info.tc_bws[i]);
3288 	printf("\n  RXQ base :  ");
3289 	for (i = 0; i < dcb_info.nb_tcs; i++)
3290 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
3291 	printf("\n  RXQ number :");
3292 	for (i = 0; i < dcb_info.nb_tcs; i++)
3293 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
3294 	printf("\n  TXQ base :  ");
3295 	for (i = 0; i < dcb_info.nb_tcs; i++)
3296 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
3297 	printf("\n  TXQ number :");
3298 	for (i = 0; i < dcb_info.nb_tcs; i++)
3299 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
3300 	printf("\n");
3301 }
3302 
3303 uint8_t *
3304 open_ddp_package_file(const char *file_path, uint32_t *size)
3305 {
3306 	FILE *fh = fopen(file_path, "rb");
3307 	uint32_t pkg_size;
3308 	uint8_t *buf = NULL;
3309 	int ret = 0;
3310 
3311 	if (size)
3312 		*size = 0;
3313 
3314 	if (fh == NULL) {
3315 		printf("%s: Failed to open %s\n", __func__, file_path);
3316 		return buf;
3317 	}
3318 
3319 	ret = fseek(fh, 0, SEEK_END);
3320 	if (ret < 0) {
3321 		fclose(fh);
3322 		printf("%s: File operations failed\n", __func__);
3323 		return buf;
3324 	}
3325 
	ret = ftell(fh);
	if (ret < 0) {
		fclose(fh);
		printf("%s: File operations failed\n", __func__);
		return buf;
	}
	pkg_size = (uint32_t)ret;
3327 
3328 	buf = (uint8_t *)malloc(pkg_size);
3329 	if (!buf) {
3330 		fclose(fh);
3331 		printf("%s: Failed to malloc memory\n",	__func__);
3332 		return buf;
3333 	}
3334 
3335 	ret = fseek(fh, 0, SEEK_SET);
3336 	if (ret < 0) {
3337 		fclose(fh);
3338 		printf("%s: File seek operation failed\n", __func__);
3339 		close_ddp_package_file(buf);
3340 		return NULL;
3341 	}
3342 
	if (fread(buf, 1, pkg_size, fh) != pkg_size) {
3345 		fclose(fh);
3346 		printf("%s: File read operation failed\n", __func__);
3347 		close_ddp_package_file(buf);
3348 		return NULL;
3349 	}
3350 
3351 	if (size)
3352 		*size = pkg_size;
3353 
3354 	fclose(fh);
3355 
3356 	return buf;
3357 }
3358 
3359 int
3360 save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size)
3361 {
3362 	FILE *fh = fopen(file_path, "wb");
3363 
3364 	if (fh == NULL) {
3365 		printf("%s: Failed to open %s\n", __func__, file_path);
3366 		return -1;
3367 	}
3368 
3369 	if (fwrite(buf, 1, size, fh) != size) {
3370 		fclose(fh);
3371 		printf("%s: File write operation failed\n", __func__);
3372 		return -1;
3373 	}
3374 
3375 	fclose(fh);
3376 
3377 	return 0;
3378 }
3379 
3380 int
3381 close_ddp_package_file(uint8_t *buf)
3382 {
3383 	if (buf) {
3384 		free((void *)buf);
3385 		return 0;
3386 	}
3387 
3388 	return -1;
3389 }
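
/*
 * Usage sketch (illustrative; the file path is hypothetical):
 *
 *	uint32_t size;
 *	uint8_t *pkg = open_ddp_package_file("profile.pkgo", &size);
 *
 *	if (pkg != NULL) {
 *		... process "size" bytes at "pkg" ...
 *		close_ddp_package_file(pkg);
 *	}
 */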
3390