xref: /dpdk/app/test-pmd/config.c (revision 1f8cc1a388610c348da7e379dbff62f1a28690d1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation.
3  * Copyright 2013-2014 6WIND S.A.
4  */
5 
6 #include <stdarg.h>
7 #include <errno.h>
8 #include <stdio.h>
9 #include <string.h>
10 #include <stdint.h>
11 #include <inttypes.h>
12 
13 #include <sys/queue.h>
14 #include <sys/types.h>
15 #include <sys/stat.h>
16 #include <fcntl.h>
17 #include <unistd.h>
18 
19 #include <rte_common.h>
20 #include <rte_byteorder.h>
21 #include <rte_debug.h>
22 #include <rte_log.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
25 #include <rte_memzone.h>
26 #include <rte_launch.h>
27 #include <rte_eal.h>
28 #include <rte_per_lcore.h>
29 #include <rte_lcore.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_mempool.h>
32 #include <rte_mbuf.h>
33 #include <rte_interrupts.h>
34 #include <rte_pci.h>
35 #include <rte_ether.h>
36 #include <rte_ethdev.h>
37 #include <rte_string_fns.h>
38 #include <rte_cycles.h>
39 #include <rte_flow.h>
40 #include <rte_mtr.h>
41 #include <rte_errno.h>
42 #ifdef RTE_NET_IXGBE
43 #include <rte_pmd_ixgbe.h>
44 #endif
45 #ifdef RTE_NET_I40E
46 #include <rte_pmd_i40e.h>
47 #endif
48 #ifdef RTE_NET_BNXT
49 #include <rte_pmd_bnxt.h>
50 #endif
51 #include <rte_gro.h>
52 #include <rte_hexdump.h>
53 
54 #include "testpmd.h"
55 #include "cmdline_mtr.h"
56 
57 #define ETHDEV_FWVERS_LEN 32
58 
59 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
60 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
61 #else
62 #define CLOCK_TYPE_ID CLOCK_MONOTONIC
63 #endif
64 
65 #define NS_PER_SEC 1E9
66 
67 static char *flowtype_to_str(uint16_t flow_type);
68 
69 static const struct {
70 	enum tx_pkt_split split;
71 	const char *name;
72 } tx_split_name[] = {
73 	{
74 		.split = TX_PKT_SPLIT_OFF,
75 		.name = "off",
76 	},
77 	{
78 		.split = TX_PKT_SPLIT_ON,
79 		.name = "on",
80 	},
81 	{
82 		.split = TX_PKT_SPLIT_RND,
83 		.name = "rand",
84 	},
85 };
86 
87 const struct rss_type_info rss_type_table[] = {
88 	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
89 		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
90 		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
91 		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
92 	{ "none", 0 },
93 	{ "eth", RTE_ETH_RSS_ETH },
94 	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
95 	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
96 	{ "vlan", RTE_ETH_RSS_VLAN },
97 	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
98 	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
99 	{ "ipv4", RTE_ETH_RSS_IPV4 },
100 	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
101 	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
102 	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
103 	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
104 	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
105 	{ "ipv6", RTE_ETH_RSS_IPV6 },
106 	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
107 	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
108 	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
109 	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
110 	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
111 	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
112 	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
113 	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
114 	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
115 	{ "port", RTE_ETH_RSS_PORT },
116 	{ "vxlan", RTE_ETH_RSS_VXLAN },
117 	{ "geneve", RTE_ETH_RSS_GENEVE },
118 	{ "nvgre", RTE_ETH_RSS_NVGRE },
119 	{ "ip", RTE_ETH_RSS_IP },
120 	{ "udp", RTE_ETH_RSS_UDP },
121 	{ "tcp", RTE_ETH_RSS_TCP },
122 	{ "sctp", RTE_ETH_RSS_SCTP },
123 	{ "tunnel", RTE_ETH_RSS_TUNNEL },
124 	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
125 	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
126 	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
127 	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
128 	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
129 	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
130 	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
131 	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
132 	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
133 	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
134 	{ "esp", RTE_ETH_RSS_ESP },
135 	{ "ah", RTE_ETH_RSS_AH },
136 	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
137 	{ "pfcp", RTE_ETH_RSS_PFCP },
138 	{ "pppoe", RTE_ETH_RSS_PPPOE },
139 	{ "gtpu", RTE_ETH_RSS_GTPU },
140 	{ "ecpri", RTE_ETH_RSS_ECPRI },
141 	{ "mpls", RTE_ETH_RSS_MPLS },
142 	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
143 	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
144 	{ NULL, 0 },
145 };
146 
147 static const struct {
148 	enum rte_eth_fec_mode mode;
149 	const char *name;
150 } fec_mode_name[] = {
151 	{
152 		.mode = RTE_ETH_FEC_NOFEC,
153 		.name = "off",
154 	},
155 	{
156 		.mode = RTE_ETH_FEC_AUTO,
157 		.name = "auto",
158 	},
159 	{
160 		.mode = RTE_ETH_FEC_BASER,
161 		.name = "baser",
162 	},
163 	{
164 		.mode = RTE_ETH_FEC_RS,
165 		.name = "rs",
166 	},
167 };
168 
169 static void
170 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
171 {
172 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
173 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
174 	printf("%s%s", name, buf);
175 }
176 
177 static void
178 nic_xstats_display_periodic(portid_t port_id)
179 {
180 	struct xstat_display_info *xstats_info;
181 	uint64_t *prev_values, *curr_values;
182 	uint64_t diff_value, value_rate;
183 	struct timespec cur_time;
184 	uint64_t *ids_supp;
185 	size_t ids_supp_sz;
186 	uint64_t diff_ns;
187 	unsigned int i;
188 	int rc;
189 
190 	xstats_info = &ports[port_id].xstats_info;
191 
192 	ids_supp_sz = xstats_info->ids_supp_sz;
193 	if (ids_supp_sz == 0)
194 		return;
195 
196 	printf("\n");
197 
198 	ids_supp = xstats_info->ids_supp;
199 	prev_values = xstats_info->prev_values;
200 	curr_values = xstats_info->curr_values;
201 
202 	rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
203 				      ids_supp_sz);
204 	if (rc != (int)ids_supp_sz) {
205 		fprintf(stderr,
206 			"Failed to get values of %zu xstats for port %u - return code %d\n",
207 			ids_supp_sz, port_id, rc);
208 		return;
209 	}
210 
211 	diff_ns = 0;
212 	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
213 		uint64_t ns;
214 
215 		ns = cur_time.tv_sec * NS_PER_SEC;
216 		ns += cur_time.tv_nsec;
217 
218 		if (xstats_info->prev_ns != 0)
219 			diff_ns = ns - xstats_info->prev_ns;
220 		xstats_info->prev_ns = ns;
221 	}
222 
223 	printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
224 	for (i = 0; i < ids_supp_sz; i++) {
225 		diff_value = (curr_values[i] > prev_values[i]) ?
226 			     (curr_values[i] - prev_values[i]) : 0;
227 		prev_values[i] = curr_values[i];
228 		value_rate = diff_ns > 0 ?
229 				(double)diff_value / diff_ns * NS_PER_SEC : 0;
230 
231 		printf("  %-25s%12"PRIu64" %15"PRIu64"\n",
232 		       xstats_display[i].name, curr_values[i], value_rate);
233 	}
234 }
235 
236 void
237 nic_stats_display(portid_t port_id)
238 {
239 	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
240 	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
241 	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
242 	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
243 	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
244 	struct timespec cur_time;
245 	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
246 								diff_ns;
247 	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
248 	struct rte_eth_stats stats;
249 
250 	static const char *nic_stats_border = "########################";
251 
252 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
253 		print_valid_ports();
254 		return;
255 	}
256 	rte_eth_stats_get(port_id, &stats);
257 	printf("\n  %s NIC statistics for port %-2d %s\n",
258 	       nic_stats_border, port_id, nic_stats_border);
259 
260 	printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
261 	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
262 	printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
263 	printf("  RX-nombuf:  %-10"PRIu64"\n", stats.rx_nombuf);
264 	printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
265 	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);
266 
267 	diff_ns = 0;
268 	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
269 		uint64_t ns;
270 
271 		ns = cur_time.tv_sec * NS_PER_SEC;
272 		ns += cur_time.tv_nsec;
273 
274 		if (prev_ns[port_id] != 0)
275 			diff_ns = ns - prev_ns[port_id];
276 		prev_ns[port_id] = ns;
277 	}
278 
279 	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
280 		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
281 	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
282 		(stats.opackets - prev_pkts_tx[port_id]) : 0;
283 	prev_pkts_rx[port_id] = stats.ipackets;
284 	prev_pkts_tx[port_id] = stats.opackets;
285 	mpps_rx = diff_ns > 0 ?
286 		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
287 	mpps_tx = diff_ns > 0 ?
288 		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;
289 
290 	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
291 		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
292 	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
293 		(stats.obytes - prev_bytes_tx[port_id]) : 0;
294 	prev_bytes_rx[port_id] = stats.ibytes;
295 	prev_bytes_tx[port_id] = stats.obytes;
296 	mbps_rx = diff_ns > 0 ?
297 		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
298 	mbps_tx = diff_ns > 0 ?
299 		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;
300 
301 	printf("\n  Throughput (since last show)\n");
302 	printf("  Rx-pps: %12"PRIu64"          Rx-bps: %12"PRIu64"\n  Tx-pps: %12"
303 	       PRIu64"          Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
304 	       mpps_tx, mbps_tx * 8);
305 
306 	if (xstats_display_num > 0)
307 		nic_xstats_display_periodic(port_id);
308 
309 	printf("  %s############################%s\n",
310 	       nic_stats_border, nic_stats_border);
311 }
312 
313 void
314 nic_stats_clear(portid_t port_id)
315 {
316 	int ret;
317 
318 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
319 		print_valid_ports();
320 		return;
321 	}
322 
323 	ret = rte_eth_stats_reset(port_id);
324 	if (ret != 0) {
325 		fprintf(stderr,
326 			"%s: Error: failed to reset stats (port %u): %s",
327 			__func__, port_id, strerror(-ret));
328 		return;
329 	}
330 
331 	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
332 	if (ret != 0) {
333 		if (ret < 0)
334 			ret = -ret;
335 		fprintf(stderr,
336 			"%s: Error: failed to get stats (port %u): %s",
337 			__func__, port_id, strerror(ret));
338 		return;
339 	}
340 	printf("\n  NIC statistics for port %d cleared\n", port_id);
341 }
342 
343 void
344 nic_xstats_display(portid_t port_id)
345 {
346 	struct rte_eth_xstat *xstats;
347 	int cnt_xstats, idx_xstat;
348 	struct rte_eth_xstat_name *xstats_names;
349 
350 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
351 		print_valid_ports();
352 		return;
353 	}
354 	printf("###### NIC extended statistics for port %-2d\n", port_id);
355 	if (!rte_eth_dev_is_valid_port(port_id)) {
356 		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
357 		return;
358 	}
359 
360 	/* Get count */
361 	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
362 	if (cnt_xstats  < 0) {
363 		fprintf(stderr, "Error: Cannot get count of xstats\n");
364 		return;
365 	}
366 
367 	/* Get id-name lookup table */
368 	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
369 	if (xstats_names == NULL) {
370 		fprintf(stderr, "Cannot allocate memory for xstats lookup\n");
371 		return;
372 	}
373 	if (cnt_xstats != rte_eth_xstats_get_names(
374 			port_id, xstats_names, cnt_xstats)) {
375 		fprintf(stderr, "Error: Cannot get xstats lookup\n");
376 		free(xstats_names);
377 		return;
378 	}
379 
380 	/* Get stats themselves */
381 	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
382 	if (xstats == NULL) {
383 		fprintf(stderr, "Cannot allocate memory for xstats\n");
384 		free(xstats_names);
385 		return;
386 	}
387 	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
388 		fprintf(stderr, "Error: Unable to get xstats\n");
389 		free(xstats_names);
390 		free(xstats);
391 		return;
392 	}
393 
394 	/* Display xstats */
395 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
396 		if (xstats_hide_zero && !xstats[idx_xstat].value)
397 			continue;
398 		printf("%s: %"PRIu64"\n",
399 			xstats_names[idx_xstat].name,
400 			xstats[idx_xstat].value);
401 	}
402 	free(xstats_names);
403 	free(xstats);
404 }
405 
406 void
407 nic_xstats_clear(portid_t port_id)
408 {
409 	int ret;
410 
411 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
412 		print_valid_ports();
413 		return;
414 	}
415 
416 	ret = rte_eth_xstats_reset(port_id);
417 	if (ret != 0) {
418 		fprintf(stderr,
419 			"%s: Error: failed to reset xstats (port %u): %s\n",
420 			__func__, port_id, strerror(-ret));
421 		return;
422 	}
423 
424 	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
425 	if (ret != 0) {
426 		if (ret < 0)
427 			ret = -ret;
428 		fprintf(stderr, "%s: Error: failed to get stats (port %u): %s",
429 			__func__, port_id, strerror(ret));
430 		return;
431 	}
432 }
433 
434 static const char *
435 get_queue_state_name(uint8_t queue_state)
436 {
437 	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
438 		return "stopped";
439 	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
440 		return "started";
441 	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
442 		return "hairpin";
443 	else
444 		return "unknown";
445 }
446 
447 void
448 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
449 {
450 	struct rte_eth_burst_mode mode;
451 	struct rte_eth_rxq_info qinfo;
452 	int32_t rc;
453 	static const char *info_border = "*********************";
454 
455 	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
456 	if (rc != 0) {
457 		fprintf(stderr,
458 			"Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n",
459 			port_id, queue_id, strerror(-rc), rc);
460 		return;
461 	}
462 
463 	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
464 	       info_border, port_id, queue_id, info_border);
465 
466 	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
467 	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
468 	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
469 	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
470 	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
471 	printf("\nRX drop packets: %s",
472 		(qinfo.conf.rx_drop_en != 0) ? "on" : "off");
473 	printf("\nRX deferred start: %s",
474 		(qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
475 	printf("\nRX scattered packets: %s",
476 		(qinfo.scattered_rx != 0) ? "on" : "off");
477 	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
478 	if (qinfo.rx_buf_size != 0)
479 		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
480 	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
481 
482 	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
483 		printf("\nBurst mode: %s%s",
484 		       mode.info,
485 		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
486 				" (per queue)" : "");
487 
488 	printf("\n");
489 }
490 
491 void
492 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
493 {
494 	struct rte_eth_burst_mode mode;
495 	struct rte_eth_txq_info qinfo;
496 	int32_t rc;
497 	static const char *info_border = "*********************";
498 
499 	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
500 	if (rc != 0) {
501 		fprintf(stderr,
502 			"Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n",
503 			port_id, queue_id, strerror(-rc), rc);
504 		return;
505 	}
506 
507 	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
508 	       info_border, port_id, queue_id, info_border);
509 
510 	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
511 	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
512 	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
513 	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
514 	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
515 	printf("\nTX deferred start: %s",
516 		(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
517 	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
518 	printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));
519 
520 	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
521 		printf("\nBurst mode: %s%s",
522 		       mode.info,
523 		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
524 				" (per queue)" : "");
525 
526 	printf("\n");
527 }
528 
529 static int bus_match_all(const struct rte_bus *bus, const void *data)
530 {
531 	RTE_SET_USED(bus);
532 	RTE_SET_USED(data);
533 	return 0;
534 }
535 
536 static void
537 device_infos_display_speeds(uint32_t speed_capa)
538 {
539 	printf("\n\tDevice speed capability:");
540 	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
541 		printf(" Autonegotiate (all speeds)");
542 	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
543 		printf(" Disable autonegotiate (fixed speed)  ");
544 	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
545 		printf(" 10 Mbps half-duplex  ");
546 	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
547 		printf(" 10 Mbps full-duplex  ");
548 	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
549 		printf(" 100 Mbps half-duplex  ");
550 	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
551 		printf(" 100 Mbps full-duplex  ");
552 	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
553 		printf(" 1 Gbps  ");
554 	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
555 		printf(" 2.5 Gbps  ");
556 	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
557 		printf(" 5 Gbps  ");
558 	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
559 		printf(" 10 Gbps  ");
560 	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
561 		printf(" 20 Gbps  ");
562 	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
563 		printf(" 25 Gbps  ");
564 	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
565 		printf(" 40 Gbps  ");
566 	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
567 		printf(" 50 Gbps  ");
568 	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
569 		printf(" 56 Gbps  ");
570 	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
571 		printf(" 100 Gbps  ");
572 	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
573 		printf(" 200 Gbps  ");
574 }
575 
576 void
577 device_infos_display(const char *identifier)
578 {
579 	static const char *info_border = "*********************";
580 	struct rte_bus *start = NULL, *next;
581 	struct rte_dev_iterator dev_iter;
582 	char name[RTE_ETH_NAME_MAX_LEN];
583 	struct rte_ether_addr mac_addr;
584 	struct rte_device *dev;
585 	struct rte_devargs da;
586 	portid_t port_id;
587 	struct rte_eth_dev_info dev_info;
588 	char devstr[128];
589 
590 	memset(&da, 0, sizeof(da));
591 	if (!identifier)
592 		goto skip_parse;
593 
594 	if (rte_devargs_parsef(&da, "%s", identifier)) {
595 		fprintf(stderr, "cannot parse identifier\n");
596 		return;
597 	}
598 
599 skip_parse:
600 	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
601 
602 		start = next;
603 		if (identifier && da.bus != next)
604 			continue;
605 
606 		/* Skip buses that don't have iterate method */
607 		if (!next->dev_iterate)
608 			continue;
609 
610 		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
611 		RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
612 
613 			if (!dev->driver)
614 				continue;
615 			/* Check for matching device if identifier is present */
616 			if (identifier &&
617 			    strncmp(da.name, dev->name, strlen(dev->name)))
618 				continue;
619 			printf("\n%s Infos for device %s %s\n",
620 			       info_border, dev->name, info_border);
621 			printf("Bus name: %s", dev->bus->name);
622 			printf("\nDriver name: %s", dev->driver->name);
623 			printf("\nDevargs: %s",
624 			       dev->devargs ? dev->devargs->args : "");
625 			printf("\nConnect to socket: %d", dev->numa_node);
626 			printf("\n");
627 
628 			/* List ports with matching device name */
629 			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
630 				printf("\n\tPort id: %-2d", port_id);
631 				if (eth_macaddr_get_print_err(port_id,
632 							      &mac_addr) == 0)
633 					print_ethaddr("\n\tMAC address: ",
634 						      &mac_addr);
635 				rte_eth_dev_get_name_by_port(port_id, name);
636 				printf("\n\tDevice name: %s", name);
637 				if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
638 					device_infos_display_speeds(dev_info.speed_capa);
639 				printf("\n");
640 			}
641 		}
642 	};
643 	rte_devargs_reset(&da);
644 }
645 
646 static void
647 print_dev_capabilities(uint64_t capabilities)
648 {
649 	uint64_t single_capa;
650 	int begin;
651 	int end;
652 	int bit;
653 
654 	if (capabilities == 0)
655 		return;
656 
657 	begin = __builtin_ctzll(capabilities);
658 	end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities);
659 
660 	single_capa = 1ULL << begin;
661 	for (bit = begin; bit < end; bit++) {
662 		if (capabilities & single_capa)
663 			printf(" %s",
664 			       rte_eth_dev_capability_name(single_capa));
665 		single_capa <<= 1;
666 	}
667 }
668 
669 void
670 port_infos_display(portid_t port_id)
671 {
672 	struct rte_port *port;
673 	struct rte_ether_addr mac_addr;
674 	struct rte_eth_link link;
675 	struct rte_eth_dev_info dev_info;
676 	int vlan_offload;
677 	struct rte_mempool * mp;
678 	static const char *info_border = "*********************";
679 	uint16_t mtu;
680 	char name[RTE_ETH_NAME_MAX_LEN];
681 	int ret;
682 	char fw_version[ETHDEV_FWVERS_LEN];
683 
684 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
685 		print_valid_ports();
686 		return;
687 	}
688 	port = &ports[port_id];
689 	ret = eth_link_get_nowait_print_err(port_id, &link);
690 	if (ret < 0)
691 		return;
692 
693 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
694 	if (ret != 0)
695 		return;
696 
697 	printf("\n%s Infos for port %-2d %s\n",
698 	       info_border, port_id, info_border);
699 	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
700 		print_ethaddr("MAC address: ", &mac_addr);
701 	rte_eth_dev_get_name_by_port(port_id, name);
702 	printf("\nDevice name: %s", name);
703 	printf("\nDriver name: %s", dev_info.driver_name);
704 
705 	if (rte_eth_dev_fw_version_get(port_id, fw_version,
706 						ETHDEV_FWVERS_LEN) == 0)
707 		printf("\nFirmware-version: %s", fw_version);
708 	else
709 		printf("\nFirmware-version: %s", "not available");
710 
711 	if (dev_info.device->devargs && dev_info.device->devargs->args)
712 		printf("\nDevargs: %s", dev_info.device->devargs->args);
713 	printf("\nConnect to socket: %u", port->socket_id);
714 
715 	if (port_numa[port_id] != NUMA_NO_CONFIG) {
716 		mp = mbuf_pool_find(port_numa[port_id], 0);
717 		if (mp)
718 			printf("\nmemory allocation on the socket: %d",
719 							port_numa[port_id]);
720 	} else
721 		printf("\nmemory allocation on the socket: %u",port->socket_id);
722 
723 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
724 	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
725 	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
726 	       ("full-duplex") : ("half-duplex"));
727 	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
728 	       ("On") : ("Off"));
729 
730 	if (!rte_eth_dev_get_mtu(port_id, &mtu))
731 		printf("MTU: %u\n", mtu);
732 
733 	printf("Promiscuous mode: %s\n",
734 	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
735 	printf("Allmulticast mode: %s\n",
736 	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
737 	printf("Maximum number of MAC addresses: %u\n",
738 	       (unsigned int)(port->dev_info.max_mac_addrs));
739 	printf("Maximum number of MAC addresses of hash filtering: %u\n",
740 	       (unsigned int)(port->dev_info.max_hash_mac_addrs));
741 
742 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
743 	if (vlan_offload >= 0){
744 		printf("VLAN offload: \n");
745 		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
746 			printf("  strip on, ");
747 		else
748 			printf("  strip off, ");
749 
750 		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
751 			printf("filter on, ");
752 		else
753 			printf("filter off, ");
754 
755 		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
756 			printf("extend on, ");
757 		else
758 			printf("extend off, ");
759 
760 		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
761 			printf("qinq strip on\n");
762 		else
763 			printf("qinq strip off\n");
764 	}
765 
766 	if (dev_info.hash_key_size > 0)
767 		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
768 	if (dev_info.reta_size > 0)
769 		printf("Redirection table size: %u\n", dev_info.reta_size);
770 	if (!dev_info.flow_type_rss_offloads)
771 		printf("No RSS offload flow type is supported.\n");
772 	else {
773 		uint16_t i;
774 		char *p;
775 
776 		printf("Supported RSS offload flow types:\n");
777 		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
778 		     i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
779 			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
780 				continue;
781 			p = flowtype_to_str(i);
782 			if (p)
783 				printf("  %s\n", p);
784 			else
785 				printf("  user defined %d\n", i);
786 		}
787 	}
788 
789 	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
790 	printf("Maximum configurable length of RX packet: %u\n",
791 		dev_info.max_rx_pktlen);
792 	printf("Maximum configurable size of LRO aggregated packet: %u\n",
793 		dev_info.max_lro_pkt_size);
794 	if (dev_info.max_vfs)
795 		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
796 	if (dev_info.max_vmdq_pools)
797 		printf("Maximum number of VMDq pools: %u\n",
798 			dev_info.max_vmdq_pools);
799 
800 	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
801 	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
802 	printf("Max possible number of RXDs per queue: %hu\n",
803 		dev_info.rx_desc_lim.nb_max);
804 	printf("Min possible number of RXDs per queue: %hu\n",
805 		dev_info.rx_desc_lim.nb_min);
806 	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
807 
808 	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
809 	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
810 	printf("Max possible number of TXDs per queue: %hu\n",
811 		dev_info.tx_desc_lim.nb_max);
812 	printf("Min possible number of TXDs per queue: %hu\n",
813 		dev_info.tx_desc_lim.nb_min);
814 	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
815 	printf("Max segment number per packet: %hu\n",
816 		dev_info.tx_desc_lim.nb_seg_max);
817 	printf("Max segment number per MTU/TSO: %hu\n",
818 		dev_info.tx_desc_lim.nb_mtu_seg_max);
819 
820 	printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa);
821 	print_dev_capabilities(dev_info.dev_capa);
822 	printf(" )\n");
823 	/* Show switch info only if valid switch domain and port id is set */
824 	if (dev_info.switch_info.domain_id !=
825 		RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
826 		if (dev_info.switch_info.name)
827 			printf("Switch name: %s\n", dev_info.switch_info.name);
828 
829 		printf("Switch domain Id: %u\n",
830 			dev_info.switch_info.domain_id);
831 		printf("Switch Port Id: %u\n",
832 			dev_info.switch_info.port_id);
833 		if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0)
834 			printf("Switch Rx domain: %u\n",
835 			       dev_info.switch_info.rx_domain);
836 	}
837 }
838 
839 void
840 port_summary_header_display(void)
841 {
842 	uint16_t port_number;
843 
844 	port_number = rte_eth_dev_count_avail();
845 	printf("Number of available ports: %i\n", port_number);
846 	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
847 			"Driver", "Status", "Link");
848 }
849 
850 void
851 port_summary_display(portid_t port_id)
852 {
853 	struct rte_ether_addr mac_addr;
854 	struct rte_eth_link link;
855 	struct rte_eth_dev_info dev_info;
856 	char name[RTE_ETH_NAME_MAX_LEN];
857 	int ret;
858 
859 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
860 		print_valid_ports();
861 		return;
862 	}
863 
864 	ret = eth_link_get_nowait_print_err(port_id, &link);
865 	if (ret < 0)
866 		return;
867 
868 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
869 	if (ret != 0)
870 		return;
871 
872 	rte_eth_dev_get_name_by_port(port_id, name);
873 	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
874 	if (ret != 0)
875 		return;
876 
877 	printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n",
878 		port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name,
879 		dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
880 		rte_eth_link_speed_to_str(link.link_speed));
881 }
882 
883 void
884 port_eeprom_display(portid_t port_id)
885 {
886 	struct rte_dev_eeprom_info einfo;
887 	int ret;
888 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
889 		print_valid_ports();
890 		return;
891 	}
892 
893 	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
894 	if (len_eeprom < 0) {
895 		switch (len_eeprom) {
896 		case -ENODEV:
897 			fprintf(stderr, "port index %d invalid\n", port_id);
898 			break;
899 		case -ENOTSUP:
900 			fprintf(stderr, "operation not supported by device\n");
901 			break;
902 		case -EIO:
903 			fprintf(stderr, "device is removed\n");
904 			break;
905 		default:
906 			fprintf(stderr, "Unable to get EEPROM: %d\n",
907 				len_eeprom);
908 			break;
909 		}
910 		return;
911 	}
912 
913 	char buf[len_eeprom];
914 	einfo.offset = 0;
915 	einfo.length = len_eeprom;
916 	einfo.data = buf;
917 
918 	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
919 	if (ret != 0) {
920 		switch (ret) {
921 		case -ENODEV:
922 			fprintf(stderr, "port index %d invalid\n", port_id);
923 			break;
924 		case -ENOTSUP:
925 			fprintf(stderr, "operation not supported by device\n");
926 			break;
927 		case -EIO:
928 			fprintf(stderr, "device is removed\n");
929 			break;
930 		default:
931 			fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
932 			break;
933 		}
934 		return;
935 	}
936 	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
937 	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
938 }
939 
940 void
941 port_module_eeprom_display(portid_t port_id)
942 {
943 	struct rte_eth_dev_module_info minfo;
944 	struct rte_dev_eeprom_info einfo;
945 	int ret;
946 
947 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
948 		print_valid_ports();
949 		return;
950 	}
951 
952 
953 	ret = rte_eth_dev_get_module_info(port_id, &minfo);
954 	if (ret != 0) {
955 		switch (ret) {
956 		case -ENODEV:
957 			fprintf(stderr, "port index %d invalid\n", port_id);
958 			break;
959 		case -ENOTSUP:
960 			fprintf(stderr, "operation not supported by device\n");
961 			break;
962 		case -EIO:
963 			fprintf(stderr, "device is removed\n");
964 			break;
965 		default:
966 			fprintf(stderr, "Unable to get module EEPROM: %d\n",
967 				ret);
968 			break;
969 		}
970 		return;
971 	}
972 
973 	char buf[minfo.eeprom_len];
974 	einfo.offset = 0;
975 	einfo.length = minfo.eeprom_len;
976 	einfo.data = buf;
977 
978 	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
979 	if (ret != 0) {
980 		switch (ret) {
981 		case -ENODEV:
982 			fprintf(stderr, "port index %d invalid\n", port_id);
983 			break;
984 		case -ENOTSUP:
985 			fprintf(stderr, "operation not supported by device\n");
986 			break;
987 		case -EIO:
988 			fprintf(stderr, "device is removed\n");
989 			break;
990 		default:
991 			fprintf(stderr, "Unable to get module EEPROM: %d\n",
992 				ret);
993 			break;
994 		}
995 		return;
996 	}
997 
998 	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
999 	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
1000 }
1001 
1002 int
1003 port_id_is_invalid(portid_t port_id, enum print_warning warning)
1004 {
1005 	uint16_t pid;
1006 
1007 	if (port_id == (portid_t)RTE_PORT_ALL)
1008 		return 0;
1009 
1010 	RTE_ETH_FOREACH_DEV(pid)
1011 		if (port_id == pid)
1012 			return 0;
1013 
1014 	if (warning == ENABLED_WARN)
1015 		fprintf(stderr, "Invalid port %d\n", port_id);
1016 
1017 	return 1;
1018 }
1019 
1020 void print_valid_ports(void)
1021 {
1022 	portid_t pid;
1023 
1024 	printf("The valid ports array is [");
1025 	RTE_ETH_FOREACH_DEV(pid) {
1026 		printf(" %d", pid);
1027 	}
1028 	printf(" ]\n");
1029 }
1030 
1031 static int
1032 vlan_id_is_invalid(uint16_t vlan_id)
1033 {
1034 	if (vlan_id < 4096)
1035 		return 0;
1036 	fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id);
1037 	return 1;
1038 }
1039 
1040 static int
1041 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
1042 {
1043 	const struct rte_pci_device *pci_dev;
1044 	const struct rte_bus *bus;
1045 	uint64_t pci_len;
1046 
1047 	if (reg_off & 0x3) {
1048 		fprintf(stderr,
1049 			"Port register offset 0x%X not aligned on a 4-byte boundary\n",
1050 			(unsigned int)reg_off);
1051 		return 1;
1052 	}
1053 
1054 	if (!ports[port_id].dev_info.device) {
1055 		fprintf(stderr, "Invalid device\n");
1056 		return 0;
1057 	}
1058 
1059 	bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
1060 	if (bus && !strcmp(bus->name, "pci")) {
1061 		pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
1062 	} else {
1063 		fprintf(stderr, "Not a PCI device\n");
1064 		return 1;
1065 	}
1066 
1067 	pci_len = pci_dev->mem_resource[0].len;
1068 	if (reg_off >= pci_len) {
1069 		fprintf(stderr,
1070 			"Port %d: register offset %u (0x%X) out of port PCI resource (length=%"PRIu64")\n",
1071 			port_id, (unsigned int)reg_off, (unsigned int)reg_off,
1072 			pci_len);
1073 		return 1;
1074 	}
1075 	return 0;
1076 }
1077 
1078 static int
1079 reg_bit_pos_is_invalid(uint8_t bit_pos)
1080 {
1081 	if (bit_pos <= 31)
1082 		return 0;
1083 	fprintf(stderr, "Invalid bit position %d (must be <= 31)\n", bit_pos);
1084 	return 1;
1085 }
1086 
1087 #define display_port_and_reg_off(port_id, reg_off) \
1088 	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
1089 
1090 static inline void
1091 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1092 {
1093 	display_port_and_reg_off(port_id, (unsigned)reg_off);
1094 	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
1095 }
1096 
1097 void
1098 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
1099 {
1100 	uint32_t reg_v;
1101 
1102 
1103 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1104 		return;
1105 	if (port_reg_off_is_invalid(port_id, reg_off))
1106 		return;
1107 	if (reg_bit_pos_is_invalid(bit_x))
1108 		return;
1109 	reg_v = port_id_pci_reg_read(port_id, reg_off);
1110 	display_port_and_reg_off(port_id, (unsigned)reg_off);
1111 	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
1112 }
1113 
1114 void
1115 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
1116 			   uint8_t bit1_pos, uint8_t bit2_pos)
1117 {
1118 	uint32_t reg_v;
1119 	uint8_t  l_bit;
1120 	uint8_t  h_bit;
1121 
1122 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1123 		return;
1124 	if (port_reg_off_is_invalid(port_id, reg_off))
1125 		return;
1126 	if (reg_bit_pos_is_invalid(bit1_pos))
1127 		return;
1128 	if (reg_bit_pos_is_invalid(bit2_pos))
1129 		return;
1130 	if (bit1_pos > bit2_pos)
1131 		l_bit = bit2_pos, h_bit = bit1_pos;
1132 	else
1133 		l_bit = bit1_pos, h_bit = bit2_pos;
1134 
1135 	reg_v = port_id_pci_reg_read(port_id, reg_off);
1136 	reg_v >>= l_bit;
1137 	if (h_bit < 31)
1138 		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
1139 	display_port_and_reg_off(port_id, (unsigned)reg_off);
1140 	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
1141 	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
1142 }
1143 
1144 void
1145 port_reg_display(portid_t port_id, uint32_t reg_off)
1146 {
1147 	uint32_t reg_v;
1148 
1149 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1150 		return;
1151 	if (port_reg_off_is_invalid(port_id, reg_off))
1152 		return;
1153 	reg_v = port_id_pci_reg_read(port_id, reg_off);
1154 	display_port_reg_value(port_id, reg_off, reg_v);
1155 }
1156 
1157 void
1158 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
1159 		 uint8_t bit_v)
1160 {
1161 	uint32_t reg_v;
1162 
1163 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1164 		return;
1165 	if (port_reg_off_is_invalid(port_id, reg_off))
1166 		return;
1167 	if (reg_bit_pos_is_invalid(bit_pos))
1168 		return;
1169 	if (bit_v > 1) {
1170 		fprintf(stderr, "Invalid bit value %d (must be 0 or 1)\n",
1171 			(int) bit_v);
1172 		return;
1173 	}
1174 	reg_v = port_id_pci_reg_read(port_id, reg_off);
1175 	if (bit_v == 0)
1176 		reg_v &= ~(1 << bit_pos);
1177 	else
1178 		reg_v |= (1 << bit_pos);
1179 	port_id_pci_reg_write(port_id, reg_off, reg_v);
1180 	display_port_reg_value(port_id, reg_off, reg_v);
1181 }
1182 
1183 void
1184 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
1185 		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
1186 {
1187 	uint32_t max_v;
1188 	uint32_t reg_v;
1189 	uint8_t  l_bit;
1190 	uint8_t  h_bit;
1191 
1192 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1193 		return;
1194 	if (port_reg_off_is_invalid(port_id, reg_off))
1195 		return;
1196 	if (reg_bit_pos_is_invalid(bit1_pos))
1197 		return;
1198 	if (reg_bit_pos_is_invalid(bit2_pos))
1199 		return;
1200 	if (bit1_pos > bit2_pos)
1201 		l_bit = bit2_pos, h_bit = bit1_pos;
1202 	else
1203 		l_bit = bit1_pos, h_bit = bit2_pos;
1204 
1205 	if ((h_bit - l_bit) < 31)
1206 		max_v = (1 << (h_bit - l_bit + 1)) - 1;
1207 	else
1208 		max_v = 0xFFFFFFFF;
1209 
1210 	if (value > max_v) {
1211 		fprintf(stderr, "Invalid value %u (0x%x) must be < %u (0x%x)\n",
1212 				(unsigned)value, (unsigned)value,
1213 				(unsigned)max_v, (unsigned)max_v);
1214 		return;
1215 	}
1216 	reg_v = port_id_pci_reg_read(port_id, reg_off);
1217 	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
1218 	reg_v |= (value << l_bit); /* Set changed bits */
1219 	port_id_pci_reg_write(port_id, reg_off, reg_v);
1220 	display_port_reg_value(port_id, reg_off, reg_v);
1221 }
1222 
1223 void
1224 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
1225 {
1226 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1227 		return;
1228 	if (port_reg_off_is_invalid(port_id, reg_off))
1229 		return;
1230 	port_id_pci_reg_write(port_id, reg_off, reg_v);
1231 	display_port_reg_value(port_id, reg_off, reg_v);
1232 }
1233 
1234 void
1235 port_mtu_set(portid_t port_id, uint16_t mtu)
1236 {
1237 	struct rte_port *port = &ports[port_id];
1238 	int diag;
1239 
1240 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1241 		return;
1242 
1243 	if (port->need_reconfig == 0) {
1244 		diag = rte_eth_dev_set_mtu(port_id, mtu);
1245 		if (diag != 0) {
1246 			fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
1247 			return;
1248 		}
1249 	}
1250 
1251 	port->dev_conf.rxmode.mtu = mtu;
1252 }
1253 
1254 /* Generic flow management functions. */
1255 
1256 static struct port_flow_tunnel *
1257 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
1258 {
1259 	struct port_flow_tunnel *flow_tunnel;
1260 
1261 	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1262 		if (flow_tunnel->id == port_tunnel_id)
1263 			goto out;
1264 	}
1265 	flow_tunnel = NULL;
1266 
1267 out:
1268 	return flow_tunnel;
1269 }
1270 
1271 const char *
1272 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
1273 {
1274 	const char *type;
1275 	switch (tunnel->type) {
1276 	default:
1277 		type = "unknown";
1278 		break;
1279 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1280 		type = "vxlan";
1281 		break;
1282 	case RTE_FLOW_ITEM_TYPE_GRE:
1283 		type = "gre";
1284 		break;
1285 	case RTE_FLOW_ITEM_TYPE_NVGRE:
1286 		type = "nvgre";
1287 		break;
1288 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1289 		type = "geneve";
1290 		break;
1291 	}
1292 
1293 	return type;
1294 }
1295 
1296 struct port_flow_tunnel *
1297 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
1298 {
1299 	struct rte_port *port = &ports[port_id];
1300 	struct port_flow_tunnel *flow_tunnel;
1301 
1302 	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1303 		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
1304 			goto out;
1305 	}
1306 	flow_tunnel = NULL;
1307 
1308 out:
1309 	return flow_tunnel;
1310 }
1311 
1312 void port_flow_tunnel_list(portid_t port_id)
1313 {
1314 	struct rte_port *port = &ports[port_id];
1315 	struct port_flow_tunnel *flt;
1316 
1317 	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1318 		printf("port %u tunnel #%u type=%s",
1319 			port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
1320 		if (flt->tunnel.tun_id)
1321 			printf(" id=%" PRIu64, flt->tunnel.tun_id);
1322 		printf("\n");
1323 	}
1324 }
1325 
1326 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
1327 {
1328 	struct rte_port *port = &ports[port_id];
1329 	struct port_flow_tunnel *flt;
1330 
1331 	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1332 		if (flt->id == tunnel_id)
1333 			break;
1334 	}
1335 	if (flt) {
1336 		LIST_REMOVE(flt, chain);
1337 		free(flt);
1338 		printf("port %u: flow tunnel #%u destroyed\n",
1339 			port_id, tunnel_id);
1340 	}
1341 }
1342 
1343 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
1344 {
1345 	struct rte_port *port = &ports[port_id];
1346 	enum rte_flow_item_type	type;
1347 	struct port_flow_tunnel *flt;
1348 
1349 	if (!strcmp(ops->type, "vxlan"))
1350 		type = RTE_FLOW_ITEM_TYPE_VXLAN;
1351 	else if (!strcmp(ops->type, "gre"))
1352 		type = RTE_FLOW_ITEM_TYPE_GRE;
1353 	else if (!strcmp(ops->type, "nvgre"))
1354 		type = RTE_FLOW_ITEM_TYPE_NVGRE;
1355 	else if (!strcmp(ops->type, "geneve"))
1356 		type = RTE_FLOW_ITEM_TYPE_GENEVE;
1357 	else {
1358 		fprintf(stderr, "cannot offload \"%s\" tunnel type\n",
1359 			ops->type);
1360 		return;
1361 	}
1362 	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1363 		if (flt->tunnel.type == type)
1364 			break;
1365 	}
1366 	if (!flt) {
1367 		flt = calloc(1, sizeof(*flt));
1368 		if (!flt) {
1369 			fprintf(stderr, "failed to allocate port flt object\n");
1370 			return;
1371 		}
1372 		flt->tunnel.type = type;
1373 		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
1374 				  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
1375 		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
1376 	}
1377 	printf("port %d: flow tunnel #%u type %s\n",
1378 		port_id, flt->id, ops->type);
1379 }
1380 
1381 /** Generate a port_flow entry from attributes/pattern/actions. */
1382 static struct port_flow *
1383 port_flow_new(const struct rte_flow_attr *attr,
1384 	      const struct rte_flow_item *pattern,
1385 	      const struct rte_flow_action *actions,
1386 	      struct rte_flow_error *error)
1387 {
1388 	const struct rte_flow_conv_rule rule = {
1389 		.attr_ro = attr,
1390 		.pattern_ro = pattern,
1391 		.actions_ro = actions,
1392 	};
1393 	struct port_flow *pf;
1394 	int ret;
1395 
1396 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1397 	if (ret < 0)
1398 		return NULL;
1399 	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1400 	if (!pf) {
1401 		rte_flow_error_set
1402 			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1403 			 "calloc() failed");
1404 		return NULL;
1405 	}
1406 	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1407 			  error) >= 0)
1408 		return pf;
1409 	free(pf);
1410 	return NULL;
1411 }
1412 
1413 /** Print a message out of a flow error. */
1414 static int
1415 port_flow_complain(struct rte_flow_error *error)
1416 {
1417 	static const char *const errstrlist[] = {
1418 		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1419 		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1420 		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1421 		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1422 		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1423 		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1424 		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1425 		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1426 		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1427 		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1428 		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1429 		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1430 		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1431 		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1432 		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1433 		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1434 		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1435 	};
1436 	const char *errstr;
1437 	char buf[32];
1438 	int err = rte_errno;
1439 
1440 	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1441 	    !errstrlist[error->type])
1442 		errstr = "unknown type";
1443 	else
1444 		errstr = errstrlist[error->type];
1445 	fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n",
1446 		__func__, error->type, errstr,
1447 		error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1448 					 error->cause), buf) : "",
1449 		error->message ? error->message : "(no stated reason)",
1450 		rte_strerror(err));
1451 	return -err;
1452 }
1453 
1454 static void
1455 rss_config_display(struct rte_flow_action_rss *rss_conf)
1456 {
1457 	uint8_t i;
1458 
1459 	if (rss_conf == NULL) {
1460 		fprintf(stderr, "Invalid rule\n");
1461 		return;
1462 	}
1463 
1464 	printf("RSS:\n"
1465 	       " queues:");
1466 	if (rss_conf->queue_num == 0)
1467 		printf(" none");
1468 	for (i = 0; i < rss_conf->queue_num; i++)
1469 		printf(" %d", rss_conf->queue[i]);
1470 	printf("\n");
1471 
1472 	printf(" function: ");
1473 	switch (rss_conf->func) {
1474 	case RTE_ETH_HASH_FUNCTION_DEFAULT:
1475 		printf("default\n");
1476 		break;
1477 	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1478 		printf("toeplitz\n");
1479 		break;
1480 	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1481 		printf("simple_xor\n");
1482 		break;
1483 	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1484 		printf("symmetric_toeplitz\n");
1485 		break;
1486 	default:
1487 		printf("Unknown function\n");
1488 		return;
1489 	}
1490 
1491 	printf(" types:\n");
1492 	if (rss_conf->types == 0) {
1493 		printf("  none\n");
1494 		return;
1495 	}
1496 	for (i = 0; rss_type_table[i].str; i++) {
1497 		if ((rss_conf->types &
1498 		    rss_type_table[i].rss_type) ==
1499 		    rss_type_table[i].rss_type &&
1500 		    rss_type_table[i].rss_type != 0)
1501 			printf("  %s\n", rss_type_table[i].str);
1502 	}
1503 }
1504 
1505 static struct port_indirect_action *
1506 action_get_by_id(portid_t port_id, uint32_t id)
1507 {
1508 	struct rte_port *port;
1509 	struct port_indirect_action **ppia;
1510 	struct port_indirect_action *pia = NULL;
1511 
1512 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1513 	    port_id == (portid_t)RTE_PORT_ALL)
1514 		return NULL;
1515 	port = &ports[port_id];
1516 	ppia = &port->actions_list;
1517 	while (*ppia) {
1518 		if ((*ppia)->id == id) {
1519 			pia = *ppia;
1520 			break;
1521 		}
1522 		ppia = &(*ppia)->next;
1523 	}
1524 	if (!pia)
1525 		fprintf(stderr,
1526 			"Failed to find indirect action #%u on port %u\n",
1527 			id, port_id);
1528 	return pia;
1529 }
1530 
1531 static int
1532 action_alloc(portid_t port_id, uint32_t id,
1533 	     struct port_indirect_action **action)
1534 {
1535 	struct rte_port *port;
1536 	struct port_indirect_action **ppia;
1537 	struct port_indirect_action *pia = NULL;
1538 
1539 	*action = NULL;
1540 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1541 	    port_id == (portid_t)RTE_PORT_ALL)
1542 		return -EINVAL;
1543 	port = &ports[port_id];
1544 	if (id == UINT32_MAX) {
1545 		/* taking first available ID */
1546 		if (port->actions_list) {
1547 			if (port->actions_list->id == UINT32_MAX - 1) {
1548 				fprintf(stderr,
1549 					"Highest indirect action ID is already assigned, delete it first\n");
1550 				return -ENOMEM;
1551 			}
1552 			id = port->actions_list->id + 1;
1553 		} else {
1554 			id = 0;
1555 		}
1556 	}
1557 	pia = calloc(1, sizeof(*pia));
1558 	if (!pia) {
1559 		fprintf(stderr,
1560 			"Allocation of port %u indirect action failed\n",
1561 			port_id);
1562 		return -ENOMEM;
1563 	}
1564 	ppia = &port->actions_list;
1565 	while (*ppia && (*ppia)->id > id)
1566 		ppia = &(*ppia)->next;
1567 	if (*ppia && (*ppia)->id == id) {
1568 		fprintf(stderr,
1569 			"Indirect action #%u is already assigned, delete it first\n",
1570 			id);
1571 		free(pia);
1572 		return -EINVAL;
1573 	}
1574 	pia->next = *ppia;
1575 	pia->id = id;
1576 	*ppia = pia;
1577 	*action = pia;
1578 	return 0;
1579 }
1580 
1581 /** Create indirect action */
1582 int
1583 port_action_handle_create(portid_t port_id, uint32_t id,
1584 			  const struct rte_flow_indir_action_conf *conf,
1585 			  const struct rte_flow_action *action)
1586 {
1587 	struct port_indirect_action *pia;
1588 	int ret;
1589 	struct rte_flow_error error;
1590 	struct rte_port *port;
1591 
1592 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1593 	    port_id == (portid_t)RTE_PORT_ALL)
1594 		return -EINVAL;
1595 
1596 	ret = action_alloc(port_id, id, &pia);
1597 	if (ret)
1598 		return ret;
1599 
1600 	port = &ports[port_id];
1601 
1602 	if (conf->transfer)
1603 		port_id = port->flow_transfer_proxy;
1604 
1605 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1606 	    port_id == (portid_t)RTE_PORT_ALL)
1607 		return -EINVAL;
1608 
1609 	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
1610 		struct rte_flow_action_age *age =
1611 			(struct rte_flow_action_age *)(uintptr_t)(action->conf);
1612 
1613 		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
1614 		age->context = &pia->age_type;
1615 	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
1616 		struct rte_flow_action_conntrack *ct =
1617 		(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);
1618 
1619 		memcpy(ct, &conntrack_context, sizeof(*ct));
1620 	}
1621 	/* Poisoning to make sure PMDs update it in case of error. */
1622 	memset(&error, 0x22, sizeof(error));
1623 	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
1624 						    &error);
1625 	if (!pia->handle) {
1626 		uint32_t destroy_id = pia->id;
1627 		port_action_handle_destroy(port_id, 1, &destroy_id);
1628 		return port_flow_complain(&error);
1629 	}
1630 	pia->type = action->type;
1631 	pia->transfer = conf->transfer;
1632 	printf("Indirect action #%u created\n", pia->id);
1633 	return 0;
1634 }
1635 
1636 /** Destroy indirect action */
1637 int
1638 port_action_handle_destroy(portid_t port_id,
1639 			   uint32_t n,
1640 			   const uint32_t *actions)
1641 {
1642 	struct rte_port *port;
1643 	struct port_indirect_action **tmp;
1644 	uint32_t c = 0;
1645 	int ret = 0;
1646 
1647 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1648 	    port_id == (portid_t)RTE_PORT_ALL)
1649 		return -EINVAL;
1650 	port = &ports[port_id];
1651 	tmp = &port->actions_list;
1652 	while (*tmp) {
1653 		uint32_t i;
1654 
1655 		for (i = 0; i != n; ++i) {
1656 			struct rte_flow_error error;
1657 			struct port_indirect_action *pia = *tmp;
1658 			portid_t port_id_eff = port_id;
1659 
1660 			if (actions[i] != pia->id)
1661 				continue;
1662 
1663 			if (pia->transfer)
1664 				port_id_eff = port->flow_transfer_proxy;
1665 
1666 			if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
1667 			    port_id_eff == (portid_t)RTE_PORT_ALL)
1668 				return -EINVAL;
1669 
1670 			/*
1671 			 * Poisoning to make sure PMDs update it in case
1672 			 * of error.
1673 			 */
1674 			memset(&error, 0x33, sizeof(error));
1675 
1676 			if (pia->handle && rte_flow_action_handle_destroy(
1677 					port_id_eff, pia->handle, &error)) {
1678 				ret = port_flow_complain(&error);
1679 				continue;
1680 			}
1681 			*tmp = pia->next;
1682 			printf("Indirect action #%u destroyed\n", pia->id);
1683 			free(pia);
1684 			break;
1685 		}
1686 		if (i == n)
1687 			tmp = &(*tmp)->next;
1688 		++c;
1689 	}
1690 	return ret;
1691 }
1692 
1693 
1694 /** Get indirect action by port + id */
1695 struct rte_flow_action_handle *
1696 port_action_handle_get_by_id(portid_t port_id, uint32_t id)
1697 {
1698 
1699 	struct port_indirect_action *pia = action_get_by_id(port_id, id);
1700 
1701 	return (pia) ? pia->handle : NULL;
1702 }
1703 
1704 /** Update indirect action */
1705 int
1706 port_action_handle_update(portid_t port_id, uint32_t id,
1707 			  const struct rte_flow_action *action)
1708 {
1709 	struct rte_flow_error error;
1710 	struct rte_flow_action_handle *action_handle;
1711 	struct port_indirect_action *pia;
1712 	struct rte_port *port;
1713 	const void *update;
1714 
1715 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1716 	    port_id == (portid_t)RTE_PORT_ALL)
1717 		return -EINVAL;
1718 
1719 	port = &ports[port_id];
1720 
1721 	action_handle = port_action_handle_get_by_id(port_id, id);
1722 	if (!action_handle)
1723 		return -EINVAL;
1724 	pia = action_get_by_id(port_id, id);
1725 	if (!pia)
1726 		return -EINVAL;
1727 	switch (pia->type) {
1728 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
1729 		update = action->conf;
1730 		break;
1731 	default:
1732 		update = action;
1733 		break;
1734 	}
1735 
1736 	if (pia->transfer)
1737 		port_id = port->flow_transfer_proxy;
1738 
1739 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1740 	    port_id == (portid_t)RTE_PORT_ALL)
1741 		return -EINVAL;
1742 
1743 	if (rte_flow_action_handle_update(port_id, action_handle, update,
1744 					  &error)) {
1745 		return port_flow_complain(&error);
1746 	}
1747 	printf("Indirect action #%u updated\n", id);
1748 	return 0;
1749 }
1750 
1751 int
1752 port_action_handle_query(portid_t port_id, uint32_t id)
1753 {
1754 	struct rte_flow_error error;
1755 	struct port_indirect_action *pia;
1756 	union {
1757 		struct rte_flow_query_count count;
1758 		struct rte_flow_query_age age;
1759 		struct rte_flow_action_conntrack ct;
1760 	} query;
1761 	portid_t port_id_eff = port_id;
1762 	struct rte_port *port;
1763 
1764 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1765 	    port_id == (portid_t)RTE_PORT_ALL)
1766 		return -EINVAL;
1767 
1768 	port = &ports[port_id];
1769 
1770 	pia = action_get_by_id(port_id, id);
1771 	if (!pia)
1772 		return -EINVAL;
1773 	switch (pia->type) {
1774 	case RTE_FLOW_ACTION_TYPE_AGE:
1775 	case RTE_FLOW_ACTION_TYPE_COUNT:
1776 		break;
1777 	default:
1778 		fprintf(stderr,
1779 			"Indirect action %u (type: %d) on port %u doesn't support query\n",
1780 			id, pia->type, port_id);
1781 		return -ENOTSUP;
1782 	}
1783 
1784 	if (pia->transfer)
1785 		port_id_eff = port->flow_transfer_proxy;
1786 
1787 	if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
1788 	    port_id_eff == (portid_t)RTE_PORT_ALL)
1789 		return -EINVAL;
1790 
1791 	/* Poisoning to make sure PMDs update it in case of error. */
1792 	memset(&error, 0x55, sizeof(error));
1793 	memset(&query, 0, sizeof(query));
1794 	if (rte_flow_action_handle_query(port_id_eff, pia->handle, &query,
1795 					 &error))
1796 		return port_flow_complain(&error);
1797 	switch (pia->type) {
1798 	case RTE_FLOW_ACTION_TYPE_AGE:
1799 		printf("Indirect AGE action:\n"
1800 		       " aged: %u\n"
1801 		       " sec_since_last_hit_valid: %u\n"
1802 		       " sec_since_last_hit: %" PRIu32 "\n",
1803 		       query.age.aged,
1804 		       query.age.sec_since_last_hit_valid,
1805 		       query.age.sec_since_last_hit);
1806 		break;
1807 	case RTE_FLOW_ACTION_TYPE_COUNT:
1808 		printf("Indirect COUNT action:\n"
1809 		       " hits_set: %u\n"
1810 		       " bytes_set: %u\n"
1811 		       " hits: %" PRIu64 "\n"
1812 		       " bytes: %" PRIu64 "\n",
1813 		       query.count.hits_set,
1814 		       query.count.bytes_set,
1815 		       query.count.hits,
1816 		       query.count.bytes);
1817 		break;
1818 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
1819 		printf("Conntrack Context:\n"
1820 		       "  Peer: %u, Flow dir: %s, Enable: %u\n"
1821 		       "  Live: %u, SACK: %u, CACK: %u\n"
1822 		       "  Packet dir: %s, Liberal: %u, State: %u\n"
1823 		       "  Factor: %u, Retrans: %u, TCP flags: %u\n"
1824 		       "  Last Seq: %u, Last ACK: %u\n"
1825 		       "  Last Win: %u, Last End: %u\n",
1826 		       query.ct.peer_port,
1827 		       query.ct.is_original_dir ? "Original" : "Reply",
1828 		       query.ct.enable, query.ct.live_connection,
1829 		       query.ct.selective_ack, query.ct.challenge_ack_passed,
1830 		       query.ct.last_direction ? "Original" : "Reply",
1831 		       query.ct.liberal_mode, query.ct.state,
1832 		       query.ct.max_ack_window, query.ct.retransmission_limit,
1833 		       query.ct.last_index, query.ct.last_seq,
1834 		       query.ct.last_ack, query.ct.last_window,
1835 		       query.ct.last_end);
1836 		printf("  Original Dir:\n"
1837 		       "    scale: %u, fin: %u, ack seen: %u\n"
1838 		       " unacked data: %u\n    Sent end: %u,"
1839 		       "    Reply end: %u, Max win: %u, Max ACK: %u\n",
1840 		       query.ct.original_dir.scale,
1841 		       query.ct.original_dir.close_initiated,
1842 		       query.ct.original_dir.last_ack_seen,
1843 		       query.ct.original_dir.data_unacked,
1844 		       query.ct.original_dir.sent_end,
1845 		       query.ct.original_dir.reply_end,
1846 		       query.ct.original_dir.max_win,
1847 		       query.ct.original_dir.max_ack);
1848 		printf("  Reply Dir:\n"
1849 		       "    scale: %u, fin: %u, ack seen: %u\n"
1850 		       " unacked data: %u\n    Sent end: %u,"
1851 		       "    Reply end: %u, Max win: %u, Max ACK: %u\n",
1852 		       query.ct.reply_dir.scale,
1853 		       query.ct.reply_dir.close_initiated,
1854 		       query.ct.reply_dir.last_ack_seen,
1855 		       query.ct.reply_dir.data_unacked,
1856 		       query.ct.reply_dir.sent_end,
1857 		       query.ct.reply_dir.reply_end,
1858 		       query.ct.reply_dir.max_win,
1859 		       query.ct.reply_dir.max_ack);
1860 		break;
1861 	default:
1862 		fprintf(stderr,
1863 			"Indirect action %u (type: %d) on port %u doesn't support query\n",
1864 			id, pia->type, port_id);
1865 		break;
1866 	}
1867 	return 0;
1868 }
1869 
1870 static struct port_flow_tunnel *
1871 port_flow_tunnel_offload_cmd_prep(portid_t port_id,
1872 				  const struct rte_flow_item *pattern,
1873 				  const struct rte_flow_action *actions,
1874 				  const struct tunnel_ops *tunnel_ops)
1875 {
1876 	int ret;
1877 	struct rte_port *port;
1878 	struct port_flow_tunnel *pft;
1879 	struct rte_flow_error error;
1880 
1881 	port = &ports[port_id];
1882 	pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
1883 	if (!pft) {
1884 		fprintf(stderr, "failed to locate port flow tunnel #%u\n",
1885 			tunnel_ops->id);
1886 		return NULL;
1887 	}
1888 	if (tunnel_ops->actions) {
1889 		uint32_t num_actions;
1890 		const struct rte_flow_action *aptr;
1891 
1892 		ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
1893 						&pft->pmd_actions,
1894 						&pft->num_pmd_actions,
1895 						&error);
1896 		if (ret) {
1897 			port_flow_complain(&error);
1898 			return NULL;
1899 		}
1900 		for (aptr = actions, num_actions = 1;
1901 		     aptr->type != RTE_FLOW_ACTION_TYPE_END;
1902 		     aptr++, num_actions++);
1903 		pft->actions = malloc(
1904 				(num_actions +  pft->num_pmd_actions) *
1905 				sizeof(actions[0]));
1906 		if (!pft->actions) {
1907 			rte_flow_tunnel_action_decap_release(
1908 					port_id, pft->actions,
1909 					pft->num_pmd_actions, &error);
1910 			return NULL;
1911 		}
1912 		rte_memcpy(pft->actions, pft->pmd_actions,
1913 			   pft->num_pmd_actions * sizeof(actions[0]));
1914 		rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
1915 			   num_actions * sizeof(actions[0]));
1916 	}
1917 	if (tunnel_ops->items) {
1918 		uint32_t num_items;
1919 		const struct rte_flow_item *iptr;
1920 
1921 		ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
1922 					    &pft->pmd_items,
1923 					    &pft->num_pmd_items,
1924 					    &error);
1925 		if (ret) {
1926 			port_flow_complain(&error);
1927 			return NULL;
1928 		}
1929 		for (iptr = pattern, num_items = 1;
1930 		     iptr->type != RTE_FLOW_ITEM_TYPE_END;
1931 		     iptr++, num_items++);
1932 		pft->items = malloc((num_items + pft->num_pmd_items) *
1933 				    sizeof(pattern[0]));
1934 		if (!pft->items) {
1935 			rte_flow_tunnel_item_release(
1936 					port_id, pft->pmd_items,
1937 					pft->num_pmd_items, &error);
1938 			return NULL;
1939 		}
1940 		rte_memcpy(pft->items, pft->pmd_items,
1941 			   pft->num_pmd_items * sizeof(pattern[0]));
1942 		rte_memcpy(pft->items + pft->num_pmd_items, pattern,
1943 			   num_items * sizeof(pattern[0]));
1944 	}
1945 
1946 	return pft;
1947 }
1948 
1949 static void
1950 port_flow_tunnel_offload_cmd_release(portid_t port_id,
1951 				     const struct tunnel_ops *tunnel_ops,
1952 				     struct port_flow_tunnel *pft)
1953 {
1954 	struct rte_flow_error error;
1955 
1956 	if (tunnel_ops->actions) {
1957 		free(pft->actions);
1958 		rte_flow_tunnel_action_decap_release(
1959 			port_id, pft->pmd_actions,
1960 			pft->num_pmd_actions, &error);
1961 		pft->actions = NULL;
1962 		pft->pmd_actions = NULL;
1963 	}
1964 	if (tunnel_ops->items) {
1965 		free(pft->items);
1966 		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
1967 					     pft->num_pmd_items,
1968 					     &error);
1969 		pft->items = NULL;
1970 		pft->pmd_items = NULL;
1971 	}
1972 }
1973 
1974 /** Add port meter policy */
1975 int
1976 port_meter_policy_add(portid_t port_id, uint32_t policy_id,
1977 			const struct rte_flow_action *actions)
1978 {
1979 	struct rte_mtr_error error;
1980 	const struct rte_flow_action *act = actions;
1981 	const struct rte_flow_action *start;
1982 	struct rte_mtr_meter_policy_params policy;
1983 	uint32_t i = 0, act_n;
1984 	int ret;
1985 
1986 	for (i = 0; i < RTE_COLORS; i++) {
1987 		for (act_n = 0, start = act;
1988 			act->type != RTE_FLOW_ACTION_TYPE_END; act++)
1989 			act_n++;
1990 		if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
1991 			policy.actions[i] = start;
1992 		else
1993 			policy.actions[i] = NULL;
1994 		act++;
1995 	}
1996 	ret = rte_mtr_meter_policy_add(port_id,
1997 			policy_id,
1998 			&policy, &error);
1999 	if (ret)
2000 		print_mtr_err_msg(&error);
2001 	return ret;
2002 }
2003 
2004 /** Validate flow rule. */
2005 int
2006 port_flow_validate(portid_t port_id,
2007 		   const struct rte_flow_attr *attr,
2008 		   const struct rte_flow_item *pattern,
2009 		   const struct rte_flow_action *actions,
2010 		   const struct tunnel_ops *tunnel_ops)
2011 {
2012 	struct rte_flow_error error;
2013 	struct port_flow_tunnel *pft = NULL;
2014 	struct rte_port *port;
2015 
2016 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2017 	    port_id == (portid_t)RTE_PORT_ALL)
2018 		return -EINVAL;
2019 
2020 	port = &ports[port_id];
2021 
2022 	if (attr->transfer)
2023 		port_id = port->flow_transfer_proxy;
2024 
2025 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2026 	    port_id == (portid_t)RTE_PORT_ALL)
2027 		return -EINVAL;
2028 
2029 	/* Poisoning to make sure PMDs update it in case of error. */
2030 	memset(&error, 0x11, sizeof(error));
2031 	if (tunnel_ops->enabled) {
2032 		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
2033 							actions, tunnel_ops);
2034 		if (!pft)
2035 			return -ENOENT;
2036 		if (pft->items)
2037 			pattern = pft->items;
2038 		if (pft->actions)
2039 			actions = pft->actions;
2040 	}
2041 	if (rte_flow_validate(port_id, attr, pattern, actions, &error))
2042 		return port_flow_complain(&error);
2043 	if (tunnel_ops->enabled)
2044 		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
2045 	printf("Flow rule validated\n");
2046 	return 0;
2047 }
2048 
2049 /** Return age action structure if exists, otherwise NULL. */
2050 static struct rte_flow_action_age *
2051 age_action_get(const struct rte_flow_action *actions)
2052 {
2053 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2054 		switch (actions->type) {
2055 		case RTE_FLOW_ACTION_TYPE_AGE:
2056 			return (struct rte_flow_action_age *)
2057 				(uintptr_t)actions->conf;
2058 		default:
2059 			break;
2060 		}
2061 	}
2062 	return NULL;
2063 }
2064 
2065 /** Create flow rule. */
2066 int
2067 port_flow_create(portid_t port_id,
2068 		 const struct rte_flow_attr *attr,
2069 		 const struct rte_flow_item *pattern,
2070 		 const struct rte_flow_action *actions,
2071 		 const struct tunnel_ops *tunnel_ops)
2072 {
2073 	struct rte_flow *flow;
2074 	struct rte_port *port;
2075 	struct port_flow *pf;
2076 	uint32_t id = 0;
2077 	struct rte_flow_error error;
2078 	struct port_flow_tunnel *pft = NULL;
2079 	struct rte_flow_action_age *age = age_action_get(actions);
2080 
2081 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2082 	    port_id == (portid_t)RTE_PORT_ALL)
2083 		return -EINVAL;
2084 
2085 	port = &ports[port_id];
2086 
2087 	if (attr->transfer)
2088 		port_id = port->flow_transfer_proxy;
2089 
2090 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2091 	    port_id == (portid_t)RTE_PORT_ALL)
2092 		return -EINVAL;
2093 
2094 	if (port->flow_list) {
2095 		if (port->flow_list->id == UINT32_MAX) {
2096 			fprintf(stderr,
2097 				"Highest rule ID is already assigned, delete it first");
2098 			return -ENOMEM;
2099 		}
2100 		id = port->flow_list->id + 1;
2101 	}
2102 	if (tunnel_ops->enabled) {
2103 		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
2104 							actions, tunnel_ops);
2105 		if (!pft)
2106 			return -ENOENT;
2107 		if (pft->items)
2108 			pattern = pft->items;
2109 		if (pft->actions)
2110 			actions = pft->actions;
2111 	}
2112 	pf = port_flow_new(attr, pattern, actions, &error);
2113 	if (!pf)
2114 		return port_flow_complain(&error);
2115 	if (age) {
2116 		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
2117 		age->context = &pf->age_type;
2118 	}
2119 	/* Poisoning to make sure PMDs update it in case of error. */
2120 	memset(&error, 0x22, sizeof(error));
2121 	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
2122 	if (!flow) {
2123 		if (tunnel_ops->enabled)
2124 			port_flow_tunnel_offload_cmd_release(port_id,
2125 							     tunnel_ops, pft);
2126 		free(pf);
2127 		return port_flow_complain(&error);
2128 	}
2129 	pf->next = port->flow_list;
2130 	pf->id = id;
2131 	pf->flow = flow;
2132 	port->flow_list = pf;
2133 	if (tunnel_ops->enabled)
2134 		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
2135 	printf("Flow rule #%u created\n", pf->id);
2136 	return 0;
2137 }
2138 
2139 /** Destroy a number of flow rules. */
2140 int
2141 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
2142 {
2143 	struct rte_port *port;
2144 	struct port_flow **tmp;
2145 	uint32_t c = 0;
2146 	int ret = 0;
2147 
2148 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2149 	    port_id == (portid_t)RTE_PORT_ALL)
2150 		return -EINVAL;
2151 	port = &ports[port_id];
2152 	tmp = &port->flow_list;
2153 	while (*tmp) {
2154 		uint32_t i;
2155 
2156 		for (i = 0; i != n; ++i) {
2157 			portid_t port_id_eff = port_id;
2158 			struct rte_flow_error error;
2159 			struct port_flow *pf = *tmp;
2160 
2161 			if (rule[i] != pf->id)
2162 				continue;
2163 			/*
2164 			 * Poisoning to make sure PMDs update it in case
2165 			 * of error.
2166 			 */
2167 			memset(&error, 0x33, sizeof(error));
2168 
2169 			if (pf->rule.attr->transfer)
2170 				port_id_eff = port->flow_transfer_proxy;
2171 
2172 			if (port_id_is_invalid(port_id_eff, ENABLED_WARN) ||
2173 			    port_id_eff == (portid_t)RTE_PORT_ALL)
2174 				return -EINVAL;
2175 
2176 			if (rte_flow_destroy(port_id_eff, pf->flow, &error)) {
2177 				ret = port_flow_complain(&error);
2178 				continue;
2179 			}
2180 			printf("Flow rule #%u destroyed\n", pf->id);
2181 			*tmp = pf->next;
2182 			free(pf);
2183 			break;
2184 		}
2185 		if (i == n)
2186 			tmp = &(*tmp)->next;
2187 		++c;
2188 	}
2189 	return ret;
2190 }
2191 
2192 /** Remove all flow rules. */
2193 int
2194 port_flow_flush(portid_t port_id)
2195 {
2196 	struct rte_flow_error error;
2197 	struct rte_port *port;
2198 	int ret = 0;
2199 
2200 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2201 		port_id == (portid_t)RTE_PORT_ALL)
2202 		return -EINVAL;
2203 
2204 	port = &ports[port_id];
2205 
2206 	if (port->flow_list == NULL)
2207 		return ret;
2208 
2209 	/* Poisoning to make sure PMDs update it in case of error. */
2210 	memset(&error, 0x44, sizeof(error));
2211 	if (rte_flow_flush(port_id, &error)) {
2212 		port_flow_complain(&error);
2213 	}
2214 
2215 	while (port->flow_list) {
2216 		struct port_flow *pf = port->flow_list->next;
2217 
2218 		free(port->flow_list);
2219 		port->flow_list = pf;
2220 	}
2221 	return ret;
2222 }
2223 
2224 /** Dump flow rules. */
2225 int
2226 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id,
2227 		const char *file_name)
2228 {
2229 	int ret = 0;
2230 	FILE *file = stdout;
2231 	struct rte_flow_error error;
2232 	struct rte_port *port;
2233 	struct port_flow *pflow;
2234 	struct rte_flow *tmpFlow = NULL;
2235 	bool found = false;
2236 
2237 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2238 		port_id == (portid_t)RTE_PORT_ALL)
2239 		return -EINVAL;
2240 
2241 	if (!dump_all) {
2242 		port = &ports[port_id];
2243 		pflow = port->flow_list;
2244 		while (pflow) {
2245 			if (rule_id != pflow->id) {
2246 				pflow = pflow->next;
2247 			} else {
2248 				tmpFlow = pflow->flow;
2249 				if (tmpFlow)
2250 					found = true;
2251 				break;
2252 			}
2253 		}
2254 		if (found == false) {
2255 			fprintf(stderr, "Failed to dump to flow %d\n", rule_id);
2256 			return -EINVAL;
2257 		}
2258 	}
2259 
2260 	if (file_name && strlen(file_name)) {
2261 		file = fopen(file_name, "w");
2262 		if (!file) {
2263 			fprintf(stderr, "Failed to create file %s: %s\n",
2264 				file_name, strerror(errno));
2265 			return -errno;
2266 		}
2267 	}
2268 
2269 	if (!dump_all)
2270 		ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error);
2271 	else
2272 		ret = rte_flow_dev_dump(port_id, NULL, file, &error);
2273 	if (ret) {
2274 		port_flow_complain(&error);
2275 		fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret));
2276 	} else
2277 		printf("Flow dump finished\n");
2278 	if (file_name && strlen(file_name))
2279 		fclose(file);
2280 	return ret;
2281 }
2282 
2283 /** Query a flow rule. */
2284 int
2285 port_flow_query(portid_t port_id, uint32_t rule,
2286 		const struct rte_flow_action *action)
2287 {
2288 	struct rte_flow_error error;
2289 	struct rte_port *port;
2290 	struct port_flow *pf;
2291 	const char *name;
2292 	union {
2293 		struct rte_flow_query_count count;
2294 		struct rte_flow_action_rss rss_conf;
2295 		struct rte_flow_query_age age;
2296 	} query;
2297 	int ret;
2298 
2299 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2300 	    port_id == (portid_t)RTE_PORT_ALL)
2301 		return -EINVAL;
2302 	port = &ports[port_id];
2303 	for (pf = port->flow_list; pf; pf = pf->next)
2304 		if (pf->id == rule)
2305 			break;
2306 	if (!pf) {
2307 		fprintf(stderr, "Flow rule #%u not found\n", rule);
2308 		return -ENOENT;
2309 	}
2310 
2311 	if (pf->rule.attr->transfer)
2312 		port_id = port->flow_transfer_proxy;
2313 
2314 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2315 	    port_id == (portid_t)RTE_PORT_ALL)
2316 		return -EINVAL;
2317 
2318 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
2319 			    &name, sizeof(name),
2320 			    (void *)(uintptr_t)action->type, &error);
2321 	if (ret < 0)
2322 		return port_flow_complain(&error);
2323 	switch (action->type) {
2324 	case RTE_FLOW_ACTION_TYPE_COUNT:
2325 	case RTE_FLOW_ACTION_TYPE_RSS:
2326 	case RTE_FLOW_ACTION_TYPE_AGE:
2327 		break;
2328 	default:
2329 		fprintf(stderr, "Cannot query action type %d (%s)\n",
2330 			action->type, name);
2331 		return -ENOTSUP;
2332 	}
2333 	/* Poisoning to make sure PMDs update it in case of error. */
2334 	memset(&error, 0x55, sizeof(error));
2335 	memset(&query, 0, sizeof(query));
2336 	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
2337 		return port_flow_complain(&error);
2338 	switch (action->type) {
2339 	case RTE_FLOW_ACTION_TYPE_COUNT:
2340 		printf("%s:\n"
2341 		       " hits_set: %u\n"
2342 		       " bytes_set: %u\n"
2343 		       " hits: %" PRIu64 "\n"
2344 		       " bytes: %" PRIu64 "\n",
2345 		       name,
2346 		       query.count.hits_set,
2347 		       query.count.bytes_set,
2348 		       query.count.hits,
2349 		       query.count.bytes);
2350 		break;
2351 	case RTE_FLOW_ACTION_TYPE_RSS:
2352 		rss_config_display(&query.rss_conf);
2353 		break;
2354 	case RTE_FLOW_ACTION_TYPE_AGE:
2355 		printf("%s:\n"
2356 		       " aged: %u\n"
2357 		       " sec_since_last_hit_valid: %u\n"
2358 		       " sec_since_last_hit: %" PRIu32 "\n",
2359 		       name,
2360 		       query.age.aged,
2361 		       query.age.sec_since_last_hit_valid,
2362 		       query.age.sec_since_last_hit);
2363 		break;
2364 	default:
2365 		fprintf(stderr,
2366 			"Cannot display result for action type %d (%s)\n",
2367 			action->type, name);
2368 		break;
2369 	}
2370 	return 0;
2371 }
2372 
2373 /** List simply and destroy all aged flows. */
2374 void
2375 port_flow_aged(portid_t port_id, uint8_t destroy)
2376 {
2377 	void **contexts;
2378 	int nb_context, total = 0, idx;
2379 	struct rte_flow_error error;
2380 	enum age_action_context_type *type;
2381 	union {
2382 		struct port_flow *pf;
2383 		struct port_indirect_action *pia;
2384 	} ctx;
2385 
2386 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2387 	    port_id == (portid_t)RTE_PORT_ALL)
2388 		return;
2389 	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
2390 	printf("Port %u total aged flows: %d\n", port_id, total);
2391 	if (total < 0) {
2392 		port_flow_complain(&error);
2393 		return;
2394 	}
2395 	if (total == 0)
2396 		return;
2397 	contexts = malloc(sizeof(void *) * total);
2398 	if (contexts == NULL) {
2399 		fprintf(stderr, "Cannot allocate contexts for aged flow\n");
2400 		return;
2401 	}
2402 	printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
2403 	nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
2404 	if (nb_context != total) {
2405 		fprintf(stderr,
2406 			"Port:%d get aged flows count(%d) != total(%d)\n",
2407 			port_id, nb_context, total);
2408 		free(contexts);
2409 		return;
2410 	}
2411 	total = 0;
2412 	for (idx = 0; idx < nb_context; idx++) {
2413 		if (!contexts[idx]) {
2414 			fprintf(stderr, "Error: get Null context in port %u\n",
2415 				port_id);
2416 			continue;
2417 		}
2418 		type = (enum age_action_context_type *)contexts[idx];
2419 		switch (*type) {
2420 		case ACTION_AGE_CONTEXT_TYPE_FLOW:
2421 			ctx.pf = container_of(type, struct port_flow, age_type);
2422 			printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32
2423 								 "\t%c%c%c\t\n",
2424 			       "Flow",
2425 			       ctx.pf->id,
2426 			       ctx.pf->rule.attr->group,
2427 			       ctx.pf->rule.attr->priority,
2428 			       ctx.pf->rule.attr->ingress ? 'i' : '-',
2429 			       ctx.pf->rule.attr->egress ? 'e' : '-',
2430 			       ctx.pf->rule.attr->transfer ? 't' : '-');
2431 			if (destroy && !port_flow_destroy(port_id, 1,
2432 							  &ctx.pf->id))
2433 				total++;
2434 			break;
2435 		case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
2436 			ctx.pia = container_of(type,
2437 					struct port_indirect_action, age_type);
2438 			printf("%-20s\t%" PRIu32 "\n", "Indirect action",
2439 			       ctx.pia->id);
2440 			break;
2441 		default:
2442 			fprintf(stderr, "Error: invalid context type %u\n",
2443 				port_id);
2444 			break;
2445 		}
2446 	}
2447 	printf("\n%d flows destroyed\n", total);
2448 	free(contexts);
2449 }
2450 
2451 /** List flow rules. */
2452 void
2453 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
2454 {
2455 	struct rte_port *port;
2456 	struct port_flow *pf;
2457 	struct port_flow *list = NULL;
2458 	uint32_t i;
2459 
2460 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2461 	    port_id == (portid_t)RTE_PORT_ALL)
2462 		return;
2463 	port = &ports[port_id];
2464 	if (!port->flow_list)
2465 		return;
2466 	/* Sort flows by group, priority and ID. */
2467 	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
2468 		struct port_flow **tmp;
2469 		const struct rte_flow_attr *curr = pf->rule.attr;
2470 
2471 		if (n) {
2472 			/* Filter out unwanted groups. */
2473 			for (i = 0; i != n; ++i)
2474 				if (curr->group == group[i])
2475 					break;
2476 			if (i == n)
2477 				continue;
2478 		}
2479 		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
2480 			const struct rte_flow_attr *comp = (*tmp)->rule.attr;
2481 
2482 			if (curr->group > comp->group ||
2483 			    (curr->group == comp->group &&
2484 			     curr->priority > comp->priority) ||
2485 			    (curr->group == comp->group &&
2486 			     curr->priority == comp->priority &&
2487 			     pf->id > (*tmp)->id))
2488 				continue;
2489 			break;
2490 		}
2491 		pf->tmp = *tmp;
2492 		*tmp = pf;
2493 	}
2494 	printf("ID\tGroup\tPrio\tAttr\tRule\n");
2495 	for (pf = list; pf != NULL; pf = pf->tmp) {
2496 		const struct rte_flow_item *item = pf->rule.pattern;
2497 		const struct rte_flow_action *action = pf->rule.actions;
2498 		const char *name;
2499 
2500 		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
2501 		       pf->id,
2502 		       pf->rule.attr->group,
2503 		       pf->rule.attr->priority,
2504 		       pf->rule.attr->ingress ? 'i' : '-',
2505 		       pf->rule.attr->egress ? 'e' : '-',
2506 		       pf->rule.attr->transfer ? 't' : '-');
2507 		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
2508 			if ((uint32_t)item->type > INT_MAX)
2509 				name = "PMD_INTERNAL";
2510 			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
2511 					  &name, sizeof(name),
2512 					  (void *)(uintptr_t)item->type,
2513 					  NULL) <= 0)
2514 				name = "[UNKNOWN]";
2515 			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
2516 				printf("%s ", name);
2517 			++item;
2518 		}
2519 		printf("=>");
2520 		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
2521 			if ((uint32_t)action->type > INT_MAX)
2522 				name = "PMD_INTERNAL";
2523 			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
2524 					  &name, sizeof(name),
2525 					  (void *)(uintptr_t)action->type,
2526 					  NULL) <= 0)
2527 				name = "[UNKNOWN]";
2528 			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
2529 				printf(" %s", name);
2530 			++action;
2531 		}
2532 		printf("\n");
2533 	}
2534 }
2535 
2536 /** Restrict ingress traffic to the defined flow rules. */
2537 int
2538 port_flow_isolate(portid_t port_id, int set)
2539 {
2540 	struct rte_flow_error error;
2541 
2542 	/* Poisoning to make sure PMDs update it in case of error. */
2543 	memset(&error, 0x66, sizeof(error));
2544 	if (rte_flow_isolate(port_id, set, &error))
2545 		return port_flow_complain(&error);
2546 	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
2547 	       port_id,
2548 	       set ? "now restricted" : "not restricted anymore");
2549 	return 0;
2550 }
2551 
2552 /*
2553  * RX/TX ring descriptors display functions.
2554  */
2555 int
2556 rx_queue_id_is_invalid(queueid_t rxq_id)
2557 {
2558 	if (rxq_id < nb_rxq)
2559 		return 0;
2560 	fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n",
2561 		rxq_id, nb_rxq);
2562 	return 1;
2563 }
2564 
2565 int
2566 tx_queue_id_is_invalid(queueid_t txq_id)
2567 {
2568 	if (txq_id < nb_txq)
2569 		return 0;
2570 	fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n",
2571 		txq_id, nb_txq);
2572 	return 1;
2573 }
2574 
2575 static int
2576 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
2577 {
2578 	struct rte_port *port = &ports[port_id];
2579 	struct rte_eth_rxq_info rx_qinfo;
2580 	int ret;
2581 
2582 	ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
2583 	if (ret == 0) {
2584 		*ring_size = rx_qinfo.nb_desc;
2585 		return ret;
2586 	}
2587 
2588 	if (ret != -ENOTSUP)
2589 		return ret;
2590 	/*
2591 	 * If the rte_eth_rx_queue_info_get is not support for this PMD,
2592 	 * ring_size stored in testpmd will be used for validity verification.
2593 	 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc
2594 	 * being 0, it will use a default value provided by PMDs to setup this
2595 	 * rxq. If the default value is 0, it will use the
2596 	 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq.
2597 	 */
2598 	if (port->nb_rx_desc[rxq_id])
2599 		*ring_size = port->nb_rx_desc[rxq_id];
2600 	else if (port->dev_info.default_rxportconf.ring_size)
2601 		*ring_size = port->dev_info.default_rxportconf.ring_size;
2602 	else
2603 		*ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2604 	return 0;
2605 }
2606 
2607 static int
2608 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
2609 {
2610 	struct rte_port *port = &ports[port_id];
2611 	struct rte_eth_txq_info tx_qinfo;
2612 	int ret;
2613 
2614 	ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
2615 	if (ret == 0) {
2616 		*ring_size = tx_qinfo.nb_desc;
2617 		return ret;
2618 	}
2619 
2620 	if (ret != -ENOTSUP)
2621 		return ret;
2622 	/*
2623 	 * If the rte_eth_tx_queue_info_get is not support for this PMD,
2624 	 * ring_size stored in testpmd will be used for validity verification.
2625 	 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc
2626 	 * being 0, it will use a default value provided by PMDs to setup this
2627 	 * txq. If the default value is 0, it will use the
2628 	 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq.
2629 	 */
2630 	if (port->nb_tx_desc[txq_id])
2631 		*ring_size = port->nb_tx_desc[txq_id];
2632 	else if (port->dev_info.default_txportconf.ring_size)
2633 		*ring_size = port->dev_info.default_txportconf.ring_size;
2634 	else
2635 		*ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2636 	return 0;
2637 }
2638 
2639 static int
2640 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
2641 {
2642 	uint16_t ring_size;
2643 	int ret;
2644 
2645 	ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
2646 	if (ret)
2647 		return 1;
2648 
2649 	if (rxdesc_id < ring_size)
2650 		return 0;
2651 
2652 	fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n",
2653 		rxdesc_id, ring_size);
2654 	return 1;
2655 }
2656 
2657 static int
2658 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
2659 {
2660 	uint16_t ring_size;
2661 	int ret;
2662 
2663 	ret = get_tx_ring_size(port_id, txq_id, &ring_size);
2664 	if (ret)
2665 		return 1;
2666 
2667 	if (txdesc_id < ring_size)
2668 		return 0;
2669 
2670 	fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n",
2671 		txdesc_id, ring_size);
2672 	return 1;
2673 }
2674 
2675 static const struct rte_memzone *
2676 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
2677 {
2678 	char mz_name[RTE_MEMZONE_NAMESIZE];
2679 	const struct rte_memzone *mz;
2680 
2681 	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
2682 			port_id, q_id, ring_name);
2683 	mz = rte_memzone_lookup(mz_name);
2684 	if (mz == NULL)
2685 		fprintf(stderr,
2686 			"%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n",
2687 			ring_name, port_id, q_id, mz_name);
2688 	return mz;
2689 }
2690 
2691 union igb_ring_dword {
2692 	uint64_t dword;
2693 	struct {
2694 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
2695 		uint32_t lo;
2696 		uint32_t hi;
2697 #else
2698 		uint32_t hi;
2699 		uint32_t lo;
2700 #endif
2701 	} words;
2702 };
2703 
2704 struct igb_ring_desc_32_bytes {
2705 	union igb_ring_dword lo_dword;
2706 	union igb_ring_dword hi_dword;
2707 	union igb_ring_dword resv1;
2708 	union igb_ring_dword resv2;
2709 };
2710 
2711 struct igb_ring_desc_16_bytes {
2712 	union igb_ring_dword lo_dword;
2713 	union igb_ring_dword hi_dword;
2714 };
2715 
2716 static void
2717 ring_rxd_display_dword(union igb_ring_dword dword)
2718 {
2719 	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
2720 					(unsigned)dword.words.hi);
2721 }
2722 
2723 static void
2724 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
2725 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2726 			   portid_t port_id,
2727 #else
2728 			   __rte_unused portid_t port_id,
2729 #endif
2730 			   uint16_t desc_id)
2731 {
2732 	struct igb_ring_desc_16_bytes *ring =
2733 		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
2734 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
2735 	int ret;
2736 	struct rte_eth_dev_info dev_info;
2737 
2738 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
2739 	if (ret != 0)
2740 		return;
2741 
2742 	if (strstr(dev_info.driver_name, "i40e") != NULL) {
2743 		/* 32 bytes RX descriptor, i40e only */
2744 		struct igb_ring_desc_32_bytes *ring =
2745 			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
2746 		ring[desc_id].lo_dword.dword =
2747 			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2748 		ring_rxd_display_dword(ring[desc_id].lo_dword);
2749 		ring[desc_id].hi_dword.dword =
2750 			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2751 		ring_rxd_display_dword(ring[desc_id].hi_dword);
2752 		ring[desc_id].resv1.dword =
2753 			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
2754 		ring_rxd_display_dword(ring[desc_id].resv1);
2755 		ring[desc_id].resv2.dword =
2756 			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
2757 		ring_rxd_display_dword(ring[desc_id].resv2);
2758 
2759 		return;
2760 	}
2761 #endif
2762 	/* 16 bytes RX descriptor */
2763 	ring[desc_id].lo_dword.dword =
2764 		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2765 	ring_rxd_display_dword(ring[desc_id].lo_dword);
2766 	ring[desc_id].hi_dword.dword =
2767 		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2768 	ring_rxd_display_dword(ring[desc_id].hi_dword);
2769 }
2770 
2771 static void
2772 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
2773 {
2774 	struct igb_ring_desc_16_bytes *ring;
2775 	struct igb_ring_desc_16_bytes txd;
2776 
2777 	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
2778 	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
2779 	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
2780 	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
2781 			(unsigned)txd.lo_dword.words.lo,
2782 			(unsigned)txd.lo_dword.words.hi,
2783 			(unsigned)txd.hi_dword.words.lo,
2784 			(unsigned)txd.hi_dword.words.hi);
2785 }
2786 
2787 void
2788 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
2789 {
2790 	const struct rte_memzone *rx_mz;
2791 
2792 	if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
2793 		return;
2794 	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
2795 	if (rx_mz == NULL)
2796 		return;
2797 	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
2798 }
2799 
2800 void
2801 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
2802 {
2803 	const struct rte_memzone *tx_mz;
2804 
2805 	if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
2806 		return;
2807 	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
2808 	if (tx_mz == NULL)
2809 		return;
2810 	ring_tx_descriptor_display(tx_mz, txd_id);
2811 }
2812 
2813 void
2814 fwd_lcores_config_display(void)
2815 {
2816 	lcoreid_t lc_id;
2817 
2818 	printf("List of forwarding lcores:");
2819 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
2820 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
2821 	printf("\n");
2822 }
2823 void
2824 rxtx_config_display(void)
2825 {
2826 	portid_t pid;
2827 	queueid_t qid;
2828 
2829 	printf("  %s packet forwarding%s packets/burst=%d\n",
2830 	       cur_fwd_eng->fwd_mode_name,
2831 	       retry_enabled == 0 ? "" : " with retry",
2832 	       nb_pkt_per_burst);
2833 
2834 	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
2835 		printf("  packet len=%u - nb packet segments=%d\n",
2836 				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
2837 
2838 	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
2839 	       nb_fwd_lcores, nb_fwd_ports);
2840 
2841 	RTE_ETH_FOREACH_DEV(pid) {
2842 		struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
2843 		struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
2844 		uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
2845 		uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
2846 		struct rte_eth_rxq_info rx_qinfo;
2847 		struct rte_eth_txq_info tx_qinfo;
2848 		uint16_t rx_free_thresh_tmp;
2849 		uint16_t tx_free_thresh_tmp;
2850 		uint16_t tx_rs_thresh_tmp;
2851 		uint16_t nb_rx_desc_tmp;
2852 		uint16_t nb_tx_desc_tmp;
2853 		uint64_t offloads_tmp;
2854 		uint8_t pthresh_tmp;
2855 		uint8_t hthresh_tmp;
2856 		uint8_t wthresh_tmp;
2857 		int32_t rc;
2858 
2859 		/* per port config */
2860 		printf("  port %d: RX queue number: %d Tx queue number: %d\n",
2861 				(unsigned int)pid, nb_rxq, nb_txq);
2862 
2863 		printf("    Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
2864 				ports[pid].dev_conf.rxmode.offloads,
2865 				ports[pid].dev_conf.txmode.offloads);
2866 
2867 		/* per rx queue config only for first queue to be less verbose */
2868 		for (qid = 0; qid < 1; qid++) {
2869 			rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
2870 			if (rc) {
2871 				nb_rx_desc_tmp = nb_rx_desc[qid];
2872 				rx_free_thresh_tmp =
2873 					rx_conf[qid].rx_free_thresh;
2874 				pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
2875 				hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
2876 				wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
2877 				offloads_tmp = rx_conf[qid].offloads;
2878 			} else {
2879 				nb_rx_desc_tmp = rx_qinfo.nb_desc;
2880 				rx_free_thresh_tmp =
2881 						rx_qinfo.conf.rx_free_thresh;
2882 				pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
2883 				hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
2884 				wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
2885 				offloads_tmp = rx_qinfo.conf.offloads;
2886 			}
2887 
2888 			printf("    RX queue: %d\n", qid);
2889 			printf("      RX desc=%d - RX free threshold=%d\n",
2890 				nb_rx_desc_tmp, rx_free_thresh_tmp);
2891 			printf("      RX threshold registers: pthresh=%d hthresh=%d "
2892 				" wthresh=%d\n",
2893 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
2894 			printf("      RX Offloads=0x%"PRIx64, offloads_tmp);
2895 			if (rx_conf->share_group > 0)
2896 				printf(" share_group=%u share_qid=%u",
2897 				       rx_conf->share_group,
2898 				       rx_conf->share_qid);
2899 			printf("\n");
2900 		}
2901 
2902 		/* per tx queue config only for first queue to be less verbose */
2903 		for (qid = 0; qid < 1; qid++) {
2904 			rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
2905 			if (rc) {
2906 				nb_tx_desc_tmp = nb_tx_desc[qid];
2907 				tx_free_thresh_tmp =
2908 					tx_conf[qid].tx_free_thresh;
2909 				pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
2910 				hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
2911 				wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
2912 				offloads_tmp = tx_conf[qid].offloads;
2913 				tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
2914 			} else {
2915 				nb_tx_desc_tmp = tx_qinfo.nb_desc;
2916 				tx_free_thresh_tmp =
2917 						tx_qinfo.conf.tx_free_thresh;
2918 				pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
2919 				hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
2920 				wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
2921 				offloads_tmp = tx_qinfo.conf.offloads;
2922 				tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
2923 			}
2924 
2925 			printf("    TX queue: %d\n", qid);
2926 			printf("      TX desc=%d - TX free threshold=%d\n",
2927 				nb_tx_desc_tmp, tx_free_thresh_tmp);
2928 			printf("      TX threshold registers: pthresh=%d hthresh=%d "
2929 				" wthresh=%d\n",
2930 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
2931 			printf("      TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
2932 				offloads_tmp, tx_rs_thresh_tmp);
2933 		}
2934 	}
2935 }
2936 
2937 void
2938 port_rss_reta_info(portid_t port_id,
2939 		   struct rte_eth_rss_reta_entry64 *reta_conf,
2940 		   uint16_t nb_entries)
2941 {
2942 	uint16_t i, idx, shift;
2943 	int ret;
2944 
2945 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2946 		return;
2947 
2948 	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
2949 	if (ret != 0) {
2950 		fprintf(stderr,
2951 			"Failed to get RSS RETA info, return code = %d\n",
2952 			ret);
2953 		return;
2954 	}
2955 
2956 	for (i = 0; i < nb_entries; i++) {
2957 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
2958 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
2959 		if (!(reta_conf[idx].mask & (1ULL << shift)))
2960 			continue;
2961 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
2962 					i, reta_conf[idx].reta[shift]);
2963 	}
2964 }
2965 
2966 /*
2967  * Displays the RSS hash functions of a port, and, optionally, the RSS hash
2968  * key of the port.
2969  */
2970 void
2971 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
2972 {
2973 	struct rte_eth_rss_conf rss_conf = {0};
2974 	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
2975 	uint64_t rss_hf;
2976 	uint8_t i;
2977 	int diag;
2978 	struct rte_eth_dev_info dev_info;
2979 	uint8_t hash_key_size;
2980 	int ret;
2981 
2982 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2983 		return;
2984 
2985 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
2986 	if (ret != 0)
2987 		return;
2988 
2989 	if (dev_info.hash_key_size > 0 &&
2990 			dev_info.hash_key_size <= sizeof(rss_key))
2991 		hash_key_size = dev_info.hash_key_size;
2992 	else {
2993 		fprintf(stderr,
2994 			"dev_info did not provide a valid hash key size\n");
2995 		return;
2996 	}
2997 
2998 	/* Get RSS hash key if asked to display it */
2999 	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
3000 	rss_conf.rss_key_len = hash_key_size;
3001 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
3002 	if (diag != 0) {
3003 		switch (diag) {
3004 		case -ENODEV:
3005 			fprintf(stderr, "port index %d invalid\n", port_id);
3006 			break;
3007 		case -ENOTSUP:
3008 			fprintf(stderr, "operation not supported by device\n");
3009 			break;
3010 		default:
3011 			fprintf(stderr, "operation failed - diag=%d\n", diag);
3012 			break;
3013 		}
3014 		return;
3015 	}
3016 	rss_hf = rss_conf.rss_hf;
3017 	if (rss_hf == 0) {
3018 		printf("RSS disabled\n");
3019 		return;
3020 	}
3021 	printf("RSS functions:\n ");
3022 	for (i = 0; rss_type_table[i].str; i++) {
3023 		if (rss_type_table[i].rss_type == 0)
3024 			continue;
3025 		if ((rss_hf & rss_type_table[i].rss_type) == rss_type_table[i].rss_type)
3026 			printf("%s ", rss_type_table[i].str);
3027 	}
3028 	printf("\n");
3029 	if (!show_rss_key)
3030 		return;
3031 	printf("RSS key:\n");
3032 	for (i = 0; i < hash_key_size; i++)
3033 		printf("%02X", rss_key[i]);
3034 	printf("\n");
3035 }
3036 
3037 void
3038 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
3039 			 uint8_t hash_key_len)
3040 {
3041 	struct rte_eth_rss_conf rss_conf;
3042 	int diag;
3043 	unsigned int i;
3044 
3045 	rss_conf.rss_key = NULL;
3046 	rss_conf.rss_key_len = 0;
3047 	rss_conf.rss_hf = 0;
3048 	for (i = 0; rss_type_table[i].str; i++) {
3049 		if (!strcmp(rss_type_table[i].str, rss_type))
3050 			rss_conf.rss_hf = rss_type_table[i].rss_type;
3051 	}
3052 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
3053 	if (diag == 0) {
3054 		rss_conf.rss_key = hash_key;
3055 		rss_conf.rss_key_len = hash_key_len;
3056 		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
3057 	}
3058 	if (diag == 0)
3059 		return;
3060 
3061 	switch (diag) {
3062 	case -ENODEV:
3063 		fprintf(stderr, "port index %d invalid\n", port_id);
3064 		break;
3065 	case -ENOTSUP:
3066 		fprintf(stderr, "operation not supported by device\n");
3067 		break;
3068 	default:
3069 		fprintf(stderr, "operation failed - diag=%d\n", diag);
3070 		break;
3071 	}
3072 }
3073 
3074 /*
3075  * Check whether a shared rxq scheduled on other lcores.
3076  */
3077 static bool
3078 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
3079 			   portid_t src_port, queueid_t src_rxq,
3080 			   uint32_t share_group, queueid_t share_rxq)
3081 {
3082 	streamid_t sm_id;
3083 	streamid_t nb_fs_per_lcore;
3084 	lcoreid_t  nb_fc;
3085 	lcoreid_t  lc_id;
3086 	struct fwd_stream *fs;
3087 	struct rte_port *port;
3088 	struct rte_eth_dev_info *dev_info;
3089 	struct rte_eth_rxconf *rxq_conf;
3090 
3091 	nb_fc = cur_fwd_config.nb_fwd_lcores;
3092 	/* Check remaining cores. */
3093 	for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
3094 		sm_id = fwd_lcores[lc_id]->stream_idx;
3095 		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
3096 		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
3097 		     sm_id++) {
3098 			fs = fwd_streams[sm_id];
3099 			port = &ports[fs->rx_port];
3100 			dev_info = &port->dev_info;
3101 			rxq_conf = &port->rx_conf[fs->rx_queue];
3102 			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
3103 			    == 0 || rxq_conf->share_group == 0)
3104 				/* Not shared rxq. */
3105 				continue;
3106 			if (domain_id != port->dev_info.switch_info.domain_id)
3107 				continue;
3108 			if (rxq_conf->share_group != share_group)
3109 				continue;
3110 			if (rxq_conf->share_qid != share_rxq)
3111 				continue;
3112 			printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
3113 			       share_group, share_rxq);
3114 			printf("  lcore %hhu Port %hu queue %hu\n",
3115 			       src_lc, src_port, src_rxq);
3116 			printf("  lcore %hhu Port %hu queue %hu\n",
3117 			       lc_id, fs->rx_port, fs->rx_queue);
3118 			printf("Please use --nb-cores=%hu to limit number of forwarding cores\n",
3119 			       nb_rxq);
3120 			return true;
3121 		}
3122 	}
3123 	return false;
3124 }
3125 
3126 /*
3127  * Check shared rxq configuration.
3128  *
3129  * Shared group must not being scheduled on different core.
3130  */
3131 bool
3132 pkt_fwd_shared_rxq_check(void)
3133 {
3134 	streamid_t sm_id;
3135 	streamid_t nb_fs_per_lcore;
3136 	lcoreid_t  nb_fc;
3137 	lcoreid_t  lc_id;
3138 	struct fwd_stream *fs;
3139 	uint16_t domain_id;
3140 	struct rte_port *port;
3141 	struct rte_eth_dev_info *dev_info;
3142 	struct rte_eth_rxconf *rxq_conf;
3143 
3144 	if (rxq_share == 0)
3145 		return true;
3146 	nb_fc = cur_fwd_config.nb_fwd_lcores;
3147 	/*
3148 	 * Check streams on each core, make sure the same switch domain +
3149 	 * group + queue doesn't get scheduled on other cores.
3150 	 */
3151 	for (lc_id = 0; lc_id < nb_fc; lc_id++) {
3152 		sm_id = fwd_lcores[lc_id]->stream_idx;
3153 		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
3154 		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
3155 		     sm_id++) {
3156 			fs = fwd_streams[sm_id];
3157 			/* Update lcore info stream being scheduled. */
3158 			fs->lcore = fwd_lcores[lc_id];
3159 			port = &ports[fs->rx_port];
3160 			dev_info = &port->dev_info;
3161 			rxq_conf = &port->rx_conf[fs->rx_queue];
3162 			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
3163 			    == 0 || rxq_conf->share_group == 0)
3164 				/* Not shared rxq. */
3165 				continue;
3166 			/* Check shared rxq not scheduled on remaining cores. */
3167 			domain_id = port->dev_info.switch_info.domain_id;
3168 			if (fwd_stream_on_other_lcores(domain_id, lc_id,
3169 						       fs->rx_port,
3170 						       fs->rx_queue,
3171 						       rxq_conf->share_group,
3172 						       rxq_conf->share_qid))
3173 				return false;
3174 		}
3175 	}
3176 	return true;
3177 }
3178 
3179 /*
3180  * Setup forwarding configuration for each logical core.
3181  */
3182 static void
3183 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
3184 {
3185 	streamid_t nb_fs_per_lcore;
3186 	streamid_t nb_fs;
3187 	streamid_t sm_id;
3188 	lcoreid_t  nb_extra;
3189 	lcoreid_t  nb_fc;
3190 	lcoreid_t  nb_lc;
3191 	lcoreid_t  lc_id;
3192 
3193 	nb_fs = cfg->nb_fwd_streams;
3194 	nb_fc = cfg->nb_fwd_lcores;
3195 	if (nb_fs <= nb_fc) {
3196 		nb_fs_per_lcore = 1;
3197 		nb_extra = 0;
3198 	} else {
3199 		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
3200 		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
3201 	}
3202 
3203 	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
3204 	sm_id = 0;
3205 	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
3206 		fwd_lcores[lc_id]->stream_idx = sm_id;
3207 		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
3208 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
3209 	}
3210 
3211 	/*
3212 	 * Assign extra remaining streams, if any.
3213 	 */
3214 	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
3215 	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
3216 		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
3217 		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
3218 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
3219 	}
3220 }
3221 
3222 static portid_t
3223 fwd_topology_tx_port_get(portid_t rxp)
3224 {
3225 	static int warning_once = 1;
3226 
3227 	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
3228 
3229 	switch (port_topology) {
3230 	default:
3231 	case PORT_TOPOLOGY_PAIRED:
3232 		if ((rxp & 0x1) == 0) {
3233 			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
3234 				return rxp + 1;
3235 			if (warning_once) {
3236 				fprintf(stderr,
3237 					"\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n");
3238 				warning_once = 0;
3239 			}
3240 			return rxp;
3241 		}
3242 		return rxp - 1;
3243 	case PORT_TOPOLOGY_CHAINED:
3244 		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
3245 	case PORT_TOPOLOGY_LOOP:
3246 		return rxp;
3247 	}
3248 }
3249 
3250 static void
3251 simple_fwd_config_setup(void)
3252 {
3253 	portid_t i;
3254 
3255 	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
3256 	cur_fwd_config.nb_fwd_streams =
3257 		(streamid_t) cur_fwd_config.nb_fwd_ports;
3258 
3259 	/* reinitialize forwarding streams */
3260 	init_fwd_streams();
3261 
3262 	/*
3263 	 * In the simple forwarding test, the number of forwarding cores
3264 	 * must be lower or equal to the number of forwarding ports.
3265 	 */
3266 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3267 	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
3268 		cur_fwd_config.nb_fwd_lcores =
3269 			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
3270 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
3271 
3272 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
3273 		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
3274 		fwd_streams[i]->rx_queue  = 0;
3275 		fwd_streams[i]->tx_port   =
3276 				fwd_ports_ids[fwd_topology_tx_port_get(i)];
3277 		fwd_streams[i]->tx_queue  = 0;
3278 		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
3279 		fwd_streams[i]->retry_enabled = retry_enabled;
3280 	}
3281 }
3282 
3283 /**
3284  * For the RSS forwarding test all streams distributed over lcores. Each stream
3285  * being composed of a RX queue to poll on a RX port for input messages,
3286  * associated with a TX queue of a TX port where to send forwarded packets.
3287  */
3288 static void
3289 rss_fwd_config_setup(void)
3290 {
3291 	portid_t   rxp;
3292 	portid_t   txp;
3293 	queueid_t  rxq;
3294 	queueid_t  nb_q;
3295 	streamid_t  sm_id;
3296 	int start;
3297 	int end;
3298 
3299 	nb_q = nb_rxq;
3300 	if (nb_q > nb_txq)
3301 		nb_q = nb_txq;
3302 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3303 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3304 	cur_fwd_config.nb_fwd_streams =
3305 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
3306 
3307 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
3308 		cur_fwd_config.nb_fwd_lcores =
3309 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
3310 
3311 	/* reinitialize forwarding streams */
3312 	init_fwd_streams();
3313 
3314 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
3315 
3316 	if (proc_id > 0 && nb_q % num_procs != 0)
3317 		printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n");
3318 
3319 	/**
3320 	 * In multi-process, All queues are allocated to different
3321 	 * processes based on num_procs and proc_id. For example:
3322 	 * if supports 4 queues(nb_q), 2 processes(num_procs),
3323 	 * the 0~1 queue for primary process.
3324 	 * the 2~3 queue for secondary process.
3325 	 */
3326 	start = proc_id * nb_q / num_procs;
3327 	end = start + nb_q / num_procs;
3328 	rxp = 0;
3329 	rxq = start;
3330 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
3331 		struct fwd_stream *fs;
3332 
3333 		fs = fwd_streams[sm_id];
3334 		txp = fwd_topology_tx_port_get(rxp);
3335 		fs->rx_port = fwd_ports_ids[rxp];
3336 		fs->rx_queue = rxq;
3337 		fs->tx_port = fwd_ports_ids[txp];
3338 		fs->tx_queue = rxq;
3339 		fs->peer_addr = fs->tx_port;
3340 		fs->retry_enabled = retry_enabled;
3341 		rxp++;
3342 		if (rxp < nb_fwd_ports)
3343 			continue;
3344 		rxp = 0;
3345 		rxq++;
3346 		if (rxq >= end)
3347 			rxq = start;
3348 	}
3349 }
3350 
3351 static uint16_t
3352 get_fwd_port_total_tc_num(void)
3353 {
3354 	struct rte_eth_dcb_info dcb_info;
3355 	uint16_t total_tc_num = 0;
3356 	unsigned int i;
3357 
3358 	for (i = 0; i < nb_fwd_ports; i++) {
3359 		(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info);
3360 		total_tc_num += dcb_info.nb_tcs;
3361 	}
3362 
3363 	return total_tc_num;
3364 }
3365 
3366 /**
3367  * For the DCB forwarding test, each core is assigned on each traffic class.
3368  *
3369  * Each core is assigned a multi-stream, each stream being composed of
3370  * a RX queue to poll on a RX port for input messages, associated with
3371  * a TX queue of a TX port where to send forwarded packets. All RX and
3372  * TX queues are mapping to the same traffic class.
3373  * If VMDQ and DCB co-exist, each traffic class on different POOLs share
3374  * the same core
3375  */
3376 static void
3377 dcb_fwd_config_setup(void)
3378 {
3379 	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
3380 	portid_t txp, rxp = 0;
3381 	queueid_t txq, rxq = 0;
3382 	lcoreid_t  lc_id;
3383 	uint16_t nb_rx_queue, nb_tx_queue;
3384 	uint16_t i, j, k, sm_id = 0;
3385 	uint16_t total_tc_num;
3386 	struct rte_port *port;
3387 	uint8_t tc = 0;
3388 	portid_t pid;
3389 	int ret;
3390 
3391 	/*
3392 	 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED
3393 	 * or RTE_PORT_STOPPED.
3394 	 *
3395 	 * Re-configure ports to get updated mapping between tc and queue in
3396 	 * case the queue number of the port is changed. Skip for started ports
3397 	 * since modifying queue number and calling dev_configure need to stop
3398 	 * ports first.
3399 	 */
3400 	for (pid = 0; pid < nb_fwd_ports; pid++) {
3401 		if (port_is_started(pid) == 1)
3402 			continue;
3403 
3404 		port = &ports[pid];
3405 		ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq,
3406 					    &port->dev_conf);
3407 		if (ret < 0) {
3408 			fprintf(stderr,
3409 				"Failed to re-configure port %d, ret = %d.\n",
3410 				pid, ret);
3411 			return;
3412 		}
3413 	}
3414 
3415 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3416 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3417 	cur_fwd_config.nb_fwd_streams =
3418 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
3419 	total_tc_num = get_fwd_port_total_tc_num();
3420 	if (cur_fwd_config.nb_fwd_lcores > total_tc_num)
3421 		cur_fwd_config.nb_fwd_lcores = total_tc_num;
3422 
3423 	/* reinitialize forwarding streams */
3424 	init_fwd_streams();
3425 	sm_id = 0;
3426 	txp = 1;
3427 	/* get the dcb info on the first RX and TX ports */
3428 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
3429 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
3430 
3431 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
3432 		fwd_lcores[lc_id]->stream_nb = 0;
3433 		fwd_lcores[lc_id]->stream_idx = sm_id;
3434 		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
3435 			/* if the nb_queue is zero, means this tc is
3436 			 * not enabled on the POOL
3437 			 */
3438 			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
3439 				break;
3440 			k = fwd_lcores[lc_id]->stream_nb +
3441 				fwd_lcores[lc_id]->stream_idx;
3442 			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
3443 			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
3444 			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
3445 			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
3446 			for (j = 0; j < nb_rx_queue; j++) {
3447 				struct fwd_stream *fs;
3448 
3449 				fs = fwd_streams[k + j];
3450 				fs->rx_port = fwd_ports_ids[rxp];
3451 				fs->rx_queue = rxq + j;
3452 				fs->tx_port = fwd_ports_ids[txp];
3453 				fs->tx_queue = txq + j % nb_tx_queue;
3454 				fs->peer_addr = fs->tx_port;
3455 				fs->retry_enabled = retry_enabled;
3456 			}
3457 			fwd_lcores[lc_id]->stream_nb +=
3458 				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
3459 		}
3460 		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
3461 
3462 		tc++;
3463 		if (tc < rxp_dcb_info.nb_tcs)
3464 			continue;
3465 		/* Restart from TC 0 on next RX port */
3466 		tc = 0;
3467 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
3468 			rxp = (portid_t)
3469 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
3470 		else
3471 			rxp++;
3472 		if (rxp >= nb_fwd_ports)
3473 			return;
3474 		/* get the dcb information on next RX and TX ports */
3475 		if ((rxp & 0x1) == 0)
3476 			txp = (portid_t) (rxp + 1);
3477 		else
3478 			txp = (portid_t) (rxp - 1);
3479 		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
3480 		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
3481 	}
3482 }
3483 
3484 static void
3485 icmp_echo_config_setup(void)
3486 {
3487 	portid_t  rxp;
3488 	queueid_t rxq;
3489 	lcoreid_t lc_id;
3490 	uint16_t  sm_id;
3491 
3492 	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
3493 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
3494 			(nb_txq * nb_fwd_ports);
3495 	else
3496 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3497 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
3498 	cur_fwd_config.nb_fwd_streams =
3499 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
3500 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
3501 		cur_fwd_config.nb_fwd_lcores =
3502 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
3503 	if (verbose_level > 0) {
3504 		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
3505 		       __FUNCTION__,
3506 		       cur_fwd_config.nb_fwd_lcores,
3507 		       cur_fwd_config.nb_fwd_ports,
3508 		       cur_fwd_config.nb_fwd_streams);
3509 	}
3510 
3511 	/* reinitialize forwarding streams */
3512 	init_fwd_streams();
3513 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
3514 	rxp = 0; rxq = 0;
3515 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
3516 		if (verbose_level > 0)
3517 			printf("  core=%d: \n", lc_id);
3518 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3519 			struct fwd_stream *fs;
3520 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3521 			fs->rx_port = fwd_ports_ids[rxp];
3522 			fs->rx_queue = rxq;
3523 			fs->tx_port = fs->rx_port;
3524 			fs->tx_queue = rxq;
3525 			fs->peer_addr = fs->tx_port;
3526 			fs->retry_enabled = retry_enabled;
3527 			if (verbose_level > 0)
3528 				printf("  stream=%d port=%d rxq=%d txq=%d\n",
3529 				       sm_id, fs->rx_port, fs->rx_queue,
3530 				       fs->tx_queue);
3531 			rxq = (queueid_t) (rxq + 1);
3532 			if (rxq == nb_rxq) {
3533 				rxq = 0;
3534 				rxp = (portid_t) (rxp + 1);
3535 			}
3536 		}
3537 	}
3538 }
3539 
3540 void
3541 fwd_config_setup(void)
3542 {
3543 	struct rte_port *port;
3544 	portid_t pt_id;
3545 	unsigned int i;
3546 
3547 	cur_fwd_config.fwd_eng = cur_fwd_eng;
3548 	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
3549 		icmp_echo_config_setup();
3550 		return;
3551 	}
3552 
3553 	if ((nb_rxq > 1) && (nb_txq > 1)){
3554 		if (dcb_config) {
3555 			for (i = 0; i < nb_fwd_ports; i++) {
3556 				pt_id = fwd_ports_ids[i];
3557 				port = &ports[pt_id];
3558 				if (!port->dcb_flag) {
3559 					fprintf(stderr,
3560 						"In DCB mode, all forwarding ports must be configured in this mode.\n");
3561 					return;
3562 				}
3563 			}
3564 			if (nb_fwd_lcores == 1) {
3565 				fprintf(stderr,
3566 					"In DCB mode,the nb forwarding cores should be larger than 1.\n");
3567 				return;
3568 			}
3569 
3570 			dcb_fwd_config_setup();
3571 		} else
3572 			rss_fwd_config_setup();
3573 	}
3574 	else
3575 		simple_fwd_config_setup();
3576 }
3577 
3578 static const char *
3579 mp_alloc_to_str(uint8_t mode)
3580 {
3581 	switch (mode) {
3582 	case MP_ALLOC_NATIVE:
3583 		return "native";
3584 	case MP_ALLOC_ANON:
3585 		return "anon";
3586 	case MP_ALLOC_XMEM:
3587 		return "xmem";
3588 	case MP_ALLOC_XMEM_HUGE:
3589 		return "xmemhuge";
3590 	case MP_ALLOC_XBUF:
3591 		return "xbuf";
3592 	default:
3593 		return "invalid";
3594 	}
3595 }
3596 
3597 void
3598 pkt_fwd_config_display(struct fwd_config *cfg)
3599 {
3600 	struct fwd_stream *fs;
3601 	lcoreid_t  lc_id;
3602 	streamid_t sm_id;
3603 
3604 	printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
3605 		"NUMA support %s, MP allocation mode: %s\n",
3606 		cfg->fwd_eng->fwd_mode_name,
3607 		retry_enabled == 0 ? "" : " with retry",
3608 		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
3609 		numa_support == 1 ? "enabled" : "disabled",
3610 		mp_alloc_to_str(mp_alloc_type));
3611 
3612 	if (retry_enabled)
3613 		printf("TX retry num: %u, delay between TX retries: %uus\n",
3614 			burst_tx_retry_num, burst_tx_delay_time);
3615 	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
3616 		printf("Logical Core %u (socket %u) forwards packets on "
3617 		       "%d streams:",
3618 		       fwd_lcores_cpuids[lc_id],
3619 		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
3620 		       fwd_lcores[lc_id]->stream_nb);
3621 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
3622 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
3623 			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
3624 			       "P=%d/Q=%d (socket %u) ",
3625 			       fs->rx_port, fs->rx_queue,
3626 			       ports[fs->rx_port].socket_id,
3627 			       fs->tx_port, fs->tx_queue,
3628 			       ports[fs->tx_port].socket_id);
3629 			print_ethaddr("peer=",
3630 				      &peer_eth_addrs[fs->peer_addr]);
3631 		}
3632 		printf("\n");
3633 	}
3634 	printf("\n");
3635 }
3636 
3637 void
3638 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
3639 {
3640 	struct rte_ether_addr new_peer_addr;
3641 	if (!rte_eth_dev_is_valid_port(port_id)) {
3642 		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
3643 		return;
3644 	}
3645 	if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
3646 		fprintf(stderr, "Error: Invalid ethernet address: %s\n",
3647 			peer_addr);
3648 		return;
3649 	}
3650 	peer_eth_addrs[port_id] = new_peer_addr;
3651 }
3652 
3653 int
3654 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
3655 {
3656 	unsigned int i;
3657 	unsigned int lcore_cpuid;
3658 	int record_now;
3659 
3660 	record_now = 0;
3661  again:
3662 	for (i = 0; i < nb_lc; i++) {
3663 		lcore_cpuid = lcorelist[i];
3664 		if (! rte_lcore_is_enabled(lcore_cpuid)) {
3665 			fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid);
3666 			return -1;
3667 		}
3668 		if (lcore_cpuid == rte_get_main_lcore()) {
3669 			fprintf(stderr,
3670 				"lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n",
3671 				lcore_cpuid);
3672 			return -1;
3673 		}
3674 		if (record_now)
3675 			fwd_lcores_cpuids[i] = lcore_cpuid;
3676 	}
3677 	if (record_now == 0) {
3678 		record_now = 1;
3679 		goto again;
3680 	}
3681 	nb_cfg_lcores = (lcoreid_t) nb_lc;
3682 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
3683 		printf("previous number of forwarding cores %u - changed to "
3684 		       "number of configured cores %u\n",
3685 		       (unsigned int) nb_fwd_lcores, nb_lc);
3686 		nb_fwd_lcores = (lcoreid_t) nb_lc;
3687 	}
3688 
3689 	return 0;
3690 }
3691 
3692 int
3693 set_fwd_lcores_mask(uint64_t lcoremask)
3694 {
3695 	unsigned int lcorelist[64];
3696 	unsigned int nb_lc;
3697 	unsigned int i;
3698 
3699 	if (lcoremask == 0) {
3700 		fprintf(stderr, "Invalid NULL mask of cores\n");
3701 		return -1;
3702 	}
3703 	nb_lc = 0;
3704 	for (i = 0; i < 64; i++) {
3705 		if (! ((uint64_t)(1ULL << i) & lcoremask))
3706 			continue;
3707 		lcorelist[nb_lc++] = i;
3708 	}
3709 	return set_fwd_lcores_list(lcorelist, nb_lc);
3710 }
3711 
3712 void
3713 set_fwd_lcores_number(uint16_t nb_lc)
3714 {
3715 	if (test_done == 0) {
3716 		fprintf(stderr, "Please stop forwarding first\n");
3717 		return;
3718 	}
3719 	if (nb_lc > nb_cfg_lcores) {
3720 		fprintf(stderr,
3721 			"nb fwd cores %u > %u (max. number of configured lcores) - ignored\n",
3722 			(unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
3723 		return;
3724 	}
3725 	nb_fwd_lcores = (lcoreid_t) nb_lc;
3726 	printf("Number of forwarding cores set to %u\n",
3727 	       (unsigned int) nb_fwd_lcores);
3728 }
3729 
3730 void
3731 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
3732 {
3733 	unsigned int i;
3734 	portid_t port_id;
3735 	int record_now;
3736 
3737 	record_now = 0;
3738  again:
3739 	for (i = 0; i < nb_pt; i++) {
3740 		port_id = (portid_t) portlist[i];
3741 		if (port_id_is_invalid(port_id, ENABLED_WARN))
3742 			return;
3743 		if (record_now)
3744 			fwd_ports_ids[i] = port_id;
3745 	}
3746 	if (record_now == 0) {
3747 		record_now = 1;
3748 		goto again;
3749 	}
3750 	nb_cfg_ports = (portid_t) nb_pt;
3751 	if (nb_fwd_ports != (portid_t) nb_pt) {
3752 		printf("previous number of forwarding ports %u - changed to "
3753 		       "number of configured ports %u\n",
3754 		       (unsigned int) nb_fwd_ports, nb_pt);
3755 		nb_fwd_ports = (portid_t) nb_pt;
3756 	}
3757 }
3758 
3759 /**
3760  * Parse the user input and obtain the list of forwarding ports
3761  *
3762  * @param[in] list
3763  *   String containing the user input. User can specify
3764  *   in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
3765  *   For example, if the user wants to use all the available
3766  *   4 ports in his system, then the input can be 0-3 or 0,1,2,3.
3767  *   If the user wants to use only the ports 1,2 then the input
3768  *   is 1,2.
3769  *   valid characters are '-' and ','
3770  * @param[out] values
3771  *   This array will be filled with a list of port IDs
3772  *   based on the user input
3773  *   Note that duplicate entries are discarded and only the first
3774  *   count entries in this array are port IDs and all the rest
3775  *   will contain default values
3776  * @param[in] maxsize
3777  *   This parameter denotes 2 things
3778  *   1) Number of elements in the values array
3779  *   2) Maximum value of each element in the values array
3780  * @return
3781  *   On success, returns total count of parsed port IDs
3782  *   On failure, returns 0
3783  */
3784 static unsigned int
3785 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
3786 {
3787 	unsigned int count = 0;
3788 	char *end = NULL;
3789 	int min, max;
3790 	int value, i;
3791 	unsigned int marked[maxsize];
3792 
3793 	if (list == NULL || values == NULL)
3794 		return 0;
3795 
3796 	for (i = 0; i < (int)maxsize; i++)
3797 		marked[i] = 0;
3798 
3799 	min = INT_MAX;
3800 
3801 	do {
3802 		/*Remove the blank spaces if any*/
3803 		while (isblank(*list))
3804 			list++;
3805 		if (*list == '\0')
3806 			break;
3807 		errno = 0;
3808 		value = strtol(list, &end, 10);
3809 		if (errno || end == NULL)
3810 			return 0;
3811 		if (value < 0 || value >= (int)maxsize)
3812 			return 0;
3813 		while (isblank(*end))
3814 			end++;
3815 		if (*end == '-' && min == INT_MAX) {
3816 			min = value;
3817 		} else if ((*end == ',') || (*end == '\0')) {
3818 			max = value;
3819 			if (min == INT_MAX)
3820 				min = value;
3821 			for (i = min; i <= max; i++) {
3822 				if (count < maxsize) {
3823 					if (marked[i])
3824 						continue;
3825 					values[count] = i;
3826 					marked[i] = 1;
3827 					count++;
3828 				}
3829 			}
3830 			min = INT_MAX;
3831 		} else
3832 			return 0;
3833 		list = end + 1;
3834 	} while (*end != '\0');
3835 
3836 	return count;
3837 }
3838 
3839 void
3840 parse_fwd_portlist(const char *portlist)
3841 {
3842 	unsigned int portcount;
3843 	unsigned int portindex[RTE_MAX_ETHPORTS];
3844 	unsigned int i, valid_port_count = 0;
3845 
3846 	portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
3847 	if (!portcount)
3848 		rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
3849 
3850 	/*
3851 	 * Here we verify the validity of the ports
3852 	 * and thereby calculate the total number of
3853 	 * valid ports
3854 	 */
3855 	for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
3856 		if (rte_eth_dev_is_valid_port(portindex[i])) {
3857 			portindex[valid_port_count] = portindex[i];
3858 			valid_port_count++;
3859 		}
3860 	}
3861 
3862 	set_fwd_ports_list(portindex, valid_port_count);
3863 }
3864 
3865 void
3866 set_fwd_ports_mask(uint64_t portmask)
3867 {
3868 	unsigned int portlist[64];
3869 	unsigned int nb_pt;
3870 	unsigned int i;
3871 
3872 	if (portmask == 0) {
3873 		fprintf(stderr, "Invalid NULL mask of ports\n");
3874 		return;
3875 	}
3876 	nb_pt = 0;
3877 	RTE_ETH_FOREACH_DEV(i) {
3878 		if (! ((uint64_t)(1ULL << i) & portmask))
3879 			continue;
3880 		portlist[nb_pt++] = i;
3881 	}
3882 	set_fwd_ports_list(portlist, nb_pt);
3883 }
3884 
3885 void
3886 set_fwd_ports_number(uint16_t nb_pt)
3887 {
3888 	if (nb_pt > nb_cfg_ports) {
3889 		fprintf(stderr,
3890 			"nb fwd ports %u > %u (number of configured ports) - ignored\n",
3891 			(unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
3892 		return;
3893 	}
3894 	nb_fwd_ports = (portid_t) nb_pt;
3895 	printf("Number of forwarding ports set to %u\n",
3896 	       (unsigned int) nb_fwd_ports);
3897 }
3898 
3899 int
3900 port_is_forwarding(portid_t port_id)
3901 {
3902 	unsigned int i;
3903 
3904 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3905 		return -1;
3906 
3907 	for (i = 0; i < nb_fwd_ports; i++) {
3908 		if (fwd_ports_ids[i] == port_id)
3909 			return 1;
3910 	}
3911 
3912 	return 0;
3913 }
3914 
3915 void
3916 set_nb_pkt_per_burst(uint16_t nb)
3917 {
3918 	if (nb > MAX_PKT_BURST) {
3919 		fprintf(stderr,
3920 			"nb pkt per burst: %u > %u (maximum packet per burst)  ignored\n",
3921 			(unsigned int) nb, (unsigned int) MAX_PKT_BURST);
3922 		return;
3923 	}
3924 	nb_pkt_per_burst = nb;
3925 	printf("Number of packets per burst set to %u\n",
3926 	       (unsigned int) nb_pkt_per_burst);
3927 }
3928 
3929 static const char *
3930 tx_split_get_name(enum tx_pkt_split split)
3931 {
3932 	uint32_t i;
3933 
3934 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3935 		if (tx_split_name[i].split == split)
3936 			return tx_split_name[i].name;
3937 	}
3938 	return NULL;
3939 }
3940 
3941 void
3942 set_tx_pkt_split(const char *name)
3943 {
3944 	uint32_t i;
3945 
3946 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
3947 		if (strcmp(tx_split_name[i].name, name) == 0) {
3948 			tx_pkt_split = tx_split_name[i].split;
3949 			return;
3950 		}
3951 	}
3952 	fprintf(stderr, "unknown value: \"%s\"\n", name);
3953 }
3954 
3955 int
3956 parse_fec_mode(const char *name, uint32_t *fec_capa)
3957 {
3958 	uint8_t i;
3959 
3960 	for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
3961 		if (strcmp(fec_mode_name[i].name, name) == 0) {
3962 			*fec_capa =
3963 				RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
3964 			return 0;
3965 		}
3966 	}
3967 	return -1;
3968 }
3969 
3970 void
3971 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
3972 {
3973 	unsigned int i, j;
3974 
3975 	printf("FEC capabilities:\n");
3976 
3977 	for (i = 0; i < num; i++) {
3978 		printf("%s : ",
3979 			rte_eth_link_speed_to_str(speed_fec_capa[i].speed));
3980 
3981 		for (j = 0; j < RTE_DIM(fec_mode_name); j++) {
3982 			if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
3983 						speed_fec_capa[i].capa)
3984 				printf("%s ", fec_mode_name[j].name);
3985 		}
3986 		printf("\n");
3987 	}
3988 }
3989 
3990 void
3991 show_rx_pkt_offsets(void)
3992 {
3993 	uint32_t i, n;
3994 
3995 	n = rx_pkt_nb_offs;
3996 	printf("Number of offsets: %u\n", n);
3997 	if (n) {
3998 		printf("Segment offsets: ");
3999 		for (i = 0; i != n - 1; i++)
4000 			printf("%hu,", rx_pkt_seg_offsets[i]);
4001 		printf("%hu\n", rx_pkt_seg_lengths[i]);
4002 	}
4003 }
4004 
4005 void
4006 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
4007 {
4008 	unsigned int i;
4009 
4010 	if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
4011 		printf("nb segments per RX packets=%u >= "
4012 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
4013 		return;
4014 	}
4015 
4016 	/*
4017 	 * No extra check here, the segment length will be checked by PMD
4018 	 * in the extended queue setup.
4019 	 */
4020 	for (i = 0; i < nb_offs; i++) {
4021 		if (seg_offsets[i] >= UINT16_MAX) {
4022 			printf("offset[%u]=%u > UINT16_MAX - give up\n",
4023 			       i, seg_offsets[i]);
4024 			return;
4025 		}
4026 	}
4027 
4028 	for (i = 0; i < nb_offs; i++)
4029 		rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];
4030 
4031 	rx_pkt_nb_offs = (uint8_t) nb_offs;
4032 }
4033 
4034 void
4035 show_rx_pkt_segments(void)
4036 {
4037 	uint32_t i, n;
4038 
4039 	n = rx_pkt_nb_segs;
4040 	printf("Number of segments: %u\n", n);
4041 	if (n) {
4042 		printf("Segment sizes: ");
4043 		for (i = 0; i != n - 1; i++)
4044 			printf("%hu,", rx_pkt_seg_lengths[i]);
4045 		printf("%hu\n", rx_pkt_seg_lengths[i]);
4046 	}
4047 }
4048 
4049 void
4050 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
4051 {
4052 	unsigned int i;
4053 
4054 	if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
4055 		printf("nb segments per RX packets=%u >= "
4056 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
4057 		return;
4058 	}
4059 
4060 	/*
4061 	 * No extra check here, the segment length will be checked by PMD
4062 	 * in the extended queue setup.
4063 	 */
4064 	for (i = 0; i < nb_segs; i++) {
4065 		if (seg_lengths[i] >= UINT16_MAX) {
4066 			printf("length[%u]=%u > UINT16_MAX - give up\n",
4067 			       i, seg_lengths[i]);
4068 			return;
4069 		}
4070 	}
4071 
4072 	for (i = 0; i < nb_segs; i++)
4073 		rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
4074 
4075 	rx_pkt_nb_segs = (uint8_t) nb_segs;
4076 }
4077 
4078 void
4079 show_tx_pkt_segments(void)
4080 {
4081 	uint32_t i, n;
4082 	const char *split;
4083 
4084 	n = tx_pkt_nb_segs;
4085 	split = tx_split_get_name(tx_pkt_split);
4086 
4087 	printf("Number of segments: %u\n", n);
4088 	printf("Segment sizes: ");
4089 	for (i = 0; i != n - 1; i++)
4090 		printf("%hu,", tx_pkt_seg_lengths[i]);
4091 	printf("%hu\n", tx_pkt_seg_lengths[i]);
4092 	printf("Split packet: %s\n", split);
4093 }
4094 
4095 static bool
4096 nb_segs_is_invalid(unsigned int nb_segs)
4097 {
4098 	uint16_t ring_size;
4099 	uint16_t queue_id;
4100 	uint16_t port_id;
4101 	int ret;
4102 
4103 	RTE_ETH_FOREACH_DEV(port_id) {
4104 		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
4105 			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
4106 			if (ret) {
4107 				/* Port may not be initialized yet, can't say
4108 				 * the port is invalid in this stage.
4109 				 */
4110 				continue;
4111 			}
4112 			if (ring_size < nb_segs) {
4113 				printf("nb segments per TX packets=%u >= TX "
4114 				       "queue(%u) ring_size=%u - txpkts ignored\n",
4115 				       nb_segs, queue_id, ring_size);
4116 				return true;
4117 			}
4118 		}
4119 	}
4120 
4121 	return false;
4122 }
4123 
4124 void
4125 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
4126 {
4127 	uint16_t tx_pkt_len;
4128 	unsigned int i;
4129 
4130 	/*
4131 	 * For single segment settings failed check is ignored.
4132 	 * It is a very basic capability to send the single segment
4133 	 * packets, suppose it is always supported.
4134 	 */
4135 	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) {
4136 		fprintf(stderr,
4137 			"Tx segment size(%u) is not supported - txpkts ignored\n",
4138 			nb_segs);
4139 		return;
4140 	}
4141 
4142 	if (nb_segs > RTE_MAX_SEGS_PER_PKT) {
4143 		fprintf(stderr,
4144 			"Tx segment size(%u) is bigger than max number of segment(%u)\n",
4145 			nb_segs, RTE_MAX_SEGS_PER_PKT);
4146 		return;
4147 	}
4148 
4149 	/*
4150 	 * Check that each segment length is greater or equal than
4151 	 * the mbuf data size.
4152 	 * Check also that the total packet length is greater or equal than the
4153 	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
4154 	 * 20 + 8).
4155 	 */
4156 	tx_pkt_len = 0;
4157 	for (i = 0; i < nb_segs; i++) {
4158 		if (seg_lengths[i] > mbuf_data_size[0]) {
4159 			fprintf(stderr,
4160 				"length[%u]=%u > mbuf_data_size=%u - give up\n",
4161 				i, seg_lengths[i], mbuf_data_size[0]);
4162 			return;
4163 		}
4164 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
4165 	}
4166 	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
4167 		fprintf(stderr, "total packet length=%u < %d - give up\n",
4168 				(unsigned) tx_pkt_len,
4169 				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
4170 		return;
4171 	}
4172 
4173 	for (i = 0; i < nb_segs; i++)
4174 		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
4175 
4176 	tx_pkt_length  = tx_pkt_len;
4177 	tx_pkt_nb_segs = (uint8_t) nb_segs;
4178 }
4179 
4180 void
4181 show_tx_pkt_times(void)
4182 {
4183 	printf("Interburst gap: %u\n", tx_pkt_times_inter);
4184 	printf("Intraburst gap: %u\n", tx_pkt_times_intra);
4185 }
4186 
4187 void
4188 set_tx_pkt_times(unsigned int *tx_times)
4189 {
4190 	tx_pkt_times_inter = tx_times[0];
4191 	tx_pkt_times_intra = tx_times[1];
4192 }
4193 
4194 void
4195 setup_gro(const char *onoff, portid_t port_id)
4196 {
4197 	if (!rte_eth_dev_is_valid_port(port_id)) {
4198 		fprintf(stderr, "invalid port id %u\n", port_id);
4199 		return;
4200 	}
4201 	if (test_done == 0) {
4202 		fprintf(stderr,
4203 			"Before enable/disable GRO, please stop forwarding first\n");
4204 		return;
4205 	}
4206 	if (strcmp(onoff, "on") == 0) {
4207 		if (gro_ports[port_id].enable != 0) {
4208 			fprintf(stderr,
4209 				"Port %u has enabled GRO. Please disable GRO first\n",
4210 				port_id);
4211 			return;
4212 		}
4213 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
4214 			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
4215 			gro_ports[port_id].param.max_flow_num =
4216 				GRO_DEFAULT_FLOW_NUM;
4217 			gro_ports[port_id].param.max_item_per_flow =
4218 				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
4219 		}
4220 		gro_ports[port_id].enable = 1;
4221 	} else {
4222 		if (gro_ports[port_id].enable == 0) {
4223 			fprintf(stderr, "Port %u has disabled GRO\n", port_id);
4224 			return;
4225 		}
4226 		gro_ports[port_id].enable = 0;
4227 	}
4228 }
4229 
4230 void
4231 setup_gro_flush_cycles(uint8_t cycles)
4232 {
4233 	if (test_done == 0) {
4234 		fprintf(stderr,
4235 			"Before change flush interval for GRO, please stop forwarding first.\n");
4236 		return;
4237 	}
4238 
4239 	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
4240 			GRO_DEFAULT_FLUSH_CYCLES) {
4241 		fprintf(stderr,
4242 			"The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n",
4243 			GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES);
4244 		cycles = GRO_DEFAULT_FLUSH_CYCLES;
4245 	}
4246 
4247 	gro_flush_cycles = cycles;
4248 }
4249 
4250 void
4251 show_gro(portid_t port_id)
4252 {
4253 	struct rte_gro_param *param;
4254 	uint32_t max_pkts_num;
4255 
4256 	param = &gro_ports[port_id].param;
4257 
4258 	if (!rte_eth_dev_is_valid_port(port_id)) {
4259 		fprintf(stderr, "Invalid port id %u.\n", port_id);
4260 		return;
4261 	}
4262 	if (gro_ports[port_id].enable) {
4263 		printf("GRO type: TCP/IPv4\n");
4264 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
4265 			max_pkts_num = param->max_flow_num *
4266 				param->max_item_per_flow;
4267 		} else
4268 			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
4269 		printf("Max number of packets to perform GRO: %u\n",
4270 				max_pkts_num);
4271 		printf("Flushing cycles: %u\n", gro_flush_cycles);
4272 	} else
4273 		printf("Port %u doesn't enable GRO.\n", port_id);
4274 }
4275 
4276 void
4277 setup_gso(const char *mode, portid_t port_id)
4278 {
4279 	if (!rte_eth_dev_is_valid_port(port_id)) {
4280 		fprintf(stderr, "invalid port id %u\n", port_id);
4281 		return;
4282 	}
4283 	if (strcmp(mode, "on") == 0) {
4284 		if (test_done == 0) {
4285 			fprintf(stderr,
4286 				"before enabling GSO, please stop forwarding first\n");
4287 			return;
4288 		}
4289 		gso_ports[port_id].enable = 1;
4290 	} else if (strcmp(mode, "off") == 0) {
4291 		if (test_done == 0) {
4292 			fprintf(stderr,
4293 				"before disabling GSO, please stop forwarding first\n");
4294 			return;
4295 		}
4296 		gso_ports[port_id].enable = 0;
4297 	}
4298 }
4299 
4300 char*
4301 list_pkt_forwarding_modes(void)
4302 {
4303 	static char fwd_modes[128] = "";
4304 	const char *separator = "|";
4305 	struct fwd_engine *fwd_eng;
4306 	unsigned i = 0;
4307 
4308 	if (strlen (fwd_modes) == 0) {
4309 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
4310 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
4311 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
4312 			strncat(fwd_modes, separator,
4313 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
4314 		}
4315 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
4316 	}
4317 
4318 	return fwd_modes;
4319 }
4320 
4321 char*
4322 list_pkt_forwarding_retry_modes(void)
4323 {
4324 	static char fwd_modes[128] = "";
4325 	const char *separator = "|";
4326 	struct fwd_engine *fwd_eng;
4327 	unsigned i = 0;
4328 
4329 	if (strlen(fwd_modes) == 0) {
4330 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
4331 			if (fwd_eng == &rx_only_engine)
4332 				continue;
4333 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
4334 					sizeof(fwd_modes) -
4335 					strlen(fwd_modes) - 1);
4336 			strncat(fwd_modes, separator,
4337 					sizeof(fwd_modes) -
4338 					strlen(fwd_modes) - 1);
4339 		}
4340 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
4341 	}
4342 
4343 	return fwd_modes;
4344 }
4345 
4346 void
4347 set_pkt_forwarding_mode(const char *fwd_mode_name)
4348 {
4349 	struct fwd_engine *fwd_eng;
4350 	unsigned i;
4351 
4352 	i = 0;
4353 	while ((fwd_eng = fwd_engines[i]) != NULL) {
4354 		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
4355 			printf("Set %s packet forwarding mode%s\n",
4356 			       fwd_mode_name,
4357 			       retry_enabled == 0 ? "" : " with retry");
4358 			cur_fwd_eng = fwd_eng;
4359 			return;
4360 		}
4361 		i++;
4362 	}
4363 	fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name);
4364 }
4365 
4366 void
4367 add_rx_dump_callbacks(portid_t portid)
4368 {
4369 	struct rte_eth_dev_info dev_info;
4370 	uint16_t queue;
4371 	int ret;
4372 
4373 	if (port_id_is_invalid(portid, ENABLED_WARN))
4374 		return;
4375 
4376 	ret = eth_dev_info_get_print_err(portid, &dev_info);
4377 	if (ret != 0)
4378 		return;
4379 
4380 	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
4381 		if (!ports[portid].rx_dump_cb[queue])
4382 			ports[portid].rx_dump_cb[queue] =
4383 				rte_eth_add_rx_callback(portid, queue,
4384 					dump_rx_pkts, NULL);
4385 }
4386 
4387 void
4388 add_tx_dump_callbacks(portid_t portid)
4389 {
4390 	struct rte_eth_dev_info dev_info;
4391 	uint16_t queue;
4392 	int ret;
4393 
4394 	if (port_id_is_invalid(portid, ENABLED_WARN))
4395 		return;
4396 
4397 	ret = eth_dev_info_get_print_err(portid, &dev_info);
4398 	if (ret != 0)
4399 		return;
4400 
4401 	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
4402 		if (!ports[portid].tx_dump_cb[queue])
4403 			ports[portid].tx_dump_cb[queue] =
4404 				rte_eth_add_tx_callback(portid, queue,
4405 							dump_tx_pkts, NULL);
4406 }
4407 
4408 void
4409 remove_rx_dump_callbacks(portid_t portid)
4410 {
4411 	struct rte_eth_dev_info dev_info;
4412 	uint16_t queue;
4413 	int ret;
4414 
4415 	if (port_id_is_invalid(portid, ENABLED_WARN))
4416 		return;
4417 
4418 	ret = eth_dev_info_get_print_err(portid, &dev_info);
4419 	if (ret != 0)
4420 		return;
4421 
4422 	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
4423 		if (ports[portid].rx_dump_cb[queue]) {
4424 			rte_eth_remove_rx_callback(portid, queue,
4425 				ports[portid].rx_dump_cb[queue]);
4426 			ports[portid].rx_dump_cb[queue] = NULL;
4427 		}
4428 }
4429 
4430 void
4431 remove_tx_dump_callbacks(portid_t portid)
4432 {
4433 	struct rte_eth_dev_info dev_info;
4434 	uint16_t queue;
4435 	int ret;
4436 
4437 	if (port_id_is_invalid(portid, ENABLED_WARN))
4438 		return;
4439 
4440 	ret = eth_dev_info_get_print_err(portid, &dev_info);
4441 	if (ret != 0)
4442 		return;
4443 
4444 	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
4445 		if (ports[portid].tx_dump_cb[queue]) {
4446 			rte_eth_remove_tx_callback(portid, queue,
4447 				ports[portid].tx_dump_cb[queue]);
4448 			ports[portid].tx_dump_cb[queue] = NULL;
4449 		}
4450 }
4451 
4452 void
4453 configure_rxtx_dump_callbacks(uint16_t verbose)
4454 {
4455 	portid_t portid;
4456 
4457 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4458 		TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
4459 		return;
4460 #endif
4461 
4462 	RTE_ETH_FOREACH_DEV(portid)
4463 	{
4464 		if (verbose == 1 || verbose > 2)
4465 			add_rx_dump_callbacks(portid);
4466 		else
4467 			remove_rx_dump_callbacks(portid);
4468 		if (verbose >= 2)
4469 			add_tx_dump_callbacks(portid);
4470 		else
4471 			remove_tx_dump_callbacks(portid);
4472 	}
4473 }
4474 
4475 void
4476 set_verbose_level(uint16_t vb_level)
4477 {
4478 	printf("Change verbose level from %u to %u\n",
4479 	       (unsigned int) verbose_level, (unsigned int) vb_level);
4480 	verbose_level = vb_level;
4481 	configure_rxtx_dump_callbacks(verbose_level);
4482 }
4483 
4484 void
4485 vlan_extend_set(portid_t port_id, int on)
4486 {
4487 	int diag;
4488 	int vlan_offload;
4489 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4490 
4491 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4492 		return;
4493 
4494 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4495 
4496 	if (on) {
4497 		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
4498 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4499 	} else {
4500 		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
4501 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4502 	}
4503 
4504 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4505 	if (diag < 0) {
4506 		fprintf(stderr,
4507 			"rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n",
4508 			port_id, on, diag);
4509 		return;
4510 	}
4511 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4512 }
4513 
4514 void
4515 rx_vlan_strip_set(portid_t port_id, int on)
4516 {
4517 	int diag;
4518 	int vlan_offload;
4519 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4520 
4521 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4522 		return;
4523 
4524 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4525 
4526 	if (on) {
4527 		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
4528 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
4529 	} else {
4530 		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
4531 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
4532 	}
4533 
4534 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4535 	if (diag < 0) {
4536 		fprintf(stderr,
4537 			"%s(port_pi=%d, on=%d) failed diag=%d\n",
4538 			__func__, port_id, on, diag);
4539 		return;
4540 	}
4541 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4542 }
4543 
4544 void
4545 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
4546 {
4547 	int diag;
4548 
4549 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4550 		return;
4551 
4552 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
4553 	if (diag < 0)
4554 		fprintf(stderr,
4555 			"%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n",
4556 			__func__, port_id, queue_id, on, diag);
4557 }
4558 
4559 void
4560 rx_vlan_filter_set(portid_t port_id, int on)
4561 {
4562 	int diag;
4563 	int vlan_offload;
4564 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4565 
4566 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4567 		return;
4568 
4569 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4570 
4571 	if (on) {
4572 		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
4573 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4574 	} else {
4575 		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
4576 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4577 	}
4578 
4579 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4580 	if (diag < 0) {
4581 		fprintf(stderr,
4582 			"%s(port_pi=%d, on=%d) failed diag=%d\n",
4583 			__func__, port_id, on, diag);
4584 		return;
4585 	}
4586 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4587 }
4588 
4589 void
4590 rx_vlan_qinq_strip_set(portid_t port_id, int on)
4591 {
4592 	int diag;
4593 	int vlan_offload;
4594 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
4595 
4596 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4597 		return;
4598 
4599 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
4600 
4601 	if (on) {
4602 		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
4603 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
4604 	} else {
4605 		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
4606 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
4607 	}
4608 
4609 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
4610 	if (diag < 0) {
4611 		fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n",
4612 			__func__, port_id, on, diag);
4613 		return;
4614 	}
4615 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
4616 }
4617 
4618 int
4619 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
4620 {
4621 	int diag;
4622 
4623 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4624 		return 1;
4625 	if (vlan_id_is_invalid(vlan_id))
4626 		return 1;
4627 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
4628 	if (diag == 0)
4629 		return 0;
4630 	fprintf(stderr,
4631 		"rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n",
4632 		port_id, vlan_id, on, diag);
4633 	return -1;
4634 }
4635 
4636 void
4637 rx_vlan_all_filter_set(portid_t port_id, int on)
4638 {
4639 	uint16_t vlan_id;
4640 
4641 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4642 		return;
4643 	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
4644 		if (rx_vft_set(port_id, vlan_id, on))
4645 			break;
4646 	}
4647 }
4648 
4649 void
4650 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
4651 {
4652 	int diag;
4653 
4654 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4655 		return;
4656 
4657 	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
4658 	if (diag == 0)
4659 		return;
4660 
4661 	fprintf(stderr,
4662 		"tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n",
4663 		port_id, vlan_type, tp_id, diag);
4664 }
4665 
4666 void
4667 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
4668 {
4669 	struct rte_eth_dev_info dev_info;
4670 	int ret;
4671 
4672 	if (vlan_id_is_invalid(vlan_id))
4673 		return;
4674 
4675 	if (ports[port_id].dev_conf.txmode.offloads &
4676 	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
4677 		fprintf(stderr, "Error, as QinQ has been enabled.\n");
4678 		return;
4679 	}
4680 
4681 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
4682 	if (ret != 0)
4683 		return;
4684 
4685 	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
4686 		fprintf(stderr,
4687 			"Error: vlan insert is not supported by port %d\n",
4688 			port_id);
4689 		return;
4690 	}
4691 
4692 	tx_vlan_reset(port_id);
4693 	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
4694 	ports[port_id].tx_vlan_id = vlan_id;
4695 }
4696 
4697 void
4698 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
4699 {
4700 	struct rte_eth_dev_info dev_info;
4701 	int ret;
4702 
4703 	if (vlan_id_is_invalid(vlan_id))
4704 		return;
4705 	if (vlan_id_is_invalid(vlan_id_outer))
4706 		return;
4707 
4708 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
4709 	if (ret != 0)
4710 		return;
4711 
4712 	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
4713 		fprintf(stderr,
4714 			"Error: qinq insert not supported by port %d\n",
4715 			port_id);
4716 		return;
4717 	}
4718 
4719 	tx_vlan_reset(port_id);
4720 	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
4721 						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
4722 	ports[port_id].tx_vlan_id = vlan_id;
4723 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
4724 }
4725 
4726 void
4727 tx_vlan_reset(portid_t port_id)
4728 {
4729 	ports[port_id].dev_conf.txmode.offloads &=
4730 				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
4731 				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
4732 	ports[port_id].tx_vlan_id = 0;
4733 	ports[port_id].tx_vlan_id_outer = 0;
4734 }
4735 
4736 void
4737 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
4738 {
4739 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4740 		return;
4741 
4742 	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
4743 }
4744 
4745 void
4746 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
4747 {
4748 	int ret;
4749 
4750 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4751 		return;
4752 
4753 	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
4754 		return;
4755 
4756 	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
4757 		fprintf(stderr, "map_value not in required range 0..%d\n",
4758 			RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
4759 		return;
4760 	}
4761 
4762 	if (!is_rx) { /* tx */
4763 		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
4764 							     map_value);
4765 		if (ret) {
4766 			fprintf(stderr,
4767 				"failed to set tx queue stats mapping.\n");
4768 			return;
4769 		}
4770 	} else { /* rx */
4771 		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
4772 							     map_value);
4773 		if (ret) {
4774 			fprintf(stderr,
4775 				"failed to set rx queue stats mapping.\n");
4776 			return;
4777 		}
4778 	}
4779 }
4780 
4781 void
4782 set_xstats_hide_zero(uint8_t on_off)
4783 {
4784 	xstats_hide_zero = on_off;
4785 }
4786 
4787 void
4788 set_record_core_cycles(uint8_t on_off)
4789 {
4790 	record_core_cycles = on_off;
4791 }
4792 
4793 void
4794 set_record_burst_stats(uint8_t on_off)
4795 {
4796 	record_burst_stats = on_off;
4797 }
4798 
4799 static char*
4800 flowtype_to_str(uint16_t flow_type)
4801 {
4802 	struct flow_type_info {
4803 		char str[32];
4804 		uint16_t ftype;
4805 	};
4806 
4807 	uint8_t i;
4808 	static struct flow_type_info flowtype_str_table[] = {
4809 		{"raw", RTE_ETH_FLOW_RAW},
4810 		{"ipv4", RTE_ETH_FLOW_IPV4},
4811 		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
4812 		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
4813 		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
4814 		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
4815 		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
4816 		{"ipv6", RTE_ETH_FLOW_IPV6},
4817 		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
4818 		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
4819 		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
4820 		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
4821 		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
4822 		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
4823 		{"ipv6-ex", RTE_ETH_FLOW_IPV6_EX},
4824 		{"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX},
4825 		{"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX},
4826 		{"port", RTE_ETH_FLOW_PORT},
4827 		{"vxlan", RTE_ETH_FLOW_VXLAN},
4828 		{"geneve", RTE_ETH_FLOW_GENEVE},
4829 		{"nvgre", RTE_ETH_FLOW_NVGRE},
4830 		{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
4831 		{"gtpu", RTE_ETH_FLOW_GTPU},
4832 	};
4833 
4834 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
4835 		if (flowtype_str_table[i].ftype == flow_type)
4836 			return flowtype_str_table[i].str;
4837 	}
4838 
4839 	return NULL;
4840 }
4841 
4842 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
4843 
4844 static inline void
4845 print_fdir_mask(struct rte_eth_fdir_masks *mask)
4846 {
4847 	printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
4848 
4849 	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4850 		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
4851 			" tunnel_id: 0x%08x",
4852 			mask->mac_addr_byte_mask, mask->tunnel_type_mask,
4853 			rte_be_to_cpu_32(mask->tunnel_id_mask));
4854 	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
4855 		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
4856 			rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
4857 			rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
4858 
4859 		printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
4860 			rte_be_to_cpu_16(mask->src_port_mask),
4861 			rte_be_to_cpu_16(mask->dst_port_mask));
4862 
4863 		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4864 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
4865 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
4866 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
4867 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
4868 
4869 		printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
4870 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
4871 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
4872 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
4873 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
4874 	}
4875 
4876 	printf("\n");
4877 }
4878 
4879 static inline void
4880 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4881 {
4882 	struct rte_eth_flex_payload_cfg *cfg;
4883 	uint32_t i, j;
4884 
4885 	for (i = 0; i < flex_conf->nb_payloads; i++) {
4886 		cfg = &flex_conf->flex_set[i];
4887 		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
4888 			printf("\n    RAW:  ");
4889 		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
4890 			printf("\n    L2_PAYLOAD:  ");
4891 		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
4892 			printf("\n    L3_PAYLOAD:  ");
4893 		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
4894 			printf("\n    L4_PAYLOAD:  ");
4895 		else
4896 			printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
4897 		for (j = 0; j < num; j++)
4898 			printf("  %-5u", cfg->src_offset[j]);
4899 	}
4900 	printf("\n");
4901 }
4902 
4903 static inline void
4904 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
4905 {
4906 	struct rte_eth_fdir_flex_mask *mask;
4907 	uint32_t i, j;
4908 	char *p;
4909 
4910 	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
4911 		mask = &flex_conf->flex_mask[i];
4912 		p = flowtype_to_str(mask->flow_type);
4913 		printf("\n    %s:\t", p ? p : "unknown");
4914 		for (j = 0; j < num; j++)
4915 			printf(" %02x", mask->mask[j]);
4916 	}
4917 	printf("\n");
4918 }
4919 
4920 static inline void
4921 print_fdir_flow_type(uint32_t flow_types_mask)
4922 {
4923 	int i;
4924 	char *p;
4925 
4926 	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
4927 		if (!(flow_types_mask & (1 << i)))
4928 			continue;
4929 		p = flowtype_to_str(i);
4930 		if (p)
4931 			printf(" %s", p);
4932 		else
4933 			printf(" unknown");
4934 	}
4935 	printf("\n");
4936 }
4937 
4938 static int
4939 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
4940 		    struct rte_eth_fdir_stats *fdir_stat)
4941 {
4942 	int ret = -ENOTSUP;
4943 
4944 #ifdef RTE_NET_I40E
4945 	if (ret == -ENOTSUP) {
4946 		ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
4947 		if (!ret)
4948 			ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
4949 	}
4950 #endif
4951 #ifdef RTE_NET_IXGBE
4952 	if (ret == -ENOTSUP) {
4953 		ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
4954 		if (!ret)
4955 			ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
4956 	}
4957 #endif
4958 	switch (ret) {
4959 	case 0:
4960 		break;
4961 	case -ENOTSUP:
4962 		fprintf(stderr, "\n FDIR is not supported on port %-2d\n",
4963 			port_id);
4964 		break;
4965 	default:
4966 		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
4967 		break;
4968 	}
4969 	return ret;
4970 }
4971 
4972 void
4973 fdir_get_infos(portid_t port_id)
4974 {
4975 	struct rte_eth_fdir_stats fdir_stat;
4976 	struct rte_eth_fdir_info fdir_info;
4977 
4978 	static const char *fdir_stats_border = "########################";
4979 
4980 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4981 		return;
4982 
4983 	memset(&fdir_info, 0, sizeof(fdir_info));
4984 	memset(&fdir_stat, 0, sizeof(fdir_stat));
4985 	if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
4986 		return;
4987 
4988 	printf("\n  %s FDIR infos for port %-2d     %s\n",
4989 	       fdir_stats_border, port_id, fdir_stats_border);
4990 	printf("  MODE: ");
4991 	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
4992 		printf("  PERFECT\n");
4993 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
4994 		printf("  PERFECT-MAC-VLAN\n");
4995 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
4996 		printf("  PERFECT-TUNNEL\n");
4997 	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
4998 		printf("  SIGNATURE\n");
4999 	else
5000 		printf("  DISABLE\n");
5001 	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
5002 		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
5003 		printf("  SUPPORTED FLOW TYPE: ");
5004 		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
5005 	}
5006 	printf("  FLEX PAYLOAD INFO:\n");
5007 	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
5008 	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
5009 	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
5010 		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
5011 		fdir_info.flex_payload_unit,
5012 		fdir_info.max_flex_payload_segment_num,
5013 		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
5014 	printf("  MASK: ");
5015 	print_fdir_mask(&fdir_info.mask);
5016 	if (fdir_info.flex_conf.nb_payloads > 0) {
5017 		printf("  FLEX PAYLOAD SRC OFFSET:");
5018 		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
5019 	}
5020 	if (fdir_info.flex_conf.nb_flexmasks > 0) {
5021 		printf("  FLEX MASK CFG:");
5022 		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
5023 	}
5024 	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
5025 	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
5026 	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
5027 	       fdir_info.guarant_spc, fdir_info.best_spc);
5028 	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
5029 	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
5030 	       "  add:	         %-10"PRIu64"  remove:        %"PRIu64"\n"
5031 	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
5032 	       fdir_stat.collision, fdir_stat.free,
5033 	       fdir_stat.maxhash, fdir_stat.maxlen,
5034 	       fdir_stat.add, fdir_stat.remove,
5035 	       fdir_stat.f_add, fdir_stat.f_remove);
5036 	printf("  %s############################%s\n",
5037 	       fdir_stats_border, fdir_stats_border);
5038 }
5039 
5040 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */
5041 
5042 void
5043 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
5044 {
5045 	struct rte_port *port;
5046 	struct rte_eth_fdir_flex_conf *flex_conf;
5047 	int i, idx = 0;
5048 
5049 	port = &ports[port_id];
5050 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
5051 	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
5052 		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
5053 			idx = i;
5054 			break;
5055 		}
5056 	}
5057 	if (i >= RTE_ETH_FLOW_MAX) {
5058 		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
5059 			idx = flex_conf->nb_flexmasks;
5060 			flex_conf->nb_flexmasks++;
5061 		} else {
5062 			fprintf(stderr,
5063 				"The flex mask table is full. Can not set flex mask for flow_type(%u).",
5064 				cfg->flow_type);
5065 			return;
5066 		}
5067 	}
5068 	rte_memcpy(&flex_conf->flex_mask[idx],
5069 			 cfg,
5070 			 sizeof(struct rte_eth_fdir_flex_mask));
5071 }
5072 
5073 void
5074 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
5075 {
5076 	struct rte_port *port;
5077 	struct rte_eth_fdir_flex_conf *flex_conf;
5078 	int i, idx = 0;
5079 
5080 	port = &ports[port_id];
5081 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
5082 	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
5083 		if (cfg->type == flex_conf->flex_set[i].type) {
5084 			idx = i;
5085 			break;
5086 		}
5087 	}
5088 	if (i >= RTE_ETH_PAYLOAD_MAX) {
5089 		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
5090 			idx = flex_conf->nb_payloads;
5091 			flex_conf->nb_payloads++;
5092 		} else {
5093 			fprintf(stderr,
5094 				"The flex payload table is full. Can not set flex payload for type(%u).",
5095 				cfg->type);
5096 			return;
5097 		}
5098 	}
5099 	rte_memcpy(&flex_conf->flex_set[idx],
5100 			 cfg,
5101 			 sizeof(struct rte_eth_flex_payload_cfg));
5102 
5103 }
5104 
5105 void
5106 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
5107 {
5108 #ifdef RTE_NET_IXGBE
5109 	int diag;
5110 
5111 	if (is_rx)
5112 		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
5113 	else
5114 		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
5115 
5116 	if (diag == 0)
5117 		return;
5118 	fprintf(stderr,
5119 		"rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
5120 		is_rx ? "rx" : "tx", port_id, diag);
5121 	return;
5122 #endif
5123 	fprintf(stderr, "VF %s setting not supported for port %d\n",
5124 		is_rx ? "Rx" : "Tx", port_id);
5125 	RTE_SET_USED(vf);
5126 	RTE_SET_USED(on);
5127 }
5128 
5129 int
5130 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
5131 {
5132 	int diag;
5133 	struct rte_eth_link link;
5134 	int ret;
5135 
5136 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5137 		return 1;
5138 	ret = eth_link_get_nowait_print_err(port_id, &link);
5139 	if (ret < 0)
5140 		return 1;
5141 	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
5142 	    rate > link.link_speed) {
5143 		fprintf(stderr,
5144 			"Invalid rate value:%u bigger than link speed: %u\n",
5145 			rate, link.link_speed);
5146 		return 1;
5147 	}
5148 	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
5149 	if (diag == 0)
5150 		return diag;
5151 	fprintf(stderr,
5152 		"rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
5153 		port_id, diag);
5154 	return diag;
5155 }
5156 
5157 int
5158 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
5159 {
5160 	int diag = -ENOTSUP;
5161 
5162 	RTE_SET_USED(vf);
5163 	RTE_SET_USED(rate);
5164 	RTE_SET_USED(q_msk);
5165 
5166 #ifdef RTE_NET_IXGBE
5167 	if (diag == -ENOTSUP)
5168 		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
5169 						       q_msk);
5170 #endif
5171 #ifdef RTE_NET_BNXT
5172 	if (diag == -ENOTSUP)
5173 		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
5174 #endif
5175 	if (diag == 0)
5176 		return diag;
5177 
5178 	fprintf(stderr,
5179 		"%s for port_id=%d failed diag=%d\n",
5180 		__func__, port_id, diag);
5181 	return diag;
5182 }
5183 
5184 /*
5185  * Functions to manage the set of filtered Multicast MAC addresses.
5186  *
5187  * A pool of filtered multicast MAC addresses is associated with each port.
5188  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
5189  * The address of the pool and the number of valid multicast MAC addresses
5190  * recorded in the pool are stored in the fields "mc_addr_pool" and
5191  * "mc_addr_nb" of the "rte_port" data structure.
5192  *
5193  * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
5194  * to be supplied a contiguous array of multicast MAC addresses.
5195  * To comply with this constraint, the set of multicast addresses recorded
5196  * into the pool are systematically compacted at the beginning of the pool.
5197  * Hence, when a multicast address is removed from the pool, all following
5198  * addresses, if any, are copied back to keep the set contiguous.
5199  */
5200 #define MCAST_POOL_INC 32
5201 
5202 static int
5203 mcast_addr_pool_extend(struct rte_port *port)
5204 {
5205 	struct rte_ether_addr *mc_pool;
5206 	size_t mc_pool_size;
5207 
5208 	/*
5209 	 * If a free entry is available at the end of the pool, just
5210 	 * increment the number of recorded multicast addresses.
5211 	 */
5212 	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
5213 		port->mc_addr_nb++;
5214 		return 0;
5215 	}
5216 
5217 	/*
5218 	 * [re]allocate a pool with MCAST_POOL_INC more entries.
5219 	 * The previous test guarantees that port->mc_addr_nb is a multiple
5220 	 * of MCAST_POOL_INC.
5221 	 */
5222 	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
5223 						    MCAST_POOL_INC);
5224 	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
5225 						mc_pool_size);
5226 	if (mc_pool == NULL) {
5227 		fprintf(stderr,
5228 			"allocation of pool of %u multicast addresses failed\n",
5229 			port->mc_addr_nb + MCAST_POOL_INC);
5230 		return -ENOMEM;
5231 	}
5232 
5233 	port->mc_addr_pool = mc_pool;
5234 	port->mc_addr_nb++;
5235 	return 0;
5236 
5237 }
5238 
5239 static void
5240 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
5241 {
5242 	if (mcast_addr_pool_extend(port) != 0)
5243 		return;
5244 	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
5245 }
5246 
5247 static void
5248 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
5249 {
5250 	port->mc_addr_nb--;
5251 	if (addr_idx == port->mc_addr_nb) {
5252 		/* No need to recompact the set of multicast addresses. */
5253 		if (port->mc_addr_nb == 0) {
5254 			/* free the pool of multicast addresses. */
5255 			free(port->mc_addr_pool);
5256 			port->mc_addr_pool = NULL;
5257 		}
5258 		return;
5259 	}
5260 	memmove(&port->mc_addr_pool[addr_idx],
5261 		&port->mc_addr_pool[addr_idx + 1],
5262 		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
5263 }
5264 
5265 static int
5266 eth_port_multicast_addr_list_set(portid_t port_id)
5267 {
5268 	struct rte_port *port;
5269 	int diag;
5270 
5271 	port = &ports[port_id];
5272 	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
5273 					    port->mc_addr_nb);
5274 	if (diag < 0)
5275 		fprintf(stderr,
5276 			"rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
5277 			port_id, port->mc_addr_nb, diag);
5278 
5279 	return diag;
5280 }
5281 
5282 void
5283 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
5284 {
5285 	struct rte_port *port;
5286 	uint32_t i;
5287 
5288 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5289 		return;
5290 
5291 	port = &ports[port_id];
5292 
5293 	/*
5294 	 * Check that the added multicast MAC address is not already recorded
5295 	 * in the pool of multicast addresses.
5296 	 */
5297 	for (i = 0; i < port->mc_addr_nb; i++) {
5298 		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
5299 			fprintf(stderr,
5300 				"multicast address already filtered by port\n");
5301 			return;
5302 		}
5303 	}
5304 
5305 	mcast_addr_pool_append(port, mc_addr);
5306 	if (eth_port_multicast_addr_list_set(port_id) < 0)
5307 		/* Rollback on failure, remove the address from the pool */
5308 		mcast_addr_pool_remove(port, i);
5309 }
5310 
5311 void
5312 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
5313 {
5314 	struct rte_port *port;
5315 	uint32_t i;
5316 
5317 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5318 		return;
5319 
5320 	port = &ports[port_id];
5321 
5322 	/*
5323 	 * Search the pool of multicast MAC addresses for the removed address.
5324 	 */
5325 	for (i = 0; i < port->mc_addr_nb; i++) {
5326 		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
5327 			break;
5328 	}
5329 	if (i == port->mc_addr_nb) {
5330 		fprintf(stderr, "multicast address not filtered by port %d\n",
5331 			port_id);
5332 		return;
5333 	}
5334 
5335 	mcast_addr_pool_remove(port, i);
5336 	if (eth_port_multicast_addr_list_set(port_id) < 0)
5337 		/* Rollback on failure, add the address back into the pool */
5338 		mcast_addr_pool_append(port, mc_addr);
5339 }
5340 
5341 void
5342 port_dcb_info_display(portid_t port_id)
5343 {
5344 	struct rte_eth_dcb_info dcb_info;
5345 	uint16_t i;
5346 	int ret;
5347 	static const char *border = "================";
5348 
5349 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5350 		return;
5351 
5352 	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
5353 	if (ret) {
5354 		fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n",
5355 			port_id);
5356 		return;
5357 	}
5358 	printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
5359 	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
5360 	printf("\n  TC :        ");
5361 	for (i = 0; i < dcb_info.nb_tcs; i++)
5362 		printf("\t%4d", i);
5363 	printf("\n  Priority :  ");
5364 	for (i = 0; i < dcb_info.nb_tcs; i++)
5365 		printf("\t%4d", dcb_info.prio_tc[i]);
5366 	printf("\n  BW percent :");
5367 	for (i = 0; i < dcb_info.nb_tcs; i++)
5368 		printf("\t%4d%%", dcb_info.tc_bws[i]);
5369 	printf("\n  RXQ base :  ");
5370 	for (i = 0; i < dcb_info.nb_tcs; i++)
5371 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
5372 	printf("\n  RXQ number :");
5373 	for (i = 0; i < dcb_info.nb_tcs; i++)
5374 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
5375 	printf("\n  TXQ base :  ");
5376 	for (i = 0; i < dcb_info.nb_tcs; i++)
5377 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
5378 	printf("\n  TXQ number :");
5379 	for (i = 0; i < dcb_info.nb_tcs; i++)
5380 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
5381 	printf("\n");
5382 }
5383 
5384 uint8_t *
5385 open_file(const char *file_path, uint32_t *size)
5386 {
5387 	int fd = open(file_path, O_RDONLY);
5388 	off_t pkg_size;
5389 	uint8_t *buf = NULL;
5390 	int ret = 0;
5391 	struct stat st_buf;
5392 
5393 	if (size)
5394 		*size = 0;
5395 
5396 	if (fd == -1) {
5397 		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
5398 		return buf;
5399 	}
5400 
5401 	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
5402 		close(fd);
5403 		fprintf(stderr, "%s: File operations failed\n", __func__);
5404 		return buf;
5405 	}
5406 
5407 	pkg_size = st_buf.st_size;
5408 	if (pkg_size < 0) {
5409 		close(fd);
5410 		fprintf(stderr, "%s: File operations failed\n", __func__);
5411 		return buf;
5412 	}
5413 
5414 	buf = (uint8_t *)malloc(pkg_size);
5415 	if (!buf) {
5416 		close(fd);
5417 		fprintf(stderr, "%s: Failed to malloc memory\n", __func__);
5418 		return buf;
5419 	}
5420 
5421 	ret = read(fd, buf, pkg_size);
5422 	if (ret < 0) {
5423 		close(fd);
5424 		fprintf(stderr, "%s: File read operation failed\n", __func__);
5425 		close_file(buf);
5426 		return NULL;
5427 	}
5428 
5429 	if (size)
5430 		*size = pkg_size;
5431 
5432 	close(fd);
5433 
5434 	return buf;
5435 }
5436 
5437 int
5438 save_file(const char *file_path, uint8_t *buf, uint32_t size)
5439 {
5440 	FILE *fh = fopen(file_path, "wb");
5441 
5442 	if (fh == NULL) {
5443 		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
5444 		return -1;
5445 	}
5446 
5447 	if (fwrite(buf, 1, size, fh) != size) {
5448 		fclose(fh);
5449 		fprintf(stderr, "%s: File write operation failed\n", __func__);
5450 		return -1;
5451 	}
5452 
5453 	fclose(fh);
5454 
5455 	return 0;
5456 }
5457 
5458 int
5459 close_file(uint8_t *buf)
5460 {
5461 	if (buf) {
5462 		free((void *)buf);
5463 		return 0;
5464 	}
5465 
5466 	return -1;
5467 }
5468 
5469 void
5470 port_queue_region_info_display(portid_t port_id, void *buf)
5471 {
5472 #ifdef RTE_NET_I40E
5473 	uint16_t i, j;
5474 	struct rte_pmd_i40e_queue_regions *info =
5475 		(struct rte_pmd_i40e_queue_regions *)buf;
5476 	static const char *queue_region_info_stats_border = "-------";
5477 
5478 	if (!info->queue_region_number)
5479 		printf("there is no region has been set before");
5480 
5481 	printf("\n	%s All queue region info for port=%2d %s",
5482 			queue_region_info_stats_border, port_id,
5483 			queue_region_info_stats_border);
5484 	printf("\n	queue_region_number: %-14u \n",
5485 			info->queue_region_number);
5486 
5487 	for (i = 0; i < info->queue_region_number; i++) {
5488 		printf("\n	region_id: %-14u queue_number: %-14u "
5489 			"queue_start_index: %-14u \n",
5490 			info->region[i].region_id,
5491 			info->region[i].queue_num,
5492 			info->region[i].queue_start_index);
5493 
5494 		printf("  user_priority_num is	%-14u :",
5495 					info->region[i].user_priority_num);
5496 		for (j = 0; j < info->region[i].user_priority_num; j++)
5497 			printf(" %-14u ", info->region[i].user_priority[j]);
5498 
5499 		printf("\n	flowtype_num is  %-14u :",
5500 				info->region[i].flowtype_num);
5501 		for (j = 0; j < info->region[i].flowtype_num; j++)
5502 			printf(" %-14u ", info->region[i].hw_flowtype[j]);
5503 	}
5504 #else
5505 	RTE_SET_USED(port_id);
5506 	RTE_SET_USED(buf);
5507 #endif
5508 
5509 	printf("\n\n");
5510 }
5511 
5512 void
5513 show_macs(portid_t port_id)
5514 {
5515 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
5516 	struct rte_eth_dev_info dev_info;
5517 	int32_t i, rc, num_macs = 0;
5518 
5519 	if (eth_dev_info_get_print_err(port_id, &dev_info))
5520 		return;
5521 
5522 	struct rte_ether_addr addr[dev_info.max_mac_addrs];
5523 	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
5524 	if (rc < 0)
5525 		return;
5526 
5527 	for (i = 0; i < rc; i++) {
5528 
5529 		/* skip zero address */
5530 		if (rte_is_zero_ether_addr(&addr[i]))
5531 			continue;
5532 
5533 		num_macs++;
5534 	}
5535 
5536 	printf("Number of MAC address added: %d\n", num_macs);
5537 
5538 	for (i = 0; i < rc; i++) {
5539 
5540 		/* skip zero address */
5541 		if (rte_is_zero_ether_addr(&addr[i]))
5542 			continue;
5543 
5544 		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
5545 		printf("  %s\n", buf);
5546 	}
5547 }
5548 
5549 void
5550 show_mcast_macs(portid_t port_id)
5551 {
5552 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
5553 	struct rte_ether_addr *addr;
5554 	struct rte_port *port;
5555 	uint32_t i;
5556 
5557 	port = &ports[port_id];
5558 
5559 	printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb);
5560 
5561 	for (i = 0; i < port->mc_addr_nb; i++) {
5562 		addr = &port->mc_addr_pool[i];
5563 
5564 		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
5565 		printf("  %s\n", buf);
5566 	}
5567 }
5568