xref: /dpdk/app/test-pmd/config.c (revision 8f1d23ece06adff5eae9f1b4365bdbbd3abee2b2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation.
3  * Copyright 2013-2014 6WIND S.A.
4  */
5 
6 #include <ctype.h>
7 #include <stdarg.h>
8 #include <errno.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <stdint.h>
13 #include <inttypes.h>
14 
15 #include <sys/queue.h>
16 #include <sys/types.h>
17 #include <sys/stat.h>
18 #include <fcntl.h>
19 #include <unistd.h>
20 
21 #include <rte_common.h>
22 #include <rte_byteorder.h>
23 #include <rte_debug.h>
24 #include <rte_log.h>
25 #include <rte_memory.h>
26 #include <rte_memcpy.h>
27 #include <rte_memzone.h>
28 #include <rte_launch.h>
29 #include <rte_eal.h>
30 #include <rte_per_lcore.h>
31 #include <rte_lcore.h>
32 #include <rte_branch_prediction.h>
33 #include <rte_mempool.h>
34 #include <rte_mbuf.h>
35 #include <rte_interrupts.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_string_fns.h>
39 #include <rte_cycles.h>
40 #include <rte_flow.h>
41 #include <rte_mtr.h>
42 #include <rte_errno.h>
43 #ifdef RTE_NET_IXGBE
44 #include <rte_pmd_ixgbe.h>
45 #endif
46 #ifdef RTE_NET_I40E
47 #include <rte_pmd_i40e.h>
48 #endif
49 #ifdef RTE_NET_BNXT
50 #include <rte_pmd_bnxt.h>
51 #endif
52 #ifdef RTE_LIB_GRO
53 #include <rte_gro.h>
54 #endif
55 #include <rte_hexdump.h>
56 
57 #include "testpmd.h"
58 #include "cmdline_mtr.h"
59 
60 #define ETHDEV_FWVERS_LEN 32
61 
62 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
63 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
64 #else
65 #define CLOCK_TYPE_ID CLOCK_MONOTONIC
66 #endif
67 
68 #define NS_PER_SEC 1E9
69 
70 static const struct {
71 	enum tx_pkt_split split;
72 	const char *name;
73 } tx_split_name[] = {
74 	{
75 		.split = TX_PKT_SPLIT_OFF,
76 		.name = "off",
77 	},
78 	{
79 		.split = TX_PKT_SPLIT_ON,
80 		.name = "on",
81 	},
82 	{
83 		.split = TX_PKT_SPLIT_RND,
84 		.name = "rand",
85 	},
86 };
87 
88 const struct rss_type_info rss_type_table[] = {
89 	/* Group types */
90 	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
91 		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
92 		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
93 		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2},
94 	{ "none", 0 },
95 	{ "ip", RTE_ETH_RSS_IP },
96 	{ "udp", RTE_ETH_RSS_UDP },
97 	{ "tcp", RTE_ETH_RSS_TCP },
98 	{ "sctp", RTE_ETH_RSS_SCTP },
99 	{ "tunnel", RTE_ETH_RSS_TUNNEL },
100 	{ "vlan", RTE_ETH_RSS_VLAN },
101 
102 	/* Individual type */
103 	{ "ipv4", RTE_ETH_RSS_IPV4 },
104 	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
105 	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
106 	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
107 	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
108 	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
109 	{ "ipv6", RTE_ETH_RSS_IPV6 },
110 	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
111 	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
112 	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
113 	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
114 	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
115 	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
116 	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
117 	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
118 	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
119 	{ "port", RTE_ETH_RSS_PORT },
120 	{ "vxlan", RTE_ETH_RSS_VXLAN },
121 	{ "geneve", RTE_ETH_RSS_GENEVE },
122 	{ "nvgre", RTE_ETH_RSS_NVGRE },
123 	{ "gtpu", RTE_ETH_RSS_GTPU },
124 	{ "eth", RTE_ETH_RSS_ETH },
125 	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
126 	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
127 	{ "esp", RTE_ETH_RSS_ESP },
128 	{ "ah", RTE_ETH_RSS_AH },
129 	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
130 	{ "pfcp", RTE_ETH_RSS_PFCP },
131 	{ "pppoe", RTE_ETH_RSS_PPPOE },
132 	{ "ecpri", RTE_ETH_RSS_ECPRI },
133 	{ "mpls", RTE_ETH_RSS_MPLS },
134 	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
135 	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
136 	{ "l2tpv2", RTE_ETH_RSS_L2TPV2 },
137 	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
138 	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
139 	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
140 	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
141 	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
142 	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
143 	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
144 	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
145 	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
146 	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
147 	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
148 	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
149 	{ NULL, 0},
150 };
151 
152 static const struct {
153 	enum rte_eth_fec_mode mode;
154 	const char *name;
155 } fec_mode_name[] = {
156 	{
157 		.mode = RTE_ETH_FEC_NOFEC,
158 		.name = "off",
159 	},
160 	{
161 		.mode = RTE_ETH_FEC_AUTO,
162 		.name = "auto",
163 	},
164 	{
165 		.mode = RTE_ETH_FEC_BASER,
166 		.name = "baser",
167 	},
168 	{
169 		.mode = RTE_ETH_FEC_RS,
170 		.name = "rs",
171 	},
172 };
173 
174 static const struct {
175 	char str[32];
176 	uint16_t ftype;
177 } flowtype_str_table[] = {
178 	{"raw", RTE_ETH_FLOW_RAW},
179 	{"ipv4", RTE_ETH_FLOW_IPV4},
180 	{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
181 	{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
182 	{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
183 	{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
184 	{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
185 	{"ipv6", RTE_ETH_FLOW_IPV6},
186 	{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
187 	{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
188 	{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
189 	{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
190 	{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
191 	{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
192 	{"ipv6-ex", RTE_ETH_FLOW_IPV6_EX},
193 	{"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX},
194 	{"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX},
195 	{"port", RTE_ETH_FLOW_PORT},
196 	{"vxlan", RTE_ETH_FLOW_VXLAN},
197 	{"geneve", RTE_ETH_FLOW_GENEVE},
198 	{"nvgre", RTE_ETH_FLOW_NVGRE},
199 	{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
200 	{"gtpu", RTE_ETH_FLOW_GTPU},
201 };
202 
203 static void
204 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
205 {
206 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
207 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
208 	printf("%s%s", name, buf);
209 }
210 
211 static void
212 nic_xstats_display_periodic(portid_t port_id)
213 {
214 	struct xstat_display_info *xstats_info;
215 	uint64_t *prev_values, *curr_values;
216 	uint64_t diff_value, value_rate;
217 	struct timespec cur_time;
218 	uint64_t *ids_supp;
219 	size_t ids_supp_sz;
220 	uint64_t diff_ns;
221 	unsigned int i;
222 	int rc;
223 
224 	xstats_info = &ports[port_id].xstats_info;
225 
226 	ids_supp_sz = xstats_info->ids_supp_sz;
227 	if (ids_supp_sz == 0)
228 		return;
229 
230 	printf("\n");
231 
232 	ids_supp = xstats_info->ids_supp;
233 	prev_values = xstats_info->prev_values;
234 	curr_values = xstats_info->curr_values;
235 
236 	rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
237 				      ids_supp_sz);
238 	if (rc != (int)ids_supp_sz) {
239 		fprintf(stderr,
240 			"Failed to get values of %zu xstats for port %u - return code %d\n",
241 			ids_supp_sz, port_id, rc);
242 		return;
243 	}
244 
245 	diff_ns = 0;
246 	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
247 		uint64_t ns;
248 
249 		ns = cur_time.tv_sec * NS_PER_SEC;
250 		ns += cur_time.tv_nsec;
251 
252 		if (xstats_info->prev_ns != 0)
253 			diff_ns = ns - xstats_info->prev_ns;
254 		xstats_info->prev_ns = ns;
255 	}
256 
257 	printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
258 	for (i = 0; i < ids_supp_sz; i++) {
259 		diff_value = (curr_values[i] > prev_values[i]) ?
260 			     (curr_values[i] - prev_values[i]) : 0;
261 		prev_values[i] = curr_values[i];
262 		value_rate = diff_ns > 0 ?
263 				(double)diff_value / diff_ns * NS_PER_SEC : 0;
264 
265 		printf("  %-25s%12"PRIu64" %15"PRIu64"\n",
266 		       xstats_display[i].name, curr_values[i], value_rate);
267 	}
268 }
269 
270 void
271 nic_stats_display(portid_t port_id)
272 {
273 	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
274 	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
275 	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
276 	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
277 	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
278 	struct timespec cur_time;
279 	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
280 								diff_ns;
281 	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
282 	struct rte_eth_stats stats;
283 	static const char *nic_stats_border = "########################";
284 	int ret;
285 
286 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
287 		print_valid_ports();
288 		return;
289 	}
290 	ret = rte_eth_stats_get(port_id, &stats);
291 	if (ret != 0) {
292 		fprintf(stderr,
293 			"%s: Error: failed to get stats (port %u): %d",
294 			__func__, port_id, ret);
295 		return;
296 	}
297 	printf("\n  %s NIC statistics for port %-2d %s\n",
298 	       nic_stats_border, port_id, nic_stats_border);
299 
300 	printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
301 	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
302 	printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
303 	printf("  RX-nombuf:  %-10"PRIu64"\n", stats.rx_nombuf);
304 	printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
305 	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);
306 
307 	diff_ns = 0;
308 	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
309 		uint64_t ns;
310 
311 		ns = cur_time.tv_sec * NS_PER_SEC;
312 		ns += cur_time.tv_nsec;
313 
314 		if (prev_ns[port_id] != 0)
315 			diff_ns = ns - prev_ns[port_id];
316 		prev_ns[port_id] = ns;
317 	}
318 
319 	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
320 		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
321 	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
322 		(stats.opackets - prev_pkts_tx[port_id]) : 0;
323 	prev_pkts_rx[port_id] = stats.ipackets;
324 	prev_pkts_tx[port_id] = stats.opackets;
325 	mpps_rx = diff_ns > 0 ?
326 		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
327 	mpps_tx = diff_ns > 0 ?
328 		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;
329 
330 	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
331 		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
332 	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
333 		(stats.obytes - prev_bytes_tx[port_id]) : 0;
334 	prev_bytes_rx[port_id] = stats.ibytes;
335 	prev_bytes_tx[port_id] = stats.obytes;
336 	mbps_rx = diff_ns > 0 ?
337 		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
338 	mbps_tx = diff_ns > 0 ?
339 		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;
340 
341 	printf("\n  Throughput (since last show)\n");
342 	printf("  Rx-pps: %12"PRIu64"          Rx-bps: %12"PRIu64"\n  Tx-pps: %12"
343 	       PRIu64"          Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
344 	       mpps_tx, mbps_tx * 8);
345 
346 	if (xstats_display_num > 0)
347 		nic_xstats_display_periodic(port_id);
348 
349 	printf("  %s############################%s\n",
350 	       nic_stats_border, nic_stats_border);
351 }
352 
353 void
354 nic_stats_clear(portid_t port_id)
355 {
356 	int ret;
357 
358 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
359 		print_valid_ports();
360 		return;
361 	}
362 
363 	ret = rte_eth_stats_reset(port_id);
364 	if (ret != 0) {
365 		fprintf(stderr,
366 			"%s: Error: failed to reset stats (port %u): %s",
367 			__func__, port_id, strerror(-ret));
368 		return;
369 	}
370 
371 	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
372 	if (ret != 0) {
373 		if (ret < 0)
374 			ret = -ret;
375 		fprintf(stderr,
376 			"%s: Error: failed to get stats (port %u): %s",
377 			__func__, port_id, strerror(ret));
378 		return;
379 	}
380 	printf("\n  NIC statistics for port %d cleared\n", port_id);
381 }
382 
383 void
384 nic_xstats_display(portid_t port_id)
385 {
386 	struct rte_eth_xstat *xstats;
387 	int cnt_xstats, idx_xstat;
388 	struct rte_eth_xstat_name *xstats_names;
389 
390 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
391 		print_valid_ports();
392 		return;
393 	}
394 	printf("###### NIC extended statistics for port %-2d\n", port_id);
395 	if (!rte_eth_dev_is_valid_port(port_id)) {
396 		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
397 		return;
398 	}
399 
400 	/* Get count */
401 	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
402 	if (cnt_xstats  < 0) {
403 		fprintf(stderr, "Error: Cannot get count of xstats\n");
404 		return;
405 	}
406 
407 	/* Get id-name lookup table */
408 	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
409 	if (xstats_names == NULL) {
410 		fprintf(stderr, "Cannot allocate memory for xstats lookup\n");
411 		return;
412 	}
413 	if (cnt_xstats != rte_eth_xstats_get_names(
414 			port_id, xstats_names, cnt_xstats)) {
415 		fprintf(stderr, "Error: Cannot get xstats lookup\n");
416 		free(xstats_names);
417 		return;
418 	}
419 
420 	/* Get stats themselves */
421 	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
422 	if (xstats == NULL) {
423 		fprintf(stderr, "Cannot allocate memory for xstats\n");
424 		free(xstats_names);
425 		return;
426 	}
427 	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
428 		fprintf(stderr, "Error: Unable to get xstats\n");
429 		free(xstats_names);
430 		free(xstats);
431 		return;
432 	}
433 
434 	/* Display xstats */
435 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
436 		if (xstats_hide_zero && !xstats[idx_xstat].value)
437 			continue;
438 		printf("%s: %"PRIu64"\n",
439 			xstats_names[idx_xstat].name,
440 			xstats[idx_xstat].value);
441 	}
442 	free(xstats_names);
443 	free(xstats);
444 }
445 
446 void
447 nic_xstats_clear(portid_t port_id)
448 {
449 	int ret;
450 
451 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
452 		print_valid_ports();
453 		return;
454 	}
455 
456 	ret = rte_eth_xstats_reset(port_id);
457 	if (ret != 0) {
458 		fprintf(stderr,
459 			"%s: Error: failed to reset xstats (port %u): %s\n",
460 			__func__, port_id, strerror(-ret));
461 		return;
462 	}
463 
464 	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
465 	if (ret != 0) {
466 		if (ret < 0)
467 			ret = -ret;
468 		fprintf(stderr, "%s: Error: failed to get stats (port %u): %s",
469 			__func__, port_id, strerror(ret));
470 		return;
471 	}
472 }
473 
474 static const char *
475 get_queue_state_name(uint8_t queue_state)
476 {
477 	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
478 		return "stopped";
479 	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
480 		return "started";
481 	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
482 		return "hairpin";
483 	else
484 		return "unknown";
485 }
486 
487 void
488 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
489 {
490 	struct rte_eth_burst_mode mode;
491 	struct rte_eth_rxq_info qinfo;
492 	int32_t rc;
493 	static const char *info_border = "*********************";
494 
495 	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
496 	if (rc != 0) {
497 		fprintf(stderr,
498 			"Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n",
499 			port_id, queue_id, strerror(-rc), rc);
500 		return;
501 	}
502 
503 	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
504 	       info_border, port_id, queue_id, info_border);
505 
506 	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
507 	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
508 	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
509 	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
510 	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
511 	printf("\nRX drop packets: %s",
512 		(qinfo.conf.rx_drop_en != 0) ? "on" : "off");
513 	printf("\nRX deferred start: %s",
514 		(qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
515 	printf("\nRX scattered packets: %s",
516 		(qinfo.scattered_rx != 0) ? "on" : "off");
517 	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
518 	if (qinfo.rx_buf_size != 0)
519 		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
520 	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
521 
522 	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
523 		printf("\nBurst mode: %s%s",
524 		       mode.info,
525 		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
526 				" (per queue)" : "");
527 
528 	printf("\n");
529 }
530 
531 void
532 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
533 {
534 	struct rte_eth_burst_mode mode;
535 	struct rte_eth_txq_info qinfo;
536 	int32_t rc;
537 	static const char *info_border = "*********************";
538 
539 	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
540 	if (rc != 0) {
541 		fprintf(stderr,
542 			"Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n",
543 			port_id, queue_id, strerror(-rc), rc);
544 		return;
545 	}
546 
547 	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
548 	       info_border, port_id, queue_id, info_border);
549 
550 	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
551 	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
552 	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
553 	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
554 	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
555 	printf("\nTX deferred start: %s",
556 		(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
557 	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
558 	printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));
559 
560 	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
561 		printf("\nBurst mode: %s%s",
562 		       mode.info,
563 		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
564 				" (per queue)" : "");
565 
566 	printf("\n");
567 }
568 
569 static int bus_match_all(const struct rte_bus *bus, const void *data)
570 {
571 	RTE_SET_USED(bus);
572 	RTE_SET_USED(data);
573 	return 0;
574 }
575 
576 static void
577 device_infos_display_speeds(uint32_t speed_capa)
578 {
579 	printf("\n\tDevice speed capability:");
580 	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
581 		printf(" Autonegotiate (all speeds)");
582 	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
583 		printf(" Disable autonegotiate (fixed speed)  ");
584 	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
585 		printf(" 10 Mbps half-duplex  ");
586 	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
587 		printf(" 10 Mbps full-duplex  ");
588 	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
589 		printf(" 100 Mbps half-duplex  ");
590 	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
591 		printf(" 100 Mbps full-duplex  ");
592 	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
593 		printf(" 1 Gbps  ");
594 	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
595 		printf(" 2.5 Gbps  ");
596 	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
597 		printf(" 5 Gbps  ");
598 	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
599 		printf(" 10 Gbps  ");
600 	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
601 		printf(" 20 Gbps  ");
602 	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
603 		printf(" 25 Gbps  ");
604 	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
605 		printf(" 40 Gbps  ");
606 	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
607 		printf(" 50 Gbps  ");
608 	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
609 		printf(" 56 Gbps  ");
610 	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
611 		printf(" 100 Gbps  ");
612 	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
613 		printf(" 200 Gbps  ");
614 }
615 
616 void
617 device_infos_display(const char *identifier)
618 {
619 	static const char *info_border = "*********************";
620 	struct rte_bus *start = NULL, *next;
621 	struct rte_dev_iterator dev_iter;
622 	char name[RTE_ETH_NAME_MAX_LEN];
623 	struct rte_ether_addr mac_addr;
624 	struct rte_device *dev;
625 	struct rte_devargs da;
626 	portid_t port_id;
627 	struct rte_eth_dev_info dev_info;
628 	char devstr[128];
629 
630 	memset(&da, 0, sizeof(da));
631 	if (!identifier)
632 		goto skip_parse;
633 
634 	if (rte_devargs_parsef(&da, "%s", identifier)) {
635 		fprintf(stderr, "cannot parse identifier\n");
636 		return;
637 	}
638 
639 skip_parse:
640 	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
641 
642 		start = next;
643 		if (identifier && da.bus != next)
644 			continue;
645 
646 		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
647 		RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
648 
649 			if (!dev->driver)
650 				continue;
651 			/* Check for matching device if identifier is present */
652 			if (identifier &&
653 			    strncmp(da.name, dev->name, strlen(dev->name)))
654 				continue;
655 			printf("\n%s Infos for device %s %s\n",
656 			       info_border, dev->name, info_border);
657 			printf("Bus name: %s", dev->bus->name);
658 			printf("\nDriver name: %s", dev->driver->name);
659 			printf("\nDevargs: %s",
660 			       dev->devargs ? dev->devargs->args : "");
661 			printf("\nConnect to socket: %d", dev->numa_node);
662 			printf("\n");
663 
664 			/* List ports with matching device name */
665 			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
666 				printf("\n\tPort id: %-2d", port_id);
667 				if (eth_macaddr_get_print_err(port_id,
668 							      &mac_addr) == 0)
669 					print_ethaddr("\n\tMAC address: ",
670 						      &mac_addr);
671 				rte_eth_dev_get_name_by_port(port_id, name);
672 				printf("\n\tDevice name: %s", name);
673 				if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
674 					device_infos_display_speeds(dev_info.speed_capa);
675 				printf("\n");
676 			}
677 		}
678 	};
679 	rte_devargs_reset(&da);
680 }
681 
682 static void
683 print_dev_capabilities(uint64_t capabilities)
684 {
685 	uint64_t single_capa;
686 	int begin;
687 	int end;
688 	int bit;
689 
690 	if (capabilities == 0)
691 		return;
692 
693 	begin = __builtin_ctzll(capabilities);
694 	end = sizeof(capabilities) * CHAR_BIT - __builtin_clzll(capabilities);
695 
696 	single_capa = 1ULL << begin;
697 	for (bit = begin; bit < end; bit++) {
698 		if (capabilities & single_capa)
699 			printf(" %s",
700 			       rte_eth_dev_capability_name(single_capa));
701 		single_capa <<= 1;
702 	}
703 }
704 
705 uint64_t
706 str_to_rsstypes(const char *str)
707 {
708 	uint16_t i;
709 
710 	for (i = 0; rss_type_table[i].str != NULL; i++) {
711 		if (strcmp(rss_type_table[i].str, str) == 0)
712 			return rss_type_table[i].rss_type;
713 	}
714 
715 	return 0;
716 }
717 
718 const char *
719 rsstypes_to_str(uint64_t rss_type)
720 {
721 	uint16_t i;
722 
723 	for (i = 0; rss_type_table[i].str != NULL; i++) {
724 		if (rss_type_table[i].rss_type == rss_type)
725 			return rss_type_table[i].str;
726 	}
727 
728 	return NULL;
729 }
730 
731 static void
732 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line)
733 {
734 	uint16_t user_defined_str_len;
735 	uint16_t total_len = 0;
736 	uint16_t str_len = 0;
737 	uint64_t rss_offload;
738 	uint16_t i;
739 
740 	for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) {
741 		rss_offload = RTE_BIT64(i);
742 		if ((offload_types & rss_offload) != 0) {
743 			const char *p = rsstypes_to_str(rss_offload);
744 
745 			user_defined_str_len =
746 				strlen("user-defined-") + (i / 10 + 1);
747 			str_len = p ? strlen(p) : user_defined_str_len;
748 			str_len += 2; /* add two spaces */
749 			if (total_len + str_len >= char_num_per_line) {
750 				total_len = 0;
751 				printf("\n");
752 			}
753 
754 			if (p)
755 				printf("  %s", p);
756 			else
757 				printf("  user-defined-%u", i);
758 			total_len += str_len;
759 		}
760 	}
761 	printf("\n");
762 }
763 
764 void
765 port_infos_display(portid_t port_id)
766 {
767 	struct rte_port *port;
768 	struct rte_ether_addr mac_addr;
769 	struct rte_eth_link link;
770 	struct rte_eth_dev_info dev_info;
771 	int vlan_offload;
772 	struct rte_mempool * mp;
773 	static const char *info_border = "*********************";
774 	uint16_t mtu;
775 	char name[RTE_ETH_NAME_MAX_LEN];
776 	int ret;
777 	char fw_version[ETHDEV_FWVERS_LEN];
778 
779 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
780 		print_valid_ports();
781 		return;
782 	}
783 	port = &ports[port_id];
784 	ret = eth_link_get_nowait_print_err(port_id, &link);
785 	if (ret < 0)
786 		return;
787 
788 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
789 	if (ret != 0)
790 		return;
791 
792 	printf("\n%s Infos for port %-2d %s\n",
793 	       info_border, port_id, info_border);
794 	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
795 		print_ethaddr("MAC address: ", &mac_addr);
796 	rte_eth_dev_get_name_by_port(port_id, name);
797 	printf("\nDevice name: %s", name);
798 	printf("\nDriver name: %s", dev_info.driver_name);
799 
800 	if (rte_eth_dev_fw_version_get(port_id, fw_version,
801 						ETHDEV_FWVERS_LEN) == 0)
802 		printf("\nFirmware-version: %s", fw_version);
803 	else
804 		printf("\nFirmware-version: %s", "not available");
805 
806 	if (dev_info.device->devargs && dev_info.device->devargs->args)
807 		printf("\nDevargs: %s", dev_info.device->devargs->args);
808 	printf("\nConnect to socket: %u", port->socket_id);
809 
810 	if (port_numa[port_id] != NUMA_NO_CONFIG) {
811 		mp = mbuf_pool_find(port_numa[port_id], 0);
812 		if (mp)
813 			printf("\nmemory allocation on the socket: %d",
814 							port_numa[port_id]);
815 	} else
816 		printf("\nmemory allocation on the socket: %u",port->socket_id);
817 
818 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
819 	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
820 	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
821 	       ("full-duplex") : ("half-duplex"));
822 	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
823 	       ("On") : ("Off"));
824 
825 	if (!rte_eth_dev_get_mtu(port_id, &mtu))
826 		printf("MTU: %u\n", mtu);
827 
828 	printf("Promiscuous mode: %s\n",
829 	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
830 	printf("Allmulticast mode: %s\n",
831 	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
832 	printf("Maximum number of MAC addresses: %u\n",
833 	       (unsigned int)(port->dev_info.max_mac_addrs));
834 	printf("Maximum number of MAC addresses of hash filtering: %u\n",
835 	       (unsigned int)(port->dev_info.max_hash_mac_addrs));
836 
837 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
838 	if (vlan_offload >= 0){
839 		printf("VLAN offload: \n");
840 		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
841 			printf("  strip on, ");
842 		else
843 			printf("  strip off, ");
844 
845 		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
846 			printf("filter on, ");
847 		else
848 			printf("filter off, ");
849 
850 		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
851 			printf("extend on, ");
852 		else
853 			printf("extend off, ");
854 
855 		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
856 			printf("qinq strip on\n");
857 		else
858 			printf("qinq strip off\n");
859 	}
860 
861 	if (dev_info.hash_key_size > 0)
862 		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
863 	if (dev_info.reta_size > 0)
864 		printf("Redirection table size: %u\n", dev_info.reta_size);
865 	if (!dev_info.flow_type_rss_offloads)
866 		printf("No RSS offload flow type is supported.\n");
867 	else {
868 		printf("Supported RSS offload flow types:\n");
869 		rss_offload_types_display(dev_info.flow_type_rss_offloads,
870 				TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
871 	}
872 
873 	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
874 	printf("Maximum configurable length of RX packet: %u\n",
875 		dev_info.max_rx_pktlen);
876 	printf("Maximum configurable size of LRO aggregated packet: %u\n",
877 		dev_info.max_lro_pkt_size);
878 	if (dev_info.max_vfs)
879 		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
880 	if (dev_info.max_vmdq_pools)
881 		printf("Maximum number of VMDq pools: %u\n",
882 			dev_info.max_vmdq_pools);
883 
884 	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
885 	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
886 	printf("Max possible number of RXDs per queue: %hu\n",
887 		dev_info.rx_desc_lim.nb_max);
888 	printf("Min possible number of RXDs per queue: %hu\n",
889 		dev_info.rx_desc_lim.nb_min);
890 	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
891 
892 	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
893 	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
894 	printf("Max possible number of TXDs per queue: %hu\n",
895 		dev_info.tx_desc_lim.nb_max);
896 	printf("Min possible number of TXDs per queue: %hu\n",
897 		dev_info.tx_desc_lim.nb_min);
898 	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
899 	printf("Max segment number per packet: %hu\n",
900 		dev_info.tx_desc_lim.nb_seg_max);
901 	printf("Max segment number per MTU/TSO: %hu\n",
902 		dev_info.tx_desc_lim.nb_mtu_seg_max);
903 
904 	printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa);
905 	print_dev_capabilities(dev_info.dev_capa);
906 	printf(" )\n");
907 	/* Show switch info only if valid switch domain and port id is set */
908 	if (dev_info.switch_info.domain_id !=
909 		RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
910 		if (dev_info.switch_info.name)
911 			printf("Switch name: %s\n", dev_info.switch_info.name);
912 
913 		printf("Switch domain Id: %u\n",
914 			dev_info.switch_info.domain_id);
915 		printf("Switch Port Id: %u\n",
916 			dev_info.switch_info.port_id);
917 		if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0)
918 			printf("Switch Rx domain: %u\n",
919 			       dev_info.switch_info.rx_domain);
920 	}
921 }
922 
923 void
924 port_summary_header_display(void)
925 {
926 	uint16_t port_number;
927 
928 	port_number = rte_eth_dev_count_avail();
929 	printf("Number of available ports: %i\n", port_number);
930 	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
931 			"Driver", "Status", "Link");
932 }
933 
934 void
935 port_summary_display(portid_t port_id)
936 {
937 	struct rte_ether_addr mac_addr;
938 	struct rte_eth_link link;
939 	struct rte_eth_dev_info dev_info;
940 	char name[RTE_ETH_NAME_MAX_LEN];
941 	int ret;
942 
943 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
944 		print_valid_ports();
945 		return;
946 	}
947 
948 	ret = eth_link_get_nowait_print_err(port_id, &link);
949 	if (ret < 0)
950 		return;
951 
952 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
953 	if (ret != 0)
954 		return;
955 
956 	rte_eth_dev_get_name_by_port(port_id, name);
957 	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
958 	if (ret != 0)
959 		return;
960 
961 	printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n",
962 		port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name,
963 		dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
964 		rte_eth_link_speed_to_str(link.link_speed));
965 }
966 
967 void
968 port_eeprom_display(portid_t port_id)
969 {
970 	struct rte_dev_eeprom_info einfo;
971 	int ret;
972 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
973 		print_valid_ports();
974 		return;
975 	}
976 
977 	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
978 	if (len_eeprom < 0) {
979 		switch (len_eeprom) {
980 		case -ENODEV:
981 			fprintf(stderr, "port index %d invalid\n", port_id);
982 			break;
983 		case -ENOTSUP:
984 			fprintf(stderr, "operation not supported by device\n");
985 			break;
986 		case -EIO:
987 			fprintf(stderr, "device is removed\n");
988 			break;
989 		default:
990 			fprintf(stderr, "Unable to get EEPROM: %d\n",
991 				len_eeprom);
992 			break;
993 		}
994 		return;
995 	}
996 
997 	einfo.offset = 0;
998 	einfo.length = len_eeprom;
999 	einfo.data = calloc(1, len_eeprom);
1000 	if (!einfo.data) {
1001 		fprintf(stderr,
1002 			"Allocation of port %u eeprom data failed\n",
1003 			port_id);
1004 		return;
1005 	}
1006 
1007 	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
1008 	if (ret != 0) {
1009 		switch (ret) {
1010 		case -ENODEV:
1011 			fprintf(stderr, "port index %d invalid\n", port_id);
1012 			break;
1013 		case -ENOTSUP:
1014 			fprintf(stderr, "operation not supported by device\n");
1015 			break;
1016 		case -EIO:
1017 			fprintf(stderr, "device is removed\n");
1018 			break;
1019 		default:
1020 			fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
1021 			break;
1022 		}
1023 		free(einfo.data);
1024 		return;
1025 	}
1026 	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
1027 	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
1028 	free(einfo.data);
1029 }
1030 
1031 void
1032 port_module_eeprom_display(portid_t port_id)
1033 {
1034 	struct rte_eth_dev_module_info minfo;
1035 	struct rte_dev_eeprom_info einfo;
1036 	int ret;
1037 
1038 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
1039 		print_valid_ports();
1040 		return;
1041 	}
1042 
1043 
1044 	ret = rte_eth_dev_get_module_info(port_id, &minfo);
1045 	if (ret != 0) {
1046 		switch (ret) {
1047 		case -ENODEV:
1048 			fprintf(stderr, "port index %d invalid\n", port_id);
1049 			break;
1050 		case -ENOTSUP:
1051 			fprintf(stderr, "operation not supported by device\n");
1052 			break;
1053 		case -EIO:
1054 			fprintf(stderr, "device is removed\n");
1055 			break;
1056 		default:
1057 			fprintf(stderr, "Unable to get module EEPROM: %d\n",
1058 				ret);
1059 			break;
1060 		}
1061 		return;
1062 	}
1063 
1064 	einfo.offset = 0;
1065 	einfo.length = minfo.eeprom_len;
1066 	einfo.data = calloc(1, minfo.eeprom_len);
1067 	if (!einfo.data) {
1068 		fprintf(stderr,
1069 			"Allocation of port %u eeprom data failed\n",
1070 			port_id);
1071 		return;
1072 	}
1073 
1074 	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
1075 	if (ret != 0) {
1076 		switch (ret) {
1077 		case -ENODEV:
1078 			fprintf(stderr, "port index %d invalid\n", port_id);
1079 			break;
1080 		case -ENOTSUP:
1081 			fprintf(stderr, "operation not supported by device\n");
1082 			break;
1083 		case -EIO:
1084 			fprintf(stderr, "device is removed\n");
1085 			break;
1086 		default:
1087 			fprintf(stderr, "Unable to get module EEPROM: %d\n",
1088 				ret);
1089 			break;
1090 		}
1091 		free(einfo.data);
1092 		return;
1093 	}
1094 
1095 	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
1096 	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
1097 	free(einfo.data);
1098 }
1099 
1100 int
1101 port_id_is_invalid(portid_t port_id, enum print_warning warning)
1102 {
1103 	uint16_t pid;
1104 
1105 	if (port_id == (portid_t)RTE_PORT_ALL)
1106 		return 0;
1107 
1108 	RTE_ETH_FOREACH_DEV(pid)
1109 		if (port_id == pid)
1110 			return 0;
1111 
1112 	if (warning == ENABLED_WARN)
1113 		fprintf(stderr, "Invalid port %d\n", port_id);
1114 
1115 	return 1;
1116 }
1117 
1118 void print_valid_ports(void)
1119 {
1120 	portid_t pid;
1121 
1122 	printf("The valid ports array is [");
1123 	RTE_ETH_FOREACH_DEV(pid) {
1124 		printf(" %d", pid);
1125 	}
1126 	printf(" ]\n");
1127 }
1128 
1129 static int
1130 vlan_id_is_invalid(uint16_t vlan_id)
1131 {
1132 	if (vlan_id < 4096)
1133 		return 0;
1134 	fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id);
1135 	return 1;
1136 }
1137 
1138 static uint32_t
1139 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
1140 {
1141 	uint32_t overhead_len;
1142 
1143 	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
1144 		overhead_len = max_rx_pktlen - max_mtu;
1145 	else
1146 		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1147 
1148 	return overhead_len;
1149 }
1150 
1151 static int
1152 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu)
1153 {
1154 	struct rte_eth_dev_info dev_info;
1155 	uint32_t overhead_len;
1156 	uint32_t frame_size;
1157 	int ret;
1158 
1159 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1160 	if (ret != 0)
1161 		return ret;
1162 
1163 	if (mtu < dev_info.min_mtu) {
1164 		fprintf(stderr,
1165 			"MTU (%u) < device min MTU (%u) for port_id %u\n",
1166 			mtu, dev_info.min_mtu, port_id);
1167 		return -EINVAL;
1168 	}
1169 	if (mtu > dev_info.max_mtu) {
1170 		fprintf(stderr,
1171 			"MTU (%u) > device max MTU (%u) for port_id %u\n",
1172 			mtu, dev_info.max_mtu, port_id);
1173 		return -EINVAL;
1174 	}
1175 
1176 	overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1177 			dev_info.max_mtu);
1178 	frame_size = mtu + overhead_len;
1179 	if (frame_size > dev_info.max_rx_pktlen) {
1180 		fprintf(stderr,
1181 			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
1182 			frame_size, dev_info.max_rx_pktlen, port_id);
1183 		return -EINVAL;
1184 	}
1185 
1186 	return 0;
1187 }
1188 
1189 void
1190 port_mtu_set(portid_t port_id, uint16_t mtu)
1191 {
1192 	struct rte_port *port = &ports[port_id];
1193 	int diag;
1194 
1195 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1196 		return;
1197 
1198 	diag = eth_dev_validate_mtu(port_id, mtu);
1199 	if (diag != 0)
1200 		return;
1201 
1202 	if (port->need_reconfig == 0) {
1203 		diag = rte_eth_dev_set_mtu(port_id, mtu);
1204 		if (diag != 0) {
1205 			fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
1206 			return;
1207 		}
1208 	}
1209 
1210 	port->dev_conf.rxmode.mtu = mtu;
1211 }
1212 
1213 /* Generic flow management functions. */
1214 
1215 static struct port_flow_tunnel *
1216 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
1217 {
1218 	struct port_flow_tunnel *flow_tunnel;
1219 
1220 	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1221 		if (flow_tunnel->id == port_tunnel_id)
1222 			goto out;
1223 	}
1224 	flow_tunnel = NULL;
1225 
1226 out:
1227 	return flow_tunnel;
1228 }
1229 
1230 const char *
1231 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
1232 {
1233 	const char *type;
1234 	switch (tunnel->type) {
1235 	default:
1236 		type = "unknown";
1237 		break;
1238 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1239 		type = "vxlan";
1240 		break;
1241 	case RTE_FLOW_ITEM_TYPE_GRE:
1242 		type = "gre";
1243 		break;
1244 	case RTE_FLOW_ITEM_TYPE_NVGRE:
1245 		type = "nvgre";
1246 		break;
1247 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1248 		type = "geneve";
1249 		break;
1250 	}
1251 
1252 	return type;
1253 }
1254 
1255 struct port_flow_tunnel *
1256 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
1257 {
1258 	struct rte_port *port = &ports[port_id];
1259 	struct port_flow_tunnel *flow_tunnel;
1260 
1261 	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1262 		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
1263 			goto out;
1264 	}
1265 	flow_tunnel = NULL;
1266 
1267 out:
1268 	return flow_tunnel;
1269 }
1270 
1271 void port_flow_tunnel_list(portid_t port_id)
1272 {
1273 	struct rte_port *port = &ports[port_id];
1274 	struct port_flow_tunnel *flt;
1275 
1276 	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1277 		printf("port %u tunnel #%u type=%s",
1278 			port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
1279 		if (flt->tunnel.tun_id)
1280 			printf(" id=%" PRIu64, flt->tunnel.tun_id);
1281 		printf("\n");
1282 	}
1283 }
1284 
1285 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
1286 {
1287 	struct rte_port *port = &ports[port_id];
1288 	struct port_flow_tunnel *flt;
1289 
1290 	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1291 		if (flt->id == tunnel_id)
1292 			break;
1293 	}
1294 	if (flt) {
1295 		LIST_REMOVE(flt, chain);
1296 		free(flt);
1297 		printf("port %u: flow tunnel #%u destroyed\n",
1298 			port_id, tunnel_id);
1299 	}
1300 }
1301 
1302 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
1303 {
1304 	struct rte_port *port = &ports[port_id];
1305 	enum rte_flow_item_type	type;
1306 	struct port_flow_tunnel *flt;
1307 
1308 	if (!strcmp(ops->type, "vxlan"))
1309 		type = RTE_FLOW_ITEM_TYPE_VXLAN;
1310 	else if (!strcmp(ops->type, "gre"))
1311 		type = RTE_FLOW_ITEM_TYPE_GRE;
1312 	else if (!strcmp(ops->type, "nvgre"))
1313 		type = RTE_FLOW_ITEM_TYPE_NVGRE;
1314 	else if (!strcmp(ops->type, "geneve"))
1315 		type = RTE_FLOW_ITEM_TYPE_GENEVE;
1316 	else {
1317 		fprintf(stderr, "cannot offload \"%s\" tunnel type\n",
1318 			ops->type);
1319 		return;
1320 	}
1321 	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1322 		if (flt->tunnel.type == type)
1323 			break;
1324 	}
1325 	if (!flt) {
1326 		flt = calloc(1, sizeof(*flt));
1327 		if (!flt) {
1328 			fprintf(stderr, "failed to allocate port flt object\n");
1329 			return;
1330 		}
1331 		flt->tunnel.type = type;
1332 		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
1333 				  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
1334 		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
1335 	}
1336 	printf("port %d: flow tunnel #%u type %s\n",
1337 		port_id, flt->id, ops->type);
1338 }
1339 
1340 /** Generate a port_flow entry from attributes/pattern/actions. */
1341 static struct port_flow *
1342 port_flow_new(const struct rte_flow_attr *attr,
1343 	      const struct rte_flow_item *pattern,
1344 	      const struct rte_flow_action *actions,
1345 	      struct rte_flow_error *error)
1346 {
1347 	const struct rte_flow_conv_rule rule = {
1348 		.attr_ro = attr,
1349 		.pattern_ro = pattern,
1350 		.actions_ro = actions,
1351 	};
1352 	struct port_flow *pf;
1353 	int ret;
1354 
1355 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1356 	if (ret < 0)
1357 		return NULL;
1358 	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1359 	if (!pf) {
1360 		rte_flow_error_set
1361 			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1362 			 "calloc() failed");
1363 		return NULL;
1364 	}
1365 	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1366 			  error) >= 0)
1367 		return pf;
1368 	free(pf);
1369 	return NULL;
1370 }
1371 
1372 /** Print a message out of a flow error. */
1373 static int
1374 port_flow_complain(struct rte_flow_error *error)
1375 {
1376 	static const char *const errstrlist[] = {
1377 		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1378 		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1379 		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1380 		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1381 		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1382 		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1383 		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1384 		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1385 		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1386 		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1387 		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1388 		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1389 		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1390 		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1391 		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1392 		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1393 		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1394 	};
1395 	const char *errstr;
1396 	char buf[32];
1397 	int err = rte_errno;
1398 
1399 	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1400 	    !errstrlist[error->type])
1401 		errstr = "unknown type";
1402 	else
1403 		errstr = errstrlist[error->type];
1404 	fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n",
1405 		__func__, error->type, errstr,
1406 		error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1407 					 error->cause), buf) : "",
1408 		error->message ? error->message : "(no stated reason)",
1409 		rte_strerror(err));
1410 
1411 	switch (error->type) {
1412 	case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER:
1413 		fprintf(stderr, "The status suggests the use of \"transfer\" "
1414 				"as the possible cause of the failure. Make "
1415 				"sure that the flow in question and its "
1416 				"indirect components (if any) are managed "
1417 				"via \"transfer\" proxy port. Use command "
1418 				"\"show port (port_id) flow transfer proxy\" "
1419 				"to figure out the proxy port ID\n");
1420 		break;
1421 	default:
1422 		break;
1423 	}
1424 
1425 	return -err;
1426 }
1427 
1428 static void
1429 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line)
1430 {
1431 	uint16_t total_len = 0;
1432 	uint16_t str_len;
1433 	uint16_t i;
1434 
1435 	if (rss_types == 0)
1436 		return;
1437 
1438 	for (i = 0; rss_type_table[i].str; i++) {
1439 		if (rss_type_table[i].rss_type == 0)
1440 			continue;
1441 
1442 		if ((rss_types & rss_type_table[i].rss_type) ==
1443 					rss_type_table[i].rss_type) {
1444 			/* Contain two spaces */
1445 			str_len = strlen(rss_type_table[i].str) + 2;
1446 			if (total_len + str_len > char_num_per_line) {
1447 				printf("\n");
1448 				total_len = 0;
1449 			}
1450 			printf("  %s", rss_type_table[i].str);
1451 			total_len += str_len;
1452 		}
1453 	}
1454 	printf("\n");
1455 }
1456 
1457 static void
1458 rss_config_display(struct rte_flow_action_rss *rss_conf)
1459 {
1460 	uint8_t i;
1461 
1462 	if (rss_conf == NULL) {
1463 		fprintf(stderr, "Invalid rule\n");
1464 		return;
1465 	}
1466 
1467 	printf("RSS:\n"
1468 	       " queues:");
1469 	if (rss_conf->queue_num == 0)
1470 		printf(" none");
1471 	for (i = 0; i < rss_conf->queue_num; i++)
1472 		printf(" %d", rss_conf->queue[i]);
1473 	printf("\n");
1474 
1475 	printf(" function: ");
1476 	switch (rss_conf->func) {
1477 	case RTE_ETH_HASH_FUNCTION_DEFAULT:
1478 		printf("default\n");
1479 		break;
1480 	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1481 		printf("toeplitz\n");
1482 		break;
1483 	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1484 		printf("simple_xor\n");
1485 		break;
1486 	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1487 		printf("symmetric_toeplitz\n");
1488 		break;
1489 	default:
1490 		printf("Unknown function\n");
1491 		return;
1492 	}
1493 
1494 	printf(" types:\n");
1495 	if (rss_conf->types == 0) {
1496 		printf("  none\n");
1497 		return;
1498 	}
1499 	rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
1500 }
1501 
1502 static struct port_indirect_action *
1503 action_get_by_id(portid_t port_id, uint32_t id)
1504 {
1505 	struct rte_port *port;
1506 	struct port_indirect_action **ppia;
1507 	struct port_indirect_action *pia = NULL;
1508 
1509 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1510 	    port_id == (portid_t)RTE_PORT_ALL)
1511 		return NULL;
1512 	port = &ports[port_id];
1513 	ppia = &port->actions_list;
1514 	while (*ppia) {
1515 		if ((*ppia)->id == id) {
1516 			pia = *ppia;
1517 			break;
1518 		}
1519 		ppia = &(*ppia)->next;
1520 	}
1521 	if (!pia)
1522 		fprintf(stderr,
1523 			"Failed to find indirect action #%u on port %u\n",
1524 			id, port_id);
1525 	return pia;
1526 }
1527 
1528 static int
1529 action_alloc(portid_t port_id, uint32_t id,
1530 	     struct port_indirect_action **action)
1531 {
1532 	struct rte_port *port;
1533 	struct port_indirect_action **ppia;
1534 	struct port_indirect_action *pia = NULL;
1535 
1536 	*action = NULL;
1537 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1538 	    port_id == (portid_t)RTE_PORT_ALL)
1539 		return -EINVAL;
1540 	port = &ports[port_id];
1541 	if (id == UINT32_MAX) {
1542 		/* taking first available ID */
1543 		if (port->actions_list) {
1544 			if (port->actions_list->id == UINT32_MAX - 1) {
1545 				fprintf(stderr,
1546 					"Highest indirect action ID is already assigned, delete it first\n");
1547 				return -ENOMEM;
1548 			}
1549 			id = port->actions_list->id + 1;
1550 		} else {
1551 			id = 0;
1552 		}
1553 	}
1554 	pia = calloc(1, sizeof(*pia));
1555 	if (!pia) {
1556 		fprintf(stderr,
1557 			"Allocation of port %u indirect action failed\n",
1558 			port_id);
1559 		return -ENOMEM;
1560 	}
1561 	ppia = &port->actions_list;
1562 	while (*ppia && (*ppia)->id > id)
1563 		ppia = &(*ppia)->next;
1564 	if (*ppia && (*ppia)->id == id) {
1565 		fprintf(stderr,
1566 			"Indirect action #%u is already assigned, delete it first\n",
1567 			id);
1568 		free(pia);
1569 		return -EINVAL;
1570 	}
1571 	pia->next = *ppia;
1572 	pia->id = id;
1573 	*ppia = pia;
1574 	*action = pia;
1575 	return 0;
1576 }
1577 
1578 static int
1579 template_alloc(uint32_t id, struct port_template **template,
1580 	       struct port_template **list)
1581 {
1582 	struct port_template *lst = *list;
1583 	struct port_template **ppt;
1584 	struct port_template *pt = NULL;
1585 
1586 	*template = NULL;
1587 	if (id == UINT32_MAX) {
1588 		/* taking first available ID */
1589 		if (lst) {
1590 			if (lst->id == UINT32_MAX - 1) {
1591 				printf("Highest template ID is already"
1592 				" assigned, delete it first\n");
1593 				return -ENOMEM;
1594 			}
1595 			id = lst->id + 1;
1596 		} else {
1597 			id = 0;
1598 		}
1599 	}
1600 	pt = calloc(1, sizeof(*pt));
1601 	if (!pt) {
1602 		printf("Allocation of port template failed\n");
1603 		return -ENOMEM;
1604 	}
1605 	ppt = list;
1606 	while (*ppt && (*ppt)->id > id)
1607 		ppt = &(*ppt)->next;
1608 	if (*ppt && (*ppt)->id == id) {
1609 		printf("Template #%u is already assigned,"
1610 			" delete it first\n", id);
1611 		free(pt);
1612 		return -EINVAL;
1613 	}
1614 	pt->next = *ppt;
1615 	pt->id = id;
1616 	*ppt = pt;
1617 	*template = pt;
1618 	return 0;
1619 }
1620 
1621 static int
1622 table_alloc(uint32_t id, struct port_table **table,
1623 	    struct port_table **list)
1624 {
1625 	struct port_table *lst = *list;
1626 	struct port_table **ppt;
1627 	struct port_table *pt = NULL;
1628 
1629 	*table = NULL;
1630 	if (id == UINT32_MAX) {
1631 		/* taking first available ID */
1632 		if (lst) {
1633 			if (lst->id == UINT32_MAX - 1) {
1634 				printf("Highest table ID is already"
1635 				" assigned, delete it first\n");
1636 				return -ENOMEM;
1637 			}
1638 			id = lst->id + 1;
1639 		} else {
1640 			id = 0;
1641 		}
1642 	}
1643 	pt = calloc(1, sizeof(*pt));
1644 	if (!pt) {
1645 		printf("Allocation of table failed\n");
1646 		return -ENOMEM;
1647 	}
1648 	ppt = list;
1649 	while (*ppt && (*ppt)->id > id)
1650 		ppt = &(*ppt)->next;
1651 	if (*ppt && (*ppt)->id == id) {
1652 		printf("Table #%u is already assigned,"
1653 			" delete it first\n", id);
1654 		free(pt);
1655 		return -EINVAL;
1656 	}
1657 	pt->next = *ppt;
1658 	pt->id = id;
1659 	*ppt = pt;
1660 	*table = pt;
1661 	return 0;
1662 }
1663 
1664 /** Get info about flow management resources. */
1665 int
1666 port_flow_get_info(portid_t port_id)
1667 {
1668 	struct rte_flow_port_info port_info;
1669 	struct rte_flow_queue_info queue_info;
1670 	struct rte_flow_error error;
1671 
1672 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1673 	    port_id == (portid_t)RTE_PORT_ALL)
1674 		return -EINVAL;
1675 	/* Poisoning to make sure PMDs update it in case of error. */
1676 	memset(&error, 0x99, sizeof(error));
1677 	memset(&port_info, 0, sizeof(port_info));
1678 	memset(&queue_info, 0, sizeof(queue_info));
1679 	if (rte_flow_info_get(port_id, &port_info, &queue_info, &error))
1680 		return port_flow_complain(&error);
1681 	printf("Flow engine resources on port %u:\n"
1682 	       "Number of queues: %d\n"
1683 		   "Size of queues: %d\n"
1684 	       "Number of counters: %d\n"
1685 	       "Number of aging objects: %d\n"
1686 	       "Number of meter actions: %d\n",
1687 	       port_id, port_info.max_nb_queues,
1688 		   queue_info.max_size,
1689 	       port_info.max_nb_counters,
1690 	       port_info.max_nb_aging_objects,
1691 	       port_info.max_nb_meters);
1692 	return 0;
1693 }
1694 
1695 /** Configure flow management resources. */
1696 int
1697 port_flow_configure(portid_t port_id,
1698 	const struct rte_flow_port_attr *port_attr,
1699 	uint16_t nb_queue,
1700 	const struct rte_flow_queue_attr *queue_attr)
1701 {
1702 	struct rte_port *port;
1703 	struct rte_flow_error error;
1704 	const struct rte_flow_queue_attr *attr_list[nb_queue];
1705 	int std_queue;
1706 
1707 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1708 	    port_id == (portid_t)RTE_PORT_ALL)
1709 		return -EINVAL;
1710 	port = &ports[port_id];
1711 	port->queue_nb = nb_queue;
1712 	port->queue_sz = queue_attr->size;
1713 	for (std_queue = 0; std_queue < nb_queue; std_queue++)
1714 		attr_list[std_queue] = queue_attr;
1715 	/* Poisoning to make sure PMDs update it in case of error. */
1716 	memset(&error, 0x66, sizeof(error));
1717 	if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error))
1718 		return port_flow_complain(&error);
1719 	printf("Configure flows on port %u: "
1720 	       "number of queues %d with %d elements\n",
1721 	       port_id, nb_queue, queue_attr->size);
1722 	return 0;
1723 }
1724 
1725 /** Create indirect action */
1726 int
1727 port_action_handle_create(portid_t port_id, uint32_t id,
1728 			  const struct rte_flow_indir_action_conf *conf,
1729 			  const struct rte_flow_action *action)
1730 {
1731 	struct port_indirect_action *pia;
1732 	int ret;
1733 	struct rte_flow_error error;
1734 
1735 	ret = action_alloc(port_id, id, &pia);
1736 	if (ret)
1737 		return ret;
1738 	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
1739 		struct rte_flow_action_age *age =
1740 			(struct rte_flow_action_age *)(uintptr_t)(action->conf);
1741 
1742 		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
1743 		age->context = &pia->age_type;
1744 	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
1745 		struct rte_flow_action_conntrack *ct =
1746 		(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);
1747 
1748 		memcpy(ct, &conntrack_context, sizeof(*ct));
1749 	}
1750 	/* Poisoning to make sure PMDs update it in case of error. */
1751 	memset(&error, 0x22, sizeof(error));
1752 	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
1753 						    &error);
1754 	if (!pia->handle) {
1755 		uint32_t destroy_id = pia->id;
1756 		port_action_handle_destroy(port_id, 1, &destroy_id);
1757 		return port_flow_complain(&error);
1758 	}
1759 	pia->type = action->type;
1760 	printf("Indirect action #%u created\n", pia->id);
1761 	return 0;
1762 }
1763 
1764 /** Destroy indirect action */
1765 int
1766 port_action_handle_destroy(portid_t port_id,
1767 			   uint32_t n,
1768 			   const uint32_t *actions)
1769 {
1770 	struct rte_port *port;
1771 	struct port_indirect_action **tmp;
1772 	uint32_t c = 0;
1773 	int ret = 0;
1774 
1775 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1776 	    port_id == (portid_t)RTE_PORT_ALL)
1777 		return -EINVAL;
1778 	port = &ports[port_id];
1779 	tmp = &port->actions_list;
1780 	while (*tmp) {
1781 		uint32_t i;
1782 
1783 		for (i = 0; i != n; ++i) {
1784 			struct rte_flow_error error;
1785 			struct port_indirect_action *pia = *tmp;
1786 
1787 			if (actions[i] != pia->id)
1788 				continue;
1789 			/*
1790 			 * Poisoning to make sure PMDs update it in case
1791 			 * of error.
1792 			 */
1793 			memset(&error, 0x33, sizeof(error));
1794 
1795 			if (pia->handle && rte_flow_action_handle_destroy(
1796 					port_id, pia->handle, &error)) {
1797 				ret = port_flow_complain(&error);
1798 				continue;
1799 			}
1800 			*tmp = pia->next;
1801 			printf("Indirect action #%u destroyed\n", pia->id);
1802 			free(pia);
1803 			break;
1804 		}
1805 		if (i == n)
1806 			tmp = &(*tmp)->next;
1807 		++c;
1808 	}
1809 	return ret;
1810 }
1811 
1812 int
1813 port_action_handle_flush(portid_t port_id)
1814 {
1815 	struct rte_port *port;
1816 	struct port_indirect_action **tmp;
1817 	int ret = 0;
1818 
1819 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1820 	    port_id == (portid_t)RTE_PORT_ALL)
1821 		return -EINVAL;
1822 	port = &ports[port_id];
1823 	tmp = &port->actions_list;
1824 	while (*tmp != NULL) {
1825 		struct rte_flow_error error;
1826 		struct port_indirect_action *pia = *tmp;
1827 
1828 		/* Poisoning to make sure PMDs update it in case of error. */
1829 		memset(&error, 0x44, sizeof(error));
1830 		if (pia->handle != NULL &&
1831 		    rte_flow_action_handle_destroy
1832 					(port_id, pia->handle, &error) != 0) {
1833 			printf("Indirect action #%u not destroyed\n", pia->id);
1834 			ret = port_flow_complain(&error);
1835 			tmp = &pia->next;
1836 		} else {
1837 			*tmp = pia->next;
1838 			free(pia);
1839 		}
1840 	}
1841 	return ret;
1842 }
1843 
1844 /** Get indirect action by port + id */
1845 struct rte_flow_action_handle *
1846 port_action_handle_get_by_id(portid_t port_id, uint32_t id)
1847 {
1848 
1849 	struct port_indirect_action *pia = action_get_by_id(port_id, id);
1850 
1851 	return (pia) ? pia->handle : NULL;
1852 }
1853 
1854 /** Update indirect action */
1855 int
1856 port_action_handle_update(portid_t port_id, uint32_t id,
1857 			  const struct rte_flow_action *action)
1858 {
1859 	struct rte_flow_error error;
1860 	struct rte_flow_action_handle *action_handle;
1861 	struct port_indirect_action *pia;
1862 	const void *update;
1863 
1864 	action_handle = port_action_handle_get_by_id(port_id, id);
1865 	if (!action_handle)
1866 		return -EINVAL;
1867 	pia = action_get_by_id(port_id, id);
1868 	if (!pia)
1869 		return -EINVAL;
1870 	switch (pia->type) {
1871 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
1872 		update = action->conf;
1873 		break;
1874 	default:
1875 		update = action;
1876 		break;
1877 	}
1878 	if (rte_flow_action_handle_update(port_id, action_handle, update,
1879 					  &error)) {
1880 		return port_flow_complain(&error);
1881 	}
1882 	printf("Indirect action #%u updated\n", id);
1883 	return 0;
1884 }
1885 
1886 int
1887 port_action_handle_query(portid_t port_id, uint32_t id)
1888 {
1889 	struct rte_flow_error error;
1890 	struct port_indirect_action *pia;
1891 	union {
1892 		struct rte_flow_query_count count;
1893 		struct rte_flow_query_age age;
1894 		struct rte_flow_action_conntrack ct;
1895 	} query;
1896 
1897 	pia = action_get_by_id(port_id, id);
1898 	if (!pia)
1899 		return -EINVAL;
1900 	switch (pia->type) {
1901 	case RTE_FLOW_ACTION_TYPE_AGE:
1902 	case RTE_FLOW_ACTION_TYPE_COUNT:
1903 		break;
1904 	default:
1905 		fprintf(stderr,
1906 			"Indirect action %u (type: %d) on port %u doesn't support query\n",
1907 			id, pia->type, port_id);
1908 		return -ENOTSUP;
1909 	}
1910 	/* Poisoning to make sure PMDs update it in case of error. */
1911 	memset(&error, 0x55, sizeof(error));
1912 	memset(&query, 0, sizeof(query));
1913 	if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error))
1914 		return port_flow_complain(&error);
1915 	switch (pia->type) {
1916 	case RTE_FLOW_ACTION_TYPE_AGE:
1917 		printf("Indirect AGE action:\n"
1918 		       " aged: %u\n"
1919 		       " sec_since_last_hit_valid: %u\n"
1920 		       " sec_since_last_hit: %" PRIu32 "\n",
1921 		       query.age.aged,
1922 		       query.age.sec_since_last_hit_valid,
1923 		       query.age.sec_since_last_hit);
1924 		break;
1925 	case RTE_FLOW_ACTION_TYPE_COUNT:
1926 		printf("Indirect COUNT action:\n"
1927 		       " hits_set: %u\n"
1928 		       " bytes_set: %u\n"
1929 		       " hits: %" PRIu64 "\n"
1930 		       " bytes: %" PRIu64 "\n",
1931 		       query.count.hits_set,
1932 		       query.count.bytes_set,
1933 		       query.count.hits,
1934 		       query.count.bytes);
1935 		break;
1936 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
1937 		printf("Conntrack Context:\n"
1938 		       "  Peer: %u, Flow dir: %s, Enable: %u\n"
1939 		       "  Live: %u, SACK: %u, CACK: %u\n"
1940 		       "  Packet dir: %s, Liberal: %u, State: %u\n"
1941 		       "  Factor: %u, Retrans: %u, TCP flags: %u\n"
1942 		       "  Last Seq: %u, Last ACK: %u\n"
1943 		       "  Last Win: %u, Last End: %u\n",
1944 		       query.ct.peer_port,
1945 		       query.ct.is_original_dir ? "Original" : "Reply",
1946 		       query.ct.enable, query.ct.live_connection,
1947 		       query.ct.selective_ack, query.ct.challenge_ack_passed,
1948 		       query.ct.last_direction ? "Original" : "Reply",
1949 		       query.ct.liberal_mode, query.ct.state,
1950 		       query.ct.max_ack_window, query.ct.retransmission_limit,
1951 		       query.ct.last_index, query.ct.last_seq,
1952 		       query.ct.last_ack, query.ct.last_window,
1953 		       query.ct.last_end);
1954 		printf("  Original Dir:\n"
1955 		       "    scale: %u, fin: %u, ack seen: %u\n"
1956 		       " unacked data: %u\n    Sent end: %u,"
1957 		       "    Reply end: %u, Max win: %u, Max ACK: %u\n",
1958 		       query.ct.original_dir.scale,
1959 		       query.ct.original_dir.close_initiated,
1960 		       query.ct.original_dir.last_ack_seen,
1961 		       query.ct.original_dir.data_unacked,
1962 		       query.ct.original_dir.sent_end,
1963 		       query.ct.original_dir.reply_end,
1964 		       query.ct.original_dir.max_win,
1965 		       query.ct.original_dir.max_ack);
1966 		printf("  Reply Dir:\n"
1967 		       "    scale: %u, fin: %u, ack seen: %u\n"
1968 		       " unacked data: %u\n    Sent end: %u,"
1969 		       "    Reply end: %u, Max win: %u, Max ACK: %u\n",
1970 		       query.ct.reply_dir.scale,
1971 		       query.ct.reply_dir.close_initiated,
1972 		       query.ct.reply_dir.last_ack_seen,
1973 		       query.ct.reply_dir.data_unacked,
1974 		       query.ct.reply_dir.sent_end,
1975 		       query.ct.reply_dir.reply_end,
1976 		       query.ct.reply_dir.max_win,
1977 		       query.ct.reply_dir.max_ack);
1978 		break;
1979 	default:
1980 		fprintf(stderr,
1981 			"Indirect action %u (type: %d) on port %u doesn't support query\n",
1982 			id, pia->type, port_id);
1983 		break;
1984 	}
1985 	return 0;
1986 }
1987 
1988 static struct port_flow_tunnel *
1989 port_flow_tunnel_offload_cmd_prep(portid_t port_id,
1990 				  const struct rte_flow_item *pattern,
1991 				  const struct rte_flow_action *actions,
1992 				  const struct tunnel_ops *tunnel_ops)
1993 {
1994 	int ret;
1995 	struct rte_port *port;
1996 	struct port_flow_tunnel *pft;
1997 	struct rte_flow_error error;
1998 
1999 	port = &ports[port_id];
2000 	pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
2001 	if (!pft) {
2002 		fprintf(stderr, "failed to locate port flow tunnel #%u\n",
2003 			tunnel_ops->id);
2004 		return NULL;
2005 	}
2006 	if (tunnel_ops->actions) {
2007 		uint32_t num_actions;
2008 		const struct rte_flow_action *aptr;
2009 
2010 		ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
2011 						&pft->pmd_actions,
2012 						&pft->num_pmd_actions,
2013 						&error);
2014 		if (ret) {
2015 			port_flow_complain(&error);
2016 			return NULL;
2017 		}
2018 		for (aptr = actions, num_actions = 1;
2019 		     aptr->type != RTE_FLOW_ACTION_TYPE_END;
2020 		     aptr++, num_actions++);
2021 		pft->actions = malloc(
2022 				(num_actions +  pft->num_pmd_actions) *
2023 				sizeof(actions[0]));
2024 		if (!pft->actions) {
2025 			rte_flow_tunnel_action_decap_release(
2026 					port_id, pft->actions,
2027 					pft->num_pmd_actions, &error);
2028 			return NULL;
2029 		}
2030 		rte_memcpy(pft->actions, pft->pmd_actions,
2031 			   pft->num_pmd_actions * sizeof(actions[0]));
2032 		rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
2033 			   num_actions * sizeof(actions[0]));
2034 	}
2035 	if (tunnel_ops->items) {
2036 		uint32_t num_items;
2037 		const struct rte_flow_item *iptr;
2038 
2039 		ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
2040 					    &pft->pmd_items,
2041 					    &pft->num_pmd_items,
2042 					    &error);
2043 		if (ret) {
2044 			port_flow_complain(&error);
2045 			return NULL;
2046 		}
2047 		for (iptr = pattern, num_items = 1;
2048 		     iptr->type != RTE_FLOW_ITEM_TYPE_END;
2049 		     iptr++, num_items++);
2050 		pft->items = malloc((num_items + pft->num_pmd_items) *
2051 				    sizeof(pattern[0]));
2052 		if (!pft->items) {
2053 			rte_flow_tunnel_item_release(
2054 					port_id, pft->pmd_items,
2055 					pft->num_pmd_items, &error);
2056 			return NULL;
2057 		}
2058 		rte_memcpy(pft->items, pft->pmd_items,
2059 			   pft->num_pmd_items * sizeof(pattern[0]));
2060 		rte_memcpy(pft->items + pft->num_pmd_items, pattern,
2061 			   num_items * sizeof(pattern[0]));
2062 	}
2063 
2064 	return pft;
2065 }
2066 
2067 static void
2068 port_flow_tunnel_offload_cmd_release(portid_t port_id,
2069 				     const struct tunnel_ops *tunnel_ops,
2070 				     struct port_flow_tunnel *pft)
2071 {
2072 	struct rte_flow_error error;
2073 
2074 	if (tunnel_ops->actions) {
2075 		free(pft->actions);
2076 		rte_flow_tunnel_action_decap_release(
2077 			port_id, pft->pmd_actions,
2078 			pft->num_pmd_actions, &error);
2079 		pft->actions = NULL;
2080 		pft->pmd_actions = NULL;
2081 	}
2082 	if (tunnel_ops->items) {
2083 		free(pft->items);
2084 		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
2085 					     pft->num_pmd_items,
2086 					     &error);
2087 		pft->items = NULL;
2088 		pft->pmd_items = NULL;
2089 	}
2090 }
2091 
2092 /** Add port meter policy */
2093 int
2094 port_meter_policy_add(portid_t port_id, uint32_t policy_id,
2095 			const struct rte_flow_action *actions)
2096 {
2097 	struct rte_mtr_error error;
2098 	const struct rte_flow_action *act = actions;
2099 	const struct rte_flow_action *start;
2100 	struct rte_mtr_meter_policy_params policy;
2101 	uint32_t i = 0, act_n;
2102 	int ret;
2103 
2104 	for (i = 0; i < RTE_COLORS; i++) {
2105 		for (act_n = 0, start = act;
2106 			act->type != RTE_FLOW_ACTION_TYPE_END; act++)
2107 			act_n++;
2108 		if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
2109 			policy.actions[i] = start;
2110 		else
2111 			policy.actions[i] = NULL;
2112 		act++;
2113 	}
2114 	ret = rte_mtr_meter_policy_add(port_id,
2115 			policy_id,
2116 			&policy, &error);
2117 	if (ret)
2118 		print_mtr_err_msg(&error);
2119 	return ret;
2120 }
2121 
2122 /** Validate flow rule. */
2123 int
2124 port_flow_validate(portid_t port_id,
2125 		   const struct rte_flow_attr *attr,
2126 		   const struct rte_flow_item *pattern,
2127 		   const struct rte_flow_action *actions,
2128 		   const struct tunnel_ops *tunnel_ops)
2129 {
2130 	struct rte_flow_error error;
2131 	struct port_flow_tunnel *pft = NULL;
2132 	int ret;
2133 
2134 	/* Poisoning to make sure PMDs update it in case of error. */
2135 	memset(&error, 0x11, sizeof(error));
2136 	if (tunnel_ops->enabled) {
2137 		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
2138 							actions, tunnel_ops);
2139 		if (!pft)
2140 			return -ENOENT;
2141 		if (pft->items)
2142 			pattern = pft->items;
2143 		if (pft->actions)
2144 			actions = pft->actions;
2145 	}
2146 	ret = rte_flow_validate(port_id, attr, pattern, actions, &error);
2147 	if (tunnel_ops->enabled)
2148 		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
2149 	if (ret)
2150 		return port_flow_complain(&error);
2151 	printf("Flow rule validated\n");
2152 	return 0;
2153 }
2154 
2155 /** Return age action structure if exists, otherwise NULL. */
2156 static struct rte_flow_action_age *
2157 age_action_get(const struct rte_flow_action *actions)
2158 {
2159 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2160 		switch (actions->type) {
2161 		case RTE_FLOW_ACTION_TYPE_AGE:
2162 			return (struct rte_flow_action_age *)
2163 				(uintptr_t)actions->conf;
2164 		default:
2165 			break;
2166 		}
2167 	}
2168 	return NULL;
2169 }
2170 
2171 /** Create pattern template */
2172 int
2173 port_flow_pattern_template_create(portid_t port_id, uint32_t id,
2174 				  const struct rte_flow_pattern_template_attr *attr,
2175 				  const struct rte_flow_item *pattern)
2176 {
2177 	struct rte_port *port;
2178 	struct port_template *pit;
2179 	int ret;
2180 	struct rte_flow_error error;
2181 
2182 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2183 	    port_id == (portid_t)RTE_PORT_ALL)
2184 		return -EINVAL;
2185 	port = &ports[port_id];
2186 	ret = template_alloc(id, &pit, &port->pattern_templ_list);
2187 	if (ret)
2188 		return ret;
2189 	/* Poisoning to make sure PMDs update it in case of error. */
2190 	memset(&error, 0x22, sizeof(error));
2191 	pit->template.pattern_template = rte_flow_pattern_template_create(port_id,
2192 						attr, pattern, &error);
2193 	if (!pit->template.pattern_template) {
2194 		uint32_t destroy_id = pit->id;
2195 		port_flow_pattern_template_destroy(port_id, 1, &destroy_id);
2196 		return port_flow_complain(&error);
2197 	}
2198 	printf("Pattern template #%u created\n", pit->id);
2199 	return 0;
2200 }
2201 
2202 /** Destroy pattern template */
2203 int
2204 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n,
2205 				   const uint32_t *template)
2206 {
2207 	struct rte_port *port;
2208 	struct port_template **tmp;
2209 	uint32_t c = 0;
2210 	int ret = 0;
2211 
2212 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2213 	    port_id == (portid_t)RTE_PORT_ALL)
2214 		return -EINVAL;
2215 	port = &ports[port_id];
2216 	tmp = &port->pattern_templ_list;
2217 	while (*tmp) {
2218 		uint32_t i;
2219 
2220 		for (i = 0; i != n; ++i) {
2221 			struct rte_flow_error error;
2222 			struct port_template *pit = *tmp;
2223 
2224 			if (template[i] != pit->id)
2225 				continue;
2226 			/*
2227 			 * Poisoning to make sure PMDs update it in case
2228 			 * of error.
2229 			 */
2230 			memset(&error, 0x33, sizeof(error));
2231 
2232 			if (pit->template.pattern_template &&
2233 			    rte_flow_pattern_template_destroy(port_id,
2234 							   pit->template.pattern_template,
2235 							   &error)) {
2236 				ret = port_flow_complain(&error);
2237 				continue;
2238 			}
2239 			*tmp = pit->next;
2240 			printf("Pattern template #%u destroyed\n", pit->id);
2241 			free(pit);
2242 			break;
2243 		}
2244 		if (i == n)
2245 			tmp = &(*tmp)->next;
2246 		++c;
2247 	}
2248 	return ret;
2249 }
2250 
2251 /** Create actions template */
2252 int
2253 port_flow_actions_template_create(portid_t port_id, uint32_t id,
2254 				  const struct rte_flow_actions_template_attr *attr,
2255 				  const struct rte_flow_action *actions,
2256 				  const struct rte_flow_action *masks)
2257 {
2258 	struct rte_port *port;
2259 	struct port_template *pat;
2260 	int ret;
2261 	struct rte_flow_error error;
2262 
2263 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2264 	    port_id == (portid_t)RTE_PORT_ALL)
2265 		return -EINVAL;
2266 	port = &ports[port_id];
2267 	ret = template_alloc(id, &pat, &port->actions_templ_list);
2268 	if (ret)
2269 		return ret;
2270 	/* Poisoning to make sure PMDs update it in case of error. */
2271 	memset(&error, 0x22, sizeof(error));
2272 	pat->template.actions_template = rte_flow_actions_template_create(port_id,
2273 						attr, actions, masks, &error);
2274 	if (!pat->template.actions_template) {
2275 		uint32_t destroy_id = pat->id;
2276 		port_flow_actions_template_destroy(port_id, 1, &destroy_id);
2277 		return port_flow_complain(&error);
2278 	}
2279 	printf("Actions template #%u created\n", pat->id);
2280 	return 0;
2281 }
2282 
2283 /** Destroy actions template */
2284 int
2285 port_flow_actions_template_destroy(portid_t port_id, uint32_t n,
2286 				   const uint32_t *template)
2287 {
2288 	struct rte_port *port;
2289 	struct port_template **tmp;
2290 	uint32_t c = 0;
2291 	int ret = 0;
2292 
2293 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2294 	    port_id == (portid_t)RTE_PORT_ALL)
2295 		return -EINVAL;
2296 	port = &ports[port_id];
2297 	tmp = &port->actions_templ_list;
2298 	while (*tmp) {
2299 		uint32_t i;
2300 
2301 		for (i = 0; i != n; ++i) {
2302 			struct rte_flow_error error;
2303 			struct port_template *pat = *tmp;
2304 
2305 			if (template[i] != pat->id)
2306 				continue;
2307 			/*
2308 			 * Poisoning to make sure PMDs update it in case
2309 			 * of error.
2310 			 */
2311 			memset(&error, 0x33, sizeof(error));
2312 
2313 			if (pat->template.actions_template &&
2314 			    rte_flow_actions_template_destroy(port_id,
2315 					pat->template.actions_template, &error)) {
2316 				ret = port_flow_complain(&error);
2317 				continue;
2318 			}
2319 			*tmp = pat->next;
2320 			printf("Actions template #%u destroyed\n", pat->id);
2321 			free(pat);
2322 			break;
2323 		}
2324 		if (i == n)
2325 			tmp = &(*tmp)->next;
2326 		++c;
2327 	}
2328 	return ret;
2329 }
2330 
2331 /** Create table */
2332 int
2333 port_flow_template_table_create(portid_t port_id, uint32_t id,
2334 		const struct rte_flow_template_table_attr *table_attr,
2335 		uint32_t nb_pattern_templates, uint32_t *pattern_templates,
2336 		uint32_t nb_actions_templates, uint32_t *actions_templates)
2337 {
2338 	struct rte_port *port;
2339 	struct port_table *pt;
2340 	struct port_template *temp = NULL;
2341 	int ret;
2342 	uint32_t i;
2343 	struct rte_flow_error error;
2344 	struct rte_flow_pattern_template
2345 			*flow_pattern_templates[nb_pattern_templates];
2346 	struct rte_flow_actions_template
2347 			*flow_actions_templates[nb_actions_templates];
2348 
2349 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2350 	    port_id == (portid_t)RTE_PORT_ALL)
2351 		return -EINVAL;
2352 	port = &ports[port_id];
2353 	for (i = 0; i < nb_pattern_templates; ++i) {
2354 		bool found = false;
2355 		temp = port->pattern_templ_list;
2356 		while (temp) {
2357 			if (pattern_templates[i] == temp->id) {
2358 				flow_pattern_templates[i] =
2359 					temp->template.pattern_template;
2360 				found = true;
2361 				break;
2362 			}
2363 			temp = temp->next;
2364 		}
2365 		if (!found) {
2366 			printf("Pattern template #%u is invalid\n",
2367 			       pattern_templates[i]);
2368 			return -EINVAL;
2369 		}
2370 	}
2371 	for (i = 0; i < nb_actions_templates; ++i) {
2372 		bool found = false;
2373 		temp = port->actions_templ_list;
2374 		while (temp) {
2375 			if (actions_templates[i] == temp->id) {
2376 				flow_actions_templates[i] =
2377 					temp->template.actions_template;
2378 				found = true;
2379 				break;
2380 			}
2381 			temp = temp->next;
2382 		}
2383 		if (!found) {
2384 			printf("Actions template #%u is invalid\n",
2385 			       actions_templates[i]);
2386 			return -EINVAL;
2387 		}
2388 	}
2389 	ret = table_alloc(id, &pt, &port->table_list);
2390 	if (ret)
2391 		return ret;
2392 	/* Poisoning to make sure PMDs update it in case of error. */
2393 	memset(&error, 0x22, sizeof(error));
2394 	pt->table = rte_flow_template_table_create(port_id, table_attr,
2395 		      flow_pattern_templates, nb_pattern_templates,
2396 		      flow_actions_templates, nb_actions_templates,
2397 		      &error);
2398 
2399 	if (!pt->table) {
2400 		uint32_t destroy_id = pt->id;
2401 		port_flow_template_table_destroy(port_id, 1, &destroy_id);
2402 		return port_flow_complain(&error);
2403 	}
2404 	pt->nb_pattern_templates = nb_pattern_templates;
2405 	pt->nb_actions_templates = nb_actions_templates;
2406 	printf("Template table #%u created\n", pt->id);
2407 	return 0;
2408 }
2409 
2410 /** Destroy table */
2411 int
2412 port_flow_template_table_destroy(portid_t port_id,
2413 				 uint32_t n, const uint32_t *table)
2414 {
2415 	struct rte_port *port;
2416 	struct port_table **tmp;
2417 	uint32_t c = 0;
2418 	int ret = 0;
2419 
2420 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2421 	    port_id == (portid_t)RTE_PORT_ALL)
2422 		return -EINVAL;
2423 	port = &ports[port_id];
2424 	tmp = &port->table_list;
2425 	while (*tmp) {
2426 		uint32_t i;
2427 
2428 		for (i = 0; i != n; ++i) {
2429 			struct rte_flow_error error;
2430 			struct port_table *pt = *tmp;
2431 
2432 			if (table[i] != pt->id)
2433 				continue;
2434 			/*
2435 			 * Poisoning to make sure PMDs update it in case
2436 			 * of error.
2437 			 */
2438 			memset(&error, 0x33, sizeof(error));
2439 
2440 			if (pt->table &&
2441 			    rte_flow_template_table_destroy(port_id,
2442 							    pt->table,
2443 							    &error)) {
2444 				ret = port_flow_complain(&error);
2445 				continue;
2446 			}
2447 			*tmp = pt->next;
2448 			printf("Template table #%u destroyed\n", pt->id);
2449 			free(pt);
2450 			break;
2451 		}
2452 		if (i == n)
2453 			tmp = &(*tmp)->next;
2454 		++c;
2455 	}
2456 	return ret;
2457 }
2458 
2459 /** Enqueue create flow rule operation. */
2460 int
2461 port_queue_flow_create(portid_t port_id, queueid_t queue_id,
2462 		       bool postpone, uint32_t table_id,
2463 		       uint32_t pattern_idx, uint32_t actions_idx,
2464 		       const struct rte_flow_item *pattern,
2465 		       const struct rte_flow_action *actions)
2466 {
2467 	struct rte_flow_op_attr op_attr = { .postpone = postpone };
2468 	struct rte_flow *flow;
2469 	struct rte_port *port;
2470 	struct port_flow *pf;
2471 	struct port_table *pt;
2472 	uint32_t id = 0;
2473 	bool found;
2474 	struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL };
2475 	struct rte_flow_action_age *age = age_action_get(actions);
2476 
2477 	port = &ports[port_id];
2478 	if (port->flow_list) {
2479 		if (port->flow_list->id == UINT32_MAX) {
2480 			printf("Highest rule ID is already assigned,"
2481 			       " delete it first");
2482 			return -ENOMEM;
2483 		}
2484 		id = port->flow_list->id + 1;
2485 	}
2486 
2487 	if (queue_id >= port->queue_nb) {
2488 		printf("Queue #%u is invalid\n", queue_id);
2489 		return -EINVAL;
2490 	}
2491 
2492 	found = false;
2493 	pt = port->table_list;
2494 	while (pt) {
2495 		if (table_id == pt->id) {
2496 			found = true;
2497 			break;
2498 		}
2499 		pt = pt->next;
2500 	}
2501 	if (!found) {
2502 		printf("Table #%u is invalid\n", table_id);
2503 		return -EINVAL;
2504 	}
2505 
2506 	if (pattern_idx >= pt->nb_pattern_templates) {
2507 		printf("Pattern template index #%u is invalid,"
2508 		       " %u templates present in the table\n",
2509 		       pattern_idx, pt->nb_pattern_templates);
2510 		return -EINVAL;
2511 	}
2512 	if (actions_idx >= pt->nb_actions_templates) {
2513 		printf("Actions template index #%u is invalid,"
2514 		       " %u templates present in the table\n",
2515 		       actions_idx, pt->nb_actions_templates);
2516 		return -EINVAL;
2517 	}
2518 
2519 	pf = port_flow_new(NULL, pattern, actions, &error);
2520 	if (!pf)
2521 		return port_flow_complain(&error);
2522 	if (age) {
2523 		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
2524 		age->context = &pf->age_type;
2525 	}
2526 	/* Poisoning to make sure PMDs update it in case of error. */
2527 	memset(&error, 0x11, sizeof(error));
2528 	flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table,
2529 		pattern, pattern_idx, actions, actions_idx, NULL, &error);
2530 	if (!flow) {
2531 		uint32_t flow_id = pf->id;
2532 		port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id);
2533 		return port_flow_complain(&error);
2534 	}
2535 
2536 	pf->next = port->flow_list;
2537 	pf->id = id;
2538 	pf->flow = flow;
2539 	port->flow_list = pf;
2540 	printf("Flow rule #%u creation enqueued\n", pf->id);
2541 	return 0;
2542 }
2543 
2544 /** Enqueue number of destroy flow rules operations. */
2545 int
2546 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id,
2547 			bool postpone, uint32_t n, const uint32_t *rule)
2548 {
2549 	struct rte_flow_op_attr op_attr = { .postpone = postpone };
2550 	struct rte_port *port;
2551 	struct port_flow **tmp;
2552 	uint32_t c = 0;
2553 	int ret = 0;
2554 
2555 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2556 	    port_id == (portid_t)RTE_PORT_ALL)
2557 		return -EINVAL;
2558 	port = &ports[port_id];
2559 
2560 	if (queue_id >= port->queue_nb) {
2561 		printf("Queue #%u is invalid\n", queue_id);
2562 		return -EINVAL;
2563 	}
2564 
2565 	tmp = &port->flow_list;
2566 	while (*tmp) {
2567 		uint32_t i;
2568 
2569 		for (i = 0; i != n; ++i) {
2570 			struct rte_flow_error error;
2571 			struct port_flow *pf = *tmp;
2572 
2573 			if (rule[i] != pf->id)
2574 				continue;
2575 			/*
2576 			 * Poisoning to make sure PMD
2577 			 * update it in case of error.
2578 			 */
2579 			memset(&error, 0x33, sizeof(error));
2580 			if (rte_flow_async_destroy(port_id, queue_id, &op_attr,
2581 						   pf->flow, NULL, &error)) {
2582 				ret = port_flow_complain(&error);
2583 				continue;
2584 			}
2585 			printf("Flow rule #%u destruction enqueued\n", pf->id);
2586 			*tmp = pf->next;
2587 			free(pf);
2588 			break;
2589 		}
2590 		if (i == n)
2591 			tmp = &(*tmp)->next;
2592 		++c;
2593 	}
2594 	return ret;
2595 }
2596 
2597 /** Enqueue indirect action create operation. */
2598 int
2599 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id,
2600 				bool postpone, uint32_t id,
2601 				const struct rte_flow_indir_action_conf *conf,
2602 				const struct rte_flow_action *action)
2603 {
2604 	const struct rte_flow_op_attr attr = { .postpone = postpone};
2605 	struct rte_port *port;
2606 	struct port_indirect_action *pia;
2607 	int ret;
2608 	struct rte_flow_error error;
2609 
2610 	ret = action_alloc(port_id, id, &pia);
2611 	if (ret)
2612 		return ret;
2613 
2614 	port = &ports[port_id];
2615 	if (queue_id >= port->queue_nb) {
2616 		printf("Queue #%u is invalid\n", queue_id);
2617 		return -EINVAL;
2618 	}
2619 
2620 	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
2621 		struct rte_flow_action_age *age =
2622 			(struct rte_flow_action_age *)(uintptr_t)(action->conf);
2623 
2624 		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
2625 		age->context = &pia->age_type;
2626 	}
2627 	/* Poisoning to make sure PMDs update it in case of error. */
2628 	memset(&error, 0x88, sizeof(error));
2629 	pia->handle = rte_flow_async_action_handle_create(port_id, queue_id,
2630 					&attr, conf, action, NULL, &error);
2631 	if (!pia->handle) {
2632 		uint32_t destroy_id = pia->id;
2633 		port_queue_action_handle_destroy(port_id, queue_id,
2634 						 postpone, 1, &destroy_id);
2635 		return port_flow_complain(&error);
2636 	}
2637 	pia->type = action->type;
2638 	printf("Indirect action #%u creation queued\n", pia->id);
2639 	return 0;
2640 }
2641 
2642 /** Enqueue indirect action destroy operation. */
2643 int
2644 port_queue_action_handle_destroy(portid_t port_id,
2645 				 uint32_t queue_id, bool postpone,
2646 				 uint32_t n, const uint32_t *actions)
2647 {
2648 	const struct rte_flow_op_attr attr = { .postpone = postpone};
2649 	struct rte_port *port;
2650 	struct port_indirect_action **tmp;
2651 	uint32_t c = 0;
2652 	int ret = 0;
2653 
2654 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2655 	    port_id == (portid_t)RTE_PORT_ALL)
2656 		return -EINVAL;
2657 	port = &ports[port_id];
2658 
2659 	if (queue_id >= port->queue_nb) {
2660 		printf("Queue #%u is invalid\n", queue_id);
2661 		return -EINVAL;
2662 	}
2663 
2664 	tmp = &port->actions_list;
2665 	while (*tmp) {
2666 		uint32_t i;
2667 
2668 		for (i = 0; i != n; ++i) {
2669 			struct rte_flow_error error;
2670 			struct port_indirect_action *pia = *tmp;
2671 
2672 			if (actions[i] != pia->id)
2673 				continue;
2674 			/*
2675 			 * Poisoning to make sure PMDs update it in case
2676 			 * of error.
2677 			 */
2678 			memset(&error, 0x99, sizeof(error));
2679 
2680 			if (pia->handle &&
2681 			    rte_flow_async_action_handle_destroy(port_id,
2682 				queue_id, &attr, pia->handle, NULL, &error)) {
2683 				ret = port_flow_complain(&error);
2684 				continue;
2685 			}
2686 			*tmp = pia->next;
2687 			printf("Indirect action #%u destruction queued\n",
2688 			       pia->id);
2689 			free(pia);
2690 			break;
2691 		}
2692 		if (i == n)
2693 			tmp = &(*tmp)->next;
2694 		++c;
2695 	}
2696 	return ret;
2697 }
2698 
2699 /** Enqueue indirect action update operation. */
2700 int
2701 port_queue_action_handle_update(portid_t port_id,
2702 				uint32_t queue_id, bool postpone, uint32_t id,
2703 				const struct rte_flow_action *action)
2704 {
2705 	const struct rte_flow_op_attr attr = { .postpone = postpone};
2706 	struct rte_port *port;
2707 	struct rte_flow_error error;
2708 	struct rte_flow_action_handle *action_handle;
2709 
2710 	action_handle = port_action_handle_get_by_id(port_id, id);
2711 	if (!action_handle)
2712 		return -EINVAL;
2713 
2714 	port = &ports[port_id];
2715 	if (queue_id >= port->queue_nb) {
2716 		printf("Queue #%u is invalid\n", queue_id);
2717 		return -EINVAL;
2718 	}
2719 
2720 	if (rte_flow_async_action_handle_update(port_id, queue_id, &attr,
2721 				    action_handle, action, NULL, &error)) {
2722 		return port_flow_complain(&error);
2723 	}
2724 	printf("Indirect action #%u update queued\n", id);
2725 	return 0;
2726 }
2727 
2728 /** Push all the queue operations in the queue to the NIC. */
2729 int
2730 port_queue_flow_push(portid_t port_id, queueid_t queue_id)
2731 {
2732 	struct rte_port *port;
2733 	struct rte_flow_error error;
2734 	int ret = 0;
2735 
2736 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2737 	    port_id == (portid_t)RTE_PORT_ALL)
2738 		return -EINVAL;
2739 	port = &ports[port_id];
2740 
2741 	if (queue_id >= port->queue_nb) {
2742 		printf("Queue #%u is invalid\n", queue_id);
2743 		return -EINVAL;
2744 	}
2745 
2746 	memset(&error, 0x55, sizeof(error));
2747 	ret = rte_flow_push(port_id, queue_id, &error);
2748 	if (ret < 0) {
2749 		printf("Failed to push operations in the queue\n");
2750 		return -EINVAL;
2751 	}
2752 	printf("Queue #%u operations pushed\n", queue_id);
2753 	return ret;
2754 }
2755 
2756 /** Pull queue operation results from the queue. */
2757 int
2758 port_queue_flow_pull(portid_t port_id, queueid_t queue_id)
2759 {
2760 	struct rte_port *port;
2761 	struct rte_flow_op_result *res;
2762 	struct rte_flow_error error;
2763 	int ret = 0;
2764 	int success = 0;
2765 	int i;
2766 
2767 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2768 	    port_id == (portid_t)RTE_PORT_ALL)
2769 		return -EINVAL;
2770 	port = &ports[port_id];
2771 
2772 	if (queue_id >= port->queue_nb) {
2773 		printf("Queue #%u is invalid\n", queue_id);
2774 		return -EINVAL;
2775 	}
2776 
2777 	res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result));
2778 	if (!res) {
2779 		printf("Failed to allocate memory for pulled results\n");
2780 		return -ENOMEM;
2781 	}
2782 
2783 	memset(&error, 0x66, sizeof(error));
2784 	ret = rte_flow_pull(port_id, queue_id, res,
2785 				 port->queue_sz, &error);
2786 	if (ret < 0) {
2787 		printf("Failed to pull a operation results\n");
2788 		free(res);
2789 		return -EINVAL;
2790 	}
2791 
2792 	for (i = 0; i < ret; i++) {
2793 		if (res[i].status == RTE_FLOW_OP_SUCCESS)
2794 			success++;
2795 	}
2796 	printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n",
2797 	       queue_id, ret, ret - success, success);
2798 	free(res);
2799 	return ret;
2800 }
2801 
2802 /** Create flow rule. */
2803 int
2804 port_flow_create(portid_t port_id,
2805 		 const struct rte_flow_attr *attr,
2806 		 const struct rte_flow_item *pattern,
2807 		 const struct rte_flow_action *actions,
2808 		 const struct tunnel_ops *tunnel_ops)
2809 {
2810 	struct rte_flow *flow;
2811 	struct rte_port *port;
2812 	struct port_flow *pf;
2813 	uint32_t id = 0;
2814 	struct rte_flow_error error;
2815 	struct port_flow_tunnel *pft = NULL;
2816 	struct rte_flow_action_age *age = age_action_get(actions);
2817 
2818 	port = &ports[port_id];
2819 	if (port->flow_list) {
2820 		if (port->flow_list->id == UINT32_MAX) {
2821 			fprintf(stderr,
2822 				"Highest rule ID is already assigned, delete it first");
2823 			return -ENOMEM;
2824 		}
2825 		id = port->flow_list->id + 1;
2826 	}
2827 	if (tunnel_ops->enabled) {
2828 		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
2829 							actions, tunnel_ops);
2830 		if (!pft)
2831 			return -ENOENT;
2832 		if (pft->items)
2833 			pattern = pft->items;
2834 		if (pft->actions)
2835 			actions = pft->actions;
2836 	}
2837 	pf = port_flow_new(attr, pattern, actions, &error);
2838 	if (!pf)
2839 		return port_flow_complain(&error);
2840 	if (age) {
2841 		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
2842 		age->context = &pf->age_type;
2843 	}
2844 	/* Poisoning to make sure PMDs update it in case of error. */
2845 	memset(&error, 0x22, sizeof(error));
2846 	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
2847 	if (!flow) {
2848 		if (tunnel_ops->enabled)
2849 			port_flow_tunnel_offload_cmd_release(port_id,
2850 							     tunnel_ops, pft);
2851 		free(pf);
2852 		return port_flow_complain(&error);
2853 	}
2854 	pf->next = port->flow_list;
2855 	pf->id = id;
2856 	pf->flow = flow;
2857 	port->flow_list = pf;
2858 	if (tunnel_ops->enabled)
2859 		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
2860 	printf("Flow rule #%u created\n", pf->id);
2861 	return 0;
2862 }
2863 
2864 /** Destroy a number of flow rules. */
2865 int
2866 port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
2867 {
2868 	struct rte_port *port;
2869 	struct port_flow **tmp;
2870 	uint32_t c = 0;
2871 	int ret = 0;
2872 
2873 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2874 	    port_id == (portid_t)RTE_PORT_ALL)
2875 		return -EINVAL;
2876 	port = &ports[port_id];
2877 	tmp = &port->flow_list;
2878 	while (*tmp) {
2879 		uint32_t i;
2880 
2881 		for (i = 0; i != n; ++i) {
2882 			struct rte_flow_error error;
2883 			struct port_flow *pf = *tmp;
2884 
2885 			if (rule[i] != pf->id)
2886 				continue;
2887 			/*
2888 			 * Poisoning to make sure PMDs update it in case
2889 			 * of error.
2890 			 */
2891 			memset(&error, 0x33, sizeof(error));
2892 			if (rte_flow_destroy(port_id, pf->flow, &error)) {
2893 				ret = port_flow_complain(&error);
2894 				continue;
2895 			}
2896 			printf("Flow rule #%u destroyed\n", pf->id);
2897 			*tmp = pf->next;
2898 			free(pf);
2899 			break;
2900 		}
2901 		if (i == n)
2902 			tmp = &(*tmp)->next;
2903 		++c;
2904 	}
2905 	return ret;
2906 }
2907 
2908 /** Remove all flow rules. */
2909 int
2910 port_flow_flush(portid_t port_id)
2911 {
2912 	struct rte_flow_error error;
2913 	struct rte_port *port;
2914 	int ret = 0;
2915 
2916 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2917 		port_id == (portid_t)RTE_PORT_ALL)
2918 		return -EINVAL;
2919 
2920 	port = &ports[port_id];
2921 
2922 	if (port->flow_list == NULL)
2923 		return ret;
2924 
2925 	/* Poisoning to make sure PMDs update it in case of error. */
2926 	memset(&error, 0x44, sizeof(error));
2927 	if (rte_flow_flush(port_id, &error)) {
2928 		port_flow_complain(&error);
2929 	}
2930 
2931 	while (port->flow_list) {
2932 		struct port_flow *pf = port->flow_list->next;
2933 
2934 		free(port->flow_list);
2935 		port->flow_list = pf;
2936 	}
2937 	return ret;
2938 }
2939 
2940 /** Dump flow rules. */
2941 int
2942 port_flow_dump(portid_t port_id, bool dump_all, uint32_t rule_id,
2943 		const char *file_name)
2944 {
2945 	int ret = 0;
2946 	FILE *file = stdout;
2947 	struct rte_flow_error error;
2948 	struct rte_port *port;
2949 	struct port_flow *pflow;
2950 	struct rte_flow *tmpFlow = NULL;
2951 	bool found = false;
2952 
2953 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2954 		port_id == (portid_t)RTE_PORT_ALL)
2955 		return -EINVAL;
2956 
2957 	if (!dump_all) {
2958 		port = &ports[port_id];
2959 		pflow = port->flow_list;
2960 		while (pflow) {
2961 			if (rule_id != pflow->id) {
2962 				pflow = pflow->next;
2963 			} else {
2964 				tmpFlow = pflow->flow;
2965 				if (tmpFlow)
2966 					found = true;
2967 				break;
2968 			}
2969 		}
2970 		if (found == false) {
2971 			fprintf(stderr, "Failed to dump to flow %d\n", rule_id);
2972 			return -EINVAL;
2973 		}
2974 	}
2975 
2976 	if (file_name && strlen(file_name)) {
2977 		file = fopen(file_name, "w");
2978 		if (!file) {
2979 			fprintf(stderr, "Failed to create file %s: %s\n",
2980 				file_name, strerror(errno));
2981 			return -errno;
2982 		}
2983 	}
2984 
2985 	if (!dump_all)
2986 		ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error);
2987 	else
2988 		ret = rte_flow_dev_dump(port_id, NULL, file, &error);
2989 	if (ret) {
2990 		port_flow_complain(&error);
2991 		fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret));
2992 	} else
2993 		printf("Flow dump finished\n");
2994 	if (file_name && strlen(file_name))
2995 		fclose(file);
2996 	return ret;
2997 }
2998 
2999 /** Query a flow rule. */
3000 int
3001 port_flow_query(portid_t port_id, uint32_t rule,
3002 		const struct rte_flow_action *action)
3003 {
3004 	struct rte_flow_error error;
3005 	struct rte_port *port;
3006 	struct port_flow *pf;
3007 	const char *name;
3008 	union {
3009 		struct rte_flow_query_count count;
3010 		struct rte_flow_action_rss rss_conf;
3011 		struct rte_flow_query_age age;
3012 	} query;
3013 	int ret;
3014 
3015 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3016 	    port_id == (portid_t)RTE_PORT_ALL)
3017 		return -EINVAL;
3018 	port = &ports[port_id];
3019 	for (pf = port->flow_list; pf; pf = pf->next)
3020 		if (pf->id == rule)
3021 			break;
3022 	if (!pf) {
3023 		fprintf(stderr, "Flow rule #%u not found\n", rule);
3024 		return -ENOENT;
3025 	}
3026 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
3027 			    &name, sizeof(name),
3028 			    (void *)(uintptr_t)action->type, &error);
3029 	if (ret < 0)
3030 		return port_flow_complain(&error);
3031 	switch (action->type) {
3032 	case RTE_FLOW_ACTION_TYPE_COUNT:
3033 	case RTE_FLOW_ACTION_TYPE_RSS:
3034 	case RTE_FLOW_ACTION_TYPE_AGE:
3035 		break;
3036 	default:
3037 		fprintf(stderr, "Cannot query action type %d (%s)\n",
3038 			action->type, name);
3039 		return -ENOTSUP;
3040 	}
3041 	/* Poisoning to make sure PMDs update it in case of error. */
3042 	memset(&error, 0x55, sizeof(error));
3043 	memset(&query, 0, sizeof(query));
3044 	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
3045 		return port_flow_complain(&error);
3046 	switch (action->type) {
3047 	case RTE_FLOW_ACTION_TYPE_COUNT:
3048 		printf("%s:\n"
3049 		       " hits_set: %u\n"
3050 		       " bytes_set: %u\n"
3051 		       " hits: %" PRIu64 "\n"
3052 		       " bytes: %" PRIu64 "\n",
3053 		       name,
3054 		       query.count.hits_set,
3055 		       query.count.bytes_set,
3056 		       query.count.hits,
3057 		       query.count.bytes);
3058 		break;
3059 	case RTE_FLOW_ACTION_TYPE_RSS:
3060 		rss_config_display(&query.rss_conf);
3061 		break;
3062 	case RTE_FLOW_ACTION_TYPE_AGE:
3063 		printf("%s:\n"
3064 		       " aged: %u\n"
3065 		       " sec_since_last_hit_valid: %u\n"
3066 		       " sec_since_last_hit: %" PRIu32 "\n",
3067 		       name,
3068 		       query.age.aged,
3069 		       query.age.sec_since_last_hit_valid,
3070 		       query.age.sec_since_last_hit);
3071 		break;
3072 	default:
3073 		fprintf(stderr,
3074 			"Cannot display result for action type %d (%s)\n",
3075 			action->type, name);
3076 		break;
3077 	}
3078 	return 0;
3079 }
3080 
3081 /** List simply and destroy all aged flows. */
3082 void
3083 port_flow_aged(portid_t port_id, uint8_t destroy)
3084 {
3085 	void **contexts;
3086 	int nb_context, total = 0, idx;
3087 	struct rte_flow_error error;
3088 	enum age_action_context_type *type;
3089 	union {
3090 		struct port_flow *pf;
3091 		struct port_indirect_action *pia;
3092 	} ctx;
3093 
3094 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3095 	    port_id == (portid_t)RTE_PORT_ALL)
3096 		return;
3097 	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
3098 	printf("Port %u total aged flows: %d\n", port_id, total);
3099 	if (total < 0) {
3100 		port_flow_complain(&error);
3101 		return;
3102 	}
3103 	if (total == 0)
3104 		return;
3105 	contexts = malloc(sizeof(void *) * total);
3106 	if (contexts == NULL) {
3107 		fprintf(stderr, "Cannot allocate contexts for aged flow\n");
3108 		return;
3109 	}
3110 	printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
3111 	nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
3112 	if (nb_context != total) {
3113 		fprintf(stderr,
3114 			"Port:%d get aged flows count(%d) != total(%d)\n",
3115 			port_id, nb_context, total);
3116 		free(contexts);
3117 		return;
3118 	}
3119 	total = 0;
3120 	for (idx = 0; idx < nb_context; idx++) {
3121 		if (!contexts[idx]) {
3122 			fprintf(stderr, "Error: get Null context in port %u\n",
3123 				port_id);
3124 			continue;
3125 		}
3126 		type = (enum age_action_context_type *)contexts[idx];
3127 		switch (*type) {
3128 		case ACTION_AGE_CONTEXT_TYPE_FLOW:
3129 			ctx.pf = container_of(type, struct port_flow, age_type);
3130 			printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32
3131 								 "\t%c%c%c\t\n",
3132 			       "Flow",
3133 			       ctx.pf->id,
3134 			       ctx.pf->rule.attr->group,
3135 			       ctx.pf->rule.attr->priority,
3136 			       ctx.pf->rule.attr->ingress ? 'i' : '-',
3137 			       ctx.pf->rule.attr->egress ? 'e' : '-',
3138 			       ctx.pf->rule.attr->transfer ? 't' : '-');
3139 			if (destroy && !port_flow_destroy(port_id, 1,
3140 							  &ctx.pf->id))
3141 				total++;
3142 			break;
3143 		case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
3144 			ctx.pia = container_of(type,
3145 					struct port_indirect_action, age_type);
3146 			printf("%-20s\t%" PRIu32 "\n", "Indirect action",
3147 			       ctx.pia->id);
3148 			break;
3149 		default:
3150 			fprintf(stderr, "Error: invalid context type %u\n",
3151 				port_id);
3152 			break;
3153 		}
3154 	}
3155 	printf("\n%d flows destroyed\n", total);
3156 	free(contexts);
3157 }
3158 
3159 /** List flow rules. */
3160 void
3161 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
3162 {
3163 	struct rte_port *port;
3164 	struct port_flow *pf;
3165 	struct port_flow *list = NULL;
3166 	uint32_t i;
3167 
3168 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3169 	    port_id == (portid_t)RTE_PORT_ALL)
3170 		return;
3171 	port = &ports[port_id];
3172 	if (!port->flow_list)
3173 		return;
3174 	/* Sort flows by group, priority and ID. */
3175 	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
3176 		struct port_flow **tmp;
3177 		const struct rte_flow_attr *curr = pf->rule.attr;
3178 
3179 		if (n) {
3180 			/* Filter out unwanted groups. */
3181 			for (i = 0; i != n; ++i)
3182 				if (curr->group == group[i])
3183 					break;
3184 			if (i == n)
3185 				continue;
3186 		}
3187 		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
3188 			const struct rte_flow_attr *comp = (*tmp)->rule.attr;
3189 
3190 			if (curr->group > comp->group ||
3191 			    (curr->group == comp->group &&
3192 			     curr->priority > comp->priority) ||
3193 			    (curr->group == comp->group &&
3194 			     curr->priority == comp->priority &&
3195 			     pf->id > (*tmp)->id))
3196 				continue;
3197 			break;
3198 		}
3199 		pf->tmp = *tmp;
3200 		*tmp = pf;
3201 	}
3202 	printf("ID\tGroup\tPrio\tAttr\tRule\n");
3203 	for (pf = list; pf != NULL; pf = pf->tmp) {
3204 		const struct rte_flow_item *item = pf->rule.pattern;
3205 		const struct rte_flow_action *action = pf->rule.actions;
3206 		const char *name;
3207 
3208 		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
3209 		       pf->id,
3210 		       pf->rule.attr->group,
3211 		       pf->rule.attr->priority,
3212 		       pf->rule.attr->ingress ? 'i' : '-',
3213 		       pf->rule.attr->egress ? 'e' : '-',
3214 		       pf->rule.attr->transfer ? 't' : '-');
3215 		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
3216 			if ((uint32_t)item->type > INT_MAX)
3217 				name = "PMD_INTERNAL";
3218 			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
3219 					  &name, sizeof(name),
3220 					  (void *)(uintptr_t)item->type,
3221 					  NULL) <= 0)
3222 				name = "[UNKNOWN]";
3223 			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
3224 				printf("%s ", name);
3225 			++item;
3226 		}
3227 		printf("=>");
3228 		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
3229 			if ((uint32_t)action->type > INT_MAX)
3230 				name = "PMD_INTERNAL";
3231 			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
3232 					  &name, sizeof(name),
3233 					  (void *)(uintptr_t)action->type,
3234 					  NULL) <= 0)
3235 				name = "[UNKNOWN]";
3236 			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
3237 				printf(" %s", name);
3238 			++action;
3239 		}
3240 		printf("\n");
3241 	}
3242 }
3243 
3244 /** Restrict ingress traffic to the defined flow rules. */
3245 int
3246 port_flow_isolate(portid_t port_id, int set)
3247 {
3248 	struct rte_flow_error error;
3249 
3250 	/* Poisoning to make sure PMDs update it in case of error. */
3251 	memset(&error, 0x66, sizeof(error));
3252 	if (rte_flow_isolate(port_id, set, &error))
3253 		return port_flow_complain(&error);
3254 	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
3255 	       port_id,
3256 	       set ? "now restricted" : "not restricted anymore");
3257 	return 0;
3258 }
3259 
3260 /*
3261  * RX/TX ring descriptors display functions.
3262  */
3263 int
3264 rx_queue_id_is_invalid(queueid_t rxq_id)
3265 {
3266 	if (rxq_id < nb_rxq)
3267 		return 0;
3268 	fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n",
3269 		rxq_id, nb_rxq);
3270 	return 1;
3271 }
3272 
3273 int
3274 tx_queue_id_is_invalid(queueid_t txq_id)
3275 {
3276 	if (txq_id < nb_txq)
3277 		return 0;
3278 	fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n",
3279 		txq_id, nb_txq);
3280 	return 1;
3281 }
3282 
3283 static int
3284 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
3285 {
3286 	struct rte_port *port = &ports[port_id];
3287 	struct rte_eth_rxq_info rx_qinfo;
3288 	int ret;
3289 
3290 	ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
3291 	if (ret == 0) {
3292 		*ring_size = rx_qinfo.nb_desc;
3293 		return ret;
3294 	}
3295 
3296 	if (ret != -ENOTSUP)
3297 		return ret;
3298 	/*
3299 	 * If the rte_eth_rx_queue_info_get is not support for this PMD,
3300 	 * ring_size stored in testpmd will be used for validity verification.
3301 	 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc
3302 	 * being 0, it will use a default value provided by PMDs to setup this
3303 	 * rxq. If the default value is 0, it will use the
3304 	 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq.
3305 	 */
3306 	if (port->nb_rx_desc[rxq_id])
3307 		*ring_size = port->nb_rx_desc[rxq_id];
3308 	else if (port->dev_info.default_rxportconf.ring_size)
3309 		*ring_size = port->dev_info.default_rxportconf.ring_size;
3310 	else
3311 		*ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
3312 	return 0;
3313 }
3314 
3315 static int
3316 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
3317 {
3318 	struct rte_port *port = &ports[port_id];
3319 	struct rte_eth_txq_info tx_qinfo;
3320 	int ret;
3321 
3322 	ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
3323 	if (ret == 0) {
3324 		*ring_size = tx_qinfo.nb_desc;
3325 		return ret;
3326 	}
3327 
3328 	if (ret != -ENOTSUP)
3329 		return ret;
3330 	/*
3331 	 * If the rte_eth_tx_queue_info_get is not support for this PMD,
3332 	 * ring_size stored in testpmd will be used for validity verification.
3333 	 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc
3334 	 * being 0, it will use a default value provided by PMDs to setup this
3335 	 * txq. If the default value is 0, it will use the
3336 	 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq.
3337 	 */
3338 	if (port->nb_tx_desc[txq_id])
3339 		*ring_size = port->nb_tx_desc[txq_id];
3340 	else if (port->dev_info.default_txportconf.ring_size)
3341 		*ring_size = port->dev_info.default_txportconf.ring_size;
3342 	else
3343 		*ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
3344 	return 0;
3345 }
3346 
3347 static int
3348 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
3349 {
3350 	uint16_t ring_size;
3351 	int ret;
3352 
3353 	ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
3354 	if (ret)
3355 		return 1;
3356 
3357 	if (rxdesc_id < ring_size)
3358 		return 0;
3359 
3360 	fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n",
3361 		rxdesc_id, ring_size);
3362 	return 1;
3363 }
3364 
3365 static int
3366 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
3367 {
3368 	uint16_t ring_size;
3369 	int ret;
3370 
3371 	ret = get_tx_ring_size(port_id, txq_id, &ring_size);
3372 	if (ret)
3373 		return 1;
3374 
3375 	if (txdesc_id < ring_size)
3376 		return 0;
3377 
3378 	fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n",
3379 		txdesc_id, ring_size);
3380 	return 1;
3381 }
3382 
3383 static const struct rte_memzone *
3384 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
3385 {
3386 	char mz_name[RTE_MEMZONE_NAMESIZE];
3387 	const struct rte_memzone *mz;
3388 
3389 	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
3390 			port_id, q_id, ring_name);
3391 	mz = rte_memzone_lookup(mz_name);
3392 	if (mz == NULL)
3393 		fprintf(stderr,
3394 			"%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n",
3395 			ring_name, port_id, q_id, mz_name);
3396 	return mz;
3397 }
3398 
3399 union igb_ring_dword {
3400 	uint64_t dword;
3401 	struct {
3402 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3403 		uint32_t lo;
3404 		uint32_t hi;
3405 #else
3406 		uint32_t hi;
3407 		uint32_t lo;
3408 #endif
3409 	} words;
3410 };
3411 
3412 struct igb_ring_desc_32_bytes {
3413 	union igb_ring_dword lo_dword;
3414 	union igb_ring_dword hi_dword;
3415 	union igb_ring_dword resv1;
3416 	union igb_ring_dword resv2;
3417 };
3418 
3419 struct igb_ring_desc_16_bytes {
3420 	union igb_ring_dword lo_dword;
3421 	union igb_ring_dword hi_dword;
3422 };
3423 
3424 static void
3425 ring_rxd_display_dword(union igb_ring_dword dword)
3426 {
3427 	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
3428 					(unsigned)dword.words.hi);
3429 }
3430 
3431 static void
3432 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
3433 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
3434 			   portid_t port_id,
3435 #else
3436 			   __rte_unused portid_t port_id,
3437 #endif
3438 			   uint16_t desc_id)
3439 {
3440 	struct igb_ring_desc_16_bytes *ring =
3441 		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
3442 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
3443 	int ret;
3444 	struct rte_eth_dev_info dev_info;
3445 
3446 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
3447 	if (ret != 0)
3448 		return;
3449 
3450 	if (strstr(dev_info.driver_name, "i40e") != NULL) {
3451 		/* 32 bytes RX descriptor, i40e only */
3452 		struct igb_ring_desc_32_bytes *ring =
3453 			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
3454 		ring[desc_id].lo_dword.dword =
3455 			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
3456 		ring_rxd_display_dword(ring[desc_id].lo_dword);
3457 		ring[desc_id].hi_dword.dword =
3458 			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
3459 		ring_rxd_display_dword(ring[desc_id].hi_dword);
3460 		ring[desc_id].resv1.dword =
3461 			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
3462 		ring_rxd_display_dword(ring[desc_id].resv1);
3463 		ring[desc_id].resv2.dword =
3464 			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
3465 		ring_rxd_display_dword(ring[desc_id].resv2);
3466 
3467 		return;
3468 	}
3469 #endif
3470 	/* 16 bytes RX descriptor */
3471 	ring[desc_id].lo_dword.dword =
3472 		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
3473 	ring_rxd_display_dword(ring[desc_id].lo_dword);
3474 	ring[desc_id].hi_dword.dword =
3475 		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
3476 	ring_rxd_display_dword(ring[desc_id].hi_dword);
3477 }
3478 
3479 static void
3480 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
3481 {
3482 	struct igb_ring_desc_16_bytes *ring;
3483 	struct igb_ring_desc_16_bytes txd;
3484 
3485 	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
3486 	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
3487 	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
3488 	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
3489 			(unsigned)txd.lo_dword.words.lo,
3490 			(unsigned)txd.lo_dword.words.hi,
3491 			(unsigned)txd.hi_dword.words.lo,
3492 			(unsigned)txd.hi_dword.words.hi);
3493 }
3494 
3495 void
3496 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
3497 {
3498 	const struct rte_memzone *rx_mz;
3499 
3500 	if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
3501 		return;
3502 	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
3503 	if (rx_mz == NULL)
3504 		return;
3505 	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
3506 }
3507 
3508 void
3509 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
3510 {
3511 	const struct rte_memzone *tx_mz;
3512 
3513 	if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
3514 		return;
3515 	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
3516 	if (tx_mz == NULL)
3517 		return;
3518 	ring_tx_descriptor_display(tx_mz, txd_id);
3519 }
3520 
3521 void
3522 fwd_lcores_config_display(void)
3523 {
3524 	lcoreid_t lc_id;
3525 
3526 	printf("List of forwarding lcores:");
3527 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
3528 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
3529 	printf("\n");
3530 }
3531 void
3532 rxtx_config_display(void)
3533 {
3534 	portid_t pid;
3535 	queueid_t qid;
3536 
3537 	printf("  %s packet forwarding%s packets/burst=%d\n",
3538 	       cur_fwd_eng->fwd_mode_name,
3539 	       retry_enabled == 0 ? "" : " with retry",
3540 	       nb_pkt_per_burst);
3541 
3542 	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
3543 		printf("  packet len=%u - nb packet segments=%d\n",
3544 				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
3545 
3546 	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
3547 	       nb_fwd_lcores, nb_fwd_ports);
3548 
3549 	RTE_ETH_FOREACH_DEV(pid) {
3550 		struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf;
3551 		struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf;
3552 		uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
3553 		uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
3554 		struct rte_eth_rxq_info rx_qinfo;
3555 		struct rte_eth_txq_info tx_qinfo;
3556 		uint16_t rx_free_thresh_tmp;
3557 		uint16_t tx_free_thresh_tmp;
3558 		uint16_t tx_rs_thresh_tmp;
3559 		uint16_t nb_rx_desc_tmp;
3560 		uint16_t nb_tx_desc_tmp;
3561 		uint64_t offloads_tmp;
3562 		uint8_t pthresh_tmp;
3563 		uint8_t hthresh_tmp;
3564 		uint8_t wthresh_tmp;
3565 		int32_t rc;
3566 
3567 		/* per port config */
3568 		printf("  port %d: RX queue number: %d Tx queue number: %d\n",
3569 				(unsigned int)pid, nb_rxq, nb_txq);
3570 
3571 		printf("    Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
3572 				ports[pid].dev_conf.rxmode.offloads,
3573 				ports[pid].dev_conf.txmode.offloads);
3574 
3575 		/* per rx queue config only for first queue to be less verbose */
3576 		for (qid = 0; qid < 1; qid++) {
3577 			rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
3578 			if (rc) {
3579 				nb_rx_desc_tmp = nb_rx_desc[qid];
3580 				rx_free_thresh_tmp =
3581 					rx_conf[qid].rx_free_thresh;
3582 				pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
3583 				hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
3584 				wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
3585 				offloads_tmp = rx_conf[qid].offloads;
3586 			} else {
3587 				nb_rx_desc_tmp = rx_qinfo.nb_desc;
3588 				rx_free_thresh_tmp =
3589 						rx_qinfo.conf.rx_free_thresh;
3590 				pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
3591 				hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
3592 				wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
3593 				offloads_tmp = rx_qinfo.conf.offloads;
3594 			}
3595 
3596 			printf("    RX queue: %d\n", qid);
3597 			printf("      RX desc=%d - RX free threshold=%d\n",
3598 				nb_rx_desc_tmp, rx_free_thresh_tmp);
3599 			printf("      RX threshold registers: pthresh=%d hthresh=%d "
3600 				" wthresh=%d\n",
3601 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
3602 			printf("      RX Offloads=0x%"PRIx64, offloads_tmp);
3603 			if (rx_conf->share_group > 0)
3604 				printf(" share_group=%u share_qid=%u",
3605 				       rx_conf->share_group,
3606 				       rx_conf->share_qid);
3607 			printf("\n");
3608 		}
3609 
3610 		/* per tx queue config only for first queue to be less verbose */
3611 		for (qid = 0; qid < 1; qid++) {
3612 			rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
3613 			if (rc) {
3614 				nb_tx_desc_tmp = nb_tx_desc[qid];
3615 				tx_free_thresh_tmp =
3616 					tx_conf[qid].tx_free_thresh;
3617 				pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
3618 				hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
3619 				wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
3620 				offloads_tmp = tx_conf[qid].offloads;
3621 				tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
3622 			} else {
3623 				nb_tx_desc_tmp = tx_qinfo.nb_desc;
3624 				tx_free_thresh_tmp =
3625 						tx_qinfo.conf.tx_free_thresh;
3626 				pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
3627 				hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
3628 				wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
3629 				offloads_tmp = tx_qinfo.conf.offloads;
3630 				tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
3631 			}
3632 
3633 			printf("    TX queue: %d\n", qid);
3634 			printf("      TX desc=%d - TX free threshold=%d\n",
3635 				nb_tx_desc_tmp, tx_free_thresh_tmp);
3636 			printf("      TX threshold registers: pthresh=%d hthresh=%d "
3637 				" wthresh=%d\n",
3638 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
3639 			printf("      TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
3640 				offloads_tmp, tx_rs_thresh_tmp);
3641 		}
3642 	}
3643 }
3644 
3645 void
3646 port_rss_reta_info(portid_t port_id,
3647 		   struct rte_eth_rss_reta_entry64 *reta_conf,
3648 		   uint16_t nb_entries)
3649 {
3650 	uint16_t i, idx, shift;
3651 	int ret;
3652 
3653 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3654 		return;
3655 
3656 	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
3657 	if (ret != 0) {
3658 		fprintf(stderr,
3659 			"Failed to get RSS RETA info, return code = %d\n",
3660 			ret);
3661 		return;
3662 	}
3663 
3664 	for (i = 0; i < nb_entries; i++) {
3665 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3666 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
3667 		if (!(reta_conf[idx].mask & (1ULL << shift)))
3668 			continue;
3669 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
3670 					i, reta_conf[idx].reta[shift]);
3671 	}
3672 }
3673 
3674 /*
3675  * Displays the RSS hash functions of a port, and, optionally, the RSS hash
3676  * key of the port.
3677  */
3678 void
3679 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
3680 {
3681 	struct rte_eth_rss_conf rss_conf = {0};
3682 	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
3683 	uint64_t rss_hf;
3684 	uint8_t i;
3685 	int diag;
3686 	struct rte_eth_dev_info dev_info;
3687 	uint8_t hash_key_size;
3688 	int ret;
3689 
3690 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3691 		return;
3692 
3693 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
3694 	if (ret != 0)
3695 		return;
3696 
3697 	if (dev_info.hash_key_size > 0 &&
3698 			dev_info.hash_key_size <= sizeof(rss_key))
3699 		hash_key_size = dev_info.hash_key_size;
3700 	else {
3701 		fprintf(stderr,
3702 			"dev_info did not provide a valid hash key size\n");
3703 		return;
3704 	}
3705 
3706 	/* Get RSS hash key if asked to display it */
3707 	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
3708 	rss_conf.rss_key_len = hash_key_size;
3709 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
3710 	if (diag != 0) {
3711 		switch (diag) {
3712 		case -ENODEV:
3713 			fprintf(stderr, "port index %d invalid\n", port_id);
3714 			break;
3715 		case -ENOTSUP:
3716 			fprintf(stderr, "operation not supported by device\n");
3717 			break;
3718 		default:
3719 			fprintf(stderr, "operation failed - diag=%d\n", diag);
3720 			break;
3721 		}
3722 		return;
3723 	}
3724 	rss_hf = rss_conf.rss_hf;
3725 	if (rss_hf == 0) {
3726 		printf("RSS disabled\n");
3727 		return;
3728 	}
3729 	printf("RSS functions:\n");
3730 	rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
3731 	if (!show_rss_key)
3732 		return;
3733 	printf("RSS key:\n");
3734 	for (i = 0; i < hash_key_size; i++)
3735 		printf("%02X", rss_key[i]);
3736 	printf("\n");
3737 }
3738 
3739 void
3740 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
3741 			 uint8_t hash_key_len)
3742 {
3743 	struct rte_eth_rss_conf rss_conf;
3744 	int diag;
3745 
3746 	rss_conf.rss_key = NULL;
3747 	rss_conf.rss_key_len = 0;
3748 	rss_conf.rss_hf = str_to_rsstypes(rss_type);
3749 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
3750 	if (diag == 0) {
3751 		rss_conf.rss_key = hash_key;
3752 		rss_conf.rss_key_len = hash_key_len;
3753 		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
3754 	}
3755 	if (diag == 0)
3756 		return;
3757 
3758 	switch (diag) {
3759 	case -ENODEV:
3760 		fprintf(stderr, "port index %d invalid\n", port_id);
3761 		break;
3762 	case -ENOTSUP:
3763 		fprintf(stderr, "operation not supported by device\n");
3764 		break;
3765 	default:
3766 		fprintf(stderr, "operation failed - diag=%d\n", diag);
3767 		break;
3768 	}
3769 }
3770 
3771 /*
3772  * Check whether a shared rxq scheduled on other lcores.
3773  */
3774 static bool
3775 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
3776 			   portid_t src_port, queueid_t src_rxq,
3777 			   uint32_t share_group, queueid_t share_rxq)
3778 {
3779 	streamid_t sm_id;
3780 	streamid_t nb_fs_per_lcore;
3781 	lcoreid_t  nb_fc;
3782 	lcoreid_t  lc_id;
3783 	struct fwd_stream *fs;
3784 	struct rte_port *port;
3785 	struct rte_eth_dev_info *dev_info;
3786 	struct rte_eth_rxconf *rxq_conf;
3787 
3788 	nb_fc = cur_fwd_config.nb_fwd_lcores;
3789 	/* Check remaining cores. */
3790 	for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
3791 		sm_id = fwd_lcores[lc_id]->stream_idx;
3792 		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
3793 		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
3794 		     sm_id++) {
3795 			fs = fwd_streams[sm_id];
3796 			port = &ports[fs->rx_port];
3797 			dev_info = &port->dev_info;
3798 			rxq_conf = &port->rxq[fs->rx_queue].conf;
3799 			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
3800 			    == 0 || rxq_conf->share_group == 0)
3801 				/* Not shared rxq. */
3802 				continue;
3803 			if (domain_id != port->dev_info.switch_info.domain_id)
3804 				continue;
3805 			if (rxq_conf->share_group != share_group)
3806 				continue;
3807 			if (rxq_conf->share_qid != share_rxq)
3808 				continue;
3809 			printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
3810 			       share_group, share_rxq);
3811 			printf("  lcore %hhu Port %hu queue %hu\n",
3812 			       src_lc, src_port, src_rxq);
3813 			printf("  lcore %hhu Port %hu queue %hu\n",
3814 			       lc_id, fs->rx_port, fs->rx_queue);
3815 			printf("Please use --nb-cores=%hu to limit number of forwarding cores\n",
3816 			       nb_rxq);
3817 			return true;
3818 		}
3819 	}
3820 	return false;
3821 }
3822 
3823 /*
3824  * Check shared rxq configuration.
3825  *
3826  * Shared group must not being scheduled on different core.
3827  */
3828 bool
3829 pkt_fwd_shared_rxq_check(void)
3830 {
3831 	streamid_t sm_id;
3832 	streamid_t nb_fs_per_lcore;
3833 	lcoreid_t  nb_fc;
3834 	lcoreid_t  lc_id;
3835 	struct fwd_stream *fs;
3836 	uint16_t domain_id;
3837 	struct rte_port *port;
3838 	struct rte_eth_dev_info *dev_info;
3839 	struct rte_eth_rxconf *rxq_conf;
3840 
3841 	if (rxq_share == 0)
3842 		return true;
3843 	nb_fc = cur_fwd_config.nb_fwd_lcores;
3844 	/*
3845 	 * Check streams on each core, make sure the same switch domain +
3846 	 * group + queue doesn't get scheduled on other cores.
3847 	 */
3848 	for (lc_id = 0; lc_id < nb_fc; lc_id++) {
3849 		sm_id = fwd_lcores[lc_id]->stream_idx;
3850 		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
3851 		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
3852 		     sm_id++) {
3853 			fs = fwd_streams[sm_id];
3854 			/* Update lcore info stream being scheduled. */
3855 			fs->lcore = fwd_lcores[lc_id];
3856 			port = &ports[fs->rx_port];
3857 			dev_info = &port->dev_info;
3858 			rxq_conf = &port->rxq[fs->rx_queue].conf;
3859 			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
3860 			    == 0 || rxq_conf->share_group == 0)
3861 				/* Not shared rxq. */
3862 				continue;
3863 			/* Check shared rxq not scheduled on remaining cores. */
3864 			domain_id = port->dev_info.switch_info.domain_id;
3865 			if (fwd_stream_on_other_lcores(domain_id, lc_id,
3866 						       fs->rx_port,
3867 						       fs->rx_queue,
3868 						       rxq_conf->share_group,
3869 						       rxq_conf->share_qid))
3870 				return false;
3871 		}
3872 	}
3873 	return true;
3874 }
3875 
3876 /*
3877  * Setup forwarding configuration for each logical core.
3878  */
3879 static void
3880 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
3881 {
3882 	streamid_t nb_fs_per_lcore;
3883 	streamid_t nb_fs;
3884 	streamid_t sm_id;
3885 	lcoreid_t  nb_extra;
3886 	lcoreid_t  nb_fc;
3887 	lcoreid_t  nb_lc;
3888 	lcoreid_t  lc_id;
3889 
3890 	nb_fs = cfg->nb_fwd_streams;
3891 	nb_fc = cfg->nb_fwd_lcores;
3892 	if (nb_fs <= nb_fc) {
3893 		nb_fs_per_lcore = 1;
3894 		nb_extra = 0;
3895 	} else {
3896 		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
3897 		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
3898 	}
3899 
3900 	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
3901 	sm_id = 0;
3902 	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
3903 		fwd_lcores[lc_id]->stream_idx = sm_id;
3904 		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
3905 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
3906 	}
3907 
3908 	/*
3909 	 * Assign extra remaining streams, if any.
3910 	 */
3911 	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
3912 	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
3913 		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
3914 		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
3915 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
3916 	}
3917 }
3918 
3919 static portid_t
3920 fwd_topology_tx_port_get(portid_t rxp)
3921 {
3922 	static int warning_once = 1;
3923 
3924 	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
3925 
3926 	switch (port_topology) {
3927 	default:
3928 	case PORT_TOPOLOGY_PAIRED:
3929 		if ((rxp & 0x1) == 0) {
3930 			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
3931 				return rxp + 1;
3932 			if (warning_once) {
3933 				fprintf(stderr,
3934 					"\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n");
3935 				warning_once = 0;
3936 			}
3937 			return rxp;
3938 		}
3939 		return rxp - 1;
3940 	case PORT_TOPOLOGY_CHAINED:
3941 		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
3942 	case PORT_TOPOLOGY_LOOP:
3943 		return rxp;
3944 	}
3945 }
3946 
3947 static void
3948 simple_fwd_config_setup(void)
3949 {
3950 	portid_t i;
3951 
3952 	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
3953 	cur_fwd_config.nb_fwd_streams =
3954 		(streamid_t) cur_fwd_config.nb_fwd_ports;
3955 
3956 	/* reinitialize forwarding streams */
3957 	init_fwd_streams();
3958 
3959 	/*
3960 	 * In the simple forwarding test, the number of forwarding cores
3961 	 * must be lower or equal to the number of forwarding ports.
3962 	 */
3963 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
3964 	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
3965 		cur_fwd_config.nb_fwd_lcores =
3966 			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
3967 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
3968 
3969 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
3970 		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
3971 		fwd_streams[i]->rx_queue  = 0;
3972 		fwd_streams[i]->tx_port   =
3973 				fwd_ports_ids[fwd_topology_tx_port_get(i)];
3974 		fwd_streams[i]->tx_queue  = 0;
3975 		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
3976 		fwd_streams[i]->retry_enabled = retry_enabled;
3977 	}
3978 }
3979 
3980 /**
3981  * For the RSS forwarding test all streams distributed over lcores. Each stream
3982  * being composed of a RX queue to poll on a RX port for input messages,
3983  * associated with a TX queue of a TX port where to send forwarded packets.
3984  */
3985 static void
3986 rss_fwd_config_setup(void)
3987 {
3988 	portid_t   rxp;
3989 	portid_t   txp;
3990 	queueid_t  rxq;
3991 	queueid_t  nb_q;
3992 	streamid_t  sm_id;
3993 	int start;
3994 	int end;
3995 
3996 	nb_q = nb_rxq;
3997 	if (nb_q > nb_txq)
3998 		nb_q = nb_txq;
3999 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4000 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
4001 	cur_fwd_config.nb_fwd_streams =
4002 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
4003 
4004 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
4005 		cur_fwd_config.nb_fwd_lcores =
4006 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
4007 
4008 	/* reinitialize forwarding streams */
4009 	init_fwd_streams();
4010 
4011 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
4012 
4013 	if (proc_id > 0 && nb_q % num_procs != 0)
4014 		printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n");
4015 
4016 	/**
4017 	 * In multi-process, All queues are allocated to different
4018 	 * processes based on num_procs and proc_id. For example:
4019 	 * if supports 4 queues(nb_q), 2 processes(num_procs),
4020 	 * the 0~1 queue for primary process.
4021 	 * the 2~3 queue for secondary process.
4022 	 */
4023 	start = proc_id * nb_q / num_procs;
4024 	end = start + nb_q / num_procs;
4025 	rxp = 0;
4026 	rxq = start;
4027 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
4028 		struct fwd_stream *fs;
4029 
4030 		fs = fwd_streams[sm_id];
4031 		txp = fwd_topology_tx_port_get(rxp);
4032 		fs->rx_port = fwd_ports_ids[rxp];
4033 		fs->rx_queue = rxq;
4034 		fs->tx_port = fwd_ports_ids[txp];
4035 		fs->tx_queue = rxq;
4036 		fs->peer_addr = fs->tx_port;
4037 		fs->retry_enabled = retry_enabled;
4038 		rxp++;
4039 		if (rxp < nb_fwd_ports)
4040 			continue;
4041 		rxp = 0;
4042 		rxq++;
4043 		if (rxq >= end)
4044 			rxq = start;
4045 	}
4046 }
4047 
4048 static uint16_t
4049 get_fwd_port_total_tc_num(void)
4050 {
4051 	struct rte_eth_dcb_info dcb_info;
4052 	uint16_t total_tc_num = 0;
4053 	unsigned int i;
4054 
4055 	for (i = 0; i < nb_fwd_ports; i++) {
4056 		(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info);
4057 		total_tc_num += dcb_info.nb_tcs;
4058 	}
4059 
4060 	return total_tc_num;
4061 }
4062 
4063 /**
4064  * For the DCB forwarding test, each core is assigned on each traffic class.
4065  *
4066  * Each core is assigned a multi-stream, each stream being composed of
4067  * a RX queue to poll on a RX port for input messages, associated with
4068  * a TX queue of a TX port where to send forwarded packets. All RX and
4069  * TX queues are mapping to the same traffic class.
4070  * If VMDQ and DCB co-exist, each traffic class on different POOLs share
4071  * the same core
4072  */
4073 static void
4074 dcb_fwd_config_setup(void)
4075 {
4076 	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
4077 	portid_t txp, rxp = 0;
4078 	queueid_t txq, rxq = 0;
4079 	lcoreid_t  lc_id;
4080 	uint16_t nb_rx_queue, nb_tx_queue;
4081 	uint16_t i, j, k, sm_id = 0;
4082 	uint16_t total_tc_num;
4083 	struct rte_port *port;
4084 	uint8_t tc = 0;
4085 	portid_t pid;
4086 	int ret;
4087 
4088 	/*
4089 	 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED
4090 	 * or RTE_PORT_STOPPED.
4091 	 *
4092 	 * Re-configure ports to get updated mapping between tc and queue in
4093 	 * case the queue number of the port is changed. Skip for started ports
4094 	 * since modifying queue number and calling dev_configure need to stop
4095 	 * ports first.
4096 	 */
4097 	for (pid = 0; pid < nb_fwd_ports; pid++) {
4098 		if (port_is_started(pid) == 1)
4099 			continue;
4100 
4101 		port = &ports[pid];
4102 		ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq,
4103 					    &port->dev_conf);
4104 		if (ret < 0) {
4105 			fprintf(stderr,
4106 				"Failed to re-configure port %d, ret = %d.\n",
4107 				pid, ret);
4108 			return;
4109 		}
4110 	}
4111 
4112 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4113 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
4114 	cur_fwd_config.nb_fwd_streams =
4115 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
4116 	total_tc_num = get_fwd_port_total_tc_num();
4117 	if (cur_fwd_config.nb_fwd_lcores > total_tc_num)
4118 		cur_fwd_config.nb_fwd_lcores = total_tc_num;
4119 
4120 	/* reinitialize forwarding streams */
4121 	init_fwd_streams();
4122 	sm_id = 0;
4123 	txp = 1;
4124 	/* get the dcb info on the first RX and TX ports */
4125 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
4126 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
4127 
4128 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
4129 		fwd_lcores[lc_id]->stream_nb = 0;
4130 		fwd_lcores[lc_id]->stream_idx = sm_id;
4131 		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
4132 			/* if the nb_queue is zero, means this tc is
4133 			 * not enabled on the POOL
4134 			 */
4135 			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
4136 				break;
4137 			k = fwd_lcores[lc_id]->stream_nb +
4138 				fwd_lcores[lc_id]->stream_idx;
4139 			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
4140 			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
4141 			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
4142 			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
4143 			for (j = 0; j < nb_rx_queue; j++) {
4144 				struct fwd_stream *fs;
4145 
4146 				fs = fwd_streams[k + j];
4147 				fs->rx_port = fwd_ports_ids[rxp];
4148 				fs->rx_queue = rxq + j;
4149 				fs->tx_port = fwd_ports_ids[txp];
4150 				fs->tx_queue = txq + j % nb_tx_queue;
4151 				fs->peer_addr = fs->tx_port;
4152 				fs->retry_enabled = retry_enabled;
4153 			}
4154 			fwd_lcores[lc_id]->stream_nb +=
4155 				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
4156 		}
4157 		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
4158 
4159 		tc++;
4160 		if (tc < rxp_dcb_info.nb_tcs)
4161 			continue;
4162 		/* Restart from TC 0 on next RX port */
4163 		tc = 0;
4164 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
4165 			rxp = (portid_t)
4166 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
4167 		else
4168 			rxp++;
4169 		if (rxp >= nb_fwd_ports)
4170 			return;
4171 		/* get the dcb information on next RX and TX ports */
4172 		if ((rxp & 0x1) == 0)
4173 			txp = (portid_t) (rxp + 1);
4174 		else
4175 			txp = (portid_t) (rxp - 1);
4176 		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
4177 		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
4178 	}
4179 }
4180 
4181 static void
4182 icmp_echo_config_setup(void)
4183 {
4184 	portid_t  rxp;
4185 	queueid_t rxq;
4186 	lcoreid_t lc_id;
4187 	uint16_t  sm_id;
4188 
4189 	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
4190 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
4191 			(nb_txq * nb_fwd_ports);
4192 	else
4193 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4194 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
4195 	cur_fwd_config.nb_fwd_streams =
4196 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
4197 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
4198 		cur_fwd_config.nb_fwd_lcores =
4199 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
4200 	if (verbose_level > 0) {
4201 		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
4202 		       __FUNCTION__,
4203 		       cur_fwd_config.nb_fwd_lcores,
4204 		       cur_fwd_config.nb_fwd_ports,
4205 		       cur_fwd_config.nb_fwd_streams);
4206 	}
4207 
4208 	/* reinitialize forwarding streams */
4209 	init_fwd_streams();
4210 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
4211 	rxp = 0; rxq = 0;
4212 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
4213 		if (verbose_level > 0)
4214 			printf("  core=%d: \n", lc_id);
4215 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
4216 			struct fwd_stream *fs;
4217 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
4218 			fs->rx_port = fwd_ports_ids[rxp];
4219 			fs->rx_queue = rxq;
4220 			fs->tx_port = fs->rx_port;
4221 			fs->tx_queue = rxq;
4222 			fs->peer_addr = fs->tx_port;
4223 			fs->retry_enabled = retry_enabled;
4224 			if (verbose_level > 0)
4225 				printf("  stream=%d port=%d rxq=%d txq=%d\n",
4226 				       sm_id, fs->rx_port, fs->rx_queue,
4227 				       fs->tx_queue);
4228 			rxq = (queueid_t) (rxq + 1);
4229 			if (rxq == nb_rxq) {
4230 				rxq = 0;
4231 				rxp = (portid_t) (rxp + 1);
4232 			}
4233 		}
4234 	}
4235 }
4236 
4237 void
4238 fwd_config_setup(void)
4239 {
4240 	struct rte_port *port;
4241 	portid_t pt_id;
4242 	unsigned int i;
4243 
4244 	cur_fwd_config.fwd_eng = cur_fwd_eng;
4245 	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
4246 		icmp_echo_config_setup();
4247 		return;
4248 	}
4249 
4250 	if ((nb_rxq > 1) && (nb_txq > 1)){
4251 		if (dcb_config) {
4252 			for (i = 0; i < nb_fwd_ports; i++) {
4253 				pt_id = fwd_ports_ids[i];
4254 				port = &ports[pt_id];
4255 				if (!port->dcb_flag) {
4256 					fprintf(stderr,
4257 						"In DCB mode, all forwarding ports must be configured in this mode.\n");
4258 					return;
4259 				}
4260 			}
4261 			if (nb_fwd_lcores == 1) {
4262 				fprintf(stderr,
4263 					"In DCB mode,the nb forwarding cores should be larger than 1.\n");
4264 				return;
4265 			}
4266 
4267 			dcb_fwd_config_setup();
4268 		} else
4269 			rss_fwd_config_setup();
4270 	}
4271 	else
4272 		simple_fwd_config_setup();
4273 }
4274 
4275 static const char *
4276 mp_alloc_to_str(uint8_t mode)
4277 {
4278 	switch (mode) {
4279 	case MP_ALLOC_NATIVE:
4280 		return "native";
4281 	case MP_ALLOC_ANON:
4282 		return "anon";
4283 	case MP_ALLOC_XMEM:
4284 		return "xmem";
4285 	case MP_ALLOC_XMEM_HUGE:
4286 		return "xmemhuge";
4287 	case MP_ALLOC_XBUF:
4288 		return "xbuf";
4289 	default:
4290 		return "invalid";
4291 	}
4292 }
4293 
4294 void
4295 pkt_fwd_config_display(struct fwd_config *cfg)
4296 {
4297 	struct fwd_stream *fs;
4298 	lcoreid_t  lc_id;
4299 	streamid_t sm_id;
4300 
4301 	printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
4302 		"NUMA support %s, MP allocation mode: %s\n",
4303 		cfg->fwd_eng->fwd_mode_name,
4304 		retry_enabled == 0 ? "" : " with retry",
4305 		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
4306 		numa_support == 1 ? "enabled" : "disabled",
4307 		mp_alloc_to_str(mp_alloc_type));
4308 
4309 	if (retry_enabled)
4310 		printf("TX retry num: %u, delay between TX retries: %uus\n",
4311 			burst_tx_retry_num, burst_tx_delay_time);
4312 	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
4313 		printf("Logical Core %u (socket %u) forwards packets on "
4314 		       "%d streams:",
4315 		       fwd_lcores_cpuids[lc_id],
4316 		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
4317 		       fwd_lcores[lc_id]->stream_nb);
4318 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
4319 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
4320 			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
4321 			       "P=%d/Q=%d (socket %u) ",
4322 			       fs->rx_port, fs->rx_queue,
4323 			       ports[fs->rx_port].socket_id,
4324 			       fs->tx_port, fs->tx_queue,
4325 			       ports[fs->tx_port].socket_id);
4326 			print_ethaddr("peer=",
4327 				      &peer_eth_addrs[fs->peer_addr]);
4328 		}
4329 		printf("\n");
4330 	}
4331 	printf("\n");
4332 }
4333 
4334 void
4335 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
4336 {
4337 	struct rte_ether_addr new_peer_addr;
4338 	if (!rte_eth_dev_is_valid_port(port_id)) {
4339 		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
4340 		return;
4341 	}
4342 	if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
4343 		fprintf(stderr, "Error: Invalid ethernet address: %s\n",
4344 			peer_addr);
4345 		return;
4346 	}
4347 	peer_eth_addrs[port_id] = new_peer_addr;
4348 }
4349 
4350 int
4351 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
4352 {
4353 	unsigned int i;
4354 	unsigned int lcore_cpuid;
4355 	int record_now;
4356 
4357 	record_now = 0;
4358  again:
4359 	for (i = 0; i < nb_lc; i++) {
4360 		lcore_cpuid = lcorelist[i];
4361 		if (! rte_lcore_is_enabled(lcore_cpuid)) {
4362 			fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid);
4363 			return -1;
4364 		}
4365 		if (lcore_cpuid == rte_get_main_lcore()) {
4366 			fprintf(stderr,
4367 				"lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n",
4368 				lcore_cpuid);
4369 			return -1;
4370 		}
4371 		if (record_now)
4372 			fwd_lcores_cpuids[i] = lcore_cpuid;
4373 	}
4374 	if (record_now == 0) {
4375 		record_now = 1;
4376 		goto again;
4377 	}
4378 	nb_cfg_lcores = (lcoreid_t) nb_lc;
4379 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
4380 		printf("previous number of forwarding cores %u - changed to "
4381 		       "number of configured cores %u\n",
4382 		       (unsigned int) nb_fwd_lcores, nb_lc);
4383 		nb_fwd_lcores = (lcoreid_t) nb_lc;
4384 	}
4385 
4386 	return 0;
4387 }
4388 
4389 int
4390 set_fwd_lcores_mask(uint64_t lcoremask)
4391 {
4392 	unsigned int lcorelist[64];
4393 	unsigned int nb_lc;
4394 	unsigned int i;
4395 
4396 	if (lcoremask == 0) {
4397 		fprintf(stderr, "Invalid NULL mask of cores\n");
4398 		return -1;
4399 	}
4400 	nb_lc = 0;
4401 	for (i = 0; i < 64; i++) {
4402 		if (! ((uint64_t)(1ULL << i) & lcoremask))
4403 			continue;
4404 		lcorelist[nb_lc++] = i;
4405 	}
4406 	return set_fwd_lcores_list(lcorelist, nb_lc);
4407 }
4408 
4409 void
4410 set_fwd_lcores_number(uint16_t nb_lc)
4411 {
4412 	if (test_done == 0) {
4413 		fprintf(stderr, "Please stop forwarding first\n");
4414 		return;
4415 	}
4416 	if (nb_lc > nb_cfg_lcores) {
4417 		fprintf(stderr,
4418 			"nb fwd cores %u > %u (max. number of configured lcores) - ignored\n",
4419 			(unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
4420 		return;
4421 	}
4422 	nb_fwd_lcores = (lcoreid_t) nb_lc;
4423 	printf("Number of forwarding cores set to %u\n",
4424 	       (unsigned int) nb_fwd_lcores);
4425 }
4426 
4427 void
4428 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
4429 {
4430 	unsigned int i;
4431 	portid_t port_id;
4432 	int record_now;
4433 
4434 	record_now = 0;
4435  again:
4436 	for (i = 0; i < nb_pt; i++) {
4437 		port_id = (portid_t) portlist[i];
4438 		if (port_id_is_invalid(port_id, ENABLED_WARN))
4439 			return;
4440 		if (record_now)
4441 			fwd_ports_ids[i] = port_id;
4442 	}
4443 	if (record_now == 0) {
4444 		record_now = 1;
4445 		goto again;
4446 	}
4447 	nb_cfg_ports = (portid_t) nb_pt;
4448 	if (nb_fwd_ports != (portid_t) nb_pt) {
4449 		printf("previous number of forwarding ports %u - changed to "
4450 		       "number of configured ports %u\n",
4451 		       (unsigned int) nb_fwd_ports, nb_pt);
4452 		nb_fwd_ports = (portid_t) nb_pt;
4453 	}
4454 }
4455 
4456 /**
4457  * Parse the user input and obtain the list of forwarding ports
4458  *
4459  * @param[in] list
4460  *   String containing the user input. User can specify
4461  *   in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
4462  *   For example, if the user wants to use all the available
4463  *   4 ports in his system, then the input can be 0-3 or 0,1,2,3.
4464  *   If the user wants to use only the ports 1,2 then the input
4465  *   is 1,2.
4466  *   valid characters are '-' and ','
4467  * @param[out] values
4468  *   This array will be filled with a list of port IDs
4469  *   based on the user input
4470  *   Note that duplicate entries are discarded and only the first
4471  *   count entries in this array are port IDs and all the rest
4472  *   will contain default values
4473  * @param[in] maxsize
4474  *   This parameter denotes 2 things
4475  *   1) Number of elements in the values array
4476  *   2) Maximum value of each element in the values array
4477  * @return
4478  *   On success, returns total count of parsed port IDs
4479  *   On failure, returns 0
4480  */
4481 static unsigned int
4482 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
4483 {
4484 	unsigned int count = 0;
4485 	char *end = NULL;
4486 	int min, max;
4487 	int value, i;
4488 	unsigned int marked[maxsize];
4489 
4490 	if (list == NULL || values == NULL)
4491 		return 0;
4492 
4493 	for (i = 0; i < (int)maxsize; i++)
4494 		marked[i] = 0;
4495 
4496 	min = INT_MAX;
4497 
4498 	do {
4499 		/*Remove the blank spaces if any*/
4500 		while (isblank(*list))
4501 			list++;
4502 		if (*list == '\0')
4503 			break;
4504 		errno = 0;
4505 		value = strtol(list, &end, 10);
4506 		if (errno || end == NULL)
4507 			return 0;
4508 		if (value < 0 || value >= (int)maxsize)
4509 			return 0;
4510 		while (isblank(*end))
4511 			end++;
4512 		if (*end == '-' && min == INT_MAX) {
4513 			min = value;
4514 		} else if ((*end == ',') || (*end == '\0')) {
4515 			max = value;
4516 			if (min == INT_MAX)
4517 				min = value;
4518 			for (i = min; i <= max; i++) {
4519 				if (count < maxsize) {
4520 					if (marked[i])
4521 						continue;
4522 					values[count] = i;
4523 					marked[i] = 1;
4524 					count++;
4525 				}
4526 			}
4527 			min = INT_MAX;
4528 		} else
4529 			return 0;
4530 		list = end + 1;
4531 	} while (*end != '\0');
4532 
4533 	return count;
4534 }
4535 
4536 void
4537 parse_fwd_portlist(const char *portlist)
4538 {
4539 	unsigned int portcount;
4540 	unsigned int portindex[RTE_MAX_ETHPORTS];
4541 	unsigned int i, valid_port_count = 0;
4542 
4543 	portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
4544 	if (!portcount)
4545 		rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
4546 
4547 	/*
4548 	 * Here we verify the validity of the ports
4549 	 * and thereby calculate the total number of
4550 	 * valid ports
4551 	 */
4552 	for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
4553 		if (rte_eth_dev_is_valid_port(portindex[i])) {
4554 			portindex[valid_port_count] = portindex[i];
4555 			valid_port_count++;
4556 		}
4557 	}
4558 
4559 	set_fwd_ports_list(portindex, valid_port_count);
4560 }
4561 
4562 void
4563 set_fwd_ports_mask(uint64_t portmask)
4564 {
4565 	unsigned int portlist[64];
4566 	unsigned int nb_pt;
4567 	unsigned int i;
4568 
4569 	if (portmask == 0) {
4570 		fprintf(stderr, "Invalid NULL mask of ports\n");
4571 		return;
4572 	}
4573 	nb_pt = 0;
4574 	RTE_ETH_FOREACH_DEV(i) {
4575 		if (! ((uint64_t)(1ULL << i) & portmask))
4576 			continue;
4577 		portlist[nb_pt++] = i;
4578 	}
4579 	set_fwd_ports_list(portlist, nb_pt);
4580 }
4581 
4582 void
4583 set_fwd_ports_number(uint16_t nb_pt)
4584 {
4585 	if (nb_pt > nb_cfg_ports) {
4586 		fprintf(stderr,
4587 			"nb fwd ports %u > %u (number of configured ports) - ignored\n",
4588 			(unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
4589 		return;
4590 	}
4591 	nb_fwd_ports = (portid_t) nb_pt;
4592 	printf("Number of forwarding ports set to %u\n",
4593 	       (unsigned int) nb_fwd_ports);
4594 }
4595 
4596 int
4597 port_is_forwarding(portid_t port_id)
4598 {
4599 	unsigned int i;
4600 
4601 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4602 		return -1;
4603 
4604 	for (i = 0; i < nb_fwd_ports; i++) {
4605 		if (fwd_ports_ids[i] == port_id)
4606 			return 1;
4607 	}
4608 
4609 	return 0;
4610 }
4611 
4612 void
4613 set_nb_pkt_per_burst(uint16_t nb)
4614 {
4615 	if (nb > MAX_PKT_BURST) {
4616 		fprintf(stderr,
4617 			"nb pkt per burst: %u > %u (maximum packet per burst)  ignored\n",
4618 			(unsigned int) nb, (unsigned int) MAX_PKT_BURST);
4619 		return;
4620 	}
4621 	nb_pkt_per_burst = nb;
4622 	printf("Number of packets per burst set to %u\n",
4623 	       (unsigned int) nb_pkt_per_burst);
4624 }
4625 
4626 static const char *
4627 tx_split_get_name(enum tx_pkt_split split)
4628 {
4629 	uint32_t i;
4630 
4631 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
4632 		if (tx_split_name[i].split == split)
4633 			return tx_split_name[i].name;
4634 	}
4635 	return NULL;
4636 }
4637 
4638 void
4639 set_tx_pkt_split(const char *name)
4640 {
4641 	uint32_t i;
4642 
4643 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
4644 		if (strcmp(tx_split_name[i].name, name) == 0) {
4645 			tx_pkt_split = tx_split_name[i].split;
4646 			return;
4647 		}
4648 	}
4649 	fprintf(stderr, "unknown value: \"%s\"\n", name);
4650 }
4651 
4652 int
4653 parse_fec_mode(const char *name, uint32_t *fec_capa)
4654 {
4655 	uint8_t i;
4656 
4657 	for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
4658 		if (strcmp(fec_mode_name[i].name, name) == 0) {
4659 			*fec_capa =
4660 				RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
4661 			return 0;
4662 		}
4663 	}
4664 	return -1;
4665 }
4666 
4667 void
4668 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
4669 {
4670 	unsigned int i, j;
4671 
4672 	printf("FEC capabilities:\n");
4673 
4674 	for (i = 0; i < num; i++) {
4675 		printf("%s : ",
4676 			rte_eth_link_speed_to_str(speed_fec_capa[i].speed));
4677 
4678 		for (j = 0; j < RTE_DIM(fec_mode_name); j++) {
4679 			if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
4680 						speed_fec_capa[i].capa)
4681 				printf("%s ", fec_mode_name[j].name);
4682 		}
4683 		printf("\n");
4684 	}
4685 }
4686 
4687 void
4688 show_rx_pkt_offsets(void)
4689 {
4690 	uint32_t i, n;
4691 
4692 	n = rx_pkt_nb_offs;
4693 	printf("Number of offsets: %u\n", n);
4694 	if (n) {
4695 		printf("Segment offsets: ");
4696 		for (i = 0; i != n - 1; i++)
4697 			printf("%hu,", rx_pkt_seg_offsets[i]);
4698 		printf("%hu\n", rx_pkt_seg_lengths[i]);
4699 	}
4700 }
4701 
4702 void
4703 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
4704 {
4705 	unsigned int i;
4706 
4707 	if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
4708 		printf("nb segments per RX packets=%u >= "
4709 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
4710 		return;
4711 	}
4712 
4713 	/*
4714 	 * No extra check here, the segment length will be checked by PMD
4715 	 * in the extended queue setup.
4716 	 */
4717 	for (i = 0; i < nb_offs; i++) {
4718 		if (seg_offsets[i] >= UINT16_MAX) {
4719 			printf("offset[%u]=%u > UINT16_MAX - give up\n",
4720 			       i, seg_offsets[i]);
4721 			return;
4722 		}
4723 	}
4724 
4725 	for (i = 0; i < nb_offs; i++)
4726 		rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];
4727 
4728 	rx_pkt_nb_offs = (uint8_t) nb_offs;
4729 }
4730 
4731 void
4732 show_rx_pkt_segments(void)
4733 {
4734 	uint32_t i, n;
4735 
4736 	n = rx_pkt_nb_segs;
4737 	printf("Number of segments: %u\n", n);
4738 	if (n) {
4739 		printf("Segment sizes: ");
4740 		for (i = 0; i != n - 1; i++)
4741 			printf("%hu,", rx_pkt_seg_lengths[i]);
4742 		printf("%hu\n", rx_pkt_seg_lengths[i]);
4743 	}
4744 }
4745 
4746 void
4747 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
4748 {
4749 	unsigned int i;
4750 
4751 	if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
4752 		printf("nb segments per RX packets=%u >= "
4753 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
4754 		return;
4755 	}
4756 
4757 	/*
4758 	 * No extra check here, the segment length will be checked by PMD
4759 	 * in the extended queue setup.
4760 	 */
4761 	for (i = 0; i < nb_segs; i++) {
4762 		if (seg_lengths[i] >= UINT16_MAX) {
4763 			printf("length[%u]=%u > UINT16_MAX - give up\n",
4764 			       i, seg_lengths[i]);
4765 			return;
4766 		}
4767 	}
4768 
4769 	for (i = 0; i < nb_segs; i++)
4770 		rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
4771 
4772 	rx_pkt_nb_segs = (uint8_t) nb_segs;
4773 }
4774 
4775 void
4776 show_tx_pkt_segments(void)
4777 {
4778 	uint32_t i, n;
4779 	const char *split;
4780 
4781 	n = tx_pkt_nb_segs;
4782 	split = tx_split_get_name(tx_pkt_split);
4783 
4784 	printf("Number of segments: %u\n", n);
4785 	printf("Segment sizes: ");
4786 	for (i = 0; i != n - 1; i++)
4787 		printf("%hu,", tx_pkt_seg_lengths[i]);
4788 	printf("%hu\n", tx_pkt_seg_lengths[i]);
4789 	printf("Split packet: %s\n", split);
4790 }
4791 
4792 static bool
4793 nb_segs_is_invalid(unsigned int nb_segs)
4794 {
4795 	uint16_t ring_size;
4796 	uint16_t queue_id;
4797 	uint16_t port_id;
4798 	int ret;
4799 
4800 	RTE_ETH_FOREACH_DEV(port_id) {
4801 		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
4802 			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
4803 			if (ret) {
4804 				/* Port may not be initialized yet, can't say
4805 				 * the port is invalid in this stage.
4806 				 */
4807 				continue;
4808 			}
4809 			if (ring_size < nb_segs) {
4810 				printf("nb segments per TX packets=%u >= TX "
4811 				       "queue(%u) ring_size=%u - txpkts ignored\n",
4812 				       nb_segs, queue_id, ring_size);
4813 				return true;
4814 			}
4815 		}
4816 	}
4817 
4818 	return false;
4819 }
4820 
4821 void
4822 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
4823 {
4824 	uint16_t tx_pkt_len;
4825 	unsigned int i;
4826 
4827 	/*
4828 	 * For single segment settings failed check is ignored.
4829 	 * It is a very basic capability to send the single segment
4830 	 * packets, suppose it is always supported.
4831 	 */
4832 	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) {
4833 		fprintf(stderr,
4834 			"Tx segment size(%u) is not supported - txpkts ignored\n",
4835 			nb_segs);
4836 		return;
4837 	}
4838 
4839 	if (nb_segs > RTE_MAX_SEGS_PER_PKT) {
4840 		fprintf(stderr,
4841 			"Tx segment size(%u) is bigger than max number of segment(%u)\n",
4842 			nb_segs, RTE_MAX_SEGS_PER_PKT);
4843 		return;
4844 	}
4845 
4846 	/*
4847 	 * Check that each segment length is greater or equal than
4848 	 * the mbuf data size.
4849 	 * Check also that the total packet length is greater or equal than the
4850 	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
4851 	 * 20 + 8).
4852 	 */
4853 	tx_pkt_len = 0;
4854 	for (i = 0; i < nb_segs; i++) {
4855 		if (seg_lengths[i] > mbuf_data_size[0]) {
4856 			fprintf(stderr,
4857 				"length[%u]=%u > mbuf_data_size=%u - give up\n",
4858 				i, seg_lengths[i], mbuf_data_size[0]);
4859 			return;
4860 		}
4861 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
4862 	}
4863 	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
4864 		fprintf(stderr, "total packet length=%u < %d - give up\n",
4865 				(unsigned) tx_pkt_len,
4866 				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
4867 		return;
4868 	}
4869 
4870 	for (i = 0; i < nb_segs; i++)
4871 		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
4872 
4873 	tx_pkt_length  = tx_pkt_len;
4874 	tx_pkt_nb_segs = (uint8_t) nb_segs;
4875 }
4876 
4877 void
4878 show_tx_pkt_times(void)
4879 {
4880 	printf("Interburst gap: %u\n", tx_pkt_times_inter);
4881 	printf("Intraburst gap: %u\n", tx_pkt_times_intra);
4882 }
4883 
4884 void
4885 set_tx_pkt_times(unsigned int *tx_times)
4886 {
4887 	tx_pkt_times_inter = tx_times[0];
4888 	tx_pkt_times_intra = tx_times[1];
4889 }
4890 
4891 #ifdef RTE_LIB_GRO
4892 void
4893 setup_gro(const char *onoff, portid_t port_id)
4894 {
4895 	if (!rte_eth_dev_is_valid_port(port_id)) {
4896 		fprintf(stderr, "invalid port id %u\n", port_id);
4897 		return;
4898 	}
4899 	if (test_done == 0) {
4900 		fprintf(stderr,
4901 			"Before enable/disable GRO, please stop forwarding first\n");
4902 		return;
4903 	}
4904 	if (strcmp(onoff, "on") == 0) {
4905 		if (gro_ports[port_id].enable != 0) {
4906 			fprintf(stderr,
4907 				"Port %u has enabled GRO. Please disable GRO first\n",
4908 				port_id);
4909 			return;
4910 		}
4911 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
4912 			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
4913 			gro_ports[port_id].param.max_flow_num =
4914 				GRO_DEFAULT_FLOW_NUM;
4915 			gro_ports[port_id].param.max_item_per_flow =
4916 				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
4917 		}
4918 		gro_ports[port_id].enable = 1;
4919 	} else {
4920 		if (gro_ports[port_id].enable == 0) {
4921 			fprintf(stderr, "Port %u has disabled GRO\n", port_id);
4922 			return;
4923 		}
4924 		gro_ports[port_id].enable = 0;
4925 	}
4926 }
4927 
4928 void
4929 setup_gro_flush_cycles(uint8_t cycles)
4930 {
4931 	if (test_done == 0) {
4932 		fprintf(stderr,
4933 			"Before change flush interval for GRO, please stop forwarding first.\n");
4934 		return;
4935 	}
4936 
4937 	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
4938 			GRO_DEFAULT_FLUSH_CYCLES) {
4939 		fprintf(stderr,
4940 			"The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n",
4941 			GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES);
4942 		cycles = GRO_DEFAULT_FLUSH_CYCLES;
4943 	}
4944 
4945 	gro_flush_cycles = cycles;
4946 }
4947 
4948 void
4949 show_gro(portid_t port_id)
4950 {
4951 	struct rte_gro_param *param;
4952 	uint32_t max_pkts_num;
4953 
4954 	param = &gro_ports[port_id].param;
4955 
4956 	if (!rte_eth_dev_is_valid_port(port_id)) {
4957 		fprintf(stderr, "Invalid port id %u.\n", port_id);
4958 		return;
4959 	}
4960 	if (gro_ports[port_id].enable) {
4961 		printf("GRO type: TCP/IPv4\n");
4962 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
4963 			max_pkts_num = param->max_flow_num *
4964 				param->max_item_per_flow;
4965 		} else
4966 			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
4967 		printf("Max number of packets to perform GRO: %u\n",
4968 				max_pkts_num);
4969 		printf("Flushing cycles: %u\n", gro_flush_cycles);
4970 	} else
4971 		printf("Port %u doesn't enable GRO.\n", port_id);
4972 }
4973 #endif /* RTE_LIB_GRO */
4974 
4975 #ifdef RTE_LIB_GSO
4976 void
4977 setup_gso(const char *mode, portid_t port_id)
4978 {
4979 	if (!rte_eth_dev_is_valid_port(port_id)) {
4980 		fprintf(stderr, "invalid port id %u\n", port_id);
4981 		return;
4982 	}
4983 	if (strcmp(mode, "on") == 0) {
4984 		if (test_done == 0) {
4985 			fprintf(stderr,
4986 				"before enabling GSO, please stop forwarding first\n");
4987 			return;
4988 		}
4989 		gso_ports[port_id].enable = 1;
4990 	} else if (strcmp(mode, "off") == 0) {
4991 		if (test_done == 0) {
4992 			fprintf(stderr,
4993 				"before disabling GSO, please stop forwarding first\n");
4994 			return;
4995 		}
4996 		gso_ports[port_id].enable = 0;
4997 	}
4998 }
4999 #endif /* RTE_LIB_GSO */
5000 
5001 char*
5002 list_pkt_forwarding_modes(void)
5003 {
5004 	static char fwd_modes[128] = "";
5005 	const char *separator = "|";
5006 	struct fwd_engine *fwd_eng;
5007 	unsigned i = 0;
5008 
5009 	if (strlen (fwd_modes) == 0) {
5010 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
5011 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
5012 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
5013 			strncat(fwd_modes, separator,
5014 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
5015 		}
5016 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
5017 	}
5018 
5019 	return fwd_modes;
5020 }
5021 
5022 char*
5023 list_pkt_forwarding_retry_modes(void)
5024 {
5025 	static char fwd_modes[128] = "";
5026 	const char *separator = "|";
5027 	struct fwd_engine *fwd_eng;
5028 	unsigned i = 0;
5029 
5030 	if (strlen(fwd_modes) == 0) {
5031 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
5032 			if (fwd_eng == &rx_only_engine)
5033 				continue;
5034 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
5035 					sizeof(fwd_modes) -
5036 					strlen(fwd_modes) - 1);
5037 			strncat(fwd_modes, separator,
5038 					sizeof(fwd_modes) -
5039 					strlen(fwd_modes) - 1);
5040 		}
5041 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
5042 	}
5043 
5044 	return fwd_modes;
5045 }
5046 
5047 void
5048 set_pkt_forwarding_mode(const char *fwd_mode_name)
5049 {
5050 	struct fwd_engine *fwd_eng;
5051 	unsigned i;
5052 
5053 	i = 0;
5054 	while ((fwd_eng = fwd_engines[i]) != NULL) {
5055 		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
5056 			printf("Set %s packet forwarding mode%s\n",
5057 			       fwd_mode_name,
5058 			       retry_enabled == 0 ? "" : " with retry");
5059 			cur_fwd_eng = fwd_eng;
5060 			return;
5061 		}
5062 		i++;
5063 	}
5064 	fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name);
5065 }
5066 
5067 void
5068 add_rx_dump_callbacks(portid_t portid)
5069 {
5070 	struct rte_eth_dev_info dev_info;
5071 	uint16_t queue;
5072 	int ret;
5073 
5074 	if (port_id_is_invalid(portid, ENABLED_WARN))
5075 		return;
5076 
5077 	ret = eth_dev_info_get_print_err(portid, &dev_info);
5078 	if (ret != 0)
5079 		return;
5080 
5081 	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
5082 		if (!ports[portid].rx_dump_cb[queue])
5083 			ports[portid].rx_dump_cb[queue] =
5084 				rte_eth_add_rx_callback(portid, queue,
5085 					dump_rx_pkts, NULL);
5086 }
5087 
5088 void
5089 add_tx_dump_callbacks(portid_t portid)
5090 {
5091 	struct rte_eth_dev_info dev_info;
5092 	uint16_t queue;
5093 	int ret;
5094 
5095 	if (port_id_is_invalid(portid, ENABLED_WARN))
5096 		return;
5097 
5098 	ret = eth_dev_info_get_print_err(portid, &dev_info);
5099 	if (ret != 0)
5100 		return;
5101 
5102 	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
5103 		if (!ports[portid].tx_dump_cb[queue])
5104 			ports[portid].tx_dump_cb[queue] =
5105 				rte_eth_add_tx_callback(portid, queue,
5106 							dump_tx_pkts, NULL);
5107 }
5108 
5109 void
5110 remove_rx_dump_callbacks(portid_t portid)
5111 {
5112 	struct rte_eth_dev_info dev_info;
5113 	uint16_t queue;
5114 	int ret;
5115 
5116 	if (port_id_is_invalid(portid, ENABLED_WARN))
5117 		return;
5118 
5119 	ret = eth_dev_info_get_print_err(portid, &dev_info);
5120 	if (ret != 0)
5121 		return;
5122 
5123 	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
5124 		if (ports[portid].rx_dump_cb[queue]) {
5125 			rte_eth_remove_rx_callback(portid, queue,
5126 				ports[portid].rx_dump_cb[queue]);
5127 			ports[portid].rx_dump_cb[queue] = NULL;
5128 		}
5129 }
5130 
5131 void
5132 remove_tx_dump_callbacks(portid_t portid)
5133 {
5134 	struct rte_eth_dev_info dev_info;
5135 	uint16_t queue;
5136 	int ret;
5137 
5138 	if (port_id_is_invalid(portid, ENABLED_WARN))
5139 		return;
5140 
5141 	ret = eth_dev_info_get_print_err(portid, &dev_info);
5142 	if (ret != 0)
5143 		return;
5144 
5145 	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
5146 		if (ports[portid].tx_dump_cb[queue]) {
5147 			rte_eth_remove_tx_callback(portid, queue,
5148 				ports[portid].tx_dump_cb[queue]);
5149 			ports[portid].tx_dump_cb[queue] = NULL;
5150 		}
5151 }
5152 
5153 void
5154 configure_rxtx_dump_callbacks(uint16_t verbose)
5155 {
5156 	portid_t portid;
5157 
5158 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5159 		TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
5160 		return;
5161 #endif
5162 
5163 	RTE_ETH_FOREACH_DEV(portid)
5164 	{
5165 		if (verbose == 1 || verbose > 2)
5166 			add_rx_dump_callbacks(portid);
5167 		else
5168 			remove_rx_dump_callbacks(portid);
5169 		if (verbose >= 2)
5170 			add_tx_dump_callbacks(portid);
5171 		else
5172 			remove_tx_dump_callbacks(portid);
5173 	}
5174 }
5175 
5176 void
5177 set_verbose_level(uint16_t vb_level)
5178 {
5179 	printf("Change verbose level from %u to %u\n",
5180 	       (unsigned int) verbose_level, (unsigned int) vb_level);
5181 	verbose_level = vb_level;
5182 	configure_rxtx_dump_callbacks(verbose_level);
5183 }
5184 
5185 void
5186 vlan_extend_set(portid_t port_id, int on)
5187 {
5188 	int diag;
5189 	int vlan_offload;
5190 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
5191 
5192 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5193 		return;
5194 
5195 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
5196 
5197 	if (on) {
5198 		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
5199 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
5200 	} else {
5201 		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
5202 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
5203 	}
5204 
5205 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
5206 	if (diag < 0) {
5207 		fprintf(stderr,
5208 			"rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n",
5209 			port_id, on, diag);
5210 		return;
5211 	}
5212 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
5213 }
5214 
5215 void
5216 rx_vlan_strip_set(portid_t port_id, int on)
5217 {
5218 	int diag;
5219 	int vlan_offload;
5220 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
5221 
5222 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5223 		return;
5224 
5225 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
5226 
5227 	if (on) {
5228 		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
5229 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
5230 	} else {
5231 		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
5232 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
5233 	}
5234 
5235 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
5236 	if (diag < 0) {
5237 		fprintf(stderr,
5238 			"%s(port_pi=%d, on=%d) failed diag=%d\n",
5239 			__func__, port_id, on, diag);
5240 		return;
5241 	}
5242 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
5243 }
5244 
5245 void
5246 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
5247 {
5248 	int diag;
5249 
5250 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5251 		return;
5252 
5253 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
5254 	if (diag < 0)
5255 		fprintf(stderr,
5256 			"%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n",
5257 			__func__, port_id, queue_id, on, diag);
5258 }
5259 
5260 void
5261 rx_vlan_filter_set(portid_t port_id, int on)
5262 {
5263 	int diag;
5264 	int vlan_offload;
5265 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
5266 
5267 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5268 		return;
5269 
5270 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
5271 
5272 	if (on) {
5273 		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
5274 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
5275 	} else {
5276 		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
5277 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
5278 	}
5279 
5280 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
5281 	if (diag < 0) {
5282 		fprintf(stderr,
5283 			"%s(port_pi=%d, on=%d) failed diag=%d\n",
5284 			__func__, port_id, on, diag);
5285 		return;
5286 	}
5287 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
5288 }
5289 
5290 void
5291 rx_vlan_qinq_strip_set(portid_t port_id, int on)
5292 {
5293 	int diag;
5294 	int vlan_offload;
5295 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
5296 
5297 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5298 		return;
5299 
5300 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
5301 
5302 	if (on) {
5303 		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
5304 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
5305 	} else {
5306 		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
5307 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
5308 	}
5309 
5310 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
5311 	if (diag < 0) {
5312 		fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n",
5313 			__func__, port_id, on, diag);
5314 		return;
5315 	}
5316 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
5317 }
5318 
5319 int
5320 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
5321 {
5322 	int diag;
5323 
5324 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5325 		return 1;
5326 	if (vlan_id_is_invalid(vlan_id))
5327 		return 1;
5328 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
5329 	if (diag == 0)
5330 		return 0;
5331 	fprintf(stderr,
5332 		"rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n",
5333 		port_id, vlan_id, on, diag);
5334 	return -1;
5335 }
5336 
5337 void
5338 rx_vlan_all_filter_set(portid_t port_id, int on)
5339 {
5340 	uint16_t vlan_id;
5341 
5342 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5343 		return;
5344 	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
5345 		if (rx_vft_set(port_id, vlan_id, on))
5346 			break;
5347 	}
5348 }
5349 
5350 void
5351 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
5352 {
5353 	int diag;
5354 
5355 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5356 		return;
5357 
5358 	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
5359 	if (diag == 0)
5360 		return;
5361 
5362 	fprintf(stderr,
5363 		"tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n",
5364 		port_id, vlan_type, tp_id, diag);
5365 }
5366 
5367 void
5368 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
5369 {
5370 	struct rte_eth_dev_info dev_info;
5371 	int ret;
5372 
5373 	if (vlan_id_is_invalid(vlan_id))
5374 		return;
5375 
5376 	if (ports[port_id].dev_conf.txmode.offloads &
5377 	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
5378 		fprintf(stderr, "Error, as QinQ has been enabled.\n");
5379 		return;
5380 	}
5381 
5382 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
5383 	if (ret != 0)
5384 		return;
5385 
5386 	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
5387 		fprintf(stderr,
5388 			"Error: vlan insert is not supported by port %d\n",
5389 			port_id);
5390 		return;
5391 	}
5392 
5393 	tx_vlan_reset(port_id);
5394 	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
5395 	ports[port_id].tx_vlan_id = vlan_id;
5396 }
5397 
5398 void
5399 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
5400 {
5401 	struct rte_eth_dev_info dev_info;
5402 	int ret;
5403 
5404 	if (vlan_id_is_invalid(vlan_id))
5405 		return;
5406 	if (vlan_id_is_invalid(vlan_id_outer))
5407 		return;
5408 
5409 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
5410 	if (ret != 0)
5411 		return;
5412 
5413 	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
5414 		fprintf(stderr,
5415 			"Error: qinq insert not supported by port %d\n",
5416 			port_id);
5417 		return;
5418 	}
5419 
5420 	tx_vlan_reset(port_id);
5421 	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
5422 						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
5423 	ports[port_id].tx_vlan_id = vlan_id;
5424 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
5425 }
5426 
5427 void
5428 tx_vlan_reset(portid_t port_id)
5429 {
5430 	ports[port_id].dev_conf.txmode.offloads &=
5431 				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
5432 				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
5433 	ports[port_id].tx_vlan_id = 0;
5434 	ports[port_id].tx_vlan_id_outer = 0;
5435 }
5436 
5437 void
5438 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
5439 {
5440 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5441 		return;
5442 
5443 	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
5444 }
5445 
5446 void
5447 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
5448 {
5449 	int ret;
5450 
5451 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5452 		return;
5453 
5454 	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
5455 		return;
5456 
5457 	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
5458 		fprintf(stderr, "map_value not in required range 0..%d\n",
5459 			RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
5460 		return;
5461 	}
5462 
5463 	if (!is_rx) { /* tx */
5464 		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
5465 							     map_value);
5466 		if (ret) {
5467 			fprintf(stderr,
5468 				"failed to set tx queue stats mapping.\n");
5469 			return;
5470 		}
5471 	} else { /* rx */
5472 		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
5473 							     map_value);
5474 		if (ret) {
5475 			fprintf(stderr,
5476 				"failed to set rx queue stats mapping.\n");
5477 			return;
5478 		}
5479 	}
5480 }
5481 
5482 void
5483 set_xstats_hide_zero(uint8_t on_off)
5484 {
5485 	xstats_hide_zero = on_off;
5486 }
5487 
5488 void
5489 set_record_core_cycles(uint8_t on_off)
5490 {
5491 	record_core_cycles = on_off;
5492 }
5493 
5494 void
5495 set_record_burst_stats(uint8_t on_off)
5496 {
5497 	record_burst_stats = on_off;
5498 }
5499 
5500 uint16_t
5501 str_to_flowtype(const char *string)
5502 {
5503 	uint8_t i;
5504 
5505 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
5506 		if (!strcmp(flowtype_str_table[i].str, string))
5507 			return flowtype_str_table[i].ftype;
5508 	}
5509 
5510 	if (isdigit(string[0])) {
5511 		int val = atoi(string);
5512 		if (val > 0 && val < 64)
5513 			return (uint16_t)val;
5514 	}
5515 
5516 	return RTE_ETH_FLOW_UNKNOWN;
5517 }
5518 
5519 const char*
5520 flowtype_to_str(uint16_t flow_type)
5521 {
5522 	uint8_t i;
5523 
5524 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
5525 		if (flowtype_str_table[i].ftype == flow_type)
5526 			return flowtype_str_table[i].str;
5527 	}
5528 
5529 	return NULL;
5530 }
5531 
5532 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
5533 
5534 static inline void
5535 print_fdir_mask(struct rte_eth_fdir_masks *mask)
5536 {
5537 	printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
5538 
5539 	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
5540 		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
5541 			" tunnel_id: 0x%08x",
5542 			mask->mac_addr_byte_mask, mask->tunnel_type_mask,
5543 			rte_be_to_cpu_32(mask->tunnel_id_mask));
5544 	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
5545 		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
5546 			rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
5547 			rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
5548 
5549 		printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
5550 			rte_be_to_cpu_16(mask->src_port_mask),
5551 			rte_be_to_cpu_16(mask->dst_port_mask));
5552 
5553 		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
5554 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
5555 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
5556 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
5557 			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
5558 
5559 		printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
5560 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
5561 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
5562 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
5563 			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
5564 	}
5565 
5566 	printf("\n");
5567 }
5568 
5569 static inline void
5570 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
5571 {
5572 	struct rte_eth_flex_payload_cfg *cfg;
5573 	uint32_t i, j;
5574 
5575 	for (i = 0; i < flex_conf->nb_payloads; i++) {
5576 		cfg = &flex_conf->flex_set[i];
5577 		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
5578 			printf("\n    RAW:  ");
5579 		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
5580 			printf("\n    L2_PAYLOAD:  ");
5581 		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
5582 			printf("\n    L3_PAYLOAD:  ");
5583 		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
5584 			printf("\n    L4_PAYLOAD:  ");
5585 		else
5586 			printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
5587 		for (j = 0; j < num; j++)
5588 			printf("  %-5u", cfg->src_offset[j]);
5589 	}
5590 	printf("\n");
5591 }
5592 
5593 static inline void
5594 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
5595 {
5596 	struct rte_eth_fdir_flex_mask *mask;
5597 	uint32_t i, j;
5598 	const char *p;
5599 
5600 	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
5601 		mask = &flex_conf->flex_mask[i];
5602 		p = flowtype_to_str(mask->flow_type);
5603 		printf("\n    %s:\t", p ? p : "unknown");
5604 		for (j = 0; j < num; j++)
5605 			printf(" %02x", mask->mask[j]);
5606 	}
5607 	printf("\n");
5608 }
5609 
5610 static inline void
5611 print_fdir_flow_type(uint32_t flow_types_mask)
5612 {
5613 	int i;
5614 	const char *p;
5615 
5616 	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
5617 		if (!(flow_types_mask & (1 << i)))
5618 			continue;
5619 		p = flowtype_to_str(i);
5620 		if (p)
5621 			printf(" %s", p);
5622 		else
5623 			printf(" unknown");
5624 	}
5625 	printf("\n");
5626 }
5627 
5628 static int
5629 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
5630 		    struct rte_eth_fdir_stats *fdir_stat)
5631 {
5632 	int ret = -ENOTSUP;
5633 
5634 #ifdef RTE_NET_I40E
5635 	if (ret == -ENOTSUP) {
5636 		ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
5637 		if (!ret)
5638 			ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
5639 	}
5640 #endif
5641 #ifdef RTE_NET_IXGBE
5642 	if (ret == -ENOTSUP) {
5643 		ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
5644 		if (!ret)
5645 			ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
5646 	}
5647 #endif
5648 	switch (ret) {
5649 	case 0:
5650 		break;
5651 	case -ENOTSUP:
5652 		fprintf(stderr, "\n FDIR is not supported on port %-2d\n",
5653 			port_id);
5654 		break;
5655 	default:
5656 		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
5657 		break;
5658 	}
5659 	return ret;
5660 }
5661 
5662 void
5663 fdir_get_infos(portid_t port_id)
5664 {
5665 	struct rte_eth_fdir_stats fdir_stat;
5666 	struct rte_eth_fdir_info fdir_info;
5667 
5668 	static const char *fdir_stats_border = "########################";
5669 
5670 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5671 		return;
5672 
5673 	memset(&fdir_info, 0, sizeof(fdir_info));
5674 	memset(&fdir_stat, 0, sizeof(fdir_stat));
5675 	if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
5676 		return;
5677 
5678 	printf("\n  %s FDIR infos for port %-2d     %s\n",
5679 	       fdir_stats_border, port_id, fdir_stats_border);
5680 	printf("  MODE: ");
5681 	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
5682 		printf("  PERFECT\n");
5683 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
5684 		printf("  PERFECT-MAC-VLAN\n");
5685 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
5686 		printf("  PERFECT-TUNNEL\n");
5687 	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
5688 		printf("  SIGNATURE\n");
5689 	else
5690 		printf("  DISABLE\n");
5691 	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
5692 		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
5693 		printf("  SUPPORTED FLOW TYPE: ");
5694 		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
5695 	}
5696 	printf("  FLEX PAYLOAD INFO:\n");
5697 	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
5698 	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
5699 	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
5700 		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
5701 		fdir_info.flex_payload_unit,
5702 		fdir_info.max_flex_payload_segment_num,
5703 		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
5704 	printf("  MASK: ");
5705 	print_fdir_mask(&fdir_info.mask);
5706 	if (fdir_info.flex_conf.nb_payloads > 0) {
5707 		printf("  FLEX PAYLOAD SRC OFFSET:");
5708 		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
5709 	}
5710 	if (fdir_info.flex_conf.nb_flexmasks > 0) {
5711 		printf("  FLEX MASK CFG:");
5712 		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
5713 	}
5714 	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
5715 	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
5716 	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
5717 	       fdir_info.guarant_spc, fdir_info.best_spc);
5718 	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
5719 	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
5720 	       "  add:	         %-10"PRIu64"  remove:        %"PRIu64"\n"
5721 	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
5722 	       fdir_stat.collision, fdir_stat.free,
5723 	       fdir_stat.maxhash, fdir_stat.maxlen,
5724 	       fdir_stat.add, fdir_stat.remove,
5725 	       fdir_stat.f_add, fdir_stat.f_remove);
5726 	printf("  %s############################%s\n",
5727 	       fdir_stats_border, fdir_stats_border);
5728 }
5729 
5730 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */
5731 
5732 void
5733 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
5734 {
5735 	struct rte_port *port;
5736 	struct rte_eth_fdir_flex_conf *flex_conf;
5737 	int i, idx = 0;
5738 
5739 	port = &ports[port_id];
5740 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
5741 	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
5742 		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
5743 			idx = i;
5744 			break;
5745 		}
5746 	}
5747 	if (i >= RTE_ETH_FLOW_MAX) {
5748 		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
5749 			idx = flex_conf->nb_flexmasks;
5750 			flex_conf->nb_flexmasks++;
5751 		} else {
5752 			fprintf(stderr,
5753 				"The flex mask table is full. Can not set flex mask for flow_type(%u).",
5754 				cfg->flow_type);
5755 			return;
5756 		}
5757 	}
5758 	rte_memcpy(&flex_conf->flex_mask[idx],
5759 			 cfg,
5760 			 sizeof(struct rte_eth_fdir_flex_mask));
5761 }
5762 
5763 void
5764 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
5765 {
5766 	struct rte_port *port;
5767 	struct rte_eth_fdir_flex_conf *flex_conf;
5768 	int i, idx = 0;
5769 
5770 	port = &ports[port_id];
5771 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
5772 	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
5773 		if (cfg->type == flex_conf->flex_set[i].type) {
5774 			idx = i;
5775 			break;
5776 		}
5777 	}
5778 	if (i >= RTE_ETH_PAYLOAD_MAX) {
5779 		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
5780 			idx = flex_conf->nb_payloads;
5781 			flex_conf->nb_payloads++;
5782 		} else {
5783 			fprintf(stderr,
5784 				"The flex payload table is full. Can not set flex payload for type(%u).",
5785 				cfg->type);
5786 			return;
5787 		}
5788 	}
5789 	rte_memcpy(&flex_conf->flex_set[idx],
5790 			 cfg,
5791 			 sizeof(struct rte_eth_flex_payload_cfg));
5792 
5793 }
5794 
5795 void
5796 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
5797 {
5798 #ifdef RTE_NET_IXGBE
5799 	int diag;
5800 
5801 	if (is_rx)
5802 		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
5803 	else
5804 		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
5805 
5806 	if (diag == 0)
5807 		return;
5808 	fprintf(stderr,
5809 		"rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
5810 		is_rx ? "rx" : "tx", port_id, diag);
5811 	return;
5812 #endif
5813 	fprintf(stderr, "VF %s setting not supported for port %d\n",
5814 		is_rx ? "Rx" : "Tx", port_id);
5815 	RTE_SET_USED(vf);
5816 	RTE_SET_USED(on);
5817 }
5818 
5819 int
5820 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
5821 {
5822 	int diag;
5823 	struct rte_eth_link link;
5824 	int ret;
5825 
5826 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5827 		return 1;
5828 	ret = eth_link_get_nowait_print_err(port_id, &link);
5829 	if (ret < 0)
5830 		return 1;
5831 	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
5832 	    rate > link.link_speed) {
5833 		fprintf(stderr,
5834 			"Invalid rate value:%u bigger than link speed: %u\n",
5835 			rate, link.link_speed);
5836 		return 1;
5837 	}
5838 	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
5839 	if (diag == 0)
5840 		return diag;
5841 	fprintf(stderr,
5842 		"rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
5843 		port_id, diag);
5844 	return diag;
5845 }
5846 
5847 int
5848 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
5849 {
5850 	int diag = -ENOTSUP;
5851 
5852 	RTE_SET_USED(vf);
5853 	RTE_SET_USED(rate);
5854 	RTE_SET_USED(q_msk);
5855 
5856 #ifdef RTE_NET_IXGBE
5857 	if (diag == -ENOTSUP)
5858 		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
5859 						       q_msk);
5860 #endif
5861 #ifdef RTE_NET_BNXT
5862 	if (diag == -ENOTSUP)
5863 		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
5864 #endif
5865 	if (diag == 0)
5866 		return diag;
5867 
5868 	fprintf(stderr,
5869 		"%s for port_id=%d failed diag=%d\n",
5870 		__func__, port_id, diag);
5871 	return diag;
5872 }
5873 
5874 int
5875 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh)
5876 {
5877 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5878 		return -EINVAL;
5879 
5880 	return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh);
5881 }
5882 
5883 /*
5884  * Functions to manage the set of filtered Multicast MAC addresses.
5885  *
5886  * A pool of filtered multicast MAC addresses is associated with each port.
5887  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
5888  * The address of the pool and the number of valid multicast MAC addresses
5889  * recorded in the pool are stored in the fields "mc_addr_pool" and
5890  * "mc_addr_nb" of the "rte_port" data structure.
5891  *
5892  * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
5893  * to be supplied a contiguous array of multicast MAC addresses.
5894  * To comply with this constraint, the set of multicast addresses recorded
5895  * into the pool are systematically compacted at the beginning of the pool.
5896  * Hence, when a multicast address is removed from the pool, all following
5897  * addresses, if any, are copied back to keep the set contiguous.
5898  */
5899 #define MCAST_POOL_INC 32
5900 
5901 static int
5902 mcast_addr_pool_extend(struct rte_port *port)
5903 {
5904 	struct rte_ether_addr *mc_pool;
5905 	size_t mc_pool_size;
5906 
5907 	/*
5908 	 * If a free entry is available at the end of the pool, just
5909 	 * increment the number of recorded multicast addresses.
5910 	 */
5911 	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
5912 		port->mc_addr_nb++;
5913 		return 0;
5914 	}
5915 
5916 	/*
5917 	 * [re]allocate a pool with MCAST_POOL_INC more entries.
5918 	 * The previous test guarantees that port->mc_addr_nb is a multiple
5919 	 * of MCAST_POOL_INC.
5920 	 */
5921 	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
5922 						    MCAST_POOL_INC);
5923 	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
5924 						mc_pool_size);
5925 	if (mc_pool == NULL) {
5926 		fprintf(stderr,
5927 			"allocation of pool of %u multicast addresses failed\n",
5928 			port->mc_addr_nb + MCAST_POOL_INC);
5929 		return -ENOMEM;
5930 	}
5931 
5932 	port->mc_addr_pool = mc_pool;
5933 	port->mc_addr_nb++;
5934 	return 0;
5935 
5936 }
5937 
5938 static void
5939 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
5940 {
5941 	if (mcast_addr_pool_extend(port) != 0)
5942 		return;
5943 	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
5944 }
5945 
5946 static void
5947 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
5948 {
5949 	port->mc_addr_nb--;
5950 	if (addr_idx == port->mc_addr_nb) {
5951 		/* No need to recompact the set of multicast addresses. */
5952 		if (port->mc_addr_nb == 0) {
5953 			/* free the pool of multicast addresses. */
5954 			free(port->mc_addr_pool);
5955 			port->mc_addr_pool = NULL;
5956 		}
5957 		return;
5958 	}
5959 	memmove(&port->mc_addr_pool[addr_idx],
5960 		&port->mc_addr_pool[addr_idx + 1],
5961 		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
5962 }
5963 
5964 int
5965 mcast_addr_pool_destroy(portid_t port_id)
5966 {
5967 	struct rte_port *port;
5968 
5969 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
5970 	    port_id == (portid_t)RTE_PORT_ALL)
5971 		return -EINVAL;
5972 	port = &ports[port_id];
5973 
5974 	if (port->mc_addr_nb != 0) {
5975 		/* free the pool of multicast addresses. */
5976 		free(port->mc_addr_pool);
5977 		port->mc_addr_pool = NULL;
5978 		port->mc_addr_nb = 0;
5979 	}
5980 	return 0;
5981 }
5982 
5983 static int
5984 eth_port_multicast_addr_list_set(portid_t port_id)
5985 {
5986 	struct rte_port *port;
5987 	int diag;
5988 
5989 	port = &ports[port_id];
5990 	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
5991 					    port->mc_addr_nb);
5992 	if (diag < 0)
5993 		fprintf(stderr,
5994 			"rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
5995 			port_id, port->mc_addr_nb, diag);
5996 
5997 	return diag;
5998 }
5999 
6000 void
6001 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
6002 {
6003 	struct rte_port *port;
6004 	uint32_t i;
6005 
6006 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6007 		return;
6008 
6009 	port = &ports[port_id];
6010 
6011 	/*
6012 	 * Check that the added multicast MAC address is not already recorded
6013 	 * in the pool of multicast addresses.
6014 	 */
6015 	for (i = 0; i < port->mc_addr_nb; i++) {
6016 		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
6017 			fprintf(stderr,
6018 				"multicast address already filtered by port\n");
6019 			return;
6020 		}
6021 	}
6022 
6023 	mcast_addr_pool_append(port, mc_addr);
6024 	if (eth_port_multicast_addr_list_set(port_id) < 0)
6025 		/* Rollback on failure, remove the address from the pool */
6026 		mcast_addr_pool_remove(port, i);
6027 }
6028 
6029 void
6030 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
6031 {
6032 	struct rte_port *port;
6033 	uint32_t i;
6034 
6035 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6036 		return;
6037 
6038 	port = &ports[port_id];
6039 
6040 	/*
6041 	 * Search the pool of multicast MAC addresses for the removed address.
6042 	 */
6043 	for (i = 0; i < port->mc_addr_nb; i++) {
6044 		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
6045 			break;
6046 	}
6047 	if (i == port->mc_addr_nb) {
6048 		fprintf(stderr, "multicast address not filtered by port %d\n",
6049 			port_id);
6050 		return;
6051 	}
6052 
6053 	mcast_addr_pool_remove(port, i);
6054 	if (eth_port_multicast_addr_list_set(port_id) < 0)
6055 		/* Rollback on failure, add the address back into the pool */
6056 		mcast_addr_pool_append(port, mc_addr);
6057 }
6058 
6059 void
6060 port_dcb_info_display(portid_t port_id)
6061 {
6062 	struct rte_eth_dcb_info dcb_info;
6063 	uint16_t i;
6064 	int ret;
6065 	static const char *border = "================";
6066 
6067 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6068 		return;
6069 
6070 	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
6071 	if (ret) {
6072 		fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n",
6073 			port_id);
6074 		return;
6075 	}
6076 	printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
6077 	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
6078 	printf("\n  TC :        ");
6079 	for (i = 0; i < dcb_info.nb_tcs; i++)
6080 		printf("\t%4d", i);
6081 	printf("\n  Priority :  ");
6082 	for (i = 0; i < dcb_info.nb_tcs; i++)
6083 		printf("\t%4d", dcb_info.prio_tc[i]);
6084 	printf("\n  BW percent :");
6085 	for (i = 0; i < dcb_info.nb_tcs; i++)
6086 		printf("\t%4d%%", dcb_info.tc_bws[i]);
6087 	printf("\n  RXQ base :  ");
6088 	for (i = 0; i < dcb_info.nb_tcs; i++)
6089 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
6090 	printf("\n  RXQ number :");
6091 	for (i = 0; i < dcb_info.nb_tcs; i++)
6092 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
6093 	printf("\n  TXQ base :  ");
6094 	for (i = 0; i < dcb_info.nb_tcs; i++)
6095 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
6096 	printf("\n  TXQ number :");
6097 	for (i = 0; i < dcb_info.nb_tcs; i++)
6098 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
6099 	printf("\n");
6100 }
6101 
6102 uint8_t *
6103 open_file(const char *file_path, uint32_t *size)
6104 {
6105 	int fd = open(file_path, O_RDONLY);
6106 	off_t pkg_size;
6107 	uint8_t *buf = NULL;
6108 	int ret = 0;
6109 	struct stat st_buf;
6110 
6111 	if (size)
6112 		*size = 0;
6113 
6114 	if (fd == -1) {
6115 		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
6116 		return buf;
6117 	}
6118 
6119 	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
6120 		close(fd);
6121 		fprintf(stderr, "%s: File operations failed\n", __func__);
6122 		return buf;
6123 	}
6124 
6125 	pkg_size = st_buf.st_size;
6126 	if (pkg_size < 0) {
6127 		close(fd);
6128 		fprintf(stderr, "%s: File operations failed\n", __func__);
6129 		return buf;
6130 	}
6131 
6132 	buf = (uint8_t *)malloc(pkg_size);
6133 	if (!buf) {
6134 		close(fd);
6135 		fprintf(stderr, "%s: Failed to malloc memory\n", __func__);
6136 		return buf;
6137 	}
6138 
6139 	ret = read(fd, buf, pkg_size);
6140 	if (ret < 0) {
6141 		close(fd);
6142 		fprintf(stderr, "%s: File read operation failed\n", __func__);
6143 		close_file(buf);
6144 		return NULL;
6145 	}
6146 
6147 	if (size)
6148 		*size = pkg_size;
6149 
6150 	close(fd);
6151 
6152 	return buf;
6153 }
6154 
6155 int
6156 save_file(const char *file_path, uint8_t *buf, uint32_t size)
6157 {
6158 	FILE *fh = fopen(file_path, "wb");
6159 
6160 	if (fh == NULL) {
6161 		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
6162 		return -1;
6163 	}
6164 
6165 	if (fwrite(buf, 1, size, fh) != size) {
6166 		fclose(fh);
6167 		fprintf(stderr, "%s: File write operation failed\n", __func__);
6168 		return -1;
6169 	}
6170 
6171 	fclose(fh);
6172 
6173 	return 0;
6174 }
6175 
6176 int
6177 close_file(uint8_t *buf)
6178 {
6179 	if (buf) {
6180 		free((void *)buf);
6181 		return 0;
6182 	}
6183 
6184 	return -1;
6185 }
6186 
6187 void
6188 show_macs(portid_t port_id)
6189 {
6190 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
6191 	struct rte_eth_dev_info dev_info;
6192 	int32_t i, rc, num_macs = 0;
6193 
6194 	if (eth_dev_info_get_print_err(port_id, &dev_info))
6195 		return;
6196 
6197 	struct rte_ether_addr addr[dev_info.max_mac_addrs];
6198 	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
6199 	if (rc < 0)
6200 		return;
6201 
6202 	for (i = 0; i < rc; i++) {
6203 
6204 		/* skip zero address */
6205 		if (rte_is_zero_ether_addr(&addr[i]))
6206 			continue;
6207 
6208 		num_macs++;
6209 	}
6210 
6211 	printf("Number of MAC address added: %d\n", num_macs);
6212 
6213 	for (i = 0; i < rc; i++) {
6214 
6215 		/* skip zero address */
6216 		if (rte_is_zero_ether_addr(&addr[i]))
6217 			continue;
6218 
6219 		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
6220 		printf("  %s\n", buf);
6221 	}
6222 }
6223 
6224 void
6225 show_mcast_macs(portid_t port_id)
6226 {
6227 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
6228 	struct rte_ether_addr *addr;
6229 	struct rte_port *port;
6230 	uint32_t i;
6231 
6232 	port = &ports[port_id];
6233 
6234 	printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb);
6235 
6236 	for (i = 0; i < port->mc_addr_nb; i++) {
6237 		addr = &port->mc_addr_pool[i];
6238 
6239 		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
6240 		printf("  %s\n", buf);
6241 	}
6242 }
6243