/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */

#include <ctype.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_bus.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_mtr.h>
#include <rte_errno.h>
#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#ifdef RTE_LIB_GRO
#include <rte_gro.h>
#endif
#include <rte_hexdump.h>

#include "testpmd.h"
#include "cmdline_mtr.h"

#define ETHDEV_FWVERS_LEN 32

#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE_ID CLOCK_MONOTONIC
#endif

#define NS_PER_SEC 1E9

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

const struct rss_type_info rss_type_table[] = {
	/* Group types */
	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2},
	{ "none", 0 },
	{ "ip", RTE_ETH_RSS_IP },
	{ "udp", RTE_ETH_RSS_UDP },
	{ "tcp", RTE_ETH_RSS_TCP },
	{ "sctp", RTE_ETH_RSS_SCTP },
	{ "tunnel", RTE_ETH_RSS_TUNNEL },
	{ "vlan", RTE_ETH_RSS_VLAN },

	/* Individual type */
	{ "ipv4", RTE_ETH_RSS_IPV4 },
	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", RTE_ETH_RSS_IPV6 },
	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
	{ "port", RTE_ETH_RSS_PORT },
	{ "vxlan", RTE_ETH_RSS_VXLAN },
	{ "geneve", RTE_ETH_RSS_GENEVE },
	{ "nvgre", RTE_ETH_RSS_NVGRE },
	{ "gtpu", RTE_ETH_RSS_GTPU },
	{ "eth", RTE_ETH_RSS_ETH },
	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
	{ "esp", RTE_ETH_RSS_ESP },
	{ "ah", RTE_ETH_RSS_AH },
	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
	{ "pfcp", RTE_ETH_RSS_PFCP },
	{ "pppoe", RTE_ETH_RSS_PPPOE },
	{ "ecpri", RTE_ETH_RSS_ECPRI },
	{ "mpls", RTE_ETH_RSS_MPLS },
	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
	{ "l2tpv2", RTE_ETH_RSS_L2TPV2 },
	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
	{ NULL, 0 },
};

static const struct {
	enum rte_eth_fec_mode mode;
	const char *name;
} fec_mode_name[] = {
	{
		.mode = RTE_ETH_FEC_NOFEC,
		.name = "off",
	},
	{
		.mode = RTE_ETH_FEC_AUTO,
		.name = "auto",
	},
	{
		.mode = RTE_ETH_FEC_BASER,
		.name = "baser",
	},
	{
		.mode = RTE_ETH_FEC_RS,
		.name = "rs",
	},
	{
		.mode = RTE_ETH_FEC_LLRS,
		.name = "llrs",
	},
};

static const struct {
	char str[32];
	uint16_t ftype;
} flowtype_str_table[] = {
	{"raw", RTE_ETH_FLOW_RAW},
	{"ipv4", RTE_ETH_FLOW_IPV4},
	{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
	{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
	{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
	{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
	{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
	{"ipv6", RTE_ETH_FLOW_IPV6},
	{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
	{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
	{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
	{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
	{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
	{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
	{"ipv6-ex", RTE_ETH_FLOW_IPV6_EX},
	{"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX},
	{"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX},
	{"port", RTE_ETH_FLOW_PORT},
	{"vxlan", RTE_ETH_FLOW_VXLAN},
	{"geneve", RTE_ETH_FLOW_GENEVE},
	{"nvgre", RTE_ETH_FLOW_NVGRE},
	{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
	{"gtpu", RTE_ETH_FLOW_GTPU},
};

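/** Print an Ethernet address prefixed with the given label. */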
static void
print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

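/** Display values and rates of the xstats selected for periodic display. */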
static void
nic_xstats_display_periodic(portid_t port_id)
{
	struct xstat_display_info *xstats_info;
	uint64_t *prev_values, *curr_values;
	uint64_t diff_value, value_rate;
	struct timespec cur_time;
	uint64_t *ids_supp;
	size_t ids_supp_sz;
	uint64_t diff_ns;
	unsigned int i;
	int rc;

	xstats_info = &ports[port_id].xstats_info;

	ids_supp_sz = xstats_info->ids_supp_sz;
	if (ids_supp_sz == 0)
		return;

	printf("\n");

	ids_supp = xstats_info->ids_supp;
	prev_values = xstats_info->prev_values;
	curr_values = xstats_info->curr_values;

	rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
				      ids_supp_sz);
	if (rc != (int)ids_supp_sz) {
		fprintf(stderr,
			"Failed to get values of %zu xstats for port %u - return code %d\n",
			ids_supp_sz, port_id, rc);
		return;
	}

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (xstats_info->prev_ns != 0)
			diff_ns = ns - xstats_info->prev_ns;
		xstats_info->prev_ns = ns;
	}

	printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
	for (i = 0; i < ids_supp_sz; i++) {
		diff_value = (curr_values[i] > prev_values[i]) ?
			     (curr_values[i] - prev_values[i]) : 0;
		prev_values[i] = curr_values[i];
		value_rate = diff_ns > 0 ?
				(double)diff_value / diff_ns * NS_PER_SEC : 0;

		printf("  %-25s%12"PRIu64" %15"PRIu64"\n",
		       xstats_display[i].name, curr_values[i], value_rate);
	}
}

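/** Display basic NIC statistics and per-port throughput since the last call. */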
void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
	struct timespec cur_time;
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
								diff_ns;
	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
	struct rte_eth_stats stats;
	static const char *nic_stats_border = "########################";
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	ret = rte_eth_stats_get(port_id, &stats);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to get stats (port %u): %d\n",
			__func__, port_id, ret);
		return;
	}
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
	printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
	printf("  RX-nombuf:  %-10"PRIu64"\n", stats.rx_nombuf);
	printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);

	diff_ns = 0;
	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
		uint64_t ns;

		ns = cur_time.tv_sec * NS_PER_SEC;
		ns += cur_time.tv_nsec;

		if (prev_ns[port_id] != 0)
			diff_ns = ns - prev_ns[port_id];
		prev_ns[port_id] = ns;
	}

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_ns > 0 ?
		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
	mpps_tx = diff_ns > 0 ?
		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;

	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
		(stats.obytes - prev_bytes_tx[port_id]) : 0;
	prev_bytes_rx[port_id] = stats.ibytes;
	prev_bytes_tx[port_id] = stats.obytes;
	mbps_rx = diff_ns > 0 ?
		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
	mbps_tx = diff_ns > 0 ?
		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;

	printf("\n  Throughput (since last show)\n");
	printf("  Rx-pps: %12"PRIu64"          Rx-bps: %12"PRIu64"\n  Tx-pps: %12"
	       PRIu64"          Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
	       mpps_tx, mbps_tx * 8);

	if (xstats_display_num > 0)
		nic_xstats_display_periodic(port_id);

	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}

void
nic_stats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_stats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset stats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr,
			"%s: Error: failed to get stats (port %u): %s\n",
			__func__, port_id, strerror(ret));
		return;
	}
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		fprintf(stderr, "Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		fprintf(stderr, "Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		fprintf(stderr, "Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		fprintf(stderr, "Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (xstats_hide_zero && !xstats[idx_xstat].value)
			continue;
		printf("%s: %"PRIu64"\n",
			xstats_names[idx_xstat].name,
			xstats[idx_xstat].value);
	}
	free(xstats_names);
	free(xstats);
}

void
nic_xstats_clear(portid_t port_id)
{
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_xstats_reset(port_id);
	if (ret != 0) {
		fprintf(stderr,
			"%s: Error: failed to reset xstats (port %u): %s\n",
			__func__, port_id, strerror(-ret));
		return;
	}

	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
	if (ret != 0) {
		if (ret < 0)
			ret = -ret;
		fprintf(stderr, "%s: Error: failed to get stats (port %u): %s\n",
			__func__, port_id, strerror(ret));
		return;
	}
}

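/** Map an RTE_ETH_QUEUE_STATE_* value to a human-readable name. */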
static const char *
get_queue_state_name(uint8_t queue_state)
{
	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
		return "stopped";
	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
		return "started";
	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return "hairpin";
	else
		return "unknown";
}

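/** Display configuration and status of a single Rx queue. */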
void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
		(qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
		(qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
		(qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
	if (qinfo.rx_buf_size != 0)
		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		fprintf(stderr,
			"Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX deferred start: %s",
		(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("\nBurst mode: %s%s",
		       mode.info,
		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
				" (per queue)" : "");

	printf("\n");
}

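/* Comparison callback for rte_bus_find(): matches any bus. */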
static int bus_match_all(const struct rte_bus *bus, const void *data)
{
	RTE_SET_USED(bus);
	RTE_SET_USED(data);
	return 0;
}

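/** Print the link speeds advertised in a speed capability bitmap. */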
static void
device_infos_display_speeds(uint32_t speed_capa)
{
	printf("\n\tDevice speed capability:");
	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
		printf(" Autonegotiate (all speeds)");
	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
		printf(" Disable autonegotiate (fixed speed)  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
		printf(" 10 Mbps half-duplex  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
		printf(" 10 Mbps full-duplex  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
		printf(" 100 Mbps half-duplex  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
		printf(" 100 Mbps full-duplex  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
		printf(" 1 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
		printf(" 2.5 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
		printf(" 5 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
		printf(" 10 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
		printf(" 20 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
		printf(" 25 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
		printf(" 40 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
		printf(" 50 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
		printf(" 56 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
		printf(" 100 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
		printf(" 200 Gbps  ");
	if (speed_capa & RTE_ETH_LINK_SPEED_400G)
		printf(" 400 Gbps  ");
}

void
device_infos_display(const char *identifier)
{
	static const char *info_border = "*********************";
	struct rte_bus *start = NULL, *next;
	struct rte_dev_iterator dev_iter;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_ether_addr mac_addr;
	struct rte_device *dev;
	struct rte_devargs da;
	portid_t port_id;
	struct rte_eth_dev_info dev_info;
	char devstr[128];

	memset(&da, 0, sizeof(da));
	if (!identifier)
		goto skip_parse;

	if (rte_devargs_parsef(&da, "%s", identifier)) {
		fprintf(stderr, "cannot parse identifier\n");
		return;
	}

skip_parse:
	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {

		start = next;
		if (identifier && da.bus != next)
			continue;

		snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next));
		RTE_DEV_FOREACH(dev, devstr, &dev_iter) {

			if (rte_dev_driver(dev) == NULL)
				continue;
			/* Check for matching device if identifier is present */
			if (identifier &&
			    strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev))))
				continue;
			printf("\n%s Infos for device %s %s\n",
			       info_border, rte_dev_name(dev), info_border);
			printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev)));
			printf("\nBus information: %s",
				rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : "");
			printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev)));
			printf("\nDevargs: %s",
			       rte_dev_devargs(dev) ? rte_dev_devargs(dev)->args : "");
			printf("\nConnect to socket: %d", rte_dev_numa_node(dev));
			printf("\n");

			/* List ports with matching device name */
			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
				printf("\n\tPort id: %-2d", port_id);
				if (eth_macaddr_get_print_err(port_id,
							      &mac_addr) == 0)
					print_ethaddr("\n\tMAC address: ",
						      &mac_addr);
				rte_eth_dev_get_name_by_port(port_id, name);
				printf("\n\tDevice name: %s", name);
				if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
					device_infos_display_speeds(dev_info.speed_capa);
				printf("\n");
			}
		}
	}
	rte_devargs_reset(&da);
}

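/** Print the names of all capabilities set in a device capability bitmap. */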
static void
print_dev_capabilities(uint64_t capabilities)
{
	uint64_t single_capa;
	int begin;
	int end;
	int bit;

	if (capabilities == 0)
		return;

	begin = rte_ctz64(capabilities);
	end = sizeof(capabilities) * CHAR_BIT - rte_clz64(capabilities);

	single_capa = 1ULL << begin;
	for (bit = begin; bit < end; bit++) {
		if (capabilities & single_capa)
			printf(" %s",
			       rte_eth_dev_capability_name(single_capa));
		single_capa <<= 1;
	}
}

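/** Convert an RSS type name to its RTE_ETH_RSS_* value, 0 if unknown. */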
uint64_t
str_to_rsstypes(const char *str)
{
	uint16_t i;

	for (i = 0; rss_type_table[i].str != NULL; i++) {
		if (strcmp(rss_type_table[i].str, str) == 0)
			return rss_type_table[i].rss_type;
	}

	return 0;
}

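/** Convert an RTE_ETH_RSS_* value to its name, NULL if unknown. */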
const char *
rsstypes_to_str(uint64_t rss_type)
{
	uint16_t i;

	for (i = 0; rss_type_table[i].str != NULL; i++) {
		if (rss_type_table[i].rss_type == rss_type)
			return rss_type_table[i].str;
	}

	return NULL;
}

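/** Print RSS offload type names, wrapping lines at char_num_per_line. */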
static void
rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line)
{
	uint16_t user_defined_str_len;
	uint16_t total_len = 0;
	uint16_t str_len = 0;
	uint64_t rss_offload;
	uint16_t i;

	for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) {
		rss_offload = RTE_BIT64(i);
		if ((offload_types & rss_offload) != 0) {
			const char *p = rsstypes_to_str(rss_offload);

			user_defined_str_len =
				strlen("user-defined-") + (i / 10 + 1);
			str_len = p ? strlen(p) : user_defined_str_len;
			str_len += 2; /* add two spaces */
			if (total_len + str_len >= char_num_per_line) {
				total_len = 0;
				printf("\n");
			}

			if (p)
				printf("  %s", p);
			else
				printf("  user-defined-%u", i);
			total_len += str_len;
		}
	}
	printf("\n");
}

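/** Display detailed information and capabilities of a port. */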
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool *mp;
	static const char *info_border = "*********************";
	uint16_t mtu;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;
	char fw_version[ETHDEV_FWVERS_LEN];

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	port = &ports[port_id];
	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
		print_ethaddr("MAC address: ", &mac_addr);
	rte_eth_dev_get_name_by_port(port_id, name);
	printf("\nDevice name: %s", name);
	printf("\nDriver name: %s", dev_info.driver_name);

	if (rte_eth_dev_fw_version_get(port_id, fw_version,
						ETHDEV_FWVERS_LEN) == 0)
		printf("\nFirmware-version: %s", fw_version);
	else
		printf("\nFirmware-version: %s", "not available");

	if (rte_dev_devargs(dev_info.device) && rte_dev_devargs(dev_info.device)->args)
		printf("\nDevargs: %s", rte_dev_devargs(dev_info.device)->args);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id], 0);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
							port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
	       ("On") : ("Off"));

	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload: \n");
		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on, ");
		else
			printf("  strip off, ");

		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
			printf("filter on, ");
		else
			printf("filter off, ");

		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
			printf("extend on, ");
		else
			printf("extend off, ");

		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
			printf("qinq strip on\n");
		else
			printf("qinq strip off\n");
	}

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No RSS offload flow type is supported.\n");
	else {
		printf("Supported RSS offload flow types:\n");
		rss_offload_types_display(dev_info.flow_type_rss_offloads,
				TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
	}

	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
	printf("Maximum configurable length of RX packet: %u\n",
		dev_info.max_rx_pktlen);
	printf("Maximum configurable size of LRO aggregated packet: %u\n",
		dev_info.max_lro_pkt_size);
	if (dev_info.max_vfs)
		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
	if (dev_info.max_vmdq_pools)
		printf("Maximum number of VMDq pools: %u\n",
			dev_info.max_vmdq_pools);

	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
	printf("Max segment number per packet: %hu\n",
		dev_info.tx_desc_lim.nb_seg_max);
	printf("Max segment number per MTU/TSO: %hu\n",
		dev_info.tx_desc_lim.nb_mtu_seg_max);

	printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa);
	print_dev_capabilities(dev_info.dev_capa);
	printf(" )\n");
	/* Show switch info only if valid switch domain and port ID are set */
	if (dev_info.switch_info.domain_id !=
		RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		if (dev_info.switch_info.name)
			printf("Switch name: %s\n", dev_info.switch_info.name);

		printf("Switch domain Id: %u\n",
			dev_info.switch_info.domain_id);
		printf("Switch Port Id: %u\n",
			dev_info.switch_info.port_id);
		if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0)
			printf("Switch Rx domain: %u\n",
			       dev_info.switch_info.rx_domain);
	}
	printf("Device error handling mode: ");
	switch (dev_info.err_handle_mode) {
	case RTE_ETH_ERROR_HANDLE_MODE_NONE:
		printf("none\n");
		break;
	case RTE_ETH_ERROR_HANDLE_MODE_PASSIVE:
		printf("passive\n");
		break;
	case RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE:
		printf("proactive\n");
		break;
	default:
		printf("unknown\n");
		break;
	}
	printf("Device private info:\n");
	ret = rte_eth_dev_priv_dump(port_id, stdout);
	if (ret == -ENOTSUP)
		printf("  none\n");
	else if (ret < 0)
		fprintf(stderr, "  Failed to dump private info with error (%d): %s\n",
			ret, strerror(-ret));
}

void
port_summary_header_display(void)
{
	uint16_t port_number;

	port_number = rte_eth_dev_count_avail();
	printf("Number of available ports: %i\n", port_number);
	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
			"Driver", "Status", "Link");
}

void
port_summary_display(portid_t port_id)
{
	struct rte_ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	char name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = eth_link_get_nowait_print_err(port_id, &link);
	if (ret < 0)
		return;

	ret = eth_dev_info_get_print_err(port_id, &dev_info);
	if (ret != 0)
		return;

	rte_eth_dev_get_name_by_port(port_id, name);
	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
	if (ret != 0)
		return;

	printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n",
		port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name,
		dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
		rte_eth_link_speed_to_str(link.link_speed));
}

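/** Read and hex-dump the device EEPROM of a port. */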
void
port_eeprom_display(portid_t port_id)
{
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
	if (len_eeprom < 0) {
		switch (len_eeprom) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get EEPROM: %d\n",
				len_eeprom);
			break;
		}
		return;
	}

	einfo.offset = 0;
	einfo.length = len_eeprom;
	einfo.data = calloc(1, len_eeprom);
	if (!einfo.data) {
		fprintf(stderr,
			"Allocation of port %u eeprom data failed\n",
			port_id);
		return;
	}

	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
			break;
		}
		free(einfo.data);
		return;
	}
	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
	free(einfo.data);
}

void
port_module_eeprom_display(portid_t port_id)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	ret = rte_eth_dev_get_module_info(port_id, &minfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get module EEPROM: %d\n",
				ret);
			break;
		}
		return;
	}

	einfo.offset = 0;
	einfo.length = minfo.eeprom_len;
	einfo.data = calloc(1, minfo.eeprom_len);
	if (!einfo.data) {
		fprintf(stderr,
			"Allocation of port %u eeprom data failed\n",
			port_id);
		return;
	}

	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
	if (ret != 0) {
		switch (ret) {
		case -ENODEV:
			fprintf(stderr, "port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			fprintf(stderr, "operation not supported by device\n");
			break;
		case -EIO:
			fprintf(stderr, "device is removed\n");
			break;
		default:
			fprintf(stderr, "Unable to get module EEPROM: %d\n",
				ret);
			break;
		}
		free(einfo.data);
		return;
	}

	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
	free(einfo.data);
}

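/** Check a port ID: returns 0 for RTE_PORT_ALL or an existing port, 1 otherwise. */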
int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	uint16_t pid;

	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	RTE_ETH_FOREACH_DEV(pid)
		if (port_id == pid)
			return 0;

	if (warning == ENABLED_WARN)
		fprintf(stderr, "Invalid port %d\n", port_id);

	return 1;
}

void print_valid_ports(void)
{
	portid_t pid;

	printf("The valid ports array is [");
	RTE_ETH_FOREACH_DEV(pid) {
		printf(" %d", pid);
	}
	printf(" ]\n");
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

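/** Derive the L2 overhead a device adds on top of the MTU, falling back to Ethernet header plus CRC. */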
static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

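/** Validate an MTU value against the device MTU and frame-size limits. */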
static int
eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	uint32_t overhead_len;
	uint32_t frame_size;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < dev_info.min_mtu) {
		fprintf(stderr,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info.min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info.max_mtu) {
		fprintf(stderr,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info.max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
			dev_info.max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size > dev_info.max_rx_pktlen) {
		fprintf(stderr,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info.max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	struct rte_port *port = &ports[port_id];
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = eth_dev_validate_mtu(port_id, mtu);
	if (diag != 0)
		return;

	if (port->need_reconfig == 0) {
		diag = rte_eth_dev_set_mtu(port_id, mtu);
		if (diag != 0) {
			fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
			return;
		}
	}

	port->dev_conf.rxmode.mtu = mtu;
}

/* Generic flow management functions. */

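/** Find a flow tunnel on a port by its testpmd-assigned ID. */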
static struct port_flow_tunnel *
port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
{
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (flow_tunnel->id == port_tunnel_id)
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

const char *
port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
{
	const char *type;

	switch (tunnel->type) {
	default:
		type = "unknown";
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		type = "vxlan";
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		type = "gre";
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		type = "nvgre";
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		type = "geneve";
		break;
	}

	return type;
}

struct port_flow_tunnel *
port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flow_tunnel;

	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
			goto out;
	}
	flow_tunnel = NULL;

out:
	return flow_tunnel;
}

void port_flow_tunnel_list(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		printf("port %u tunnel #%u type=%s",
			port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
		if (flt->tunnel.tun_id)
			printf(" id=%" PRIu64, flt->tunnel.tun_id);
		printf("\n");
	}
}

void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
{
	struct rte_port *port = &ports[port_id];
	struct port_flow_tunnel *flt;

	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->id == tunnel_id)
			break;
	}
	if (flt) {
		LIST_REMOVE(flt, chain);
		free(flt);
		printf("port %u: flow tunnel #%u destroyed\n",
			port_id, tunnel_id);
	}
}

void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
{
	struct rte_port *port = &ports[port_id];
	enum rte_flow_item_type type;
	struct port_flow_tunnel *flt;

	if (!strcmp(ops->type, "vxlan"))
		type = RTE_FLOW_ITEM_TYPE_VXLAN;
	else if (!strcmp(ops->type, "gre"))
		type = RTE_FLOW_ITEM_TYPE_GRE;
	else if (!strcmp(ops->type, "nvgre"))
		type = RTE_FLOW_ITEM_TYPE_NVGRE;
	else if (!strcmp(ops->type, "geneve"))
		type = RTE_FLOW_ITEM_TYPE_GENEVE;
	else {
		fprintf(stderr, "cannot offload \"%s\" tunnel type\n",
			ops->type);
		return;
	}
	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
		if (flt->tunnel.type == type)
			break;
	}
	if (!flt) {
		flt = calloc(1, sizeof(*flt));
		if (!flt) {
			fprintf(stderr, "failed to allocate port flt object\n");
			return;
		}
		flt->tunnel.type = type;
		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
				  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
	}
	printf("port %d: flow tunnel #%u type %s\n",
		port_id, flt->id, ops->type);
}

/** Generate a port_flow entry from attributes/pattern/actions. */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions,
	      struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct port_flow *pf;
	int ret;

	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (ret < 0)
		return NULL;
	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
	if (!pf) {
		rte_flow_error_set
			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "calloc() failed");
		return NULL;
	}
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
			  error) >= 0)
		return pf;
	free(pf);
	return NULL;
}

/** Print a message out of a flow error. */
static int
port_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
	    !errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n",
		__func__, error->type, errstr,
		error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
					 error->cause), buf) : "",
		error->message ? error->message : "(no stated reason)",
		rte_strerror(err));

	switch (error->type) {
	case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER:
		fprintf(stderr, "The status suggests the use of \"transfer\" "
				"as the possible cause of the failure. Make "
				"sure that the flow in question and its "
				"indirect components (if any) are managed "
				"via \"transfer\" proxy port. Use command "
				"\"show port (port_id) flow transfer proxy\" "
				"to figure out the proxy port ID\n");
		break;
	default:
		break;
	}

	return -err;
}

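/** Print the names of the RSS types set in rss_types, wrapping lines at char_num_per_line. */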
static void
rss_types_display(uint64_t rss_types, uint16_t char_num_per_line)
{
	uint16_t total_len = 0;
	uint16_t str_len;
	uint16_t i;

	if (rss_types == 0)
		return;

	for (i = 0; rss_type_table[i].str; i++) {
		if (rss_type_table[i].rss_type == 0)
			continue;

		if ((rss_types & rss_type_table[i].rss_type) ==
					rss_type_table[i].rss_type) {
			/* Contain two spaces */
			str_len = strlen(rss_type_table[i].str) + 2;
			if (total_len + str_len > char_num_per_line) {
				printf("\n");
				total_len = 0;
			}
			printf("  %s", rss_type_table[i].str);
			total_len += str_len;
		}
	}
	printf("\n");
}

static void
rss_config_display(struct rte_flow_action_rss *rss_conf)
{
	uint8_t i;

	if (rss_conf == NULL) {
		fprintf(stderr, "Invalid rule\n");
		return;
	}

	printf("RSS:\n"
	       " queues:");
	if (rss_conf->queue_num == 0)
		printf(" none");
	for (i = 0; i < rss_conf->queue_num; i++)
		printf(" %d", rss_conf->queue[i]);
	printf("\n");

	printf(" function: ");
	switch (rss_conf->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
		printf("default\n");
		break;
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		printf("toeplitz\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
		printf("simple_xor\n");
		break;
	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
		printf("symmetric_toeplitz\n");
		break;
	default:
		printf("Unknown function\n");
		return;
	}

	printf(" RSS key:\n");
	if (rss_conf->key_len == 0) {
		printf("  none");
	} else {
		printf("  key_len: %u\n", rss_conf->key_len);
		printf("  key: ");
		if (rss_conf->key == NULL) {
			printf("none");
		} else {
			for (i = 0; i < rss_conf->key_len; i++)
				printf("%02X", rss_conf->key[i]);
		}
	}
	printf("\n");

	printf(" types:\n");
	if (rss_conf->types == 0) {
		printf("  none\n");
		return;
	}
	rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
}

static struct port_indirect_action *
action_get_by_id(portid_t port_id, uint32_t id)
{
	struct rte_port *port;
	struct port_indirect_action **ppia;
	struct port_indirect_action *pia = NULL;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return NULL;
	port = &ports[port_id];
	ppia = &port->actions_list;
	while (*ppia) {
		if ((*ppia)->id == id) {
			pia = *ppia;
			break;
		}
		ppia = &(*ppia)->next;
	}
	if (!pia)
		fprintf(stderr,
			"Failed to find indirect action #%u on port %u\n",
			id, port_id);
	return pia;
}

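/** Allocate an indirect action entry; id UINT32_MAX picks the first free ID. */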
static int
action_alloc(portid_t port_id, uint32_t id,
	     struct port_indirect_action **action)
{
	struct rte_port *port;
	struct port_indirect_action **ppia;
	struct port_indirect_action *pia = NULL;

	*action = NULL;
	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	if (id == UINT32_MAX) {
		/* taking first available ID */
		if (port->actions_list) {
			if (port->actions_list->id == UINT32_MAX - 1) {
				fprintf(stderr,
					"Highest indirect action ID is already assigned, delete it first\n");
				return -ENOMEM;
			}
			id = port->actions_list->id + 1;
		} else {
			id = 0;
		}
	}
	pia = calloc(1, sizeof(*pia));
	if (!pia) {
		fprintf(stderr,
			"Allocation of port %u indirect action failed\n",
			port_id);
		return -ENOMEM;
	}
	ppia = &port->actions_list;
	while (*ppia && (*ppia)->id > id)
		ppia = &(*ppia)->next;
	if (*ppia && (*ppia)->id == id) {
		fprintf(stderr,
			"Indirect action #%u is already assigned, delete it first\n",
			id);
		free(pia);
		return -EINVAL;
	}
	pia->next = *ppia;
	pia->id = id;
	*ppia = pia;
	*action = pia;
	return 0;
}

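/** Allocate a template entry in *list; id UINT32_MAX picks the first free ID. */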
1625 static int
1626 template_alloc(uint32_t id, struct port_template **template,
1627 	       struct port_template **list)
1628 {
1629 	struct port_template *lst = *list;
1630 	struct port_template **ppt;
1631 	struct port_template *pt = NULL;
1632 
1633 	*template = NULL;
1634 	if (id == UINT32_MAX) {
1635 		/* taking first available ID */
1636 		if (lst) {
1637 			if (lst->id == UINT32_MAX - 1) {
1638 				printf("Highest template ID is already"
1639 				" assigned, delete it first\n");
1640 				return -ENOMEM;
1641 			}
1642 			id = lst->id + 1;
1643 		} else {
1644 			id = 0;
1645 		}
1646 	}
1647 	pt = calloc(1, sizeof(*pt));
1648 	if (!pt) {
1649 		printf("Allocation of port template failed\n");
1650 		return -ENOMEM;
1651 	}
1652 	ppt = list;
1653 	while (*ppt && (*ppt)->id > id)
1654 		ppt = &(*ppt)->next;
1655 	if (*ppt && (*ppt)->id == id) {
1656 		printf("Template #%u is already assigned,"
1657 			" delete it first\n", id);
1658 		free(pt);
1659 		return -EINVAL;
1660 	}
1661 	pt->next = *ppt;
1662 	pt->id = id;
1663 	*ppt = pt;
1664 	*template = pt;
1665 	return 0;
1666 }
1667 
1668 static int
1669 table_alloc(uint32_t id, struct port_table **table,
1670 	    struct port_table **list)
1671 {
1672 	struct port_table *lst = *list;
1673 	struct port_table **ppt;
1674 	struct port_table *pt = NULL;
1675 
1676 	*table = NULL;
1677 	if (id == UINT32_MAX) {
1678 		/* taking first available ID */
1679 		if (lst) {
1680 			if (lst->id == UINT32_MAX - 1) {
1681 				printf("Highest table ID is already"
1682 				" assigned, delete it first\n");
1683 				return -ENOMEM;
1684 			}
1685 			id = lst->id + 1;
1686 		} else {
1687 			id = 0;
1688 		}
1689 	}
1690 	pt = calloc(1, sizeof(*pt));
1691 	if (!pt) {
1692 		printf("Allocation of table failed\n");
1693 		return -ENOMEM;
1694 	}
1695 	ppt = list;
1696 	while (*ppt && (*ppt)->id > id)
1697 		ppt = &(*ppt)->next;
1698 	if (*ppt && (*ppt)->id == id) {
1699 		printf("Table #%u is already assigned,"
1700 			" delete it first\n", id);
1701 		free(pt);
1702 		return -EINVAL;
1703 	}
1704 	pt->next = *ppt;
1705 	pt->id = id;
1706 	*ppt = pt;
1707 	*table = pt;
1708 	return 0;
1709 }
1710 
1711 /** Get info about flow management resources. */
1712 int
1713 port_flow_get_info(portid_t port_id)
1714 {
1715 	struct rte_flow_port_info port_info;
1716 	struct rte_flow_queue_info queue_info;
1717 	struct rte_flow_error error;
1718 
1719 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1720 	    port_id == (portid_t)RTE_PORT_ALL)
1721 		return -EINVAL;
1722 	/* Poisoning to make sure PMDs update it in case of error. */
1723 	memset(&error, 0x99, sizeof(error));
1724 	memset(&port_info, 0, sizeof(port_info));
1725 	memset(&queue_info, 0, sizeof(queue_info));
1726 	if (rte_flow_info_get(port_id, &port_info, &queue_info, &error))
1727 		return port_flow_complain(&error);
1728 	printf("Flow engine resources on port %u:\n"
1729 	       "Number of queues: %d\n"
1730 		   "Size of queues: %d\n"
1731 	       "Number of counters: %d\n"
1732 	       "Number of aging objects: %d\n"
1733 	       "Number of meter actions: %d\n",
1734 	       port_id, port_info.max_nb_queues,
1735 		   queue_info.max_size,
1736 	       port_info.max_nb_counters,
1737 	       port_info.max_nb_aging_objects,
1738 	       port_info.max_nb_meters);
1739 	return 0;
1740 }
1741 
1742 /** Configure flow management resources. */
1743 int
1744 port_flow_configure(portid_t port_id,
1745 	const struct rte_flow_port_attr *port_attr,
1746 	uint16_t nb_queue,
1747 	const struct rte_flow_queue_attr *queue_attr)
1748 {
1749 	struct rte_port *port;
1750 	struct rte_flow_error error;
1751 	const struct rte_flow_queue_attr *attr_list[nb_queue];
1752 	int std_queue;
1753 
1754 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1755 	    port_id == (portid_t)RTE_PORT_ALL)
1756 		return -EINVAL;
1757 	port = &ports[port_id];
1758 	port->queue_nb = nb_queue;
1759 	port->queue_sz = queue_attr->size;
1760 	for (std_queue = 0; std_queue < nb_queue; std_queue++)
1761 		attr_list[std_queue] = queue_attr;
1762 	/* Poisoning to make sure PMDs update it in case of error. */
1763 	memset(&error, 0x66, sizeof(error));
1764 	if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error))
1765 		return port_flow_complain(&error);
1766 	printf("Configure flows on port %u: "
1767 	       "number of queues %d with %d elements\n",
1768 	       port_id, nb_queue, queue_attr->size);
1769 	return 0;
1770 }
1771 
1772 static int
1773 action_handle_create(portid_t port_id,
1774 		     struct port_indirect_action *pia,
1775 		     const struct rte_flow_indir_action_conf *conf,
1776 		     const struct rte_flow_action *action,
1777 		     struct rte_flow_error *error)
1778 {
1779 	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
1780 		struct rte_flow_action_age *age =
1781 			(struct rte_flow_action_age *)(uintptr_t)(action->conf);
1782 
1783 		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
1784 		age->context = &pia->age_type;
1785 	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
1786 		struct rte_flow_action_conntrack *ct =
1787 			(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);
1788 
1789 		memcpy(ct, &conntrack_context, sizeof(*ct));
1790 	}
1791 	pia->type = action->type;
1792 	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
1793 						    error);
1794 	return pia->handle ? 0 : -1;
1795 }
1796 
1797 static int
1798 action_list_handle_create(portid_t port_id,
1799 			  struct port_indirect_action *pia,
1800 			  const struct rte_flow_indir_action_conf *conf,
1801 			  const struct rte_flow_action *actions,
1802 			  struct rte_flow_error *error)
1803 {
1804 	pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST;
1805 	pia->list_handle =
1806 		rte_flow_action_list_handle_create(port_id, conf,
1807 						   actions, error);
1808 	return pia->list_handle ? 0 : -1;
1809 }
1810 /** Create indirect action */
1811 int
1812 port_action_handle_create(portid_t port_id, uint32_t id, bool indirect_list,
1813 			  const struct rte_flow_indir_action_conf *conf,
1814 			  const struct rte_flow_action *action)
1815 {
1816 	struct port_indirect_action *pia;
1817 	int ret;
1818 	struct rte_flow_error error;
1819 
1820 	ret = action_alloc(port_id, id, &pia);
1821 	if (ret)
1822 		return ret;
1823 	/* Poisoning to make sure PMDs update it in case of error. */
1824 	memset(&error, 0x22, sizeof(error));
1825 	ret = indirect_list ?
1826 	       action_list_handle_create(port_id, pia, conf, action, &error) :
1827 	       action_handle_create(port_id, pia, conf, action, &error);
1828 	if (ret) {
1829 		uint32_t destroy_id = pia->id;
1830 		port_action_handle_destroy(port_id, 1, &destroy_id);
1831 		return port_flow_complain(&error);
1832 	}
1833 	printf("Indirect action #%u created\n", pia->id);
1834 	return 0;
1835 }
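
/*
 * Example (testpmd CLI, illustrative IDs): create indirect action #5 as a
 * counter that can be shared by many rules on port 0:
 *
 *	flow indirect_action 0 create action_id 5 ingress action count / end
 *
 * The AGE special case in action_handle_create() works because the context
 * pointer handed to the PMD must outlive the action; pia->age_type provides
 * that stable storage.
 */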
1836 
1837 /** Destroy indirect action */
1838 int
1839 port_action_handle_destroy(portid_t port_id,
1840 			   uint32_t n,
1841 			   const uint32_t *actions)
1842 {
1843 	struct rte_port *port;
1844 	struct port_indirect_action **tmp;
1845 	int ret = 0;
1846 
1847 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1848 	    port_id == (portid_t)RTE_PORT_ALL)
1849 		return -EINVAL;
1850 	port = &ports[port_id];
1851 	tmp = &port->actions_list;
1852 	while (*tmp) {
1853 		uint32_t i;
1854 
1855 		for (i = 0; i != n; ++i) {
1856 			struct rte_flow_error error;
1857 			struct port_indirect_action *pia = *tmp;
1858 
1859 			if (actions[i] != pia->id)
1860 				continue;
1861 			/*
1862 			 * Poisoning to make sure PMDs update it in case
1863 			 * of error.
1864 			 */
1865 			memset(&error, 0x33, sizeof(error));
1866 
1867 			if (pia->handle) {
1868 				ret = pia->type ==
1869 				      RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ?
1870 					rte_flow_action_list_handle_destroy
1871 					(port_id, pia->list_handle, &error) :
1872 					rte_flow_action_handle_destroy
1873 					(port_id, pia->handle, &error);
1874 				if (ret) {
1875 					ret = port_flow_complain(&error);
1876 					continue;
1877 				}
1878 			}
1879 			*tmp = pia->next;
1880 			printf("Indirect action #%u destroyed\n", pia->id);
1881 			free(pia);
1882 			break;
1883 		}
1884 		if (i == n)
1885 			tmp = &(*tmp)->next;
1886 	}
1887 	return ret;
1888 }
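
/*
 * The destroy loop above relies on the pointer-to-pointer idiom for
 * unlinking from a singly linked list without tracking a "previous" node;
 * a minimal standalone sketch of the same technique:
 *
 *	struct node { uint32_t id; struct node *next; };
 *
 *	static void
 *	unlink_id(struct node **head, uint32_t id)
 *	{
 *		struct node **tmp = head;
 *
 *		while (*tmp) {
 *			if ((*tmp)->id == id) {
 *				struct node *victim = *tmp;
 *
 *				*tmp = victim->next;
 *				free(victim);
 *			} else {
 *				tmp = &(*tmp)->next;
 *			}
 *		}
 *	}
 */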
1889 
1890 int
1891 port_action_handle_flush(portid_t port_id)
1892 {
1893 	struct rte_port *port;
1894 	struct port_indirect_action **tmp;
1895 	int ret = 0;
1896 
1897 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1898 	    port_id == (portid_t)RTE_PORT_ALL)
1899 		return -EINVAL;
1900 	port = &ports[port_id];
1901 	tmp = &port->actions_list;
1902 	while (*tmp != NULL) {
1903 		struct rte_flow_error error;
1904 		struct port_indirect_action *pia = *tmp;
1905 
1906 		/* Poisoning to make sure PMDs update it in case of error. */
1907 		memset(&error, 0x44, sizeof(error));
1908 		if (pia->handle != NULL) {
1909 			ret = pia->type ==
1910 			      RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ?
1911 			      rte_flow_action_list_handle_destroy
1912 				      (port_id, pia->list_handle, &error) :
1913 			      rte_flow_action_handle_destroy
1914 				      (port_id, pia->handle, &error);
1915 			if (ret) {
1916 				printf("Indirect action #%u not destroyed\n",
1917 				       pia->id);
1918 				ret = port_flow_complain(&error);
1919 			}
1920 			tmp = &pia->next;
1921 		} else {
1922 			*tmp = pia->next;
1923 			free(pia);
1924 		}
1925 	}
1926 	return ret;
1927 }
1928 
1929 /** Get indirect action by port + id */
1930 struct rte_flow_action_handle *
1931 port_action_handle_get_by_id(portid_t port_id, uint32_t id)
1932 {
1934 	struct port_indirect_action *pia = action_get_by_id(port_id, id);
1935 
1936 	return (pia) ? pia->handle : NULL;
1937 }
1938 
1939 /** Update indirect action */
1940 int
1941 port_action_handle_update(portid_t port_id, uint32_t id,
1942 			  const struct rte_flow_action *action)
1943 {
1944 	struct rte_flow_error error;
1945 	struct rte_flow_action_handle *action_handle;
1946 	struct port_indirect_action *pia;
1947 	struct rte_flow_update_meter_mark mtr_update = { 0 };
1948 	const void *update;
1949 
1950 	action_handle = port_action_handle_get_by_id(port_id, id);
1951 	if (!action_handle)
1952 		return -EINVAL;
1953 	pia = action_get_by_id(port_id, id);
1954 	if (!pia)
1955 		return -EINVAL;
1956 	switch (pia->type) {
1957 	case RTE_FLOW_ACTION_TYPE_AGE:
1958 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
1959 		update = action->conf;
1960 		break;
1961 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
1962 		memcpy(&mtr_update.meter_mark, action->conf,
1963 		       sizeof(struct rte_flow_action_meter_mark));
1964 		if (mtr_update.meter_mark.profile)
1965 			mtr_update.profile_valid = 1;
1966 		if (mtr_update.meter_mark.policy)
1967 			mtr_update.policy_valid = 1;
1968 		mtr_update.color_mode_valid = 1;
1969 		mtr_update.state_valid = 1;
1970 		update = &mtr_update;
1971 		break;
1972 	default:
1973 		update = action;
1974 		break;
1975 	}
1976 	if (rte_flow_action_handle_update(port_id, action_handle, update,
1977 					  &error)) {
1978 		return port_flow_complain(&error);
1979 	}
1980 	printf("Indirect action #%u updated\n", id);
1981 	return 0;
1982 }
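
/*
 * Example (testpmd CLI, illustrative IDs and syntax): point indirect
 * METER_MARK action #3 at another profile:
 *
 *	flow indirect_action 0 update 3 action meter_mark mtr_profile 2 / end
 *
 * Unlike creation, updating a METER_MARK goes through the dedicated
 * struct rte_flow_update_meter_mark wrapper, hence the translation above.
 */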
1983 
1984 static void
1985 port_action_handle_query_dump(portid_t port_id,
1986 			      const struct port_indirect_action *pia,
1987 			      union port_action_query *query)
1988 {
1989 	if (!pia || !query)
1990 		return;
1991 	switch (pia->type) {
1992 	case RTE_FLOW_ACTION_TYPE_AGE:
1993 		printf("Indirect AGE action:\n"
1994 		       " aged: %u\n"
1995 		       " sec_since_last_hit_valid: %u\n"
1996 		       " sec_since_last_hit: %" PRIu32 "\n",
1997 		       query->age.aged,
1998 		       query->age.sec_since_last_hit_valid,
1999 		       query->age.sec_since_last_hit);
2000 		break;
2001 	case RTE_FLOW_ACTION_TYPE_COUNT:
2002 		printf("Indirect COUNT action:\n"
2003 		       " hits_set: %u\n"
2004 		       " bytes_set: %u\n"
2005 		       " hits: %" PRIu64 "\n"
2006 		       " bytes: %" PRIu64 "\n",
2007 		       query->count.hits_set,
2008 		       query->count.bytes_set,
2009 		       query->count.hits,
2010 		       query->count.bytes);
2011 		break;
2012 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
2013 		printf("Conntrack Context:\n"
2014 		       "  Peer: %u, Flow dir: %s, Enable: %u\n"
2015 		       "  Live: %u, SACK: %u, CACK: %u\n"
2016 		       "  Packet dir: %s, Liberal: %u, State: %u\n"
2017 		       "  Factor: %u, Retrans: %u, TCP flags: %u\n"
2018 		       "  Last Seq: %u, Last ACK: %u\n"
2019 		       "  Last Win: %u, Last End: %u\n",
2020 		       query->ct.peer_port,
2021 		       query->ct.is_original_dir ? "Original" : "Reply",
2022 		       query->ct.enable, query->ct.live_connection,
2023 		       query->ct.selective_ack, query->ct.challenge_ack_passed,
2024 		       query->ct.last_direction ? "Original" : "Reply",
2025 		       query->ct.liberal_mode, query->ct.state,
2026 		       query->ct.max_ack_window, query->ct.retransmission_limit,
2027 		       query->ct.last_index, query->ct.last_seq,
2028 		       query->ct.last_ack, query->ct.last_window,
2029 		       query->ct.last_end);
2030 		printf("  Original Dir:\n"
2031 		       "    scale: %u, fin: %u, ack seen: %u\n"
2032 		       "    unacked data: %u\n    Sent end: %u,"
2033 		       "    Reply end: %u, Max win: %u, Max ACK: %u\n",
2034 		       query->ct.original_dir.scale,
2035 		       query->ct.original_dir.close_initiated,
2036 		       query->ct.original_dir.last_ack_seen,
2037 		       query->ct.original_dir.data_unacked,
2038 		       query->ct.original_dir.sent_end,
2039 		       query->ct.original_dir.reply_end,
2040 		       query->ct.original_dir.max_win,
2041 		       query->ct.original_dir.max_ack);
2042 		printf("  Reply Dir:\n"
2043 		       "    scale: %u, fin: %u, ack seen: %u\n"
2044 		       "    unacked data: %u\n    Sent end: %u,"
2045 		       "    Reply end: %u, Max win: %u, Max ACK: %u\n",
2046 		       query->ct.reply_dir.scale,
2047 		       query->ct.reply_dir.close_initiated,
2048 		       query->ct.reply_dir.last_ack_seen,
2049 		       query->ct.reply_dir.data_unacked,
2050 		       query->ct.reply_dir.sent_end,
2051 		       query->ct.reply_dir.reply_end,
2052 		       query->ct.reply_dir.max_win,
2053 		       query->ct.reply_dir.max_ack);
2054 		break;
2055 	case RTE_FLOW_ACTION_TYPE_QUOTA:
2056 		printf("Indirect QUOTA action %u\n"
2057 		       " unused quota: %" PRId64 "\n",
2058 		       pia->id, query->quota.quota);
2059 		break;
2060 	default:
2061 		printf("port-%u: indirect action %u (type: %d) doesn't support query\n",
2062 		       port_id, pia->id, pia->type);
2063 		break;
2064 	}
2066 }
2067 
2068 void
2069 port_action_handle_query_update(portid_t port_id, uint32_t id,
2070 				enum rte_flow_query_update_mode qu_mode,
2071 				const struct rte_flow_action *action)
2072 {
2073 	int ret;
2074 	struct rte_flow_error error;
2075 	struct port_indirect_action *pia;
2076 	union port_action_query query;
2077 
2078 	pia = action_get_by_id(port_id, id);
2079 	if (!pia || !pia->handle)
2080 		return;
2081 	ret = rte_flow_action_handle_query_update(port_id, pia->handle, action,
2082 						  &query, qu_mode, &error);
2083 	if (ret)
2084 		port_flow_complain(&error);
2085 	else
2086 		port_action_handle_query_dump(port_id, pia, &query);
2088 }
2089 
2090 int
2091 port_action_handle_query(portid_t port_id, uint32_t id)
2092 {
2093 	struct rte_flow_error error;
2094 	struct port_indirect_action *pia;
2095 	union port_action_query query;
2096 
2097 	pia = action_get_by_id(port_id, id);
2098 	if (!pia)
2099 		return -EINVAL;
2100 	switch (pia->type) {
2101 	case RTE_FLOW_ACTION_TYPE_AGE:
2102 	case RTE_FLOW_ACTION_TYPE_COUNT:
2103 	case RTE_FLOW_ACTION_TYPE_QUOTA:
2104 		break;
2105 	default:
2106 		fprintf(stderr,
2107 			"Indirect action %u (type: %d) on port %u doesn't support query\n",
2108 			id, pia->type, port_id);
2109 		return -ENOTSUP;
2110 	}
2111 	/* Poisoning to make sure PMDs update it in case of error. */
2112 	memset(&error, 0x55, sizeof(error));
2113 	memset(&query, 0, sizeof(query));
2114 	if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error))
2115 		return port_flow_complain(&error);
2116 	port_action_handle_query_dump(port_id, pia, &query);
2117 	return 0;
2118 }
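
/*
 * Example (testpmd CLI, illustrative ID): read back indirect action #5:
 *
 *	flow indirect_action 0 query 5
 *
 * For COUNT this fills query->count (hits/bytes); AGE and QUOTA fill their
 * respective members of union port_action_query, dumped above.
 */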
2119 
2120 static struct port_flow_tunnel *
2121 port_flow_tunnel_offload_cmd_prep(portid_t port_id,
2122 				  const struct rte_flow_item *pattern,
2123 				  const struct rte_flow_action *actions,
2124 				  const struct tunnel_ops *tunnel_ops)
2125 {
2126 	int ret;
2127 	struct rte_port *port;
2128 	struct port_flow_tunnel *pft;
2129 	struct rte_flow_error error;
2130 
2131 	port = &ports[port_id];
2132 	pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
2133 	if (!pft) {
2134 		fprintf(stderr, "failed to locate port flow tunnel #%u\n",
2135 			tunnel_ops->id);
2136 		return NULL;
2137 	}
2138 	if (tunnel_ops->actions) {
2139 		uint32_t num_actions;
2140 		const struct rte_flow_action *aptr;
2141 
2142 		ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
2143 						&pft->pmd_actions,
2144 						&pft->num_pmd_actions,
2145 						&error);
2146 		if (ret) {
2147 			port_flow_complain(&error);
2148 			return NULL;
2149 		}
2150 		for (aptr = actions, num_actions = 1;
2151 		     aptr->type != RTE_FLOW_ACTION_TYPE_END;
2152 		     aptr++, num_actions++);
2153 		pft->actions = malloc(
2154 				(num_actions +  pft->num_pmd_actions) *
2155 				sizeof(actions[0]));
2156 		if (!pft->actions) {
2157 			rte_flow_tunnel_action_decap_release(
2158 					port_id, pft->pmd_actions,
2159 					pft->num_pmd_actions, &error);
2160 			return NULL;
2161 		}
2162 		rte_memcpy(pft->actions, pft->pmd_actions,
2163 			   pft->num_pmd_actions * sizeof(actions[0]));
2164 		rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
2165 			   num_actions * sizeof(actions[0]));
2166 	}
2167 	if (tunnel_ops->items) {
2168 		uint32_t num_items;
2169 		const struct rte_flow_item *iptr;
2170 
2171 		ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
2172 					    &pft->pmd_items,
2173 					    &pft->num_pmd_items,
2174 					    &error);
2175 		if (ret) {
2176 			port_flow_complain(&error);
2177 			return NULL;
2178 		}
2179 		for (iptr = pattern, num_items = 1;
2180 		     iptr->type != RTE_FLOW_ITEM_TYPE_END;
2181 		     iptr++, num_items++);
2182 		pft->items = malloc((num_items + pft->num_pmd_items) *
2183 				    sizeof(pattern[0]));
2184 		if (!pft->items) {
2185 			rte_flow_tunnel_item_release(
2186 					port_id, pft->pmd_items,
2187 					pft->num_pmd_items, &error);
2188 			return NULL;
2189 		}
2190 		rte_memcpy(pft->items, pft->pmd_items,
2191 			   pft->num_pmd_items * sizeof(pattern[0]));
2192 		rte_memcpy(pft->items + pft->num_pmd_items, pattern,
2193 			   num_items * sizeof(pattern[0]));
2194 	}
2195 
2196 	return pft;
2197 }
2198 
2199 static void
2200 port_flow_tunnel_offload_cmd_release(portid_t port_id,
2201 				     const struct tunnel_ops *tunnel_ops,
2202 				     struct port_flow_tunnel *pft)
2203 {
2204 	struct rte_flow_error error;
2205 
2206 	if (tunnel_ops->actions) {
2207 		free(pft->actions);
2208 		rte_flow_tunnel_action_decap_release(
2209 			port_id, pft->pmd_actions,
2210 			pft->num_pmd_actions, &error);
2211 		pft->actions = NULL;
2212 		pft->pmd_actions = NULL;
2213 	}
2214 	if (tunnel_ops->items) {
2215 		free(pft->items);
2216 		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
2217 					     pft->num_pmd_items,
2218 					     &error);
2219 		pft->items = NULL;
2220 		pft->pmd_items = NULL;
2221 	}
2222 }
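
/*
 * The prep/release pair above brackets every rule operation done in
 * tunnel-offload mode: prep builds merged arrays laid out as
 * [PMD items | app items] and [PMD actions | app actions], release frees
 * them together with the PMD-provided parts. Each successful prep must be
 * matched by exactly one release, as port_flow_validate() and
 * port_flow_create() below do.
 */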
2223 
2224 /** Add port meter policy */
2225 int
2226 port_meter_policy_add(portid_t port_id, uint32_t policy_id,
2227 			const struct rte_flow_action *actions)
2228 {
2229 	struct rte_mtr_error error;
2230 	const struct rte_flow_action *act = actions;
2231 	const struct rte_flow_action *start;
2232 	struct rte_mtr_meter_policy_params policy;
2233 	uint32_t i = 0, act_n;
2234 	int ret;
2235 
2236 	for (i = 0; i < RTE_COLORS; i++) {
2237 		for (act_n = 0, start = act;
2238 			act->type != RTE_FLOW_ACTION_TYPE_END; act++)
2239 			act_n++;
2240 		if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
2241 			policy.actions[i] = start;
2242 		else
2243 			policy.actions[i] = NULL;
2244 		act++;
2245 	}
2246 	ret = rte_mtr_meter_policy_add(port_id,
2247 			policy_id,
2248 			&policy, &error);
2249 	if (ret)
2250 		print_mtr_err_msg(&error);
2251 	return ret;
2252 }
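
/*
 * The parser above expects one END-terminated action list per color, in
 * GREEN, YELLOW, RED order (RTE_COLORS == 3). A minimal sketch of a matching
 * array (pass green and yellow, drop red):
 *
 *	struct rte_flow_action acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PASSTHRU },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *		{ .type = RTE_FLOW_ACTION_TYPE_PASSTHRU },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	port_meter_policy_add(0, 1, acts);
 */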
2253 
2254 struct rte_flow_meter_profile *
2255 port_meter_profile_get_by_id(portid_t port_id, uint32_t id)
2256 {
2257 	struct rte_mtr_error error;
2258 	struct rte_flow_meter_profile *profile;
2259 
2260 	profile = rte_mtr_meter_profile_get(port_id, id, &error);
2261 	if (!profile)
2262 		print_mtr_err_msg(&error);
2263 	return profile;
2264 }

2265 struct rte_flow_meter_policy *
2266 port_meter_policy_get_by_id(portid_t port_id, uint32_t id)
2267 {
2268 	struct rte_mtr_error error;
2269 	struct rte_flow_meter_policy *policy;
2270 
2271 	policy = rte_mtr_meter_policy_get(port_id, id, &error);
2272 	if (!policy)
2273 		print_mtr_err_msg(&error);
2274 	return policy;
2275 }
2276 
2277 /** Validate flow rule. */
2278 int
2279 port_flow_validate(portid_t port_id,
2280 		   const struct rte_flow_attr *attr,
2281 		   const struct rte_flow_item *pattern,
2282 		   const struct rte_flow_action *actions,
2283 		   const struct tunnel_ops *tunnel_ops)
2284 {
2285 	struct rte_flow_error error;
2286 	struct port_flow_tunnel *pft = NULL;
2287 	int ret;
2288 
2289 	/* Poisoning to make sure PMDs update it in case of error. */
2290 	memset(&error, 0x11, sizeof(error));
2291 	if (tunnel_ops->enabled) {
2292 		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
2293 							actions, tunnel_ops);
2294 		if (!pft)
2295 			return -ENOENT;
2296 		if (pft->items)
2297 			pattern = pft->items;
2298 		if (pft->actions)
2299 			actions = pft->actions;
2300 	}
2301 	ret = rte_flow_validate(port_id, attr, pattern, actions, &error);
2302 	if (tunnel_ops->enabled)
2303 		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
2304 	if (ret)
2305 		return port_flow_complain(&error);
2306 	printf("Flow rule validated\n");
2307 	return 0;
2308 }
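
/*
 * Example (testpmd CLI): dry-run a rule against the PMD without inserting
 * anything into the device:
 *
 *	flow validate 0 ingress pattern eth / ipv4 / end actions drop / end
 */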
2309 
2310 /** Return age action structure if exists, otherwise NULL. */
2311 static struct rte_flow_action_age *
2312 age_action_get(const struct rte_flow_action *actions)
2313 {
2314 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2315 		switch (actions->type) {
2316 		case RTE_FLOW_ACTION_TYPE_AGE:
2317 			return (struct rte_flow_action_age *)
2318 				(uintptr_t)actions->conf;
2319 		default:
2320 			break;
2321 		}
2322 	}
2323 	return NULL;
2324 }
2325 
2326 /** Create pattern template */
2327 int
2328 port_flow_pattern_template_create(portid_t port_id, uint32_t id,
2329 				  const struct rte_flow_pattern_template_attr *attr,
2330 				  const struct rte_flow_item *pattern)
2331 {
2332 	struct rte_port *port;
2333 	struct port_template *pit;
2334 	int ret;
2335 	struct rte_flow_error error;
2336 
2337 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2338 	    port_id == (portid_t)RTE_PORT_ALL)
2339 		return -EINVAL;
2340 	port = &ports[port_id];
2341 	ret = template_alloc(id, &pit, &port->pattern_templ_list);
2342 	if (ret)
2343 		return ret;
2344 	/* Poisoning to make sure PMDs update it in case of error. */
2345 	memset(&error, 0x22, sizeof(error));
2346 	pit->template.pattern_template = rte_flow_pattern_template_create(port_id,
2347 						attr, pattern, &error);
2348 	if (!pit->template.pattern_template) {
2349 		uint32_t destroy_id = pit->id;
2350 		port_flow_pattern_template_destroy(port_id, 1, &destroy_id);
2351 		return port_flow_complain(&error);
2352 	}
2353 	printf("Pattern template #%u created\n", pit->id);
2354 	return 0;
2355 }
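
/*
 * Example (testpmd CLI, illustrative ID): pattern templates are the first
 * building block of the template tables created below:
 *
 *	flow pattern_template 0 create pattern_template_id 1
 *		template eth / ipv4 / end
 */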
2356 
2357 /** Destroy pattern template */
2358 int
2359 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n,
2360 				   const uint32_t *template)
2361 {
2362 	struct rte_port *port;
2363 	struct port_template **tmp;
2364 	int ret = 0;
2365 
2366 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2367 	    port_id == (portid_t)RTE_PORT_ALL)
2368 		return -EINVAL;
2369 	port = &ports[port_id];
2370 	tmp = &port->pattern_templ_list;
2371 	while (*tmp) {
2372 		uint32_t i;
2373 
2374 		for (i = 0; i != n; ++i) {
2375 			struct rte_flow_error error;
2376 			struct port_template *pit = *tmp;
2377 
2378 			if (template[i] != pit->id)
2379 				continue;
2380 			/*
2381 			 * Poisoning to make sure PMDs update it in case
2382 			 * of error.
2383 			 */
2384 			memset(&error, 0x33, sizeof(error));
2385 
2386 			if (pit->template.pattern_template &&
2387 			    rte_flow_pattern_template_destroy(port_id,
2388 							   pit->template.pattern_template,
2389 							   &error)) {
2390 				ret = port_flow_complain(&error);
2391 				continue;
2392 			}
2393 			*tmp = pit->next;
2394 			printf("Pattern template #%u destroyed\n", pit->id);
2395 			free(pit);
2396 			break;
2397 		}
2398 		if (i == n)
2399 			tmp = &(*tmp)->next;
2400 	}
2401 	return ret;
2402 }
2403 
2404 /** Flush pattern template */
2405 int
2406 port_flow_pattern_template_flush(portid_t port_id)
2407 {
2408 	struct rte_port *port;
2409 	struct port_template **tmp;
2410 	int ret = 0;
2411 
2412 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2413 	    port_id == (portid_t)RTE_PORT_ALL)
2414 		return -EINVAL;
2415 	port = &ports[port_id];
2416 	tmp = &port->pattern_templ_list;
2417 	while (*tmp) {
2418 		struct rte_flow_error error;
2419 		struct port_template *pit = *tmp;
2420 
2421 		/*
2422 		 * Poisoning to make sure PMDs update it in case
2423 		 * of error.
2424 		 */
2425 		memset(&error, 0x33, sizeof(error));
2426 		if (pit->template.pattern_template &&
2427 		    rte_flow_pattern_template_destroy(port_id,
2428 			pit->template.pattern_template, &error)) {
2429 			printf("Pattern template #%u not destroyed\n", pit->id);
2430 			ret = port_flow_complain(&error);
2431 			tmp = &pit->next;
2432 		} else {
2433 			*tmp = pit->next;
2434 			free(pit);
2435 		}
2436 	}
2437 	return ret;
2438 }
2439 
2440 /** Create actions template */
2441 int
2442 port_flow_actions_template_create(portid_t port_id, uint32_t id,
2443 				  const struct rte_flow_actions_template_attr *attr,
2444 				  const struct rte_flow_action *actions,
2445 				  const struct rte_flow_action *masks)
2446 {
2447 	struct rte_port *port;
2448 	struct port_template *pat;
2449 	int ret;
2450 	struct rte_flow_error error;
2451 
2452 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2453 	    port_id == (portid_t)RTE_PORT_ALL)
2454 		return -EINVAL;
2455 	port = &ports[port_id];
2456 	ret = template_alloc(id, &pat, &port->actions_templ_list);
2457 	if (ret)
2458 		return ret;
2459 	/* Poisoning to make sure PMDs update it in case of error. */
2460 	memset(&error, 0x22, sizeof(error));
2461 	pat->template.actions_template = rte_flow_actions_template_create(port_id,
2462 						attr, actions, masks, &error);
2463 	if (!pat->template.actions_template) {
2464 		uint32_t destroy_id = pat->id;
2465 		port_flow_actions_template_destroy(port_id, 1, &destroy_id);
2466 		return port_flow_complain(&error);
2467 	}
2468 	printf("Actions template #%u created\n", pat->id);
2469 	return 0;
2470 }
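
/*
 * Example (testpmd CLI, illustrative ID): an actions template pairs an
 * action list with a mask list; fully masked fields become constants of the
 * template:
 *
 *	flow actions_template 0 create actions_template_id 1
 *		template drop / end mask drop / end
 */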
2471 
2472 /** Destroy actions template */
2473 int
2474 port_flow_actions_template_destroy(portid_t port_id, uint32_t n,
2475 				   const uint32_t *template)
2476 {
2477 	struct rte_port *port;
2478 	struct port_template **tmp;
2479 	int ret = 0;
2480 
2481 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2482 	    port_id == (portid_t)RTE_PORT_ALL)
2483 		return -EINVAL;
2484 	port = &ports[port_id];
2485 	tmp = &port->actions_templ_list;
2486 	while (*tmp) {
2487 		uint32_t i;
2488 
2489 		for (i = 0; i != n; ++i) {
2490 			struct rte_flow_error error;
2491 			struct port_template *pat = *tmp;
2492 
2493 			if (template[i] != pat->id)
2494 				continue;
2495 			/*
2496 			 * Poisoning to make sure PMDs update it in case
2497 			 * of error.
2498 			 */
2499 			memset(&error, 0x33, sizeof(error));
2500 
2501 			if (pat->template.actions_template &&
2502 			    rte_flow_actions_template_destroy(port_id,
2503 					pat->template.actions_template, &error)) {
2504 				ret = port_flow_complain(&error);
2505 				continue;
2506 			}
2507 			*tmp = pat->next;
2508 			printf("Actions template #%u destroyed\n", pat->id);
2509 			free(pat);
2510 			break;
2511 		}
2512 		if (i == n)
2513 			tmp = &(*tmp)->next;
2514 	}
2515 	return ret;
2516 }
2517 
2518 /** Flush actions template */
2519 int
2520 port_flow_actions_template_flush(portid_t port_id)
2521 {
2522 	struct rte_port *port;
2523 	struct port_template **tmp;
2524 	int ret = 0;
2525 
2526 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2527 	    port_id == (portid_t)RTE_PORT_ALL)
2528 		return -EINVAL;
2529 	port = &ports[port_id];
2530 	tmp = &port->actions_templ_list;
2531 	while (*tmp) {
2532 		struct rte_flow_error error;
2533 		struct port_template *pat = *tmp;
2534 
2535 		/*
2536 		 * Poisoning to make sure PMDs update it in case
2537 		 * of error.
2538 		 */
2539 		memset(&error, 0x33, sizeof(error));
2540 
2541 		if (pat->template.actions_template &&
2542 		    rte_flow_actions_template_destroy(port_id,
2543 			pat->template.actions_template, &error)) {
2544 			ret = port_flow_complain(&error);
2545 			printf("Actions template #%u not destroyed\n", pat->id);
2546 			tmp = &pat->next;
2547 		} else {
2548 			*tmp = pat->next;
2549 			free(pat);
2550 		}
2551 	}
2552 	return ret;
2553 }
2554 
2555 /** Create table */
2556 int
2557 port_flow_template_table_create(portid_t port_id, uint32_t id,
2558 		const struct rte_flow_template_table_attr *table_attr,
2559 		uint32_t nb_pattern_templates, uint32_t *pattern_templates,
2560 		uint32_t nb_actions_templates, uint32_t *actions_templates)
2561 {
2562 	struct rte_port *port;
2563 	struct port_table *pt;
2564 	struct port_template *temp = NULL;
2565 	int ret;
2566 	uint32_t i;
2567 	struct rte_flow_error error;
2568 	struct rte_flow_pattern_template
2569 			*flow_pattern_templates[nb_pattern_templates];
2570 	struct rte_flow_actions_template
2571 			*flow_actions_templates[nb_actions_templates];
2572 
2573 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2574 	    port_id == (portid_t)RTE_PORT_ALL)
2575 		return -EINVAL;
2576 	port = &ports[port_id];
2577 	for (i = 0; i < nb_pattern_templates; ++i) {
2578 		bool found = false;
2579 		temp = port->pattern_templ_list;
2580 		while (temp) {
2581 			if (pattern_templates[i] == temp->id) {
2582 				flow_pattern_templates[i] =
2583 					temp->template.pattern_template;
2584 				found = true;
2585 				break;
2586 			}
2587 			temp = temp->next;
2588 		}
2589 		if (!found) {
2590 			printf("Pattern template #%u is invalid\n",
2591 			       pattern_templates[i]);
2592 			return -EINVAL;
2593 		}
2594 	}
2595 	for (i = 0; i < nb_actions_templates; ++i) {
2596 		bool found = false;
2597 		temp = port->actions_templ_list;
2598 		while (temp) {
2599 			if (actions_templates[i] == temp->id) {
2600 				flow_actions_templates[i] =
2601 					temp->template.actions_template;
2602 				found = true;
2603 				break;
2604 			}
2605 			temp = temp->next;
2606 		}
2607 		if (!found) {
2608 			printf("Actions template #%u is invalid\n",
2609 			       actions_templates[i]);
2610 			return -EINVAL;
2611 		}
2612 	}
2613 	ret = table_alloc(id, &pt, &port->table_list);
2614 	if (ret)
2615 		return ret;
2616 	/* Poisoning to make sure PMDs update it in case of error. */
2617 	memset(&error, 0x22, sizeof(error));
2618 	pt->table = rte_flow_template_table_create(port_id, table_attr,
2619 		      flow_pattern_templates, nb_pattern_templates,
2620 		      flow_actions_templates, nb_actions_templates,
2621 		      &error);
2622 
2623 	if (!pt->table) {
2624 		uint32_t destroy_id = pt->id;
2625 		port_flow_template_table_destroy(port_id, 1, &destroy_id);
2626 		return port_flow_complain(&error);
2627 	}
2628 	pt->nb_pattern_templates = nb_pattern_templates;
2629 	pt->nb_actions_templates = nb_actions_templates;
2630 	rte_memcpy(&pt->flow_attr, &table_attr->flow_attr,
2631 		   sizeof(struct rte_flow_attr));
2632 	printf("Template table #%u created\n", pt->id);
2633 	return 0;
2634 }
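
/*
 * Example (testpmd CLI, illustrative IDs): combine pattern template #1 and
 * actions template #1 into a 64-rule table matching on ingress group 1:
 *
 *	flow template_table 0 create table_id 1 group 1 priority 0 ingress
 *		rules_number 64 pattern_template 1 actions_template 1
 */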
2635 
2636 /** Destroy table */
2637 int
2638 port_flow_template_table_destroy(portid_t port_id,
2639 				 uint32_t n, const uint32_t *table)
2640 {
2641 	struct rte_port *port;
2642 	struct port_table **tmp;
2643 	int ret = 0;
2644 
2645 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2646 	    port_id == (portid_t)RTE_PORT_ALL)
2647 		return -EINVAL;
2648 	port = &ports[port_id];
2649 	tmp = &port->table_list;
2650 	while (*tmp) {
2651 		uint32_t i;
2652 
2653 		for (i = 0; i != n; ++i) {
2654 			struct rte_flow_error error;
2655 			struct port_table *pt = *tmp;
2656 
2657 			if (table[i] != pt->id)
2658 				continue;
2659 			/*
2660 			 * Poisoning to make sure PMDs update it in case
2661 			 * of error.
2662 			 */
2663 			memset(&error, 0x33, sizeof(error));
2664 
2665 			if (pt->table &&
2666 			    rte_flow_template_table_destroy(port_id,
2667 							    pt->table,
2668 							    &error)) {
2669 				ret = port_flow_complain(&error);
2670 				continue;
2671 			}
2672 			*tmp = pt->next;
2673 			printf("Template table #%u destroyed\n", pt->id);
2674 			free(pt);
2675 			break;
2676 		}
2677 		if (i == n)
2678 			tmp = &(*tmp)->next;
2679 	}
2680 	return ret;
2681 }
2682 
2683 /** Flush table */
2684 int
2685 port_flow_template_table_flush(portid_t port_id)
2686 {
2687 	struct rte_port *port;
2688 	struct port_table **tmp;
2689 	int ret = 0;
2690 
2691 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2692 	    port_id == (portid_t)RTE_PORT_ALL)
2693 		return -EINVAL;
2694 	port = &ports[port_id];
2695 	tmp = &port->table_list;
2696 	while (*tmp) {
2697 		struct rte_flow_error error;
2698 		struct port_table *pt = *tmp;
2699 
2700 		/*
2701 		 * Poisoning to make sure PMDs update it in case
2702 		 * of error.
2703 		 */
2704 		memset(&error, 0x33, sizeof(error));
2705 
2706 		if (pt->table &&
2707 		    rte_flow_template_table_destroy(port_id,
2708 						   pt->table,
2709 						   &error)) {
2710 			ret = port_flow_complain(&error);
2711 			printf("Template table #%u not destroyed\n", pt->id);
2712 			tmp = &pt->next;
2713 		} else {
2714 			*tmp = pt->next;
2715 			free(pt);
2716 		}
2717 	}
2718 	return ret;
2719 }
2720 
2721 /** Enqueue create flow rule operation. */
2722 int
2723 port_queue_flow_create(portid_t port_id, queueid_t queue_id,
2724 		       bool postpone, uint32_t table_id, uint32_t rule_idx,
2725 		       uint32_t pattern_idx, uint32_t actions_idx,
2726 		       const struct rte_flow_item *pattern,
2727 		       const struct rte_flow_action *actions)
2728 {
2729 	struct rte_flow_op_attr op_attr = { .postpone = postpone };
2730 	struct rte_flow *flow;
2731 	struct rte_port *port;
2732 	struct port_flow *pf;
2733 	struct port_table *pt;
2734 	uint32_t id = 0;
2735 	bool found;
2736 	struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL };
2737 	struct rte_flow_action_age *age = age_action_get(actions);
2738 	struct queue_job *job;
2739 
2740 	port = &ports[port_id];
2741 	if (port->flow_list) {
2742 		if (port->flow_list->id == UINT32_MAX) {
2743 			printf("Highest rule ID is already assigned,"
2744 			       " delete it first\n");
2745 			return -ENOMEM;
2746 		}
2747 		id = port->flow_list->id + 1;
2748 	}
2749 
2750 	if (queue_id >= port->queue_nb) {
2751 		printf("Queue #%u is invalid\n", queue_id);
2752 		return -EINVAL;
2753 	}
2754 
2755 	found = false;
2756 	pt = port->table_list;
2757 	while (pt) {
2758 		if (table_id == pt->id) {
2759 			found = true;
2760 			break;
2761 		}
2762 		pt = pt->next;
2763 	}
2764 	if (!found) {
2765 		printf("Table #%u is invalid\n", table_id);
2766 		return -EINVAL;
2767 	}
2768 
2769 	if (pattern_idx >= pt->nb_pattern_templates) {
2770 		printf("Pattern template index #%u is invalid,"
2771 		       " %u templates present in the table\n",
2772 		       pattern_idx, pt->nb_pattern_templates);
2773 		return -EINVAL;
2774 	}
2775 	if (actions_idx >= pt->nb_actions_templates) {
2776 		printf("Actions template index #%u is invalid,"
2777 		       " %u templates present in the table\n",
2778 		       actions_idx, pt->nb_actions_templates);
2779 		return -EINVAL;
2780 	}
2781 
2782 	job = calloc(1, sizeof(*job));
2783 	if (!job) {
2784 		printf("Queue flow create job allocate failed\n");
2785 		return -ENOMEM;
2786 	}
2787 	job->type = QUEUE_JOB_TYPE_FLOW_CREATE;
2788 
2789 	pf = port_flow_new(&pt->flow_attr, pattern, actions, &error);
2790 	if (!pf) {
2791 		free(job);
2792 		return port_flow_complain(&error);
2793 	}
2794 	if (age) {
2795 		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
2796 		age->context = &pf->age_type;
2797 	}
2798 	/* Poisoning to make sure PMDs update it in case of error. */
2799 	memset(&error, 0x11, sizeof(error));
2800 	if (rule_idx == UINT32_MAX)
2801 		flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table,
2802 			pattern, pattern_idx, actions, actions_idx, job, &error);
2803 	else
2804 		flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table,
2805 			rule_idx, actions, actions_idx, job, &error);
2806 	if (!flow) {
2807 		uint64_t flow_id = pf->id;
2808 		port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id);
2809 		free(job);
2810 		return port_flow_complain(&error);
2811 	}
2812 
2813 	pf->next = port->flow_list;
2814 	pf->id = id;
2815 	pf->table = pt;
2816 	pf->flow = flow;
2817 	job->pf = pf;
2818 	port->flow_list = pf;
2819 	printf("Flow rule #%"PRIu64" creation enqueued\n", pf->id);
2820 	return 0;
2821 }
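
/*
 * Example (testpmd CLI, illustrative IDs): enqueue a rule creation on flow
 * queue 0 of port 0 into template table #1; the completion must be fetched
 * later with "flow pull":
 *
 *	flow queue 0 create 0 postpone no template_table 1 pattern_template 0
 *		actions_template 0 pattern eth / end actions drop / end
 */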
2822 
2823 /** Enqueue number of destroy flow rules operations. */
2824 int
2825 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id,
2826 			bool postpone, uint32_t n, const uint64_t *rule)
2827 {
2828 	struct rte_flow_op_attr op_attr = { .postpone = postpone };
2829 	struct rte_port *port;
2830 	struct port_flow **tmp;
2831 	int ret = 0;
2832 	struct queue_job *job;
2833 
2834 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2835 	    port_id == (portid_t)RTE_PORT_ALL)
2836 		return -EINVAL;
2837 	port = &ports[port_id];
2838 
2839 	if (queue_id >= port->queue_nb) {
2840 		printf("Queue #%u is invalid\n", queue_id);
2841 		return -EINVAL;
2842 	}
2843 
2844 	tmp = &port->flow_list;
2845 	while (*tmp) {
2846 		uint32_t i;
2847 
2848 		for (i = 0; i != n; ++i) {
2849 			struct rte_flow_error error;
2850 			struct port_flow *pf = *tmp;
2851 
2852 			if (rule[i] != pf->id)
2853 				continue;
2854 			/*
2855 			 * Poisoning to make sure PMD
2856 			 * update it in case of error.
2857 			 */
2858 			memset(&error, 0x33, sizeof(error));
2859 			job = calloc(1, sizeof(*job));
2860 			if (!job) {
2861 				printf("Queue flow destroy job allocate failed\n");
2862 				return -ENOMEM;
2863 			}
2864 			job->type = QUEUE_JOB_TYPE_FLOW_DESTROY;
2865 			job->pf = pf;
2866 
2867 			if (rte_flow_async_destroy(port_id, queue_id, &op_attr,
2868 						   pf->flow, job, &error)) {
2869 				free(job);
2870 				ret = port_flow_complain(&error);
2871 				continue;
2872 			}
2873 			printf("Flow rule #%"PRIu64" destruction enqueued\n",
2874 			       pf->id);
2875 			*tmp = pf->next;
2876 			break;
2877 		}
2878 		if (i == n)
2879 			tmp = &(*tmp)->next;
2880 	}
2881 	return ret;
2882 }
2883 
2884 static void
2885 queue_action_handle_create(portid_t port_id, uint32_t queue_id,
2886 			   struct port_indirect_action *pia,
2887 			   struct queue_job *job,
2888 			   const struct rte_flow_op_attr *attr,
2889 			   const struct rte_flow_indir_action_conf *conf,
2890 			   const struct rte_flow_action *action,
2891 			   struct rte_flow_error *error)
2892 {
2893 	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
2894 		struct rte_flow_action_age *age =
2895 			(struct rte_flow_action_age *)(uintptr_t)(action->conf);
2896 
2897 		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
2898 		age->context = &pia->age_type;
2899 	}
2901 	pia->handle = rte_flow_async_action_handle_create(port_id, queue_id,
2902 							  attr, conf, action,
2903 							  job, error);
2904 	pia->type = action->type;
2905 }
2906 
2907 static void
2908 queue_action_list_handle_create(portid_t port_id, uint32_t queue_id,
2909 				struct port_indirect_action *pia,
2910 				struct queue_job *job,
2911 				const struct rte_flow_op_attr *attr,
2912 				const struct rte_flow_indir_action_conf *conf,
2913 				const struct rte_flow_action *action,
2914 				struct rte_flow_error *error)
2915 {
2917 	pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST;
2918 	pia->list_handle = rte_flow_async_action_list_handle_create
2919 		(port_id, queue_id, attr, conf, action,
2920 		 job, error);
2921 }
2922 
2923 /** Enqueue update flow rule operation. */
2924 int
2925 port_queue_flow_update(portid_t port_id, queueid_t queue_id,
2926 		       bool postpone, uint32_t rule_idx, uint32_t actions_idx,
2927 		       const struct rte_flow_action *actions)
2928 {
2929 	struct rte_flow_op_attr op_attr = { .postpone = postpone };
2930 	struct rte_port *port;
2931 	struct port_flow *pf, *uf;
2932 	struct port_flow **tmp;
2933 	struct port_table *pt;
2934 	bool found;
2935 	struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL };
2936 	struct rte_flow_action_age *age = age_action_get(actions);
2937 	struct queue_job *job;
2938 
2939 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2940 	    port_id == (portid_t)RTE_PORT_ALL)
2941 		return -EINVAL;
2942 	port = &ports[port_id];
2943 
2944 	if (queue_id >= port->queue_nb) {
2945 		printf("Queue #%u is invalid\n", queue_id);
2946 		return -EINVAL;
2947 	}
2948 
2949 	found = false;
2950 	tmp = &port->flow_list;
2951 	while (*tmp) {
2952 		pf = *tmp;
2953 		if (rule_idx == pf->id) {
2954 			found = true;
2955 			break;
2956 		}
2957 		tmp = &(*tmp)->next;
2958 	}
2959 	if (!found) {
2960 		printf("Flow rule #%u is invalid\n", rule_idx);
2961 		return -EINVAL;
2962 	}
2963 
2964 	pt = pf->table;
2965 	if (actions_idx >= pt->nb_actions_templates) {
2966 		printf("Actions template index #%u is invalid,"
2967 		       " %u templates present in the table\n",
2968 		       actions_idx, pt->nb_actions_templates);
2969 		return -EINVAL;
2970 	}
2971 
2972 	job = calloc(1, sizeof(*job));
2973 	if (!job) {
2974 		printf("Queue flow update job allocate failed\n");
2975 		return -ENOMEM;
2976 	}
2977 	job->type = QUEUE_JOB_TYPE_FLOW_UPDATE;
2978 
2979 	uf = port_flow_new(&pt->flow_attr, pf->rule.pattern_ro, actions, &error);
2980 	if (!uf) {
2981 		free(job);
2982 		return port_flow_complain(&error);
2983 	}
2984 
2985 	if (age) {
2986 		uf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
2987 		age->context = &uf->age_type;
2988 	}
2989 
2990 	/*
2991 	 * Poisoning to make sure PMD update it in case of error.
2992 	 */
2993 	memset(&error, 0x44, sizeof(error));
2994 	if (rte_flow_async_actions_update(port_id, queue_id, &op_attr, pf->flow,
2995 					  actions, actions_idx, job, &error)) {
2996 		free(uf);
2997 		free(job);
2998 		return port_flow_complain(&error);
2999 	}
3000 	uf->next = pf->next;
3001 	uf->id = pf->id;
3002 	uf->table = pt;
3003 	uf->flow = pf->flow;
3004 	*tmp = uf;
3005 	job->pf = pf;
3006 
3007 	printf("Flow rule #%"PRIu64" update enqueued\n", pf->id);
3008 	return 0;
3009 }
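
/*
 * Example (testpmd CLI, illustrative syntax): rewrite the actions of queued
 * rule #5 using actions template 0:
 *
 *	flow queue 0 update 0 rule 5 actions_template 0 postpone no
 *		actions jump group 2 / end
 *
 * The replaced port_flow object stays attached to the job and is freed only
 * once the completion is pulled, since hardware may still reference it.
 */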
3010 
3011 /** Enqueue indirect action create operation. */
3012 int
3013 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id,
3014 				bool postpone, uint32_t id,
3015 				const struct rte_flow_indir_action_conf *conf,
3016 				const struct rte_flow_action *action)
3017 {
3018 	const struct rte_flow_op_attr attr = { .postpone = postpone};
3019 	struct rte_port *port;
3020 	struct port_indirect_action *pia;
3021 	int ret;
3022 	struct rte_flow_error error;
3023 	struct queue_job *job;
3024 	bool is_indirect_list = action[1].type != RTE_FLOW_ACTION_TYPE_END;
3025 
3027 	ret = action_alloc(port_id, id, &pia);
3028 	if (ret)
3029 		return ret;
3030 
3031 	port = &ports[port_id];
3032 	if (queue_id >= port->queue_nb) {
3033 		printf("Queue #%u is invalid\n", queue_id);
3034 		return -EINVAL;
3035 	}
3036 	job = calloc(1, sizeof(*job));
3037 	if (!job) {
3038 		printf("Queue action create job allocate failed\n");
3039 		return -ENOMEM;
3040 	}
3041 	job->type = QUEUE_JOB_TYPE_ACTION_CREATE;
3042 	job->pia = pia;
3043 
3044 	/* Poisoning to make sure PMDs update it in case of error. */
3045 	memset(&error, 0x88, sizeof(error));
3046 
3047 	if (is_indirect_list)
3048 		queue_action_list_handle_create(port_id, queue_id, pia, job,
3049 						&attr, conf, action, &error);
3050 	else
3051 		queue_action_handle_create(port_id, queue_id, pia, job, &attr,
3052 					   conf, action, &error);
3053 
3054 	if (!pia->handle) {
3055 		uint32_t destroy_id = pia->id;
3056 		port_queue_action_handle_destroy(port_id, queue_id,
3057 						 postpone, 1, &destroy_id);
3058 		free(job);
3059 		return port_flow_complain(&error);
3060 	}
3061 	printf("Indirect action #%u creation queued\n", pia->id);
3062 	return 0;
3063 }
3064 
3065 /** Enqueue indirect action destroy operation. */
3066 int
3067 port_queue_action_handle_destroy(portid_t port_id,
3068 				 uint32_t queue_id, bool postpone,
3069 				 uint32_t n, const uint32_t *actions)
3070 {
3071 	const struct rte_flow_op_attr attr = { .postpone = postpone};
3072 	struct rte_port *port;
3073 	struct port_indirect_action **tmp;
3074 	int ret = 0;
3075 	struct queue_job *job;
3076 
3077 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3078 	    port_id == (portid_t)RTE_PORT_ALL)
3079 		return -EINVAL;
3080 	port = &ports[port_id];
3081 
3082 	if (queue_id >= port->queue_nb) {
3083 		printf("Queue #%u is invalid\n", queue_id);
3084 		return -EINVAL;
3085 	}
3086 
3087 	tmp = &port->actions_list;
3088 	while (*tmp) {
3089 		uint32_t i;
3090 
3091 		for (i = 0; i != n; ++i) {
3092 			struct rte_flow_error error;
3093 			struct port_indirect_action *pia = *tmp;
3094 
3095 			if (actions[i] != pia->id)
3096 				continue;
3097 			/*
3098 			 * Poisoning to make sure PMDs update it in case
3099 			 * of error.
3100 			 */
3101 			memset(&error, 0x99, sizeof(error));
3102 			job = calloc(1, sizeof(*job));
3103 			if (!job) {
3104 				printf("Queue action destroy job allocate failed\n");
3105 				return -ENOMEM;
3106 			}
3107 			job->type = QUEUE_JOB_TYPE_ACTION_DESTROY;
3108 			job->pia = pia;
3109 			ret = pia->type == RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ?
3110 			      rte_flow_async_action_list_handle_destroy
3111 				      (port_id, queue_id,
3112 				       &attr, pia->list_handle,
3113 				       job, &error) :
3114 			      rte_flow_async_action_handle_destroy
3115 				      (port_id, queue_id, &attr, pia->handle,
3116 				       job, &error);
3117 			if (ret) {
3118 				free(job);
3119 				ret = port_flow_complain(&error);
3120 				continue;
3121 			}
3122 			*tmp = pia->next;
3123 			printf("Indirect action #%u destruction queued\n",
3124 			       pia->id);
3125 			break;
3126 		}
3127 		if (i == n)
3128 			tmp = &(*tmp)->next;
3129 	}
3130 	return ret;
3131 }
3132 
3133 /** Enqueue indirect action update operation. */
3134 int
3135 port_queue_action_handle_update(portid_t port_id,
3136 				uint32_t queue_id, bool postpone, uint32_t id,
3137 				const struct rte_flow_action *action)
3138 {
3139 	const struct rte_flow_op_attr attr = { .postpone = postpone};
3140 	struct rte_port *port;
3141 	struct rte_flow_error error;
3142 	struct rte_flow_action_handle *action_handle;
3143 	struct queue_job *job;
3144 	struct port_indirect_action *pia;
3145 	struct rte_flow_update_meter_mark mtr_update = { 0 };
3146 	const void *update;
3147 
3148 	action_handle = port_action_handle_get_by_id(port_id, id);
3149 	if (!action_handle)
3150 		return -EINVAL;
3151 
3152 	port = &ports[port_id];
3153 	if (queue_id >= port->queue_nb) {
3154 		printf("Queue #%u is invalid\n", queue_id);
3155 		return -EINVAL;
3156 	}
3157 
3158 	job = calloc(1, sizeof(*job));
3159 	if (!job) {
3160 		printf("Queue action update job allocate failed\n");
3161 		return -ENOMEM;
3162 	}
3163 	job->type = QUEUE_JOB_TYPE_ACTION_UPDATE;
3164 
3165 	pia = action_get_by_id(port_id, id);
3166 	if (!pia) {
3167 		free(job);
3168 		return -EINVAL;
3169 	}
3170 
3171 	switch (pia->type) {
3172 	case RTE_FLOW_ACTION_TYPE_AGE:
3173 		update = action->conf;
3174 		break;
3175 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
3176 		rte_memcpy(&mtr_update.meter_mark, action->conf,
3177 			sizeof(struct rte_flow_action_meter_mark));
3178 		if (mtr_update.meter_mark.profile)
3179 			mtr_update.profile_valid = 1;
3180 		if (mtr_update.meter_mark.policy)
3181 			mtr_update.policy_valid = 1;
3182 		mtr_update.color_mode_valid = 1;
3183 		mtr_update.state_valid = 1;
3184 		update = &mtr_update;
3185 		break;
3186 	default:
3187 		update = action;
3188 		break;
3189 	}
3190 
3191 	if (rte_flow_async_action_handle_update(port_id, queue_id, &attr,
3192 				    action_handle, update, job, &error)) {
3193 		free(job);
3194 		return port_flow_complain(&error);
3195 	}
3196 	printf("Indirect action #%u update queued\n", id);
3197 	return 0;
3198 }
3199 
3200 void
3201 port_queue_action_handle_query_update(portid_t port_id,
3202 				      uint32_t queue_id, bool postpone,
3203 				      uint32_t id,
3204 				      enum rte_flow_query_update_mode qu_mode,
3205 				      const struct rte_flow_action *action)
3206 {
3207 	int ret;
3208 	struct rte_flow_error error;
3209 	struct port_indirect_action *pia = action_get_by_id(port_id, id);
3210 	const struct rte_flow_op_attr attr = { .postpone = postpone};
3211 	struct queue_job *job;
3212 
3213 	if (!pia || !pia->handle)
3214 		return;
3215 	job = calloc(1, sizeof(*job));
3216 	if (!job)
3217 		return;
3218 	job->type = QUEUE_JOB_TYPE_ACTION_QUERY;
3219 	job->pia = pia;
3220 
3221 	ret = rte_flow_async_action_handle_query_update(port_id, queue_id,
3222 							&attr, pia->handle,
3223 							action,
3224 							&job->query,
3225 							qu_mode, job,
3226 							&error);
3227 	if (ret) {
3228 		port_flow_complain(&error);
3229 		free(job);
3230 	} else {
3231 		printf("port-%u: indirect action #%u update-and-query queued\n",
3232 		       port_id, id);
3233 	}
3234 }
3235 
3236 /** Enqueue indirect action query operation. */
3237 int
3238 port_queue_action_handle_query(portid_t port_id,
3239 			       uint32_t queue_id, bool postpone, uint32_t id)
3240 {
3241 	const struct rte_flow_op_attr attr = { .postpone = postpone};
3242 	struct rte_port *port;
3243 	struct rte_flow_error error;
3244 	struct rte_flow_action_handle *action_handle;
3245 	struct port_indirect_action *pia;
3246 	struct queue_job *job;
3247 
3248 	pia = action_get_by_id(port_id, id);
3249 	action_handle = pia ? pia->handle : NULL;
3250 	if (!action_handle)
3251 		return -EINVAL;
3252 
3253 	port = &ports[port_id];
3254 	if (queue_id >= port->queue_nb) {
3255 		printf("Queue #%u is invalid\n", queue_id);
3256 		return -EINVAL;
3257 	}
3258 
3259 	job = calloc(1, sizeof(*job));
3260 	if (!job) {
3261 		printf("Queue action query job allocate failed\n");
3262 		return -ENOMEM;
3263 	}
3264 	job->type = QUEUE_JOB_TYPE_ACTION_QUERY;
3265 	job->pia = pia;
3266 
3267 	if (rte_flow_async_action_handle_query(port_id, queue_id, &attr,
3268 				    action_handle, &job->query, job, &error)) {
3269 		free(job);
3270 		return port_flow_complain(&error);
3271 	}
3272 	printf("Indirect action #%u query queued\n", id);
3273 	return 0;
3274 }
3275 
3276 /** Push all the queue operations in the queue to the NIC. */
3277 int
3278 port_queue_flow_push(portid_t port_id, queueid_t queue_id)
3279 {
3280 	struct rte_port *port;
3281 	struct rte_flow_error error;
3282 	int ret = 0;
3283 
3284 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3285 	    port_id == (portid_t)RTE_PORT_ALL)
3286 		return -EINVAL;
3287 	port = &ports[port_id];
3288 
3289 	if (queue_id >= port->queue_nb) {
3290 		printf("Queue #%u is invalid\n", queue_id);
3291 		return -EINVAL;
3292 	}
3293 
3294 	memset(&error, 0x55, sizeof(error));
3295 	ret = rte_flow_push(port_id, queue_id, &error);
3296 	if (ret < 0) {
3297 		printf("Failed to push operations to the queue\n");
3298 		return -EINVAL;
3299 	}
3300 	printf("Queue #%u operations pushed\n", queue_id);
3301 	return ret;
3302 }
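
/*
 * Example (testpmd CLI): a typical asynchronous sequence is one or more
 * postponed operations, an explicit push, then a pull for the completions:
 *
 *	flow queue 0 create 0 postpone yes template_table 1 pattern_template 0
 *		actions_template 0 pattern eth / end actions drop / end
 *	flow push 0 queue 0
 *	flow pull 0 queue 0
 */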
3303 
3304 /** Calculate the hash result for a given pattern in a given table. */
3305 int
3306 port_flow_hash_calc(portid_t port_id, uint32_t table_id,
3307 		    uint8_t pattern_template_index, const struct rte_flow_item pattern[])
3308 {
3309 	uint32_t hash;
3310 	bool found;
3311 	struct port_table *pt;
3312 	struct rte_port *port;
3313 	struct rte_flow_error error;
3314 	int ret = 0;
3315 
3316 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3317 	    port_id == (portid_t)RTE_PORT_ALL)
3318 		return -EINVAL;
3319 	port = &ports[port_id];
3320 
3321 	found = false;
3322 	pt = port->table_list;
3323 	while (pt) {
3324 		if (table_id == pt->id) {
3325 			found = true;
3326 			break;
3327 		}
3328 		pt = pt->next;
3329 	}
3330 	if (!found) {
3331 		printf("Table #%u is invalid\n", table_id);
3332 		return -EINVAL;
3333 	}
3334 
3335 	memset(&error, 0x55, sizeof(error));
3336 	ret = rte_flow_calc_table_hash(port_id, pt->table, pattern,
3337 				       pattern_template_index, &hash, &error);
3338 	if (ret < 0) {
3339 		printf("Failed to calculate hash ");
3340 		switch (abs(ret)) {
3341 		case ENODEV:
3342 			printf("no such device\n");
3343 			break;
3344 		case ENOTSUP:
3345 			printf("device doesn't support this operation\n");
3346 			break;
3347 		default:
3348 			printf("\n");
3349 			break;
3350 		}
3351 		return ret;
3352 	}
3353 	printf("Hash result: 0x%x\n", hash);
3354 	return 0;
3355 }
3356 
3357 /** Destroy aged flows by enqueueing destroy operations and pulling the results. */
3358 static int
3359 port_queue_aged_flow_destroy(portid_t port_id, queueid_t queue_id,
3360 			     const uint64_t *rule, int nb_flows)
3361 {
3362 	struct rte_port *port = &ports[port_id];
3363 	struct rte_flow_op_result *res;
3364 	struct rte_flow_error error;
3365 	uint32_t n = nb_flows;
3366 	int ret = 0;
3367 	int i;
3368 
3369 	res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result));
3370 	if (!res) {
3371 		printf("Failed to allocate memory for pulled results\n");
3372 		return -ENOMEM;
3373 	}
3374 
3375 	memset(&error, 0x66, sizeof(error));
3376 	while (nb_flows > 0) {
3377 		int success = 0;
3378 
3379 		if (n > port->queue_sz)
3380 			n = port->queue_sz;
3381 		ret = port_queue_flow_destroy(port_id, queue_id, true, n, rule);
3382 		if (ret < 0) {
3383 			free(res);
3384 			return ret;
3385 		}
3386 		ret = rte_flow_push(port_id, queue_id, &error);
3387 		if (ret < 0) {
3388 			printf("Failed to push operations to the queue: %s\n",
3389 			       strerror(-ret));
3390 			free(res);
3391 			return ret;
3392 		}
3393 		while (success < (int)n) {
3394 			ret = rte_flow_pull(port_id, queue_id, res,
3395 					    port->queue_sz, &error);
3396 			if (ret < 0) {
3397 				printf("Failed to pull operation results: %s\n",
3398 				       strerror(-ret));
3399 				free(res);
3400 				return ret;
3401 			}
3402 
3403 			for (i = 0; i < ret; i++) {
3404 				if (res[i].status == RTE_FLOW_OP_SUCCESS)
3405 					success++;
3406 			}
3407 		}
3408 		rule += n;
3409 		nb_flows -= n;
3410 		n = nb_flows;
3411 	}
3412 
3413 	free(res);
3414 	return ret;
3415 }
3416 
3417 /** List and optionally destroy all aged flows on a queue. */
3418 void
3419 port_queue_flow_aged(portid_t port_id, uint32_t queue_id, uint8_t destroy)
3420 {
3421 	void **contexts;
3422 	int nb_context, total = 0, idx;
3423 	uint64_t *rules = NULL;
3424 	struct rte_port *port;
3425 	struct rte_flow_error error;
3426 	enum age_action_context_type *type;
3427 	union {
3428 		struct port_flow *pf;
3429 		struct port_indirect_action *pia;
3430 	} ctx;
3431 
3432 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3433 	    port_id == (portid_t)RTE_PORT_ALL)
3434 		return;
3435 	port = &ports[port_id];
3436 	if (queue_id >= port->queue_nb) {
3437 		printf("Error: queue #%u is invalid\n", queue_id);
3438 		return;
3439 	}
3440 	total = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error);
3441 	if (total < 0) {
3442 		port_flow_complain(&error);
3443 		return;
3444 	}
3445 	printf("Port %u queue %u total aged flows: %d\n",
3446 	       port_id, queue_id, total);
3447 	if (total == 0)
3448 		return;
3449 	contexts = calloc(total, sizeof(void *));
3450 	if (contexts == NULL) {
3451 		printf("Cannot allocate contexts for aged flows\n");
3452 		return;
3453 	}
3454 	printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
3455 	nb_context = rte_flow_get_q_aged_flows(port_id, queue_id, contexts,
3456 					       total, &error);
3457 	if (nb_context > total) {
3458 		printf("Port %u queue %u get aged flows count(%d) > total(%d)\n",
3459 		       port_id, queue_id, nb_context, total);
3460 		free(contexts);
3461 		return;
3462 	}
3463 	if (destroy) {
3464 		rules = malloc(sizeof(*rules) * nb_context);
3465 		if (rules == NULL)
3466 			printf("Cannot allocate memory to destroy aged flows\n");
3467 	}
3468 	total = 0;
3469 	for (idx = 0; idx < nb_context; idx++) {
3470 		if (!contexts[idx]) {
3471 			printf("Error: got NULL context on port %u queue %u\n",
3472 			       port_id, queue_id);
3473 			continue;
3474 		}
3475 		type = (enum age_action_context_type *)contexts[idx];
3476 		switch (*type) {
3477 		case ACTION_AGE_CONTEXT_TYPE_FLOW:
3478 			ctx.pf = container_of(type, struct port_flow, age_type);
3479 			printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32
3480 								 "\t%c%c%c\t\n",
3481 			       "Flow",
3482 			       ctx.pf->id,
3483 			       ctx.pf->rule.attr->group,
3484 			       ctx.pf->rule.attr->priority,
3485 			       ctx.pf->rule.attr->ingress ? 'i' : '-',
3486 			       ctx.pf->rule.attr->egress ? 'e' : '-',
3487 			       ctx.pf->rule.attr->transfer ? 't' : '-');
3488 			if (rules != NULL) {
3489 				rules[total] = ctx.pf->id;
3490 				total++;
3491 			}
3492 			break;
3493 		case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
3494 			ctx.pia = container_of(type,
3495 					       struct port_indirect_action,
3496 					       age_type);
3497 			printf("%-20s\t%" PRIu32 "\n", "Indirect action",
3498 			       ctx.pia->id);
3499 			break;
3500 		default:
3501 			printf("Error: invalid context type on port %u\n", port_id);
3502 			break;
3503 		}
3504 	}
3505 	if (rules != NULL) {
3506 		port_queue_aged_flow_destroy(port_id, queue_id, rules, total);
3507 		free(rules);
3508 	}
3509 	printf("\n%d flows destroyed\n", total);
3510 	free(contexts);
3511 }
3512 
3513 /** Pull queue operation results from the queue. */
3514 int
3515 port_queue_flow_pull(portid_t port_id, queueid_t queue_id)
3516 {
3517 	struct rte_port *port;
3518 	struct rte_flow_op_result *res;
3519 	struct rte_flow_error error;
3520 	int ret = 0;
3521 	int success = 0;
3522 	int i;
3523 	struct queue_job *job;
3524 
3525 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3526 	    port_id == (portid_t)RTE_PORT_ALL)
3527 		return -EINVAL;
3528 	port = &ports[port_id];
3529 
3530 	if (queue_id >= port->queue_nb) {
3531 		printf("Queue #%u is invalid\n", queue_id);
3532 		return -EINVAL;
3533 	}
3534 
3535 	res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result));
3536 	if (!res) {
3537 		printf("Failed to allocate memory for pulled results\n");
3538 		return -ENOMEM;
3539 	}
3540 
3541 	memset(&error, 0x66, sizeof(error));
3542 	ret = rte_flow_pull(port_id, queue_id, res,
3543 				 port->queue_sz, &error);
3544 	if (ret < 0) {
3545 		printf("Failed to pull operation results\n");
3546 		free(res);
3547 		return -EINVAL;
3548 	}
3549 
3550 	for (i = 0; i < ret; i++) {
3551 		if (res[i].status == RTE_FLOW_OP_SUCCESS)
3552 			success++;
3553 		job = (struct queue_job *)res[i].user_data;
3554 		if (job->type == QUEUE_JOB_TYPE_FLOW_DESTROY ||
3555 		    job->type == QUEUE_JOB_TYPE_FLOW_UPDATE)
3556 			free(job->pf);
3557 		else if (job->type == QUEUE_JOB_TYPE_ACTION_DESTROY)
3558 			free(job->pia);
3559 		else if (job->type == QUEUE_JOB_TYPE_ACTION_QUERY)
3560 			port_action_handle_query_dump(port_id, job->pia,
3561 						      &job->query);
3562 		free(job);
3563 	}
3564 	printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n",
3565 	       queue_id, ret, ret - success, success);
3566 	free(res);
3567 	return ret;
3568 }
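
/*
 * Each pulled result carries the queue_job pointer that was passed as
 * user_data when the operation was enqueued; ownership of the job (and of
 * the port_flow or indirect-action bookkeeping it references) returns to
 * this function, which is why destroyed objects are freed here rather than
 * at enqueue time.
 */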
3569 
3570 /* Set group miss actions */
3571 int
3572 port_queue_group_set_miss_actions(portid_t port_id, const struct rte_flow_attr *attr,
3573 				  const struct rte_flow_action *actions)
3574 {
3575 	struct rte_flow_group_attr gattr = {
3576 		.ingress = attr->ingress,
3577 		.egress = attr->egress,
3578 		.transfer = attr->transfer,
3579 	};
3580 	struct rte_flow_error error;
3581 	int ret = 0;
3582 
3583 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3584 	    port_id == (portid_t)RTE_PORT_ALL)
3585 		return -EINVAL;
3586 
3587 	memset(&error, 0x66, sizeof(error));
3588 	ret = rte_flow_group_set_miss_actions(port_id, attr->group, &gattr, actions, &error);
3589 
3590 	if (ret < 0)
3591 		return port_flow_complain(&error);
3592 
3593 	printf("Group #%u set miss actions succeeded\n", attr->group);
3594 	return ret;
3595 }
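
/*
 * Example (testpmd CLI, illustrative syntax): divert packets that miss
 * every rule in group 1 to group 2 instead of the default miss behavior:
 *
 *	flow group 0 group_id 1 ingress set_miss_actions jump group 2 / end
 */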
3596 
3597 /** Create flow rule. */
3598 int
3599 port_flow_create(portid_t port_id,
3600 		 const struct rte_flow_attr *attr,
3601 		 const struct rte_flow_item *pattern,
3602 		 const struct rte_flow_action *actions,
3603 		 const struct tunnel_ops *tunnel_ops,
3604 		 uintptr_t user_id)
3605 {
3606 	struct rte_flow *flow;
3607 	struct rte_port *port;
3608 	struct port_flow *pf;
3609 	uint32_t id = 0;
3610 	struct rte_flow_error error;
3611 	struct port_flow_tunnel *pft = NULL;
3612 	struct rte_flow_action_age *age = age_action_get(actions);
3613 
3614 	port = &ports[port_id];
3615 	if (port->flow_list) {
3616 		if (port->flow_list->id == UINT32_MAX) {
3617 			fprintf(stderr,
3618 				"Highest rule ID is already assigned, delete it first\n");
3619 			return -ENOMEM;
3620 		}
3621 		id = port->flow_list->id + 1;
3622 	}
3623 	if (tunnel_ops->enabled) {
3624 		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
3625 							actions, tunnel_ops);
3626 		if (!pft)
3627 			return -ENOENT;
3628 		if (pft->items)
3629 			pattern = pft->items;
3630 		if (pft->actions)
3631 			actions = pft->actions;
3632 	}
3633 	pf = port_flow_new(attr, pattern, actions, &error);
3634 	if (!pf)
3635 		return port_flow_complain(&error);
3636 	if (age) {
3637 		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
3638 		age->context = &pf->age_type;
3639 	}
3640 	/* Poisoning to make sure PMDs update it in case of error. */
3641 	memset(&error, 0x22, sizeof(error));
3642 	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
3643 	if (!flow) {
3644 		if (tunnel_ops->enabled)
3645 			port_flow_tunnel_offload_cmd_release(port_id,
3646 							     tunnel_ops, pft);
3647 		free(pf);
3648 		return port_flow_complain(&error);
3649 	}
3650 	pf->next = port->flow_list;
3651 	pf->id = id;
3652 	pf->user_id = user_id;
3653 	pf->flow = flow;
3654 	port->flow_list = pf;
3655 	if (tunnel_ops->enabled)
3656 		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
3657 	if (user_id)
3658 		printf("Flow rule #%"PRIu64" created, user-id 0x%"PRIx64"\n",
3659 		       pf->id, pf->user_id);
3660 	else
3661 		printf("Flow rule #%"PRIu64" created\n", pf->id);
3662 	return 0;
3663 }
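
/*
 * Example (testpmd CLI): the synchronous counterpart of the queue-based
 * path above; the rule is committed before the prompt returns:
 *
 *	flow create 0 ingress pattern eth / ipv4 src is 10.0.0.1 / end
 *		actions queue index 1 / end
 */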
3664 
3665 /** Destroy a number of flow rules. */
3666 int
3667 port_flow_destroy(portid_t port_id, uint32_t n, const uint64_t *rule,
3668 		  bool is_user_id)
3669 {
3670 	struct rte_port *port;
3671 	struct port_flow **tmp;
3672 	int ret = 0;
3673 
3674 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3675 	    port_id == (portid_t)RTE_PORT_ALL)
3676 		return -EINVAL;
3677 	port = &ports[port_id];
3678 	tmp = &port->flow_list;
3679 	while (*tmp) {
3680 		uint32_t i;
3681 
3682 		for (i = 0; i != n; ++i) {
3683 			struct rte_flow_error error;
3684 			struct port_flow *pf = *tmp;
3685 
3686 			if (rule[i] != (is_user_id ? pf->user_id : pf->id))
3687 				continue;
3688 			/*
3689 			 * Poisoning to make sure PMDs update it in case
3690 			 * of error.
3691 			 */
3692 			memset(&error, 0x33, sizeof(error));
3693 			if (rte_flow_destroy(port_id, pf->flow, &error)) {
3694 				ret = port_flow_complain(&error);
3695 				continue;
3696 			}
3697 			if (is_user_id)
3698 				printf("Flow rule #%"PRIu64" destroyed, "
3699 				       "user-id 0x%"PRIx64"\n",
3700 				       pf->id, pf->user_id);
3701 			else
3702 				printf("Flow rule #%"PRIu64" destroyed\n",
3703 				       pf->id);
3704 			*tmp = pf->next;
3705 			free(pf);
3706 			break;
3707 		}
3708 		if (i == n)
3709 			tmp = &(*tmp)->next;
3710 	}
3711 	return ret;
3712 }
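
/*
 * The double-pointer walk above removes matching nodes without keeping
 * a separate "prev" pointer: *tmp always names the link that points at
 * the current node, so unlinking is just *tmp = pf->next. A minimal
 * sketch of the pattern, with should_remove() as a hypothetical
 * predicate:
 *
 *	struct port_flow **tmp = &port->flow_list;
 *
 *	while (*tmp) {
 *		if (should_remove(*tmp)) {
 *			struct port_flow *pf = *tmp;
 *
 *			*tmp = pf->next;
 *			free(pf);
 *		} else {
 *			tmp = &(*tmp)->next;
 *		}
 *	}
 */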
3713 
3714 /** Remove all flow rules. */
3715 int
3716 port_flow_flush(portid_t port_id)
3717 {
3718 	struct rte_flow_error error;
3719 	struct rte_port *port;
3720 	int ret = 0;
3721 
3722 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3723 		port_id == (portid_t)RTE_PORT_ALL)
3724 		return -EINVAL;
3725 
3726 	port = &ports[port_id];
3727 
3728 	if (port->flow_list == NULL)
3729 		return ret;
3730 
3731 	/* Poisoning to make sure PMDs update it in case of error. */
3732 	memset(&error, 0x44, sizeof(error));
3733 	if (rte_flow_flush(port_id, &error)) {
3734 		port_flow_complain(&error);
3735 	}
3736 
3737 	while (port->flow_list) {
3738 		struct port_flow *pf = port->flow_list->next;
3739 
3740 		free(port->flow_list);
3741 		port->flow_list = pf;
3742 	}
3743 	return ret;
3744 }
3745 
3746 /** Dump flow rules. */
3747 int
3748 port_flow_dump(portid_t port_id, bool dump_all, uint64_t rule_id,
3749 		const char *file_name, bool is_user_id)
3750 {
3751 	int ret = 0;
3752 	FILE *file = stdout;
3753 	struct rte_flow_error error;
3754 	struct rte_port *port;
3755 	struct port_flow *pflow;
3756 	struct rte_flow *tmpFlow = NULL;
3757 	bool found = false;
3758 
3759 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3760 		port_id == (portid_t)RTE_PORT_ALL)
3761 		return -EINVAL;
3762 
3763 	if (!dump_all) {
3764 		port = &ports[port_id];
3765 		pflow = port->flow_list;
3766 		while (pflow) {
3767 			if (rule_id !=
3768 			    (is_user_id ? pflow->user_id : pflow->id)) {
3769 				pflow = pflow->next;
3770 			} else {
3771 				tmpFlow = pflow->flow;
3772 				if (tmpFlow)
3773 					found = true;
3774 				break;
3775 			}
3776 		}
3777 		if (found == false) {
3778 			fprintf(stderr, "Failed to dump to flow %"PRIu64"\n",
3779 				rule_id);
3780 			return -EINVAL;
3781 		}
3782 	}
3783 
3784 	if (file_name && strlen(file_name)) {
3785 		file = fopen(file_name, "w");
3786 		if (!file) {
3787 			fprintf(stderr, "Failed to create file %s: %s\n",
3788 				file_name, strerror(errno));
3789 			return -errno;
3790 		}
3791 	}
3792 
3793 	if (!dump_all)
3794 		ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error);
3795 	else
3796 		ret = rte_flow_dev_dump(port_id, NULL, file, &error);
3797 	if (ret) {
3798 		port_flow_complain(&error);
3799 		fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret));
3800 	} else
3801 		printf("Flow dump finished\n");
3802 	if (file_name && strlen(file_name))
3803 		fclose(file);
3804 	return ret;
3805 }
3806 
3807 /** Query a flow rule. */
3808 int
3809 port_flow_query(portid_t port_id, uint64_t rule,
3810 		const struct rte_flow_action *action, bool is_user_id)
3811 {
3812 	struct rte_flow_error error;
3813 	struct rte_port *port;
3814 	struct port_flow *pf;
3815 	const char *name;
3816 	union {
3817 		struct rte_flow_query_count count;
3818 		struct rte_flow_action_rss rss_conf;
3819 		struct rte_flow_query_age age;
3820 	} query;
3821 	int ret;
3822 
3823 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3824 	    port_id == (portid_t)RTE_PORT_ALL)
3825 		return -EINVAL;
3826 	port = &ports[port_id];
3827 	for (pf = port->flow_list; pf; pf = pf->next)
3828 		if ((is_user_id ? pf->user_id : pf->id) == rule)
3829 			break;
3830 	if (!pf) {
3831 		fprintf(stderr, "Flow rule #%"PRIu64" not found\n", rule);
3832 		return -ENOENT;
3833 	}
3834 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
3835 			    &name, sizeof(name),
3836 			    (void *)(uintptr_t)action->type, &error);
3837 	if (ret < 0)
3838 		return port_flow_complain(&error);
3839 	switch (action->type) {
3840 	case RTE_FLOW_ACTION_TYPE_COUNT:
3841 	case RTE_FLOW_ACTION_TYPE_RSS:
3842 	case RTE_FLOW_ACTION_TYPE_AGE:
3843 		break;
3844 	default:
3845 		fprintf(stderr, "Cannot query action type %d (%s)\n",
3846 			action->type, name);
3847 		return -ENOTSUP;
3848 	}
3849 	/* Poisoning to make sure PMDs update it in case of error. */
3850 	memset(&error, 0x55, sizeof(error));
3851 	memset(&query, 0, sizeof(query));
3852 	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
3853 		return port_flow_complain(&error);
3854 	switch (action->type) {
3855 	case RTE_FLOW_ACTION_TYPE_COUNT:
3856 		printf("%s:\n"
3857 		       " hits_set: %u\n"
3858 		       " bytes_set: %u\n"
3859 		       " hits: %" PRIu64 "\n"
3860 		       " bytes: %" PRIu64 "\n",
3861 		       name,
3862 		       query.count.hits_set,
3863 		       query.count.bytes_set,
3864 		       query.count.hits,
3865 		       query.count.bytes);
3866 		break;
3867 	case RTE_FLOW_ACTION_TYPE_RSS:
3868 		rss_config_display(&query.rss_conf);
3869 		break;
3870 	case RTE_FLOW_ACTION_TYPE_AGE:
3871 		printf("%s:\n"
3872 		       " aged: %u\n"
3873 		       " sec_since_last_hit_valid: %u\n"
3874 		       " sec_since_last_hit: %" PRIu32 "\n",
3875 		       name,
3876 		       query.age.aged,
3877 		       query.age.sec_since_last_hit_valid,
3878 		       query.age.sec_since_last_hit);
3879 		break;
3880 	default:
3881 		fprintf(stderr,
3882 			"Cannot display result for action type %d (%s)\n",
3883 			action->type, name);
3884 		break;
3885 	}
3886 	return 0;
3887 }
3888 
3889 /** List and, if requested, destroy all aged flows. */
3890 void
3891 port_flow_aged(portid_t port_id, uint8_t destroy)
3892 {
3893 	void **contexts;
3894 	int nb_context, total = 0, idx;
3895 	struct rte_flow_error error;
3896 	enum age_action_context_type *type;
3897 	union {
3898 		struct port_flow *pf;
3899 		struct port_indirect_action *pia;
3900 	} ctx;
3901 
3902 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3903 	    port_id == (portid_t)RTE_PORT_ALL)
3904 		return;
3905 	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
3906 	printf("Port %u total aged flows: %d\n", port_id, total);
3907 	if (total < 0) {
3908 		port_flow_complain(&error);
3909 		return;
3910 	}
3911 	if (total == 0)
3912 		return;
3913 	contexts = malloc(sizeof(void *) * total);
3914 	if (contexts == NULL) {
3915 		fprintf(stderr, "Cannot allocate contexts for aged flow\n");
3916 		return;
3917 	}
3918 	printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
3919 	nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
3920 	if (nb_context != total) {
3921 		fprintf(stderr,
3922 			"Port:%d get aged flows count(%d) != total(%d)\n",
3923 			port_id, nb_context, total);
3924 		free(contexts);
3925 		return;
3926 	}
3927 	total = 0;
3928 	for (idx = 0; idx < nb_context; idx++) {
3929 		if (!contexts[idx]) {
3930 			fprintf(stderr, "Error: get Null context in port %u\n",
3931 				port_id);
3932 			continue;
3933 		}
3934 		type = (enum age_action_context_type *)contexts[idx];
3935 		switch (*type) {
3936 		case ACTION_AGE_CONTEXT_TYPE_FLOW:
3937 			ctx.pf = container_of(type, struct port_flow, age_type);
3938 			printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32
3939 								 "\t%c%c%c\t\n",
3940 			       "Flow",
3941 			       ctx.pf->id,
3942 			       ctx.pf->rule.attr->group,
3943 			       ctx.pf->rule.attr->priority,
3944 			       ctx.pf->rule.attr->ingress ? 'i' : '-',
3945 			       ctx.pf->rule.attr->egress ? 'e' : '-',
3946 			       ctx.pf->rule.attr->transfer ? 't' : '-');
3947 			if (destroy && !port_flow_destroy(port_id, 1,
3948 							  &ctx.pf->id, false))
3949 				total++;
3950 			break;
3951 		case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
3952 			ctx.pia = container_of(type,
3953 					struct port_indirect_action, age_type);
3954 			printf("%-20s\t%" PRIu32 "\n", "Indirect action",
3955 			       ctx.pia->id);
3956 			break;
3957 		default:
3958 			fprintf(stderr, "Error: invalid context type %u\n",
3959 				port_id);
3960 			break;
3961 		}
3962 	}
3963 	printf("\n%d flows destroyed\n", total);
3964 	free(contexts);
3965 }
3966 
3967 /** List flow rules. */
3968 void
3969 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
3970 {
3971 	struct rte_port *port;
3972 	struct port_flow *pf;
3973 	struct port_flow *list = NULL;
3974 	uint32_t i;
3975 
3976 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3977 	    port_id == (portid_t)RTE_PORT_ALL)
3978 		return;
3979 	port = &ports[port_id];
3980 	if (!port->flow_list)
3981 		return;
3982 	/* Sort flows by group, priority and ID. */
3983 	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
3984 		struct port_flow **tmp;
3985 		const struct rte_flow_attr *curr = pf->rule.attr;
3986 
3987 		if (n) {
3988 			/* Filter out unwanted groups. */
3989 			for (i = 0; i != n; ++i)
3990 				if (curr->group == group[i])
3991 					break;
3992 			if (i == n)
3993 				continue;
3994 		}
3995 		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
3996 			const struct rte_flow_attr *comp = (*tmp)->rule.attr;
3997 
3998 			if (curr->group > comp->group ||
3999 			    (curr->group == comp->group &&
4000 			     curr->priority > comp->priority) ||
4001 			    (curr->group == comp->group &&
4002 			     curr->priority == comp->priority &&
4003 			     pf->id > (*tmp)->id))
4004 				continue;
4005 			break;
4006 		}
4007 		pf->tmp = *tmp;
4008 		*tmp = pf;
4009 	}
4010 	printf("ID\tGroup\tPrio\tAttr\tRule\n");
4011 	for (pf = list; pf != NULL; pf = pf->tmp) {
4012 		const struct rte_flow_item *item = pf->rule.pattern;
4013 		const struct rte_flow_action *action = pf->rule.actions;
4014 		const char *name;
4015 
4016 		printf("%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
4017 		       pf->id,
4018 		       pf->rule.attr->group,
4019 		       pf->rule.attr->priority,
4020 		       pf->rule.attr->ingress ? 'i' : '-',
4021 		       pf->rule.attr->egress ? 'e' : '-',
4022 		       pf->rule.attr->transfer ? 't' : '-');
4023 		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
4024 			if ((uint32_t)item->type > INT_MAX)
4025 				name = "PMD_INTERNAL";
4026 			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
4027 					  &name, sizeof(name),
4028 					  (void *)(uintptr_t)item->type,
4029 					  NULL) <= 0)
4030 				name = "[UNKNOWN]";
4031 			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
4032 				printf("%s ", name);
4033 			++item;
4034 		}
4035 		printf("=>");
4036 		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
4037 			if ((uint32_t)action->type > INT_MAX)
4038 				name = "PMD_INTERNAL";
4039 			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
4040 					  &name, sizeof(name),
4041 					  (void *)(uintptr_t)action->type,
4042 					  NULL) <= 0)
4043 				name = "[UNKNOWN]";
4044 			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
4045 				printf(" %s", name);
4046 			++action;
4047 		}
4048 		printf("\n");
4049 	}
4050 }
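
/*
 * Note on the listing order above: flows are insertion-sorted into a
 * temporary list keyed on (group, priority, id) in ascending order,
 * using pf->tmp as the link of the sorted view so the original
 * flow_list chain stays intact. For example, a flow with
 * (group 0, prio 0, id 2) is printed before one with
 * (group 0, prio 1, id 0).
 */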
4051 
4052 /** Restrict ingress traffic to the defined flow rules. */
4053 int
4054 port_flow_isolate(portid_t port_id, int set)
4055 {
4056 	struct rte_flow_error error;
4057 
4058 	/* Poisoning to make sure PMDs update it in case of error. */
4059 	memset(&error, 0x66, sizeof(error));
4060 	if (rte_flow_isolate(port_id, set, &error))
4061 		return port_flow_complain(&error);
4062 	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
4063 	       port_id,
4064 	       set ? "now restricted" : "not restricted anymore");
4065 	return 0;
4066 }
4067 
4068 /*
4069  * RX/TX ring descriptors display functions.
4070  */
4071 int
4072 rx_queue_id_is_invalid(queueid_t rxq_id)
4073 {
4074 	if (rxq_id < nb_rxq)
4075 		return 0;
4076 	fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n",
4077 		rxq_id, nb_rxq);
4078 	return 1;
4079 }
4080 
4081 int
4082 tx_queue_id_is_invalid(queueid_t txq_id)
4083 {
4084 	if (txq_id < nb_txq)
4085 		return 0;
4086 	fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n",
4087 		txq_id, nb_txq);
4088 	return 1;
4089 }
4090 
4091 static int
4092 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
4093 {
4094 	struct rte_port *port = &ports[port_id];
4095 	struct rte_eth_rxq_info rx_qinfo;
4096 	int ret;
4097 
4098 	ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
4099 	if (ret == 0) {
4100 		*ring_size = rx_qinfo.nb_desc;
4101 		return ret;
4102 	}
4103 
4104 	if (ret != -ENOTSUP)
4105 		return ret;
4106 	/*
4107 	 * If rte_eth_rx_queue_info_get() is not supported by this PMD,
4108 	 * the ring size stored in testpmd is used for validity verification.
4109 	 * When the rxq is configured by rte_eth_rx_queue_setup() with
4110 	 * nb_rx_desc being 0, a default value provided by the PMD is used
4111 	 * to set up this rxq. If that default value is also 0,
4112 	 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE is used to set up this rxq.
4113 	 */
4114 	if (port->nb_rx_desc[rxq_id])
4115 		*ring_size = port->nb_rx_desc[rxq_id];
4116 	else if (port->dev_info.default_rxportconf.ring_size)
4117 		*ring_size = port->dev_info.default_rxportconf.ring_size;
4118 	else
4119 		*ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
4120 	return 0;
4121 }
4122 
4123 static int
4124 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
4125 {
4126 	struct rte_port *port = &ports[port_id];
4127 	struct rte_eth_txq_info tx_qinfo;
4128 	int ret;
4129 
4130 	ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
4131 	if (ret == 0) {
4132 		*ring_size = tx_qinfo.nb_desc;
4133 		return ret;
4134 	}
4135 
4136 	if (ret != -ENOTSUP)
4137 		return ret;
4138 	/*
4139 	 * If rte_eth_tx_queue_info_get() is not supported by this PMD,
4140 	 * the ring size stored in testpmd is used for validity verification.
4141 	 * When the txq is configured by rte_eth_tx_queue_setup() with
4142 	 * nb_tx_desc being 0, a default value provided by the PMD is used
4143 	 * to set up this txq. If that default value is also 0,
4144 	 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE is used to set up this txq.
4145 	 */
4146 	if (port->nb_tx_desc[txq_id])
4147 		*ring_size = port->nb_tx_desc[txq_id];
4148 	else if (port->dev_info.default_txportconf.ring_size)
4149 		*ring_size = port->dev_info.default_txportconf.ring_size;
4150 	else
4151 		*ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
4152 	return 0;
4153 }
4154 
4155 static int
4156 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
4157 {
4158 	uint16_t ring_size;
4159 	int ret;
4160 
4161 	ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
4162 	if (ret)
4163 		return 1;
4164 
4165 	if (rxdesc_id < ring_size)
4166 		return 0;
4167 
4168 	fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n",
4169 		rxdesc_id, ring_size);
4170 	return 1;
4171 }
4172 
4173 static int
4174 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
4175 {
4176 	uint16_t ring_size;
4177 	int ret;
4178 
4179 	ret = get_tx_ring_size(port_id, txq_id, &ring_size);
4180 	if (ret)
4181 		return 1;
4182 
4183 	if (txdesc_id < ring_size)
4184 		return 0;
4185 
4186 	fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n",
4187 		txdesc_id, ring_size);
4188 	return 1;
4189 }
4190 
4191 static const struct rte_memzone *
4192 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
4193 {
4194 	char mz_name[RTE_MEMZONE_NAMESIZE];
4195 	const struct rte_memzone *mz;
4196 
4197 	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
4198 			port_id, q_id, ring_name);
4199 	mz = rte_memzone_lookup(mz_name);
4200 	if (mz == NULL)
4201 		fprintf(stderr,
4202 			"%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n",
4203 			ring_name, port_id, q_id, mz_name);
4204 	return mz;
4205 }
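
/*
 * Example of the memzone naming scheme used above: for port 0, queue 1
 * and ring_name "rx_ring", the zone looked up is "eth_p0_q1_rx_ring".
 */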
4206 
4207 union igb_ring_dword {
4208 	uint64_t dword;
4209 	struct {
4210 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
4211 		uint32_t lo;
4212 		uint32_t hi;
4213 #else
4214 		uint32_t hi;
4215 		uint32_t lo;
4216 #endif
4217 	} words;
4218 };
4219 
4220 struct igb_ring_desc_32_bytes {
4221 	union igb_ring_dword lo_dword;
4222 	union igb_ring_dword hi_dword;
4223 	union igb_ring_dword resv1;
4224 	union igb_ring_dword resv2;
4225 };
4226 
4227 struct igb_ring_desc_16_bytes {
4228 	union igb_ring_dword lo_dword;
4229 	union igb_ring_dword hi_dword;
4230 };
4231 
4232 static void
4233 ring_rxd_display_dword(union igb_ring_dword dword)
4234 {
4235 	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
4236 					(unsigned)dword.words.hi);
4237 }
4238 
4239 static void
4240 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
4241 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
4242 			   portid_t port_id,
4243 #else
4244 			   __rte_unused portid_t port_id,
4245 #endif
4246 			   uint16_t desc_id)
4247 {
4248 	struct igb_ring_desc_16_bytes *ring =
4249 		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
4250 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
4251 	int ret;
4252 	struct rte_eth_dev_info dev_info;
4253 
4254 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
4255 	if (ret != 0)
4256 		return;
4257 
4258 	if (strstr(dev_info.driver_name, "i40e") != NULL) {
4259 		/* 32 bytes RX descriptor, i40e only */
4260 		struct igb_ring_desc_32_bytes *ring =
4261 			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
4262 		ring[desc_id].lo_dword.dword =
4263 			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
4264 		ring_rxd_display_dword(ring[desc_id].lo_dword);
4265 		ring[desc_id].hi_dword.dword =
4266 			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
4267 		ring_rxd_display_dword(ring[desc_id].hi_dword);
4268 		ring[desc_id].resv1.dword =
4269 			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
4270 		ring_rxd_display_dword(ring[desc_id].resv1);
4271 		ring[desc_id].resv2.dword =
4272 			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
4273 		ring_rxd_display_dword(ring[desc_id].resv2);
4274 
4275 		return;
4276 	}
4277 #endif
4278 	/* 16 bytes RX descriptor */
4279 	ring[desc_id].lo_dword.dword =
4280 		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
4281 	ring_rxd_display_dword(ring[desc_id].lo_dword);
4282 	ring[desc_id].hi_dword.dword =
4283 		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
4284 	ring_rxd_display_dword(ring[desc_id].hi_dword);
4285 }
4286 
4287 static void
4288 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
4289 {
4290 	struct igb_ring_desc_16_bytes *ring;
4291 	struct igb_ring_desc_16_bytes txd;
4292 
4293 	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
4294 	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
4295 	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
4296 	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
4297 			(unsigned)txd.lo_dword.words.lo,
4298 			(unsigned)txd.lo_dword.words.hi,
4299 			(unsigned)txd.hi_dword.words.lo,
4300 			(unsigned)txd.hi_dword.words.hi);
4301 }
4302 
4303 void
4304 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
4305 {
4306 	const struct rte_memzone *rx_mz;
4307 
4308 	if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
4309 		return;
4310 	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
4311 	if (rx_mz == NULL)
4312 		return;
4313 	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
4314 }
4315 
4316 void
4317 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
4318 {
4319 	const struct rte_memzone *tx_mz;
4320 
4321 	if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
4322 		return;
4323 	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
4324 	if (tx_mz == NULL)
4325 		return;
4326 	ring_tx_descriptor_display(tx_mz, txd_id);
4327 }
4328 
4329 void
4330 fwd_lcores_config_display(void)
4331 {
4332 	lcoreid_t lc_id;
4333 
4334 	printf("List of forwarding lcores:");
4335 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
4336 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
4337 	printf("\n");
4338 }
4339 void
4340 rxtx_config_display(void)
4341 {
4342 	portid_t pid;
4343 	queueid_t qid;
4344 
4345 	printf("  %s%s%s packet forwarding%s packets/burst=%d\n",
4346 	       cur_fwd_eng->fwd_mode_name,
4347 	       cur_fwd_eng->status ? "-" : "",
4348 	       cur_fwd_eng->status ? cur_fwd_eng->status : "",
4349 	       retry_enabled == 0 ? "" : " with retry",
4350 	       nb_pkt_per_burst);
4351 
4352 	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
4353 		printf("  packet len=%u - nb packet segments=%d\n",
4354 				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
4355 
4356 	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
4357 	       nb_fwd_lcores, nb_fwd_ports);
4358 
4359 	RTE_ETH_FOREACH_DEV(pid) {
4360 		struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf;
4361 		struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf;
4362 		uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
4363 		uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
4364 		struct rte_eth_rxq_info rx_qinfo;
4365 		struct rte_eth_txq_info tx_qinfo;
4366 		uint16_t rx_free_thresh_tmp;
4367 		uint16_t tx_free_thresh_tmp;
4368 		uint16_t tx_rs_thresh_tmp;
4369 		uint16_t nb_rx_desc_tmp;
4370 		uint16_t nb_tx_desc_tmp;
4371 		uint64_t offloads_tmp;
4372 		uint8_t pthresh_tmp;
4373 		uint8_t hthresh_tmp;
4374 		uint8_t wthresh_tmp;
4375 		int32_t rc;
4376 
4377 		/* per port config */
4378 		printf("  port %d: RX queue number: %d Tx queue number: %d\n",
4379 				(unsigned int)pid, nb_rxq, nb_txq);
4380 
4381 		printf("    Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
4382 				ports[pid].dev_conf.rxmode.offloads,
4383 				ports[pid].dev_conf.txmode.offloads);
4384 
4385 		/* Rx queue config, shown only for the first queue to be less verbose */
4386 		for (qid = 0; qid < 1; qid++) {
4387 			rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
4388 			if (rc) {
4389 				nb_rx_desc_tmp = nb_rx_desc[qid];
4390 				rx_free_thresh_tmp =
4391 					rx_conf[qid].rx_free_thresh;
4392 				pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
4393 				hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
4394 				wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
4395 				offloads_tmp = rx_conf[qid].offloads;
4396 			} else {
4397 				nb_rx_desc_tmp = rx_qinfo.nb_desc;
4398 				rx_free_thresh_tmp =
4399 						rx_qinfo.conf.rx_free_thresh;
4400 				pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
4401 				hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
4402 				wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
4403 				offloads_tmp = rx_qinfo.conf.offloads;
4404 			}
4405 
4406 			printf("    RX queue: %d\n", qid);
4407 			printf("      RX desc=%d - RX free threshold=%d\n",
4408 				nb_rx_desc_tmp, rx_free_thresh_tmp);
4409 			printf("      RX threshold registers: pthresh=%d hthresh=%d "
4410 				" wthresh=%d\n",
4411 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
4412 			printf("      RX Offloads=0x%"PRIx64, offloads_tmp);
4413 			if (rx_conf->share_group > 0)
4414 				printf(" share_group=%u share_qid=%u",
4415 				       rx_conf->share_group,
4416 				       rx_conf->share_qid);
4417 			printf("\n");
4418 		}
4419 
4420 		/* Tx queue config, shown only for the first queue to be less verbose */
4421 		for (qid = 0; qid < 1; qid++) {
4422 			rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
4423 			if (rc) {
4424 				nb_tx_desc_tmp = nb_tx_desc[qid];
4425 				tx_free_thresh_tmp =
4426 					tx_conf[qid].tx_free_thresh;
4427 				pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
4428 				hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
4429 				wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
4430 				offloads_tmp = tx_conf[qid].offloads;
4431 				tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
4432 			} else {
4433 				nb_tx_desc_tmp = tx_qinfo.nb_desc;
4434 				tx_free_thresh_tmp =
4435 						tx_qinfo.conf.tx_free_thresh;
4436 				pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
4437 				hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
4438 				wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
4439 				offloads_tmp = tx_qinfo.conf.offloads;
4440 				tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
4441 			}
4442 
4443 			printf("    TX queue: %d\n", qid);
4444 			printf("      TX desc=%d - TX free threshold=%d\n",
4445 				nb_tx_desc_tmp, tx_free_thresh_tmp);
4446 			printf("      TX threshold registers: pthresh=%d hthresh=%d "
4447 				" wthresh=%d\n",
4448 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
4449 			printf("      TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
4450 				offloads_tmp, tx_rs_thresh_tmp);
4451 		}
4452 	}
4453 }
4454 
4455 void
4456 port_rss_reta_info(portid_t port_id,
4457 		   struct rte_eth_rss_reta_entry64 *reta_conf,
4458 		   uint16_t nb_entries)
4459 {
4460 	uint16_t i, idx, shift;
4461 	int ret;
4462 
4463 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4464 		return;
4465 
4466 	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
4467 	if (ret != 0) {
4468 		fprintf(stderr,
4469 			"Failed to get RSS RETA info, return code = %d\n",
4470 			ret);
4471 		return;
4472 	}
4473 
4474 	for (i = 0; i < nb_entries; i++) {
4475 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4476 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4477 		if (!(reta_conf[idx].mask & (1ULL << shift)))
4478 			continue;
4479 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
4480 					i, reta_conf[idx].reta[shift]);
4481 	}
4482 }
4483 
4484 /*
4485  * Displays the RSS hash functions of a port, and, optionally, the RSS hash
4486  * key of the port.
4487  */
4488 void
4489 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
4490 {
4491 	struct rte_eth_rss_conf rss_conf = {0};
4492 	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
4493 	uint64_t rss_hf;
4494 	uint8_t i;
4495 	int diag;
4496 	struct rte_eth_dev_info dev_info;
4497 	uint8_t hash_key_size;
4498 	int ret;
4499 
4500 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4501 		return;
4502 
4503 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
4504 	if (ret != 0)
4505 		return;
4506 
4507 	if (dev_info.hash_key_size > 0 &&
4508 			dev_info.hash_key_size <= sizeof(rss_key))
4509 		hash_key_size = dev_info.hash_key_size;
4510 	else {
4511 		fprintf(stderr,
4512 			"dev_info did not provide a valid hash key size\n");
4513 		return;
4514 	}
4515 
4516 	/* Get RSS hash key if asked to display it */
4517 	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
4518 	rss_conf.rss_key_len = hash_key_size;
4519 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
4520 	if (diag != 0) {
4521 		switch (diag) {
4522 		case -ENODEV:
4523 			fprintf(stderr, "port index %d invalid\n", port_id);
4524 			break;
4525 		case -ENOTSUP:
4526 			fprintf(stderr, "operation not supported by device\n");
4527 			break;
4528 		default:
4529 			fprintf(stderr, "operation failed - diag=%d\n", diag);
4530 			break;
4531 		}
4532 		return;
4533 	}
4534 	rss_hf = rss_conf.rss_hf;
4535 	if (rss_hf == 0) {
4536 		printf("RSS disabled\n");
4537 		return;
4538 	}
4539 	printf("RSS functions:\n");
4540 	rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
4541 	if (!show_rss_key)
4542 		return;
4543 	printf("RSS key:\n");
4544 	for (i = 0; i < hash_key_size; i++)
4545 		printf("%02X", rss_key[i]);
4546 	printf("\n");
4547 }
4548 
4549 void
4550 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
4551 			 uint8_t hash_key_len)
4552 {
4553 	struct rte_eth_rss_conf rss_conf;
4554 	int diag;
4555 
4556 	rss_conf.rss_key = NULL;
4557 	rss_conf.rss_key_len = 0;
4558 	rss_conf.rss_hf = str_to_rsstypes(rss_type);
4559 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
4560 	if (diag == 0) {
4561 		rss_conf.rss_key = hash_key;
4562 		rss_conf.rss_key_len = hash_key_len;
4563 		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
4564 	}
4565 	if (diag == 0)
4566 		return;
4567 
4568 	switch (diag) {
4569 	case -ENODEV:
4570 		fprintf(stderr, "port index %d invalid\n", port_id);
4571 		break;
4572 	case -ENOTSUP:
4573 		fprintf(stderr, "operation not supported by device\n");
4574 		break;
4575 	default:
4576 		fprintf(stderr, "operation failed - diag=%d\n", diag);
4577 		break;
4578 	}
4579 }
4580 
4581 /*
4582  * Check whether a shared Rx queue is scheduled on other lcores.
4583  */
4584 static bool
4585 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
4586 			   portid_t src_port, queueid_t src_rxq,
4587 			   uint32_t share_group, queueid_t share_rxq)
4588 {
4589 	streamid_t sm_id;
4590 	streamid_t nb_fs_per_lcore;
4591 	lcoreid_t  nb_fc;
4592 	lcoreid_t  lc_id;
4593 	struct fwd_stream *fs;
4594 	struct rte_port *port;
4595 	struct rte_eth_dev_info *dev_info;
4596 	struct rte_eth_rxconf *rxq_conf;
4597 
4598 	nb_fc = cur_fwd_config.nb_fwd_lcores;
4599 	/* Check remaining cores. */
4600 	for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
4601 		sm_id = fwd_lcores[lc_id]->stream_idx;
4602 		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
4603 		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
4604 		     sm_id++) {
4605 			fs = fwd_streams[sm_id];
4606 			port = &ports[fs->rx_port];
4607 			dev_info = &port->dev_info;
4608 			rxq_conf = &port->rxq[fs->rx_queue].conf;
4609 			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
4610 			    == 0 || rxq_conf->share_group == 0)
4611 				/* Not shared rxq. */
4612 				continue;
4613 			if (domain_id != port->dev_info.switch_info.domain_id)
4614 				continue;
4615 			if (rxq_conf->share_group != share_group)
4616 				continue;
4617 			if (rxq_conf->share_qid != share_rxq)
4618 				continue;
4619 			printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
4620 			       share_group, share_rxq);
4621 			printf("  lcore %hhu Port %hu queue %hu\n",
4622 			       src_lc, src_port, src_rxq);
4623 			printf("  lcore %hhu Port %hu queue %hu\n",
4624 			       lc_id, fs->rx_port, fs->rx_queue);
4625 			printf("Please use --nb-cores=%hu to limit number of forwarding cores\n",
4626 			       nb_rxq);
4627 			return true;
4628 		}
4629 	}
4630 	return false;
4631 }
4632 
4633 /*
4634  * Check shared rxq configuration.
4635  *
4636  * A shared group must not be scheduled on different cores.
4637  */
4638 bool
4639 pkt_fwd_shared_rxq_check(void)
4640 {
4641 	streamid_t sm_id;
4642 	streamid_t nb_fs_per_lcore;
4643 	lcoreid_t  nb_fc;
4644 	lcoreid_t  lc_id;
4645 	struct fwd_stream *fs;
4646 	uint16_t domain_id;
4647 	struct rte_port *port;
4648 	struct rte_eth_dev_info *dev_info;
4649 	struct rte_eth_rxconf *rxq_conf;
4650 
4651 	if (rxq_share == 0)
4652 		return true;
4653 	nb_fc = cur_fwd_config.nb_fwd_lcores;
4654 	/*
4655 	 * Check streams on each core, make sure the same switch domain +
4656 	 * group + queue doesn't get scheduled on other cores.
4657 	 */
4658 	for (lc_id = 0; lc_id < nb_fc; lc_id++) {
4659 		sm_id = fwd_lcores[lc_id]->stream_idx;
4660 		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
4661 		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
4662 		     sm_id++) {
4663 			fs = fwd_streams[sm_id];
4664 			/* Update lcore info of the stream being scheduled. */
4665 			fs->lcore = fwd_lcores[lc_id];
4666 			port = &ports[fs->rx_port];
4667 			dev_info = &port->dev_info;
4668 			rxq_conf = &port->rxq[fs->rx_queue].conf;
4669 			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
4670 			    == 0 || rxq_conf->share_group == 0)
4671 				/* Not shared rxq. */
4672 				continue;
4673 			/* Check shared rxq not scheduled on remaining cores. */
4674 			domain_id = port->dev_info.switch_info.domain_id;
4675 			if (fwd_stream_on_other_lcores(domain_id, lc_id,
4676 						       fs->rx_port,
4677 						       fs->rx_queue,
4678 						       rxq_conf->share_group,
4679 						       rxq_conf->share_qid))
4680 				return false;
4681 		}
4682 	}
4683 	return true;
4684 }
4685 
4686 /*
4687  * Setup forwarding configuration for each logical core.
4688  */
4689 static void
4690 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
4691 {
4692 	streamid_t nb_fs_per_lcore;
4693 	streamid_t nb_fs;
4694 	streamid_t sm_id;
4695 	lcoreid_t  nb_extra;
4696 	lcoreid_t  nb_fc;
4697 	lcoreid_t  nb_lc;
4698 	lcoreid_t  lc_id;
4699 
4700 	nb_fs = cfg->nb_fwd_streams;
4701 	nb_fc = cfg->nb_fwd_lcores;
4702 	if (nb_fs <= nb_fc) {
4703 		nb_fs_per_lcore = 1;
4704 		nb_extra = 0;
4705 	} else {
4706 		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
4707 		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
4708 	}
4709 
4710 	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
4711 	sm_id = 0;
4712 	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
4713 		fwd_lcores[lc_id]->stream_idx = sm_id;
4714 		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
4715 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
4716 	}
4717 
4718 	/*
4719 	 * Assign extra remaining streams, if any.
4720 	 */
4721 	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
4722 	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
4723 		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
4724 		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
4725 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
4726 	}
4727 }
4728 
4729 static portid_t
4730 fwd_topology_tx_port_get(portid_t rxp)
4731 {
4732 	static int warning_once = 1;
4733 
4734 	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
4735 
4736 	switch (port_topology) {
4737 	default:
4738 	case PORT_TOPOLOGY_PAIRED:
4739 		if ((rxp & 0x1) == 0) {
4740 			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
4741 				return rxp + 1;
4742 			if (warning_once) {
4743 				fprintf(stderr,
4744 					"\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n");
4745 				warning_once = 0;
4746 			}
4747 			return rxp;
4748 		}
4749 		return rxp - 1;
4750 	case PORT_TOPOLOGY_CHAINED:
4751 		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
4752 	case PORT_TOPOLOGY_LOOP:
4753 		return rxp;
4754 	}
4755 }
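
/*
 * Example mappings for 4 forwarding ports (Rx port -> Tx port):
 *	paired:  0->1, 1->0, 2->3, 3->2
 *	chained: 0->1, 1->2, 2->3, 3->0
 *	loop:    0->0, 1->1, 2->2, 3->3
 */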
4756 
4757 static void
4758 simple_fwd_config_setup(void)
4759 {
4760 	portid_t i;
4761 
4762 	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
4763 	cur_fwd_config.nb_fwd_streams =
4764 		(streamid_t) cur_fwd_config.nb_fwd_ports;
4765 
4766 	/* reinitialize forwarding streams */
4767 	init_fwd_streams();
4768 
4769 	/*
4770 	 * In the simple forwarding test, the number of forwarding cores
4771 	 * must be lower than or equal to the number of forwarding ports.
4772 	 */
4773 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4774 	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
4775 		cur_fwd_config.nb_fwd_lcores =
4776 			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
4777 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
4778 
4779 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
4780 		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
4781 		fwd_streams[i]->rx_queue  = 0;
4782 		fwd_streams[i]->tx_port   =
4783 				fwd_ports_ids[fwd_topology_tx_port_get(i)];
4784 		fwd_streams[i]->tx_queue  = 0;
4785 		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
4786 		fwd_streams[i]->retry_enabled = retry_enabled;
4787 	}
4788 }
4789 
4790 /**
4791  * For the RSS forwarding test, all streams are distributed over the lcores.
4792  * Each stream is composed of an RX queue to poll on an RX port for input
4793  * packets, associated with a TX queue of a TX port to send forwarded packets.
4794  */
4795 static void
4796 rss_fwd_config_setup(void)
4797 {
4798 	portid_t   rxp;
4799 	portid_t   txp;
4800 	queueid_t  rxq;
4801 	queueid_t  nb_q;
4802 	streamid_t  sm_id;
4803 	int start;
4804 	int end;
4805 
4806 	nb_q = nb_rxq;
4807 	if (nb_q > nb_txq)
4808 		nb_q = nb_txq;
4809 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4810 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
4811 	cur_fwd_config.nb_fwd_streams =
4812 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
4813 
4814 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
4815 		cur_fwd_config.nb_fwd_lcores =
4816 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
4817 
4818 	/* reinitialize forwarding streams */
4819 	init_fwd_streams();
4820 
4821 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
4822 
4823 	if (proc_id > 0 && nb_q % num_procs != 0)
4824 		printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n");
4825 
4826 	/**
4827 	 * In multi-process mode, all queues are allocated to different
4828 	 * processes based on num_procs and proc_id. For example, with
4829 	 * 4 queues (nb_q) and 2 processes (num_procs):
4830 	 * queues 0~1 go to the primary process,
4831 	 * queues 2~3 go to the secondary process.
4832 	 */
4833 	start = proc_id * nb_q / num_procs;
4834 	end = start + nb_q / num_procs;
4835 	rxp = 0;
4836 	rxq = start;
4837 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
4838 		struct fwd_stream *fs;
4839 
4840 		fs = fwd_streams[sm_id];
4841 		txp = fwd_topology_tx_port_get(rxp);
4842 		fs->rx_port = fwd_ports_ids[rxp];
4843 		fs->rx_queue = rxq;
4844 		fs->tx_port = fwd_ports_ids[txp];
4845 		fs->tx_queue = rxq;
4846 		fs->peer_addr = fs->tx_port;
4847 		fs->retry_enabled = retry_enabled;
4848 		rxp++;
4849 		if (rxp < nb_fwd_ports)
4850 			continue;
4851 		rxp = 0;
4852 		rxq++;
4853 		if (rxq >= end)
4854 			rxq = start;
4855 	}
4856 }
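
/*
 * Worked example of the stream layout above (hypothetical numbers):
 * with 2 forwarding ports and nb_q = 2 in a single process, streams are
 * assigned round-robin over ports first, then queues:
 *	stream 0: port 0 / queue 0,  stream 1: port 1 / queue 0,
 *	stream 2: port 0 / queue 1,  stream 3: port 1 / queue 1
 */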
4857 
4858 static uint16_t
4859 get_fwd_port_total_tc_num(void)
4860 {
4861 	struct rte_eth_dcb_info dcb_info;
4862 	uint16_t total_tc_num = 0;
4863 	unsigned int i;
4864 
4865 	for (i = 0; i < nb_fwd_ports; i++) {
4866 		(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info);
4867 		total_tc_num += dcb_info.nb_tcs;
4868 	}
4869 
4870 	return total_tc_num;
4871 }
4872 
4873 /**
4874  * For the DCB forwarding test, each core is assigned to one traffic class.
4875  *
4876  * Each core is assigned multiple streams, each stream being composed of
4877  * an RX queue to poll on an RX port for input packets, associated with
4878  * a TX queue of a TX port to send forwarded packets. All RX and
4879  * TX queues of a stream map to the same traffic class.
4880  * If VMDQ and DCB co-exist, the traffic classes on different pools
4881  * share the same core.
4882  */
4883 static void
4884 dcb_fwd_config_setup(void)
4885 {
4886 	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
4887 	portid_t txp, rxp = 0;
4888 	queueid_t txq, rxq = 0;
4889 	lcoreid_t  lc_id;
4890 	uint16_t nb_rx_queue, nb_tx_queue;
4891 	uint16_t i, j, k, sm_id = 0;
4892 	uint16_t total_tc_num;
4893 	struct rte_port *port;
4894 	uint8_t tc = 0;
4895 	portid_t pid;
4896 	int ret;
4897 
4898 	/*
4899 	 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED
4900 	 * or RTE_PORT_STOPPED.
4901 	 *
4902 	 * Re-configure ports to get updated mapping between tc and queue in
4903 	 * case the queue number of the port has changed. Skip started ports,
4904 	 * since modifying the queue number and calling dev_configure require
4905 	 * the ports to be stopped first.
4906 	 */
4907 	for (pid = 0; pid < nb_fwd_ports; pid++) {
4908 		if (port_is_started(pid) == 1)
4909 			continue;
4910 
4911 		port = &ports[pid];
4912 		ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq,
4913 					    &port->dev_conf);
4914 		if (ret < 0) {
4915 			fprintf(stderr,
4916 				"Failed to re-configure port %d, ret = %d.\n",
4917 				pid, ret);
4918 			return;
4919 		}
4920 	}
4921 
4922 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4923 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
4924 	cur_fwd_config.nb_fwd_streams =
4925 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
4926 	total_tc_num = get_fwd_port_total_tc_num();
4927 	if (cur_fwd_config.nb_fwd_lcores > total_tc_num)
4928 		cur_fwd_config.nb_fwd_lcores = total_tc_num;
4929 
4930 	/* reinitialize forwarding streams */
4931 	init_fwd_streams();
4932 	sm_id = 0;
4933 	txp = 1;
4934 	/* get the dcb info on the first RX and TX ports */
4935 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
4936 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
4937 
4938 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
4939 		fwd_lcores[lc_id]->stream_nb = 0;
4940 		fwd_lcores[lc_id]->stream_idx = sm_id;
4941 		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
4942 			/* if nb_queue is zero, this tc is not enabled
4943 			 * on this pool
4944 			 */
4945 			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
4946 				break;
4947 			k = fwd_lcores[lc_id]->stream_nb +
4948 				fwd_lcores[lc_id]->stream_idx;
4949 			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
4950 			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
4951 			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
4952 			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
4953 			for (j = 0; j < nb_rx_queue; j++) {
4954 				struct fwd_stream *fs;
4955 
4956 				fs = fwd_streams[k + j];
4957 				fs->rx_port = fwd_ports_ids[rxp];
4958 				fs->rx_queue = rxq + j;
4959 				fs->tx_port = fwd_ports_ids[txp];
4960 				fs->tx_queue = txq + j % nb_tx_queue;
4961 				fs->peer_addr = fs->tx_port;
4962 				fs->retry_enabled = retry_enabled;
4963 			}
4964 			fwd_lcores[lc_id]->stream_nb +=
4965 				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
4966 		}
4967 		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
4968 
4969 		tc++;
4970 		if (tc < rxp_dcb_info.nb_tcs)
4971 			continue;
4972 		/* Restart from TC 0 on next RX port */
4973 		tc = 0;
4974 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
4975 			rxp = (portid_t)
4976 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
4977 		else
4978 			rxp++;
4979 		if (rxp >= nb_fwd_ports)
4980 			return;
4981 		/* get the dcb information on next RX and TX ports */
4982 		if ((rxp & 0x1) == 0)
4983 			txp = (portid_t) (rxp + 1);
4984 		else
4985 			txp = (portid_t) (rxp - 1);
4986 		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
4987 		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
4988 	}
4989 }
4990 
4991 static void
4992 icmp_echo_config_setup(void)
4993 {
4994 	portid_t  rxp;
4995 	queueid_t rxq;
4996 	lcoreid_t lc_id;
4997 	uint16_t  sm_id;
4998 
4999 	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
5000 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
5001 			(nb_txq * nb_fwd_ports);
5002 	else
5003 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
5004 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
5005 	cur_fwd_config.nb_fwd_streams =
5006 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
5007 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
5008 		cur_fwd_config.nb_fwd_lcores =
5009 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
5010 	if (verbose_level > 0) {
5011 		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
5012 		       __FUNCTION__,
5013 		       cur_fwd_config.nb_fwd_lcores,
5014 		       cur_fwd_config.nb_fwd_ports,
5015 		       cur_fwd_config.nb_fwd_streams);
5016 	}
5017 
5018 	/* reinitialize forwarding streams */
5019 	init_fwd_streams();
5020 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
5021 	rxp = 0; rxq = 0;
5022 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
5023 		if (verbose_level > 0)
5024 			printf("  core=%d: \n", lc_id);
5025 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
5026 			struct fwd_stream *fs;
5027 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
5028 			fs->rx_port = fwd_ports_ids[rxp];
5029 			fs->rx_queue = rxq;
5030 			fs->tx_port = fs->rx_port;
5031 			fs->tx_queue = rxq;
5032 			fs->peer_addr = fs->tx_port;
5033 			fs->retry_enabled = retry_enabled;
5034 			if (verbose_level > 0)
5035 				printf("  stream=%d port=%d rxq=%d txq=%d\n",
5036 				       sm_id, fs->rx_port, fs->rx_queue,
5037 				       fs->tx_queue);
5038 			rxq = (queueid_t) (rxq + 1);
5039 			if (rxq == nb_rxq) {
5040 				rxq = 0;
5041 				rxp = (portid_t) (rxp + 1);
5042 			}
5043 		}
5044 	}
5045 }
5046 
5047 void
5048 fwd_config_setup(void)
5049 {
5050 	struct rte_port *port;
5051 	portid_t pt_id;
5052 	unsigned int i;
5053 
5054 	cur_fwd_config.fwd_eng = cur_fwd_eng;
5055 	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
5056 		icmp_echo_config_setup();
5057 		return;
5058 	}
5059 
5060 	if ((nb_rxq > 1) && (nb_txq > 1)) {
5061 		if (dcb_config) {
5062 			for (i = 0; i < nb_fwd_ports; i++) {
5063 				pt_id = fwd_ports_ids[i];
5064 				port = &ports[pt_id];
5065 				if (!port->dcb_flag) {
5066 					fprintf(stderr,
5067 						"In DCB mode, all forwarding ports must be configured in this mode.\n");
5068 					return;
5069 				}
5070 			}
5071 			if (nb_fwd_lcores == 1) {
5072 				fprintf(stderr,
5073 					"In DCB mode,the nb forwarding cores should be larger than 1.\n");
5074 				return;
5075 			}
5076 
5077 			dcb_fwd_config_setup();
5078 		} else
5079 			rss_fwd_config_setup();
5080 	}
5081 	else
5082 		simple_fwd_config_setup();
5083 }
5084 
5085 static const char *
5086 mp_alloc_to_str(uint8_t mode)
5087 {
5088 	switch (mode) {
5089 	case MP_ALLOC_NATIVE:
5090 		return "native";
5091 	case MP_ALLOC_ANON:
5092 		return "anon";
5093 	case MP_ALLOC_XMEM:
5094 		return "xmem";
5095 	case MP_ALLOC_XMEM_HUGE:
5096 		return "xmemhuge";
5097 	case MP_ALLOC_XBUF:
5098 		return "xbuf";
5099 	default:
5100 		return "invalid";
5101 	}
5102 }
5103 
5104 void
5105 pkt_fwd_config_display(struct fwd_config *cfg)
5106 {
5107 	struct fwd_stream *fs;
5108 	lcoreid_t  lc_id;
5109 	streamid_t sm_id;
5110 
5111 	printf("%s%s%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
5112 		"NUMA support %s, MP allocation mode: %s\n",
5113 		cfg->fwd_eng->fwd_mode_name,
5114 		cfg->fwd_eng->status ? "-" : "",
5115 		cfg->fwd_eng->status ? cfg->fwd_eng->status : "",
5116 		retry_enabled == 0 ? "" : " with retry",
5117 		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
5118 		numa_support == 1 ? "enabled" : "disabled",
5119 		mp_alloc_to_str(mp_alloc_type));
5120 
5121 	if (retry_enabled)
5122 		printf("TX retry num: %u, delay between TX retries: %uus\n",
5123 			burst_tx_retry_num, burst_tx_delay_time);
5124 	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
5125 		printf("Logical Core %u (socket %u) forwards packets on "
5126 		       "%d streams:",
5127 		       fwd_lcores_cpuids[lc_id],
5128 		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
5129 		       fwd_lcores[lc_id]->stream_nb);
5130 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
5131 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
5132 			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
5133 			       "P=%d/Q=%d (socket %u) ",
5134 			       fs->rx_port, fs->rx_queue,
5135 			       ports[fs->rx_port].socket_id,
5136 			       fs->tx_port, fs->tx_queue,
5137 			       ports[fs->tx_port].socket_id);
5138 			print_ethaddr("peer=",
5139 				      &peer_eth_addrs[fs->peer_addr]);
5140 		}
5141 		printf("\n");
5142 	}
5143 	printf("\n");
5144 }
5145 
5146 void
5147 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
5148 {
5149 	struct rte_ether_addr new_peer_addr;
5150 	if (!rte_eth_dev_is_valid_port(port_id)) {
5151 		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
5152 		return;
5153 	}
5154 	if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
5155 		fprintf(stderr, "Error: Invalid ethernet address: %s\n",
5156 			peer_addr);
5157 		return;
5158 	}
5159 	peer_eth_addrs[port_id] = new_peer_addr;
5160 }
5161 
5162 int
5163 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
5164 {
5165 	unsigned int i;
5166 	unsigned int lcore_cpuid;
5167 	int record_now;
5168 
5169 	record_now = 0;
5170  again:
5171 	for (i = 0; i < nb_lc; i++) {
5172 		lcore_cpuid = lcorelist[i];
5173 		if (! rte_lcore_is_enabled(lcore_cpuid)) {
5174 			fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid);
5175 			return -1;
5176 		}
5177 		if (lcore_cpuid == rte_get_main_lcore()) {
5178 			fprintf(stderr,
5179 				"lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n",
5180 				lcore_cpuid);
5181 			return -1;
5182 		}
5183 		if (record_now)
5184 			fwd_lcores_cpuids[i] = lcore_cpuid;
5185 	}
5186 	if (record_now == 0) {
5187 		record_now = 1;
5188 		goto again;
5189 	}
5190 	nb_cfg_lcores = (lcoreid_t) nb_lc;
5191 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
5192 		printf("previous number of forwarding cores %u - changed to "
5193 		       "number of configured cores %u\n",
5194 		       (unsigned int) nb_fwd_lcores, nb_lc);
5195 		nb_fwd_lcores = (lcoreid_t) nb_lc;
5196 	}
5197 
5198 	return 0;
5199 }
5200 
5201 int
5202 set_fwd_lcores_mask(uint64_t lcoremask)
5203 {
5204 	unsigned int lcorelist[64];
5205 	unsigned int nb_lc;
5206 	unsigned int i;
5207 
5208 	if (lcoremask == 0) {
5209 		fprintf(stderr, "Invalid NULL mask of cores\n");
5210 		return -1;
5211 	}
5212 	nb_lc = 0;
5213 	for (i = 0; i < 64; i++) {
5214 		if (! ((uint64_t)(1ULL << i) & lcoremask))
5215 			continue;
5216 		lcorelist[nb_lc++] = i;
5217 	}
5218 	return set_fwd_lcores_list(lcorelist, nb_lc);
5219 }
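
/*
 * Example: a mask of 0x0f selects lcores 0-3, so
 * set_fwd_lcores_mask(0x0f) is equivalent to
 * set_fwd_lcores_list((unsigned int []){0, 1, 2, 3}, 4).
 */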
5220 
5221 void
5222 set_fwd_lcores_number(uint16_t nb_lc)
5223 {
5224 	if (test_done == 0) {
5225 		fprintf(stderr, "Please stop forwarding first\n");
5226 		return;
5227 	}
5228 	if (nb_lc > nb_cfg_lcores) {
5229 		fprintf(stderr,
5230 			"nb fwd cores %u > %u (max. number of configured lcores) - ignored\n",
5231 			(unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
5232 		return;
5233 	}
5234 	nb_fwd_lcores = (lcoreid_t) nb_lc;
5235 	printf("Number of forwarding cores set to %u\n",
5236 	       (unsigned int) nb_fwd_lcores);
5237 }
5238 
5239 void
5240 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
5241 {
5242 	unsigned int i;
5243 	portid_t port_id;
5244 	int record_now;
5245 
5246 	record_now = 0;
5247  again:
5248 	for (i = 0; i < nb_pt; i++) {
5249 		port_id = (portid_t) portlist[i];
5250 		if (port_id_is_invalid(port_id, ENABLED_WARN))
5251 			return;
5252 		if (record_now)
5253 			fwd_ports_ids[i] = port_id;
5254 	}
5255 	if (record_now == 0) {
5256 		record_now = 1;
5257 		goto again;
5258 	}
5259 	nb_cfg_ports = (portid_t) nb_pt;
5260 	if (nb_fwd_ports != (portid_t) nb_pt) {
5261 		printf("previous number of forwarding ports %u - changed to "
5262 		       "number of configured ports %u\n",
5263 		       (unsigned int) nb_fwd_ports, nb_pt);
5264 		nb_fwd_ports = (portid_t) nb_pt;
5265 	}
5266 }
5267 
5268 /**
5269  * Parse the user input and obtain the list of forwarding ports
5270  *
5271  * @param[in] list
5272  *   String containing the user input. User can specify
5273  *   in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
5274  *   For example, if the user wants to use all 4 available
5275  *   ports in their system, then the input can be 0-3 or 0,1,2,3.
5276  *   If the user wants to use only the ports 1,2 then the input
5277  *   is 1,2.
5278  *   valid characters are '-' and ','
5279  * @param[out] values
5280  *   This array will be filled with a list of port IDs
5281  *   based on the user input
5282  *   Note that duplicate entries are discarded and only the first
5283  *   count entries in this array are port IDs and all the rest
5284  *   will contain default values
5285  * @param[in] maxsize
5286  *   This parameter denotes 2 things
5287  *   1) Number of elements in the values array
5288  *   2) Maximum value of each element in the values array
5289  * @return
5290  *   On success, returns total count of parsed port IDs
5291  *   On failure, returns 0
5292  */
5293 static unsigned int
5294 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
5295 {
5296 	unsigned int count = 0;
5297 	char *end = NULL;
5298 	int min, max;
5299 	int value, i;
5300 	unsigned int marked[maxsize];
5301 
5302 	if (list == NULL || values == NULL)
5303 		return 0;
5304 
5305 	for (i = 0; i < (int)maxsize; i++)
5306 		marked[i] = 0;
5307 
5308 	min = INT_MAX;
5309 
5310 	do {
5311 		/* Remove blank spaces, if any */
5312 		while (isblank(*list))
5313 			list++;
5314 		if (*list == '\0')
5315 			break;
5316 		errno = 0;
5317 		value = strtol(list, &end, 10);
5318 		if (errno || end == NULL)
5319 			return 0;
5320 		if (value < 0 || value >= (int)maxsize)
5321 			return 0;
5322 		while (isblank(*end))
5323 			end++;
5324 		if (*end == '-' && min == INT_MAX) {
5325 			min = value;
5326 		} else if ((*end == ',') || (*end == '\0')) {
5327 			max = value;
5328 			if (min == INT_MAX)
5329 				min = value;
5330 			for (i = min; i <= max; i++) {
5331 				if (count < maxsize) {
5332 					if (marked[i])
5333 						continue;
5334 					values[count] = i;
5335 					marked[i] = 1;
5336 					count++;
5337 				}
5338 			}
5339 			min = INT_MAX;
5340 		} else
5341 			return 0;
5342 		list = end + 1;
5343 	} while (*end != '\0');
5344 
5345 	return count;
5346 }
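
/*
 * Example: parse_port_list("0-2,5", values, 8) fills values[] with
 * {0, 1, 2, 5} and returns 4; duplicate entries such as "1,1-2" are
 * counted only once thanks to the marked[] array.
 */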
5347 
5348 void
5349 parse_fwd_portlist(const char *portlist)
5350 {
5351 	unsigned int portcount;
5352 	unsigned int portindex[RTE_MAX_ETHPORTS];
5353 	unsigned int i, valid_port_count = 0;
5354 
5355 	portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
5356 	if (!portcount)
5357 		rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
5358 
5359 	/*
5360 	 * Here we verify the validity of the ports
5361 	 * and thereby calculate the total number of
5362 	 * valid ports
5363 	 */
5364 	for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
5365 		if (rte_eth_dev_is_valid_port(portindex[i])) {
5366 			portindex[valid_port_count] = portindex[i];
5367 			valid_port_count++;
5368 		}
5369 	}
5370 
5371 	set_fwd_ports_list(portindex, valid_port_count);
5372 }
5373 
5374 void
5375 set_fwd_ports_mask(uint64_t portmask)
5376 {
5377 	unsigned int portlist[64];
5378 	unsigned int nb_pt;
5379 	unsigned int i;
5380 
5381 	if (portmask == 0) {
5382 		fprintf(stderr, "Invalid NULL mask of ports\n");
5383 		return;
5384 	}
5385 	nb_pt = 0;
5386 	RTE_ETH_FOREACH_DEV(i) {
5387 		if (! ((uint64_t)(1ULL << i) & portmask))
5388 			continue;
5389 		portlist[nb_pt++] = i;
5390 	}
5391 	set_fwd_ports_list(portlist, nb_pt);
5392 }
5393 
5394 void
5395 set_fwd_ports_number(uint16_t nb_pt)
5396 {
5397 	if (nb_pt > nb_cfg_ports) {
5398 		fprintf(stderr,
5399 			"nb fwd ports %u > %u (number of configured ports) - ignored\n",
5400 			(unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
5401 		return;
5402 	}
5403 	nb_fwd_ports = (portid_t) nb_pt;
5404 	printf("Number of forwarding ports set to %u\n",
5405 	       (unsigned int) nb_fwd_ports);
5406 }
5407 
5408 int
5409 port_is_forwarding(portid_t port_id)
5410 {
5411 	unsigned int i;
5412 
5413 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5414 		return -1;
5415 
5416 	for (i = 0; i < nb_fwd_ports; i++) {
5417 		if (fwd_ports_ids[i] == port_id)
5418 			return 1;
5419 	}
5420 
5421 	return 0;
5422 }
5423 
5424 void
5425 set_nb_pkt_per_burst(uint16_t nb)
5426 {
5427 	if (nb > MAX_PKT_BURST) {
5428 		fprintf(stderr,
5429 			"nb pkt per burst: %u > %u (maximum packet per burst)  ignored\n",
5430 			(unsigned int) nb, (unsigned int) MAX_PKT_BURST);
5431 		return;
5432 	}
5433 	nb_pkt_per_burst = nb;
5434 	printf("Number of packets per burst set to %u\n",
5435 	       (unsigned int) nb_pkt_per_burst);
5436 }
5437 
5438 static const char *
5439 tx_split_get_name(enum tx_pkt_split split)
5440 {
5441 	uint32_t i;
5442 
5443 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
5444 		if (tx_split_name[i].split == split)
5445 			return tx_split_name[i].name;
5446 	}
5447 	return NULL;
5448 }
5449 
5450 void
5451 set_tx_pkt_split(const char *name)
5452 {
5453 	uint32_t i;
5454 
5455 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
5456 		if (strcmp(tx_split_name[i].name, name) == 0) {
5457 			tx_pkt_split = tx_split_name[i].split;
5458 			return;
5459 		}
5460 	}
5461 	fprintf(stderr, "unknown value: \"%s\"\n", name);
5462 }
5463 
5464 int
5465 parse_fec_mode(const char *name, uint32_t *fec_capa)
5466 {
5467 	uint8_t i;
5468 
5469 	for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
5470 		if (strcmp(fec_mode_name[i].name, name) == 0) {
5471 			*fec_capa =
5472 				RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
5473 			return 0;
5474 		}
5475 	}
5476 	return -1;
5477 }
5478 
5479 void
5480 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
5481 {
5482 	unsigned int i, j;
5483 
5484 	printf("FEC capabilities:\n");
5485 
5486 	for (i = 0; i < num; i++) {
5487 		printf("%s : ",
5488 			rte_eth_link_speed_to_str(speed_fec_capa[i].speed));
5489 
5490 		for (j = 0; j < RTE_DIM(fec_mode_name); j++) {
5491 			if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
5492 						speed_fec_capa[i].capa)
5493 				printf("%s ", fec_mode_name[j].name);
5494 		}
5495 		printf("\n");
5496 	}
5497 }
5498 
5499 void
5500 show_rx_pkt_offsets(void)
5501 {
5502 	uint32_t i, n;
5503 
5504 	n = rx_pkt_nb_offs;
5505 	printf("Number of offsets: %u\n", n);
5506 	if (n) {
5507 		printf("Segment offsets: ");
5508 		for (i = 0; i != n - 1; i++)
5509 			printf("%hu,", rx_pkt_seg_offsets[i]);
5510 		printf("%hu\n", rx_pkt_seg_lengths[i]);
5511 	}
5512 }
5513 
5514 void
5515 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
5516 {
5517 	unsigned int i;
5518 
5519 	if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
5520 		printf("nb segments per RX packets=%u >= "
5521 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
5522 		return;
5523 	}
5524 
5525 	/*
5526 	 * No extra check here, the segment offsets will be checked by
5527 	 * the PMD in the extended queue setup.
5528 	 */
5529 	for (i = 0; i < nb_offs; i++) {
5530 		if (seg_offsets[i] >= UINT16_MAX) {
5531 			printf("offset[%u]=%u > UINT16_MAX - give up\n",
5532 			       i, seg_offsets[i]);
5533 			return;
5534 		}
5535 	}
5536 
5537 	for (i = 0; i < nb_offs; i++)
5538 		rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];
5539 
5540 	rx_pkt_nb_offs = (uint8_t) nb_offs;
5541 }
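
/*
 * Example (illustrative values only): seg_offsets = {0, 128} with
 * nb_offs = 2 stores both offsets as uint16_t and sets
 * rx_pkt_nb_offs = 2; any entry of 65535 or more aborts the update
 * and leaves the previous configuration untouched.
 */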
5542 
5543 void
5544 show_rx_pkt_segments(void)
5545 {
5546 	uint32_t i, n;
5547 
5548 	n = rx_pkt_nb_segs;
5549 	printf("Number of segments: %u\n", n);
5550 	if (n) {
5551 		printf("Segment sizes: ");
5552 		for (i = 0; i != n - 1; i++)
5553 			printf("%hu,", rx_pkt_seg_lengths[i]);
5554 		printf("%hu\n", rx_pkt_seg_lengths[i]);
5555 	}
5556 }
5557 
5558 static const char *get_ptype_str(uint32_t ptype)
5559 {
5560 	const char *str;
5561 
5562 	switch (ptype) {
5563 	case RTE_PTYPE_L2_ETHER:
5564 		str = "eth";
5565 		break;
5566 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
5567 		str = "ipv4";
5568 		break;
5569 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
5570 		str = "ipv6";
5571 		break;
5572 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP:
5573 		str = "ipv4-tcp";
5574 		break;
5575 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP:
5576 		str = "ipv4-udp";
5577 		break;
5578 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP:
5579 		str = "ipv4-sctp";
5580 		break;
5581 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP:
5582 		str = "ipv6-tcp";
5583 		break;
5584 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP:
5585 		str = "ipv6-udp";
5586 		break;
5587 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP:
5588 		str = "ipv6-sctp";
5589 		break;
5590 	case RTE_PTYPE_TUNNEL_GRENAT:
5591 		str = "grenat";
5592 		break;
5593 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER:
5594 		str = "inner-eth";
5595 		break;
5596 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER
5597 			| RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN:
5598 		str = "inner-ipv4";
5599 		break;
5600 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER
5601 			| RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN:
5602 		str = "inner-ipv6";
5603 		break;
5604 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5605 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP:
5606 		str = "inner-ipv4-tcp";
5607 		break;
5608 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5609 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP:
5610 		str = "inner-ipv4-udp";
5611 		break;
5612 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5613 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP:
5614 		str = "inner-ipv4-sctp";
5615 		break;
5616 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5617 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP:
5618 		str = "inner-ipv6-tcp";
5619 		break;
5620 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5621 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP:
5622 		str = "inner-ipv6-udp";
5623 		break;
5624 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5625 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP:
5626 		str = "inner-ipv6-sctp";
5627 		break;
5628 	default:
5629 		str = "unsupported";
5630 	}
5631 
5632 	return str;
5633 }
5634 
5635 void
5636 show_rx_pkt_hdrs(void)
5637 {
5638 	uint32_t i, n;
5639 
5640 	n = rx_pkt_nb_segs;
5641 	printf("Number of segments: %u\n", n);
5642 	if (n) {
5643 		printf("Packet segs: ");
5644 		for (i = 0; i < n - 1; i++)
5645 			printf("%s, ", get_ptype_str(rx_pkt_hdr_protos[i]));
5646 		printf("payload\n");
5647 	}
5648 }
5649 
5650 void
5651 set_rx_pkt_hdrs(unsigned int *seg_hdrs, unsigned int nb_segs)
5652 {
5653 	unsigned int i;
5654 
5655 	if (nb_segs + 1 > MAX_SEGS_BUFFER_SPLIT) {
5656 		printf("nb segments per RX packets=%u > "
5657 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs + 1);
5658 		return;
5659 	}
5660 
5661 	memset(rx_pkt_hdr_protos, 0, sizeof(rx_pkt_hdr_protos));
5662 
5663 	for (i = 0; i < nb_segs; i++)
5664 		rx_pkt_hdr_protos[i] = (uint32_t)seg_hdrs[i];
5665 	/*
5666 	 * Only header segments are counted here; the trailing payload
5667 	 * segment is implicit, so rx_pkt_nb_segs is nb_segs + 1.
5668 	 */
5669 	rx_pkt_nb_segs = nb_segs + 1;
5670 }
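
/*
 * Example (illustrative protocol split): seg_hdrs = {eth, ipv4-udp}
 * with nb_segs = 2 records two header protocols and yields
 * rx_pkt_nb_segs = 3, because the trailing payload segment is implicit
 * and never appears in seg_hdrs.
 */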
5671 
5672 void
5673 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
5674 {
5675 	unsigned int i;
5676 
5677 	if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
5678 		printf("nb segments per RX packets=%u >= "
5679 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
5680 		return;
5681 	}
5682 
5683 	/*
5684 	 * No extra check here, the segment lengths will be checked by
5685 	 * the PMD in the extended queue setup.
5686 	 */
5687 	for (i = 0; i < nb_segs; i++) {
5688 		if (seg_lengths[i] >= UINT16_MAX) {
5689 			printf("length[%u]=%u > UINT16_MAX - give up\n",
5690 			       i, seg_lengths[i]);
5691 			return;
5692 		}
5693 	}
5694 
5695 	for (i = 0; i < nb_segs; i++)
5696 		rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
5697 
5698 	rx_pkt_nb_segs = (uint8_t) nb_segs;
5699 }
5700 
5701 void
5702 show_tx_pkt_segments(void)
5703 {
5704 	uint32_t i, n;
5705 	const char *split;
5706 
5707 	n = tx_pkt_nb_segs;
5708 	split = tx_split_get_name(tx_pkt_split);
5709 
5710 	printf("Number of segments: %u\n", n);
5711 	printf("Segment sizes: ");
5712 	for (i = 0; i != n - 1; i++)
5713 		printf("%hu,", tx_pkt_seg_lengths[i]);
5714 	printf("%hu\n", tx_pkt_seg_lengths[i]);
5715 	printf("Split packet: %s\n", split);
5716 }
5717 
5718 static bool
5719 nb_segs_is_invalid(unsigned int nb_segs)
5720 {
5721 	uint16_t ring_size;
5722 	uint16_t queue_id;
5723 	uint16_t port_id;
5724 	int ret;
5725 
5726 	RTE_ETH_FOREACH_DEV(port_id) {
5727 		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
5728 			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
5729 			if (ret) {
5730 				/* The port may not be initialized yet; we
5731 				 * cannot declare it invalid at this stage.
5732 				 */
5733 				continue;
5734 			}
5735 			if (ring_size < nb_segs) {
5736 				printf("nb segments per TX packets=%u >= TX "
5737 				       "queue(%u) ring_size=%u - txpkts ignored\n",
5738 				       nb_segs, queue_id, ring_size);
5739 				return true;
5740 			}
5741 		}
5742 	}
5743 
5744 	return false;
5745 }
5746 
5747 void
5748 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
5749 {
5750 	uint16_t tx_pkt_len;
5751 	unsigned int i;
5752 
5753 	/*
5754 	 * For single-segment settings, a failed check is ignored:
5755 	 * sending single-segment packets is such a basic capability
5756 	 * that it is assumed to be always supported.
5757 	 */
5758 	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) {
5759 		fprintf(stderr,
5760 			"Tx segment size(%u) is not supported - txpkts ignored\n",
5761 			nb_segs);
5762 		return;
5763 	}
5764 
5765 	if (nb_segs > RTE_MAX_SEGS_PER_PKT) {
5766 		fprintf(stderr,
5767 			"Tx segment size(%u) is bigger than max number of segment(%u)\n",
5768 			nb_segs, RTE_MAX_SEGS_PER_PKT);
5769 		return;
5770 	}
5771 
5772 	/*
5773 	 * Check that each segment length does not exceed the mbuf
5774 	 * data size.
5775 	 * Check also that the total packet length is at least the size
5776 	 * of an empty UDP/IPv4 packet (sizeof(struct rte_ether_hdr) +
5777 	 * 20 + 8).
5778 	 */
5779 	tx_pkt_len = 0;
5780 	for (i = 0; i < nb_segs; i++) {
5781 		if (seg_lengths[i] > mbuf_data_size[0]) {
5782 			fprintf(stderr,
5783 				"length[%u]=%u > mbuf_data_size=%u - give up\n",
5784 				i, seg_lengths[i], mbuf_data_size[0]);
5785 			return;
5786 		}
5787 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
5788 	}
5789 	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
5790 		fprintf(stderr, "total packet length=%u < %d - give up\n",
5791 				(unsigned) tx_pkt_len,
5792 				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
5793 		return;
5794 	}
5795 
5796 	for (i = 0; i < nb_segs; i++)
5797 		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
5798 
5799 	tx_pkt_length  = tx_pkt_len;
5800 	tx_pkt_nb_segs = (uint8_t) nb_segs;
5801 }
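
/*
 * Worked minimum-length check: sizeof(struct rte_ether_hdr) is 14
 * bytes, so the smallest accepted total is 14 + 20 (IPv4 header) +
 * 8 (UDP header) = 42 bytes. For example, seg_lengths = {20, 22}
 * (total 42) passes, while {20, 20} (total 40) is rejected with
 * "give up".
 */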
5802 
5803 void
5804 show_tx_pkt_times(void)
5805 {
5806 	printf("Interburst gap: %u\n", tx_pkt_times_inter);
5807 	printf("Intraburst gap: %u\n", tx_pkt_times_intra);
5808 }
5809 
5810 void
5811 set_tx_pkt_times(unsigned int *tx_times)
5812 {
5813 	tx_pkt_times_inter = tx_times[0];
5814 	tx_pkt_times_intra = tx_times[1];
5815 }
5816 
5817 #ifdef RTE_LIB_GRO
5818 void
5819 setup_gro(const char *onoff, portid_t port_id)
5820 {
5821 	if (!rte_eth_dev_is_valid_port(port_id)) {
5822 		fprintf(stderr, "invalid port id %u\n", port_id);
5823 		return;
5824 	}
5825 	if (test_done == 0) {
5826 		fprintf(stderr,
5827 			"Before enable/disable GRO, please stop forwarding first\n");
5828 		return;
5829 	}
5830 	if (strcmp(onoff, "on") == 0) {
5831 		if (gro_ports[port_id].enable != 0) {
5832 			fprintf(stderr,
5833 				"Port %u has enabled GRO. Please disable GRO first\n",
5834 				port_id);
5835 			return;
5836 		}
5837 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
5838 			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
5839 			gro_ports[port_id].param.max_flow_num =
5840 				GRO_DEFAULT_FLOW_NUM;
5841 			gro_ports[port_id].param.max_item_per_flow =
5842 				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
5843 		}
5844 		gro_ports[port_id].enable = 1;
5845 	} else {
5846 		if (gro_ports[port_id].enable == 0) {
5847 			fprintf(stderr, "Port %u has disabled GRO\n", port_id);
5848 			return;
5849 		}
5850 		gro_ports[port_id].enable = 0;
5851 	}
5852 }
5853 
5854 void
5855 setup_gro_flush_cycles(uint8_t cycles)
5856 {
5857 	if (test_done == 0) {
5858 		fprintf(stderr,
5859 			"Before change flush interval for GRO, please stop forwarding first.\n");
5860 		return;
5861 	}
5862 
5863 	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
5864 			GRO_DEFAULT_FLUSH_CYCLES) {
5865 		fprintf(stderr,
5866 			"The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n",
5867 			GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES);
5868 		cycles = GRO_DEFAULT_FLUSH_CYCLES;
5869 	}
5870 
5871 	gro_flush_cycles = cycles;
5872 }
5873 
5874 void
5875 show_gro(portid_t port_id)
5876 {
5877 	struct rte_gro_param *param;
5878 	uint32_t max_pkts_num;
5879 
5880 	if (!rte_eth_dev_is_valid_port(port_id)) {
5881 		fprintf(stderr, "Invalid port id %u.\n", port_id);
5882 		return;
5883 	}
5884 
5885 	param = &gro_ports[port_id].param;
5886 	if (gro_ports[port_id].enable) {
5887 		printf("GRO type: TCP/IPv4\n");
5888 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
5889 			max_pkts_num = param->max_flow_num *
5890 				param->max_item_per_flow;
5891 		} else
5892 			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
5893 		printf("Max number of packets to perform GRO: %u\n",
5894 				max_pkts_num);
5895 		printf("Flushing cycles: %u\n", gro_flush_cycles);
5896 	} else
5897 		printf("Port %u doesn't enable GRO.\n", port_id);
5898 }
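
/*
 * Worked example for the limit printed above (flow figures are
 * illustrative assumptions, not necessarily the testpmd defaults):
 * with the default flush cycle, max_flow_num = 4 and
 * max_item_per_flow = 32 would allow 4 * 32 = 128 packets per GRO
 * pass; with any other flush cycle the bound is MAX_PKT_BURST *
 * GRO_MAX_FLUSH_CYCLES instead, independent of the flow parameters.
 */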
5899 #endif /* RTE_LIB_GRO */
5900 
5901 #ifdef RTE_LIB_GSO
5902 void
5903 setup_gso(const char *mode, portid_t port_id)
5904 {
5905 	if (!rte_eth_dev_is_valid_port(port_id)) {
5906 		fprintf(stderr, "invalid port id %u\n", port_id);
5907 		return;
5908 	}
5909 	if (strcmp(mode, "on") == 0) {
5910 		if (test_done == 0) {
5911 			fprintf(stderr,
5912 				"before enabling GSO, please stop forwarding first\n");
5913 			return;
5914 		}
5915 		gso_ports[port_id].enable = 1;
5916 	} else if (strcmp(mode, "off") == 0) {
5917 		if (test_done == 0) {
5918 			fprintf(stderr,
5919 				"before disabling GSO, please stop forwarding first\n");
5920 			return;
5921 		}
5922 		gso_ports[port_id].enable = 0;
5923 	}
5924 }
5925 #endif /* RTE_LIB_GSO */
5926 
5927 char*
5928 list_pkt_forwarding_modes(void)
5929 {
5930 	static char fwd_modes[128] = "";
5931 	const char *separator = "|";
5932 	struct fwd_engine *fwd_eng;
5933 	unsigned i = 0;
5934 
5935 	if (strlen(fwd_modes) == 0) {
5936 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
5937 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
5938 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
5939 			strncat(fwd_modes, separator,
5940 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
5941 		}
5942 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
5943 	}
5944 
5945 	return fwd_modes;
5946 }
5947 
5948 char*
5949 list_pkt_forwarding_retry_modes(void)
5950 {
5951 	static char fwd_modes[128] = "";
5952 	const char *separator = "|";
5953 	struct fwd_engine *fwd_eng;
5954 	unsigned i = 0;
5955 
5956 	if (strlen(fwd_modes) == 0) {
5957 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
5958 			if (fwd_eng == &rx_only_engine)
5959 				continue;
5960 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
5961 					sizeof(fwd_modes) -
5962 					strlen(fwd_modes) - 1);
5963 			strncat(fwd_modes, separator,
5964 					sizeof(fwd_modes) -
5965 					strlen(fwd_modes) - 1);
5966 		}
5967 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
5968 	}
5969 
5970 	return fwd_modes;
5971 }
5972 
5973 void
5974 set_pkt_forwarding_mode(const char *fwd_mode_name)
5975 {
5976 	struct fwd_engine *fwd_eng;
5977 	unsigned i;
5978 
5979 	i = 0;
5980 	while ((fwd_eng = fwd_engines[i]) != NULL) {
5981 		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
5982 			printf("Set %s packet forwarding mode%s\n",
5983 			       fwd_mode_name,
5984 			       retry_enabled == 0 ? "" : " with retry");
5985 			cur_fwd_eng = fwd_eng;
5986 			return;
5987 		}
5988 		i++;
5989 	}
5990 	fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name);
5991 }
5992 
5993 void
5994 add_rx_dump_callbacks(portid_t portid)
5995 {
5996 	struct rte_eth_dev_info dev_info;
5997 	uint16_t queue;
5998 	int ret;
5999 
6000 	if (port_id_is_invalid(portid, ENABLED_WARN))
6001 		return;
6002 
6003 	ret = eth_dev_info_get_print_err(portid, &dev_info);
6004 	if (ret != 0)
6005 		return;
6006 
6007 	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
6008 		if (!ports[portid].rx_dump_cb[queue])
6009 			ports[portid].rx_dump_cb[queue] =
6010 				rte_eth_add_rx_callback(portid, queue,
6011 					dump_rx_pkts, NULL);
6012 }
6013 
6014 void
6015 add_tx_dump_callbacks(portid_t portid)
6016 {
6017 	struct rte_eth_dev_info dev_info;
6018 	uint16_t queue;
6019 	int ret;
6020 
6021 	if (port_id_is_invalid(portid, ENABLED_WARN))
6022 		return;
6023 
6024 	ret = eth_dev_info_get_print_err(portid, &dev_info);
6025 	if (ret != 0)
6026 		return;
6027 
6028 	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
6029 		if (!ports[portid].tx_dump_cb[queue])
6030 			ports[portid].tx_dump_cb[queue] =
6031 				rte_eth_add_tx_callback(portid, queue,
6032 							dump_tx_pkts, NULL);
6033 }
6034 
6035 void
6036 remove_rx_dump_callbacks(portid_t portid)
6037 {
6038 	struct rte_eth_dev_info dev_info;
6039 	uint16_t queue;
6040 	int ret;
6041 
6042 	if (port_id_is_invalid(portid, ENABLED_WARN))
6043 		return;
6044 
6045 	ret = eth_dev_info_get_print_err(portid, &dev_info);
6046 	if (ret != 0)
6047 		return;
6048 
6049 	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
6050 		if (ports[portid].rx_dump_cb[queue]) {
6051 			rte_eth_remove_rx_callback(portid, queue,
6052 				ports[portid].rx_dump_cb[queue]);
6053 			ports[portid].rx_dump_cb[queue] = NULL;
6054 		}
6055 }
6056 
6057 void
6058 remove_tx_dump_callbacks(portid_t portid)
6059 {
6060 	struct rte_eth_dev_info dev_info;
6061 	uint16_t queue;
6062 	int ret;
6063 
6064 	if (port_id_is_invalid(portid, ENABLED_WARN))
6065 		return;
6066 
6067 	ret = eth_dev_info_get_print_err(portid, &dev_info);
6068 	if (ret != 0)
6069 		return;
6070 
6071 	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
6072 		if (ports[portid].tx_dump_cb[queue]) {
6073 			rte_eth_remove_tx_callback(portid, queue,
6074 				ports[portid].tx_dump_cb[queue]);
6075 			ports[portid].tx_dump_cb[queue] = NULL;
6076 		}
6077 }
6078 
6079 void
6080 configure_rxtx_dump_callbacks(uint16_t verbose)
6081 {
6082 	portid_t portid;
6083 
6084 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
6085 	TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
6086 	return;
6087 #endif
6088 
6089 	RTE_ETH_FOREACH_DEV(portid)
6090 	{
6091 		if (verbose == 1 || verbose > 2)
6092 			add_rx_dump_callbacks(portid);
6093 		else
6094 			remove_rx_dump_callbacks(portid);
6095 		if (verbose >= 2)
6096 			add_tx_dump_callbacks(portid);
6097 		else
6098 			remove_tx_dump_callbacks(portid);
6099 	}
6100 }
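
/*
 * Resulting callback matrix for the verbose levels handled above:
 *   verbose == 0 -> no dump callbacks
 *   verbose == 1 -> Rx dump only
 *   verbose == 2 -> Tx dump only
 *   verbose >= 3 -> both Rx and Tx dump
 */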
6101 
6102 void
6103 set_verbose_level(uint16_t vb_level)
6104 {
6105 	printf("Change verbose level from %u to %u\n",
6106 	       (unsigned int) verbose_level, (unsigned int) vb_level);
6107 	verbose_level = vb_level;
6108 	configure_rxtx_dump_callbacks(verbose_level);
6109 }
6110 
6111 void
6112 vlan_extend_set(portid_t port_id, int on)
6113 {
6114 	int diag;
6115 	int vlan_offload;
6116 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
6117 
6118 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6119 		return;
6120 
6121 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
6122 
6123 	if (on) {
6124 		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
6125 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
6126 	} else {
6127 		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
6128 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
6129 	}
6130 
6131 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
6132 	if (diag < 0) {
6133 		fprintf(stderr,
6134 			"rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n",
6135 			port_id, on, diag);
6136 		return;
6137 	}
6138 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
6139 }
6140 
6141 void
6142 rx_vlan_strip_set(portid_t port_id, int on)
6143 {
6144 	int diag;
6145 	int vlan_offload;
6146 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
6147 
6148 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6149 		return;
6150 
6151 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
6152 
6153 	if (on) {
6154 		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
6155 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
6156 	} else {
6157 		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
6158 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
6159 	}
6160 
6161 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
6162 	if (diag < 0) {
6163 		fprintf(stderr,
6164 			"%s(port_pi=%d, on=%d) failed diag=%d\n",
6165 			__func__, port_id, on, diag);
6166 		return;
6167 	}
6168 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
6169 }
6170 
6171 void
6172 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
6173 {
6174 	int diag;
6175 
6176 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6177 		return;
6178 
6179 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
6180 	if (diag < 0)
6181 		fprintf(stderr,
6182 			"%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n",
6183 			__func__, port_id, queue_id, on, diag);
6184 }
6185 
6186 void
6187 rx_vlan_filter_set(portid_t port_id, int on)
6188 {
6189 	int diag;
6190 	int vlan_offload;
6191 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
6192 
6193 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6194 		return;
6195 
6196 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
6197 
6198 	if (on) {
6199 		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
6200 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
6201 	} else {
6202 		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
6203 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
6204 	}
6205 
6206 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
6207 	if (diag < 0) {
6208 		fprintf(stderr,
6209 			"%s(port_pi=%d, on=%d) failed diag=%d\n",
6210 			__func__, port_id, on, diag);
6211 		return;
6212 	}
6213 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
6214 }
6215 
6216 void
6217 rx_vlan_qinq_strip_set(portid_t port_id, int on)
6218 {
6219 	int diag;
6220 	int vlan_offload;
6221 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
6222 
6223 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6224 		return;
6225 
6226 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
6227 
6228 	if (on) {
6229 		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
6230 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
6231 	} else {
6232 		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
6233 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
6234 	}
6235 
6236 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
6237 	if (diag < 0) {
6238 		fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n",
6239 			__func__, port_id, on, diag);
6240 		return;
6241 	}
6242 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
6243 }
6244 
6245 int
6246 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
6247 {
6248 	int diag;
6249 
6250 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6251 		return 1;
6252 	if (vlan_id_is_invalid(vlan_id))
6253 		return 1;
6254 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
6255 	if (diag == 0)
6256 		return 0;
6257 	fprintf(stderr,
6258 		"rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n",
6259 		port_id, vlan_id, on, diag);
6260 	return -1;
6261 }
6262 
6263 void
6264 rx_vlan_all_filter_set(portid_t port_id, int on)
6265 {
6266 	uint16_t vlan_id;
6267 
6268 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6269 		return;
6270 	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
6271 		if (rx_vft_set(port_id, vlan_id, on))
6272 			break;
6273 	}
6274 }
6275 
6276 void
6277 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
6278 {
6279 	int diag;
6280 
6281 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6282 		return;
6283 
6284 	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
6285 	if (diag == 0)
6286 		return;
6287 
6288 	fprintf(stderr,
6289 		"tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n",
6290 		port_id, vlan_type, tp_id, diag);
6291 }
6292 
6293 void
6294 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
6295 {
6296 	struct rte_eth_dev_info dev_info;
6297 	int ret;
6298 
6299 	if (vlan_id_is_invalid(vlan_id))
6300 		return;
6301 
6302 	if (ports[port_id].dev_conf.txmode.offloads &
6303 	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
6304 		fprintf(stderr, "Error, as QinQ has been enabled.\n");
6305 		return;
6306 	}
6307 
6308 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
6309 	if (ret != 0)
6310 		return;
6311 
6312 	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
6313 		fprintf(stderr,
6314 			"Error: vlan insert is not supported by port %d\n",
6315 			port_id);
6316 		return;
6317 	}
6318 
6319 	tx_vlan_reset(port_id);
6320 	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
6321 	ports[port_id].tx_vlan_id = vlan_id;
6322 }
6323 
6324 void
6325 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
6326 {
6327 	struct rte_eth_dev_info dev_info;
6328 	int ret;
6329 
6330 	if (vlan_id_is_invalid(vlan_id))
6331 		return;
6332 	if (vlan_id_is_invalid(vlan_id_outer))
6333 		return;
6334 
6335 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
6336 	if (ret != 0)
6337 		return;
6338 
6339 	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
6340 		fprintf(stderr,
6341 			"Error: qinq insert not supported by port %d\n",
6342 			port_id);
6343 		return;
6344 	}
6345 
6346 	tx_vlan_reset(port_id);
6347 	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
6348 						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
6349 	ports[port_id].tx_vlan_id = vlan_id;
6350 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
6351 }
6352 
6353 void
6354 tx_vlan_reset(portid_t port_id)
6355 {
6356 	ports[port_id].dev_conf.txmode.offloads &=
6357 				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
6358 				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
6359 	ports[port_id].tx_vlan_id = 0;
6360 	ports[port_id].tx_vlan_id_outer = 0;
6361 }
6362 
6363 void
6364 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
6365 {
6366 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6367 		return;
6368 
6369 	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
6370 }
6371 
6372 void
6373 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
6374 {
6375 	int ret;
6376 
6377 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6378 		return;
6379 
6380 	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
6381 		return;
6382 
6383 	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
6384 		fprintf(stderr, "map_value not in required range 0..%d\n",
6385 			RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
6386 		return;
6387 	}
6388 
6389 	if (!is_rx) { /* tx */
6390 		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
6391 							     map_value);
6392 		if (ret) {
6393 			fprintf(stderr,
6394 				"failed to set tx queue stats mapping.\n");
6395 			return;
6396 		}
6397 	} else { /* rx */
6398 		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
6399 							     map_value);
6400 		if (ret) {
6401 			fprintf(stderr,
6402 				"failed to set rx queue stats mapping.\n");
6403 			return;
6404 		}
6405 	}
6406 }
6407 
6408 void
6409 set_xstats_hide_zero(uint8_t on_off)
6410 {
6411 	xstats_hide_zero = on_off;
6412 }
6413 
6414 void
6415 set_record_core_cycles(uint8_t on_off)
6416 {
6417 	record_core_cycles = on_off;
6418 }
6419 
6420 void
6421 set_record_burst_stats(uint8_t on_off)
6422 {
6423 	record_burst_stats = on_off;
6424 }
6425 
6426 uint16_t
6427 str_to_flowtype(const char *string)
6428 {
6429 	uint8_t i;
6430 
6431 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
6432 		if (!strcmp(flowtype_str_table[i].str, string))
6433 			return flowtype_str_table[i].ftype;
6434 	}
6435 
6436 	if (isdigit((unsigned char)string[0])) {
6437 		int val = atoi(string);
6438 		if (val > 0 && val < 64)
6439 			return (uint16_t)val;
6440 	}
6441 
6442 	return RTE_ETH_FLOW_UNKNOWN;
6443 }
6444 
6445 const char*
6446 flowtype_to_str(uint16_t flow_type)
6447 {
6448 	uint8_t i;
6449 
6450 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
6451 		if (flowtype_str_table[i].ftype == flow_type)
6452 			return flowtype_str_table[i].str;
6453 	}
6454 
6455 	return NULL;
6456 }
6457 
6458 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
6459 
6460 static inline void
6461 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
6462 {
6463 	struct rte_eth_flex_payload_cfg *cfg;
6464 	uint32_t i, j;
6465 
6466 	for (i = 0; i < flex_conf->nb_payloads; i++) {
6467 		cfg = &flex_conf->flex_set[i];
6468 		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
6469 			printf("\n    RAW:  ");
6470 		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
6471 			printf("\n    L2_PAYLOAD:  ");
6472 		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
6473 			printf("\n    L3_PAYLOAD:  ");
6474 		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
6475 			printf("\n    L4_PAYLOAD:  ");
6476 		else
6477 			printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
6478 		for (j = 0; j < num; j++)
6479 			printf("  %-5u", cfg->src_offset[j]);
6480 	}
6481 	printf("\n");
6482 }
6483 
6484 static inline void
6485 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
6486 {
6487 	struct rte_eth_fdir_flex_mask *mask;
6488 	uint32_t i, j;
6489 	const char *p;
6490 
6491 	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
6492 		mask = &flex_conf->flex_mask[i];
6493 		p = flowtype_to_str(mask->flow_type);
6494 		printf("\n    %s:\t", p ? p : "unknown");
6495 		for (j = 0; j < num; j++)
6496 			printf(" %02x", mask->mask[j]);
6497 	}
6498 	printf("\n");
6499 }
6500 
6501 static inline void
6502 print_fdir_flow_type(uint32_t flow_types_mask)
6503 {
6504 	int i;
6505 	const char *p;
6506 
6507 	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
6508 		if (!(flow_types_mask & (1 << i)))
6509 			continue;
6510 		p = flowtype_to_str(i);
6511 		if (p)
6512 			printf(" %s", p);
6513 		else
6514 			printf(" unknown");
6515 	}
6516 	printf("\n");
6517 }
6518 
6519 static int
6520 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
6521 		    struct rte_eth_fdir_stats *fdir_stat)
6522 {
6523 	int ret = -ENOTSUP;
6524 
6525 #ifdef RTE_NET_I40E
6526 	if (ret == -ENOTSUP) {
6527 		ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
6528 		if (!ret)
6529 			ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
6530 	}
6531 #endif
6532 #ifdef RTE_NET_IXGBE
6533 	if (ret == -ENOTSUP) {
6534 		ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
6535 		if (!ret)
6536 			ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
6537 	}
6538 #endif
6539 	switch (ret) {
6540 	case 0:
6541 		break;
6542 	case -ENOTSUP:
6543 		fprintf(stderr, "\n FDIR is not supported on port %-2d\n",
6544 			port_id);
6545 		break;
6546 	default:
6547 		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
6548 		break;
6549 	}
6550 	return ret;
6551 }
6552 
6553 void
6554 fdir_get_infos(portid_t port_id)
6555 {
6556 	struct rte_eth_fdir_stats fdir_stat;
6557 	struct rte_eth_fdir_info fdir_info;
6558 
6559 	static const char *fdir_stats_border = "########################";
6560 
6561 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6562 		return;
6563 
6564 	memset(&fdir_info, 0, sizeof(fdir_info));
6565 	memset(&fdir_stat, 0, sizeof(fdir_stat));
6566 	if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
6567 		return;
6568 
6569 	printf("\n  %s FDIR infos for port %-2d     %s\n",
6570 	       fdir_stats_border, port_id, fdir_stats_border);
6571 	printf("  MODE: ");
6572 	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
6573 		printf("  PERFECT\n");
6574 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
6575 		printf("  PERFECT-MAC-VLAN\n");
6576 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
6577 		printf("  PERFECT-TUNNEL\n");
6578 	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
6579 		printf("  SIGNATURE\n");
6580 	else
6581 		printf("  DISABLE\n");
6582 	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
6583 		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
6584 		printf("  SUPPORTED FLOW TYPE: ");
6585 		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
6586 	}
6587 	printf("  FLEX PAYLOAD INFO:\n");
6588 	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
6589 	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
6590 	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
6591 		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
6592 		fdir_info.flex_payload_unit,
6593 		fdir_info.max_flex_payload_segment_num,
6594 		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
6595 	if (fdir_info.flex_conf.nb_payloads > 0) {
6596 		printf("  FLEX PAYLOAD SRC OFFSET:");
6597 		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
6598 	}
6599 	if (fdir_info.flex_conf.nb_flexmasks > 0) {
6600 		printf("  FLEX MASK CFG:");
6601 		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
6602 	}
6603 	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
6604 	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
6605 	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
6606 	       fdir_info.guarant_spc, fdir_info.best_spc);
6607 	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
6608 	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
6609 	       "  add:	         %-10"PRIu64"  remove:        %"PRIu64"\n"
6610 	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
6611 	       fdir_stat.collision, fdir_stat.free,
6612 	       fdir_stat.maxhash, fdir_stat.maxlen,
6613 	       fdir_stat.add, fdir_stat.remove,
6614 	       fdir_stat.f_add, fdir_stat.f_remove);
6615 	printf("  %s############################%s\n",
6616 	       fdir_stats_border, fdir_stats_border);
6617 }
6618 
6619 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */
6620 
6621 void
6622 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
6623 {
6624 #ifdef RTE_NET_IXGBE
6625 	int diag;
6626 
6627 	if (is_rx)
6628 		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
6629 	else
6630 		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
6631 
6632 	if (diag == 0)
6633 		return;
6634 	fprintf(stderr,
6635 		"rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
6636 		is_rx ? "rx" : "tx", port_id, diag);
6637 	return;
6638 #endif
6639 	fprintf(stderr, "VF %s setting not supported for port %d\n",
6640 		is_rx ? "Rx" : "Tx", port_id);
6641 	RTE_SET_USED(vf);
6642 	RTE_SET_USED(on);
6643 }
6644 
6645 int
6646 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate)
6647 {
6648 	int diag;
6649 	struct rte_eth_link link;
6650 	int ret;
6651 
6652 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6653 		return 1;
6654 	ret = eth_link_get_nowait_print_err(port_id, &link);
6655 	if (ret < 0)
6656 		return 1;
6657 	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
6658 	    rate > link.link_speed) {
6659 		fprintf(stderr,
6660 			"Invalid rate value:%u bigger than link speed: %u\n",
6661 			rate, link.link_speed);
6662 		return 1;
6663 	}
6664 	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
6665 	if (diag == 0)
6666 		return diag;
6667 	fprintf(stderr,
6668 		"rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
6669 		port_id, diag);
6670 	return diag;
6671 }
6672 
6673 int
6674 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk)
6675 {
6676 	int diag = -ENOTSUP;
6677 
6678 	RTE_SET_USED(vf);
6679 	RTE_SET_USED(rate);
6680 	RTE_SET_USED(q_msk);
6681 
6682 #ifdef RTE_NET_IXGBE
6683 	if (diag == -ENOTSUP)
6684 		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
6685 						       q_msk);
6686 #endif
6687 #ifdef RTE_NET_BNXT
6688 	if (diag == -ENOTSUP)
6689 		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
6690 #endif
6691 	if (diag == 0)
6692 		return diag;
6693 
6694 	fprintf(stderr,
6695 		"%s for port_id=%d failed diag=%d\n",
6696 		__func__, port_id, diag);
6697 	return diag;
6698 }
6699 
6700 int
6701 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh)
6702 {
6703 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6704 		return -EINVAL;
6705 
6706 	return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh);
6707 }
6708 
6709 /*
6710  * Functions to manage the set of filtered Multicast MAC addresses.
6711  *
6712  * A pool of filtered multicast MAC addresses is associated with each port.
6713  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
6714  * The address of the pool and the number of valid multicast MAC addresses
6715  * recorded in the pool are stored in the fields "mc_addr_pool" and
6716  * "mc_addr_nb" of the "rte_port" data structure.
6717  *
6718  * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
6719  * to be supplied a contiguous array of multicast MAC addresses.
6720  * To comply with this constraint, the set of multicast addresses recorded
6721  * into the pool are systematically compacted at the beginning of the pool.
6722  * Hence, when a multicast address is removed from the pool, all following
6723  * addresses, if any, are copied back to keep the set contiguous.
6724  */
6725 #define MCAST_POOL_INC 32
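
/*
 * Growth/compaction sketch of the scheme described above: with
 * MCAST_POOL_INC = 32, the 1st and 33rd additions each trigger a
 * realloc() (to 32 and 64 entries respectively), while additions
 * 2-32 only bump mc_addr_nb. Removing index 1 of a 4-entry pool
 * memmove()s entries 2-3 down to indices 1-2, keeping the array
 * contiguous for rte_eth_dev_set_mc_addr_list().
 */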
6726 
6727 static int
6728 mcast_addr_pool_extend(struct rte_port *port)
6729 {
6730 	struct rte_ether_addr *mc_pool;
6731 	size_t mc_pool_size;
6732 
6733 	/*
6734 	 * If a free entry is available at the end of the pool, just
6735 	 * increment the number of recorded multicast addresses.
6736 	 */
6737 	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
6738 		port->mc_addr_nb++;
6739 		return 0;
6740 	}
6741 
6742 	/*
6743 	 * [re]allocate a pool with MCAST_POOL_INC more entries.
6744 	 * The previous test guarantees that port->mc_addr_nb is a multiple
6745 	 * of MCAST_POOL_INC.
6746 	 */
6747 	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
6748 						    MCAST_POOL_INC);
6749 	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
6750 						mc_pool_size);
6751 	if (mc_pool == NULL) {
6752 		fprintf(stderr,
6753 			"allocation of pool of %u multicast addresses failed\n",
6754 			port->mc_addr_nb + MCAST_POOL_INC);
6755 		return -ENOMEM;
6756 	}
6757 
6758 	port->mc_addr_pool = mc_pool;
6759 	port->mc_addr_nb++;
6760 	return 0;
6761 
6762 }
6763 
6764 static void
6765 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
6766 {
6767 	if (mcast_addr_pool_extend(port) != 0)
6768 		return;
6769 	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
6770 }
6771 
6772 static void
6773 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
6774 {
6775 	port->mc_addr_nb--;
6776 	if (addr_idx == port->mc_addr_nb) {
6777 		/* No need to recompact the set of multicast addresses. */
6778 		if (port->mc_addr_nb == 0) {
6779 			/* free the pool of multicast addresses. */
6780 			free(port->mc_addr_pool);
6781 			port->mc_addr_pool = NULL;
6782 		}
6783 		return;
6784 	}
6785 	memmove(&port->mc_addr_pool[addr_idx],
6786 		&port->mc_addr_pool[addr_idx + 1],
6787 		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
6788 }
6789 
6790 int
6791 mcast_addr_pool_destroy(portid_t port_id)
6792 {
6793 	struct rte_port *port;
6794 
6795 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
6796 	    port_id == (portid_t)RTE_PORT_ALL)
6797 		return -EINVAL;
6798 	port = &ports[port_id];
6799 
6800 	if (port->mc_addr_nb != 0) {
6801 		/* free the pool of multicast addresses. */
6802 		free(port->mc_addr_pool);
6803 		port->mc_addr_pool = NULL;
6804 		port->mc_addr_nb = 0;
6805 	}
6806 	return 0;
6807 }
6808 
6809 static int
6810 eth_port_multicast_addr_list_set(portid_t port_id)
6811 {
6812 	struct rte_port *port;
6813 	int diag;
6814 
6815 	port = &ports[port_id];
6816 	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
6817 					    port->mc_addr_nb);
6818 	if (diag < 0)
6819 		fprintf(stderr,
6820 			"rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
6821 			port_id, port->mc_addr_nb, diag);
6822 
6823 	return diag;
6824 }
6825 
6826 void
6827 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
6828 {
6829 	struct rte_port *port;
6830 	uint32_t i;
6831 
6832 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6833 		return;
6834 
6835 	port = &ports[port_id];
6836 
6837 	/*
6838 	 * Check that the added multicast MAC address is not already recorded
6839 	 * in the pool of multicast addresses.
6840 	 */
6841 	for (i = 0; i < port->mc_addr_nb; i++) {
6842 		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
6843 			fprintf(stderr,
6844 				"multicast address already filtered by port\n");
6845 			return;
6846 		}
6847 	}
6848 
6849 	mcast_addr_pool_append(port, mc_addr);
6850 	if (eth_port_multicast_addr_list_set(port_id) < 0)
6851 		/* Rollback on failure, remove the address from the pool */
6852 		mcast_addr_pool_remove(port, i);
6853 }
6854 
6855 void
6856 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
6857 {
6858 	struct rte_port *port;
6859 	uint32_t i;
6860 
6861 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6862 		return;
6863 
6864 	port = &ports[port_id];
6865 
6866 	/*
6867 	 * Search the pool of multicast MAC addresses for the removed address.
6868 	 */
6869 	for (i = 0; i < port->mc_addr_nb; i++) {
6870 		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
6871 			break;
6872 	}
6873 	if (i == port->mc_addr_nb) {
6874 		fprintf(stderr, "multicast address not filtered by port %d\n",
6875 			port_id);
6876 		return;
6877 	}
6878 
6879 	mcast_addr_pool_remove(port, i);
6880 	if (eth_port_multicast_addr_list_set(port_id) < 0)
6881 		/* Rollback on failure, add the address back into the pool */
6882 		mcast_addr_pool_append(port, mc_addr);
6883 }
6884 
6885 void
6886 mcast_addr_flush(portid_t port_id)
6887 {
6888 	int ret;
6889 
6890 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6891 		return;
6892 
6893 	ret = rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);
6894 	if (ret != 0) {
6895 		fprintf(stderr,
6896 			"Failed to flush all multicast MAC addresses on port_id %u\n",
6897 			port_id);
6898 		return;
6899 	}
6900 	mcast_addr_pool_destroy(port_id);
6901 }
6902 
6903 void
6904 port_dcb_info_display(portid_t port_id)
6905 {
6906 	struct rte_eth_dcb_info dcb_info;
6907 	uint16_t i;
6908 	int ret;
6909 	static const char *border = "================";
6910 
6911 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6912 		return;
6913 
6914 	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
6915 	if (ret) {
6916 		fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n",
6917 			port_id);
6918 		return;
6919 	}
6920 	printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
6921 	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
6922 	printf("\n  TC :        ");
6923 	for (i = 0; i < dcb_info.nb_tcs; i++)
6924 		printf("\t%4d", i);
6925 	printf("\n  Priority :  ");
6926 	for (i = 0; i < dcb_info.nb_tcs; i++)
6927 		printf("\t%4d", dcb_info.prio_tc[i]);
6928 	printf("\n  BW percent :");
6929 	for (i = 0; i < dcb_info.nb_tcs; i++)
6930 		printf("\t%4d%%", dcb_info.tc_bws[i]);
6931 	printf("\n  RXQ base :  ");
6932 	for (i = 0; i < dcb_info.nb_tcs; i++)
6933 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
6934 	printf("\n  RXQ number :");
6935 	for (i = 0; i < dcb_info.nb_tcs; i++)
6936 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
6937 	printf("\n  TXQ base :  ");
6938 	for (i = 0; i < dcb_info.nb_tcs; i++)
6939 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
6940 	printf("\n  TXQ number :");
6941 	for (i = 0; i < dcb_info.nb_tcs; i++)
6942 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
6943 	printf("\n");
6944 }
6945 
6946 uint8_t *
6947 open_file(const char *file_path, uint32_t *size)
6948 {
6949 	int fd = open(file_path, O_RDONLY);
6950 	off_t pkg_size;
6951 	uint8_t *buf = NULL;
6952 	int ret = 0;
6953 	struct stat st_buf;
6954 
6955 	if (size)
6956 		*size = 0;
6957 
6958 	if (fd == -1) {
6959 		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
6960 		return buf;
6961 	}
6962 
6963 	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
6964 		close(fd);
6965 		fprintf(stderr, "%s: File operations failed\n", __func__);
6966 		return buf;
6967 	}
6968 
6969 	pkg_size = st_buf.st_size;
6970 	if (pkg_size < 0) {
6971 		close(fd);
6972 		fprintf(stderr, "%s: File operations failed\n", __func__);
6973 		return buf;
6974 	}
6975 
6976 	buf = (uint8_t *)malloc(pkg_size);
6977 	if (!buf) {
6978 		close(fd);
6979 		fprintf(stderr, "%s: Failed to malloc memory\n", __func__);
6980 		return buf;
6981 	}
6982 
6983 	ret = read(fd, buf, pkg_size);
6984 	if (ret < 0) {
6985 		close(fd);
6986 		fprintf(stderr, "%s: File read operation failed\n", __func__);
6987 		close_file(buf);
6988 		return NULL;
6989 	}
6990 
6991 	if (size)
6992 		*size = pkg_size;
6993 
6994 	close(fd);
6995 
6996 	return buf;
6997 }
6998 
6999 int
7000 save_file(const char *file_path, uint8_t *buf, uint32_t size)
7001 {
7002 	FILE *fh = fopen(file_path, "wb");
7003 
7004 	if (fh == NULL) {
7005 		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
7006 		return -1;
7007 	}
7008 
7009 	if (fwrite(buf, 1, size, fh) != size) {
7010 		fclose(fh);
7011 		fprintf(stderr, "%s: File write operation failed\n", __func__);
7012 		return -1;
7013 	}
7014 
7015 	fclose(fh);
7016 
7017 	return 0;
7018 }
7019 
7020 int
7021 close_file(uint8_t *buf)
7022 {
7023 	if (buf) {
7024 		free((void *)buf);
7025 		return 0;
7026 	}
7027 
7028 	return -1;
7029 }
7030 
7031 void
7032 show_macs(portid_t port_id)
7033 {
7034 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
7035 	struct rte_eth_dev_info dev_info;
7036 	int32_t i, rc, num_macs = 0;
7037 
7038 	if (eth_dev_info_get_print_err(port_id, &dev_info))
7039 		return;
7040 
7041 	struct rte_ether_addr addr[dev_info.max_mac_addrs];
7042 	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
7043 	if (rc < 0)
7044 		return;
7045 
7046 	for (i = 0; i < rc; i++) {
7047 
7048 		/* skip zero address */
7049 		if (rte_is_zero_ether_addr(&addr[i]))
7050 			continue;
7051 
7052 		num_macs++;
7053 	}
7054 
7055 	printf("Number of MAC address added: %d\n", num_macs);
7056 
7057 	for (i = 0; i < rc; i++) {
7058 
7059 		/* skip zero address */
7060 		if (rte_is_zero_ether_addr(&addr[i]))
7061 			continue;
7062 
7063 		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
7064 		printf("  %s\n", buf);
7065 	}
7066 }
7067 
7068 void
7069 show_mcast_macs(portid_t port_id)
7070 {
7071 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
7072 	struct rte_ether_addr *addr;
7073 	struct rte_port *port;
7074 	uint32_t i;
7075 
7076 	port = &ports[port_id];
7077 
7078 	printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb);
7079 
7080 	for (i = 0; i < port->mc_addr_nb; i++) {
7081 		addr = &port->mc_addr_pool[i];
7082 
7083 		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
7084 		printf("  %s\n", buf);
7085 	}
7086 }
7087