xref: /dpdk/app/test-pmd/config.c (revision 96893df75bf50a82cabd8debe3492d564a5d13d5)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation.
3  * Copyright 2013-2014 6WIND S.A.
4  */
5 
6 #include <ctype.h>
7 #include <stdarg.h>
8 #include <errno.h>
9 #include <stdbool.h>
10 #include <stdio.h>
11 #include <stdlib.h>
12 #include <string.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 
16 #include <sys/queue.h>
17 #include <sys/types.h>
18 #include <sys/stat.h>
19 #include <fcntl.h>
20 #include <unistd.h>
21 
22 #include <rte_common.h>
23 #include <rte_byteorder.h>
24 #include <rte_debug.h>
25 #include <rte_log.h>
26 #include <rte_memory.h>
27 #include <rte_memcpy.h>
28 #include <rte_memzone.h>
29 #include <rte_launch.h>
30 #include <rte_bus.h>
31 #include <rte_eal.h>
32 #include <rte_per_lcore.h>
33 #include <rte_lcore.h>
34 #include <rte_branch_prediction.h>
35 #include <rte_mempool.h>
36 #include <rte_mbuf.h>
37 #include <rte_interrupts.h>
38 #include <rte_ether.h>
39 #include <rte_ethdev.h>
40 #include <rte_string_fns.h>
41 #include <rte_cycles.h>
42 #include <rte_flow.h>
43 #include <rte_mtr.h>
44 #include <rte_errno.h>
45 #ifdef RTE_NET_IXGBE
46 #include <rte_pmd_ixgbe.h>
47 #endif
48 #ifdef RTE_NET_I40E
49 #include <rte_pmd_i40e.h>
50 #endif
51 #ifdef RTE_NET_BNXT
52 #include <rte_pmd_bnxt.h>
53 #endif
54 #ifdef RTE_LIB_GRO
55 #include <rte_gro.h>
56 #endif
57 #include <rte_hexdump.h>
58 
59 #include "testpmd.h"
60 #include "cmdline_mtr.h"
61 
62 #define ETHDEV_FWVERS_LEN 32
63 
64 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
65 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
66 #else
67 #define CLOCK_TYPE_ID CLOCK_MONOTONIC
68 #endif
69 
70 #define NS_PER_SEC 1E9
71 
72 static const struct {
73 	enum tx_pkt_split split;
74 	const char *name;
75 } tx_split_name[] = {
76 	{
77 		.split = TX_PKT_SPLIT_OFF,
78 		.name = "off",
79 	},
80 	{
81 		.split = TX_PKT_SPLIT_ON,
82 		.name = "on",
83 	},
84 	{
85 		.split = TX_PKT_SPLIT_RND,
86 		.name = "rand",
87 	},
88 };
89 
90 const struct rss_type_info rss_type_table[] = {
91 	/* Group types */
92 	{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
93 		RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
94 		RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
95 		RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS | RTE_ETH_RSS_L2TPV2},
96 	{ "none", 0 },
97 	{ "ip", RTE_ETH_RSS_IP },
98 	{ "udp", RTE_ETH_RSS_UDP },
99 	{ "tcp", RTE_ETH_RSS_TCP },
100 	{ "sctp", RTE_ETH_RSS_SCTP },
101 	{ "tunnel", RTE_ETH_RSS_TUNNEL },
102 	{ "vlan", RTE_ETH_RSS_VLAN },
103 
104 	/* Individual type */
105 	{ "ipv4", RTE_ETH_RSS_IPV4 },
106 	{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
107 	{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
108 	{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
109 	{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
110 	{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
111 	{ "ipv6", RTE_ETH_RSS_IPV6 },
112 	{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
113 	{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
114 	{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
115 	{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
116 	{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
117 	{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
118 	{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
119 	{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
120 	{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
121 	{ "port", RTE_ETH_RSS_PORT },
122 	{ "vxlan", RTE_ETH_RSS_VXLAN },
123 	{ "geneve", RTE_ETH_RSS_GENEVE },
124 	{ "nvgre", RTE_ETH_RSS_NVGRE },
125 	{ "gtpu", RTE_ETH_RSS_GTPU },
126 	{ "eth", RTE_ETH_RSS_ETH },
127 	{ "s-vlan", RTE_ETH_RSS_S_VLAN },
128 	{ "c-vlan", RTE_ETH_RSS_C_VLAN },
129 	{ "esp", RTE_ETH_RSS_ESP },
130 	{ "ah", RTE_ETH_RSS_AH },
131 	{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
132 	{ "pfcp", RTE_ETH_RSS_PFCP },
133 	{ "pppoe", RTE_ETH_RSS_PPPOE },
134 	{ "ecpri", RTE_ETH_RSS_ECPRI },
135 	{ "mpls", RTE_ETH_RSS_MPLS },
136 	{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
137 	{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
138 	{ "l2tpv2", RTE_ETH_RSS_L2TPV2 },
139 	{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
140 	{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
141 	{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
142 	{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
143 	{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
144 	{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
145 	{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
146 	{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
147 	{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
148 	{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
149 	{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
150 	{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
151 	{ "ipv6-flow-label", RTE_ETH_RSS_IPV6_FLOW_LABEL },
152 	{ NULL, 0},
153 };
154 
155 static const struct {
156 	enum rte_eth_fec_mode mode;
157 	const char *name;
158 } fec_mode_name[] = {
159 	{
160 		.mode = RTE_ETH_FEC_NOFEC,
161 		.name = "off",
162 	},
163 	{
164 		.mode = RTE_ETH_FEC_AUTO,
165 		.name = "auto",
166 	},
167 	{
168 		.mode = RTE_ETH_FEC_BASER,
169 		.name = "baser",
170 	},
171 	{
172 		.mode = RTE_ETH_FEC_RS,
173 		.name = "rs",
174 	},
175 	{
176 		.mode = RTE_ETH_FEC_LLRS,
177 		.name = "llrs",
178 	},
179 };
180 
181 static const struct {
182 	char str[32];
183 	uint16_t ftype;
184 } flowtype_str_table[] = {
185 	{"raw", RTE_ETH_FLOW_RAW},
186 	{"ipv4", RTE_ETH_FLOW_IPV4},
187 	{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
188 	{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
189 	{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
190 	{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
191 	{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
192 	{"ipv6", RTE_ETH_FLOW_IPV6},
193 	{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
194 	{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
195 	{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
196 	{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
197 	{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
198 	{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
199 	{"ipv6-ex", RTE_ETH_FLOW_IPV6_EX},
200 	{"ipv6-tcp-ex", RTE_ETH_FLOW_IPV6_TCP_EX},
201 	{"ipv6-udp-ex", RTE_ETH_FLOW_IPV6_UDP_EX},
202 	{"port", RTE_ETH_FLOW_PORT},
203 	{"vxlan", RTE_ETH_FLOW_VXLAN},
204 	{"geneve", RTE_ETH_FLOW_GENEVE},
205 	{"nvgre", RTE_ETH_FLOW_NVGRE},
206 	{"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
207 	{"gtpu", RTE_ETH_FLOW_GTPU},
208 };
209 
210 static void
211 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
212 {
213 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
214 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
215 	printf("%s%s", name, buf);
216 }
217 
218 static void
219 nic_xstats_display_periodic(portid_t port_id)
220 {
221 	struct xstat_display_info *xstats_info;
222 	uint64_t *prev_values, *curr_values;
223 	uint64_t diff_value, value_rate;
224 	struct timespec cur_time;
225 	uint64_t *ids_supp;
226 	size_t ids_supp_sz;
227 	uint64_t diff_ns;
228 	unsigned int i;
229 	int rc;
230 
231 	xstats_info = &ports[port_id].xstats_info;
232 
233 	ids_supp_sz = xstats_info->ids_supp_sz;
234 	if (ids_supp_sz == 0)
235 		return;
236 
237 	printf("\n");
238 
239 	ids_supp = xstats_info->ids_supp;
240 	prev_values = xstats_info->prev_values;
241 	curr_values = xstats_info->curr_values;
242 
243 	rc = rte_eth_xstats_get_by_id(port_id, ids_supp, curr_values,
244 				      ids_supp_sz);
245 	if (rc != (int)ids_supp_sz) {
246 		fprintf(stderr,
247 			"Failed to get values of %zu xstats for port %u - return code %d\n",
248 			ids_supp_sz, port_id, rc);
249 		return;
250 	}
251 
252 	diff_ns = 0;
253 	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
254 		uint64_t ns;
255 
256 		ns = cur_time.tv_sec * NS_PER_SEC;
257 		ns += cur_time.tv_nsec;
258 
259 		if (xstats_info->prev_ns != 0)
260 			diff_ns = ns - xstats_info->prev_ns;
261 		xstats_info->prev_ns = ns;
262 	}
263 
264 	printf("%-31s%-17s%s\n", " ", "Value", "Rate (since last show)");
265 	for (i = 0; i < ids_supp_sz; i++) {
266 		diff_value = (curr_values[i] > prev_values[i]) ?
267 			     (curr_values[i] - prev_values[i]) : 0;
268 		prev_values[i] = curr_values[i];
269 		value_rate = diff_ns > 0 ?
270 				(double)diff_value / diff_ns * NS_PER_SEC : 0;
271 
272 		printf("  %-25s%12"PRIu64" %15"PRIu64"\n",
273 		       xstats_display[i].name, curr_values[i], value_rate);
274 	}
275 }
276 
277 void
278 nic_stats_display(portid_t port_id)
279 {
280 	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
281 	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
282 	static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS];
283 	static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS];
284 	static uint64_t prev_ns[RTE_MAX_ETHPORTS];
285 	struct timespec cur_time;
286 	uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx,
287 								diff_ns;
288 	uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx;
289 	struct rte_eth_stats stats;
290 	static const char *nic_stats_border = "########################";
291 	int ret;
292 
293 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
294 		print_valid_ports();
295 		return;
296 	}
297 	ret = rte_eth_stats_get(port_id, &stats);
298 	if (ret != 0) {
299 		fprintf(stderr,
300 			"%s: Error: failed to get stats (port %u): %d",
301 			__func__, port_id, ret);
302 		return;
303 	}
304 	printf("\n  %s NIC statistics for port %-2d %s\n",
305 	       nic_stats_border, port_id, nic_stats_border);
306 
307 	printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
308 	       "%-"PRIu64"\n", stats.ipackets, stats.imissed, stats.ibytes);
309 	printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
310 	printf("  RX-nombuf:  %-10"PRIu64"\n", stats.rx_nombuf);
311 	printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
312 	       "%-"PRIu64"\n", stats.opackets, stats.oerrors, stats.obytes);
313 
314 	diff_ns = 0;
315 	if (clock_gettime(CLOCK_TYPE_ID, &cur_time) == 0) {
316 		uint64_t ns;
317 
318 		ns = cur_time.tv_sec * NS_PER_SEC;
319 		ns += cur_time.tv_nsec;
320 
321 		if (prev_ns[port_id] != 0)
322 			diff_ns = ns - prev_ns[port_id];
323 		prev_ns[port_id] = ns;
324 	}
325 
326 	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
327 		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
328 	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
329 		(stats.opackets - prev_pkts_tx[port_id]) : 0;
330 	prev_pkts_rx[port_id] = stats.ipackets;
331 	prev_pkts_tx[port_id] = stats.opackets;
332 	mpps_rx = diff_ns > 0 ?
333 		(double)diff_pkts_rx / diff_ns * NS_PER_SEC : 0;
334 	mpps_tx = diff_ns > 0 ?
335 		(double)diff_pkts_tx / diff_ns * NS_PER_SEC : 0;
336 
337 	diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ?
338 		(stats.ibytes - prev_bytes_rx[port_id]) : 0;
339 	diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ?
340 		(stats.obytes - prev_bytes_tx[port_id]) : 0;
341 	prev_bytes_rx[port_id] = stats.ibytes;
342 	prev_bytes_tx[port_id] = stats.obytes;
343 	mbps_rx = diff_ns > 0 ?
344 		(double)diff_bytes_rx / diff_ns * NS_PER_SEC : 0;
345 	mbps_tx = diff_ns > 0 ?
346 		(double)diff_bytes_tx / diff_ns * NS_PER_SEC : 0;
347 
348 	printf("\n  Throughput (since last show)\n");
349 	printf("  Rx-pps: %12"PRIu64"          Rx-bps: %12"PRIu64"\n  Tx-pps: %12"
350 	       PRIu64"          Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8,
351 	       mpps_tx, mbps_tx * 8);
352 
353 	if (xstats_display_num > 0)
354 		nic_xstats_display_periodic(port_id);
355 
356 	printf("  %s############################%s\n",
357 	       nic_stats_border, nic_stats_border);
358 }
359 
360 void
361 nic_stats_clear(portid_t port_id)
362 {
363 	int ret;
364 
365 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
366 		print_valid_ports();
367 		return;
368 	}
369 
370 	ret = rte_eth_stats_reset(port_id);
371 	if (ret != 0) {
372 		fprintf(stderr,
373 			"%s: Error: failed to reset stats (port %u): %s",
374 			__func__, port_id, strerror(-ret));
375 		return;
376 	}
377 
378 	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
379 	if (ret != 0) {
380 		if (ret < 0)
381 			ret = -ret;
382 		fprintf(stderr,
383 			"%s: Error: failed to get stats (port %u): %s",
384 			__func__, port_id, strerror(ret));
385 		return;
386 	}
387 	printf("\n  NIC statistics for port %d cleared\n", port_id);
388 }
389 
390 void
391 nic_xstats_display(portid_t port_id)
392 {
393 	struct rte_eth_xstat *xstats;
394 	int cnt_xstats, idx_xstat;
395 	struct rte_eth_xstat_name *xstats_names;
396 
397 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
398 		print_valid_ports();
399 		return;
400 	}
401 	printf("###### NIC extended statistics for port %-2d\n", port_id);
402 	if (!rte_eth_dev_is_valid_port(port_id)) {
403 		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
404 		return;
405 	}
406 
407 	/* Get count */
408 	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
409 	if (cnt_xstats  < 0) {
410 		fprintf(stderr, "Error: Cannot get count of xstats\n");
411 		return;
412 	}
413 
414 	/* Get id-name lookup table */
415 	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
416 	if (xstats_names == NULL) {
417 		fprintf(stderr, "Cannot allocate memory for xstats lookup\n");
418 		return;
419 	}
420 	if (cnt_xstats != rte_eth_xstats_get_names(
421 			port_id, xstats_names, cnt_xstats)) {
422 		fprintf(stderr, "Error: Cannot get xstats lookup\n");
423 		free(xstats_names);
424 		return;
425 	}
426 
427 	/* Get stats themselves */
428 	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
429 	if (xstats == NULL) {
430 		fprintf(stderr, "Cannot allocate memory for xstats\n");
431 		free(xstats_names);
432 		return;
433 	}
434 	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
435 		fprintf(stderr, "Error: Unable to get xstats\n");
436 		free(xstats_names);
437 		free(xstats);
438 		return;
439 	}
440 
441 	/* Display xstats */
442 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
443 		if (xstats_hide_zero && !xstats[idx_xstat].value)
444 			continue;
445 		printf("%s: %"PRIu64"\n",
446 			xstats_names[idx_xstat].name,
447 			xstats[idx_xstat].value);
448 	}
449 	free(xstats_names);
450 	free(xstats);
451 }
452 
453 void
454 nic_xstats_clear(portid_t port_id)
455 {
456 	int ret;
457 
458 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
459 		print_valid_ports();
460 		return;
461 	}
462 
463 	ret = rte_eth_xstats_reset(port_id);
464 	if (ret != 0) {
465 		fprintf(stderr,
466 			"%s: Error: failed to reset xstats (port %u): %s\n",
467 			__func__, port_id, strerror(-ret));
468 		return;
469 	}
470 
471 	ret = rte_eth_stats_get(port_id, &ports[port_id].stats);
472 	if (ret != 0) {
473 		if (ret < 0)
474 			ret = -ret;
475 		fprintf(stderr, "%s: Error: failed to get stats (port %u): %s",
476 			__func__, port_id, strerror(ret));
477 		return;
478 	}
479 }
480 
481 static const char *
482 get_queue_state_name(uint8_t queue_state)
483 {
484 	if (queue_state == RTE_ETH_QUEUE_STATE_STOPPED)
485 		return "stopped";
486 	else if (queue_state == RTE_ETH_QUEUE_STATE_STARTED)
487 		return "started";
488 	else if (queue_state == RTE_ETH_QUEUE_STATE_HAIRPIN)
489 		return "hairpin";
490 	else
491 		return "unknown";
492 }
493 
494 void
495 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
496 {
497 	struct rte_eth_burst_mode mode;
498 	struct rte_eth_rxq_info qinfo;
499 	int32_t rc;
500 	static const char *info_border = "*********************";
501 
502 	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
503 	if (rc != 0) {
504 		fprintf(stderr,
505 			"Failed to retrieve information for port: %u, RX queue: %hu\nerror desc: %s(%d)\n",
506 			port_id, queue_id, strerror(-rc), rc);
507 		return;
508 	}
509 
510 	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
511 	       info_border, port_id, queue_id, info_border);
512 
513 	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
514 	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
515 	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
516 	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
517 	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
518 	printf("\nRX drop packets: %s",
519 		(qinfo.conf.rx_drop_en != 0) ? "on" : "off");
520 	printf("\nRX deferred start: %s",
521 		(qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
522 	printf("\nRX scattered packets: %s",
523 		(qinfo.scattered_rx != 0) ? "on" : "off");
524 	printf("\nRx queue state: %s", get_queue_state_name(qinfo.queue_state));
525 	if (qinfo.rx_buf_size != 0)
526 		printf("\nRX buffer size: %hu", qinfo.rx_buf_size);
527 	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
528 
529 	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
530 		printf("\nBurst mode: %s%s",
531 		       mode.info,
532 		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
533 				" (per queue)" : "");
534 
535 	printf("\n");
536 }
537 
538 void
539 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
540 {
541 	struct rte_eth_burst_mode mode;
542 	struct rte_eth_txq_info qinfo;
543 	int32_t rc;
544 	static const char *info_border = "*********************";
545 
546 	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
547 	if (rc != 0) {
548 		fprintf(stderr,
549 			"Failed to retrieve information for port: %u, TX queue: %hu\nerror desc: %s(%d)\n",
550 			port_id, queue_id, strerror(-rc), rc);
551 		return;
552 	}
553 
554 	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
555 	       info_border, port_id, queue_id, info_border);
556 
557 	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
558 	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
559 	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
560 	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
561 	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
562 	printf("\nTX deferred start: %s",
563 		(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
564 	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
565 	printf("\nTx queue state: %s", get_queue_state_name(qinfo.queue_state));
566 
567 	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
568 		printf("\nBurst mode: %s%s",
569 		       mode.info,
570 		       mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ?
571 				" (per queue)" : "");
572 
573 	printf("\n");
574 }
575 
576 static int bus_match_all(const struct rte_bus *bus, const void *data)
577 {
578 	RTE_SET_USED(bus);
579 	RTE_SET_USED(data);
580 	return 0;
581 }
582 
583 static void
584 device_infos_display_speeds(uint32_t speed_capa)
585 {
586 	printf("\n\tDevice speed capability:");
587 	if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
588 		printf(" Autonegotiate (all speeds)");
589 	if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
590 		printf(" Disable autonegotiate (fixed speed)  ");
591 	if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
592 		printf(" 10 Mbps half-duplex  ");
593 	if (speed_capa & RTE_ETH_LINK_SPEED_10M)
594 		printf(" 10 Mbps full-duplex  ");
595 	if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
596 		printf(" 100 Mbps half-duplex  ");
597 	if (speed_capa & RTE_ETH_LINK_SPEED_100M)
598 		printf(" 100 Mbps full-duplex  ");
599 	if (speed_capa & RTE_ETH_LINK_SPEED_1G)
600 		printf(" 1 Gbps  ");
601 	if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
602 		printf(" 2.5 Gbps  ");
603 	if (speed_capa & RTE_ETH_LINK_SPEED_5G)
604 		printf(" 5 Gbps  ");
605 	if (speed_capa & RTE_ETH_LINK_SPEED_10G)
606 		printf(" 10 Gbps  ");
607 	if (speed_capa & RTE_ETH_LINK_SPEED_20G)
608 		printf(" 20 Gbps  ");
609 	if (speed_capa & RTE_ETH_LINK_SPEED_25G)
610 		printf(" 25 Gbps  ");
611 	if (speed_capa & RTE_ETH_LINK_SPEED_40G)
612 		printf(" 40 Gbps  ");
613 	if (speed_capa & RTE_ETH_LINK_SPEED_50G)
614 		printf(" 50 Gbps  ");
615 	if (speed_capa & RTE_ETH_LINK_SPEED_56G)
616 		printf(" 56 Gbps  ");
617 	if (speed_capa & RTE_ETH_LINK_SPEED_100G)
618 		printf(" 100 Gbps  ");
619 	if (speed_capa & RTE_ETH_LINK_SPEED_200G)
620 		printf(" 200 Gbps  ");
621 	if (speed_capa & RTE_ETH_LINK_SPEED_400G)
622 		printf(" 400 Gbps  ");
623 }
624 
625 void
626 device_infos_display(const char *identifier)
627 {
628 	static const char *info_border = "*********************";
629 	struct rte_bus *start = NULL, *next;
630 	struct rte_dev_iterator dev_iter;
631 	char name[RTE_ETH_NAME_MAX_LEN];
632 	struct rte_ether_addr mac_addr;
633 	struct rte_device *dev;
634 	struct rte_devargs da;
635 	portid_t port_id;
636 	struct rte_eth_dev_info dev_info;
637 	char devstr[128];
638 
639 	memset(&da, 0, sizeof(da));
640 	if (!identifier)
641 		goto skip_parse;
642 
643 	if (rte_devargs_parsef(&da, "%s", identifier)) {
644 		fprintf(stderr, "cannot parse identifier\n");
645 		return;
646 	}
647 
648 skip_parse:
649 	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
650 
651 		start = next;
652 		if (identifier && da.bus != next)
653 			continue;
654 
655 		snprintf(devstr, sizeof(devstr), "bus=%s", rte_bus_name(next));
656 		RTE_DEV_FOREACH(dev, devstr, &dev_iter) {
657 
658 			if (rte_dev_driver(dev) == NULL)
659 				continue;
660 			/* Check for matching device if identifier is present */
661 			if (identifier &&
662 			    strncmp(da.name, rte_dev_name(dev), strlen(rte_dev_name(dev))))
663 				continue;
664 			printf("\n%s Infos for device %s %s\n",
665 			       info_border, rte_dev_name(dev), info_border);
666 			printf("Bus name: %s", rte_bus_name(rte_dev_bus(dev)));
667 			printf("\nBus information: %s",
668 				rte_dev_bus_info(dev) ? rte_dev_bus_info(dev) : "");
669 			printf("\nDriver name: %s", rte_driver_name(rte_dev_driver(dev)));
670 			printf("\nDevargs: %s",
671 			       rte_dev_devargs(dev) ? rte_dev_devargs(dev)->args : "");
672 			printf("\nConnect to socket: %d", rte_dev_numa_node(dev));
673 			printf("\n");
674 
675 			/* List ports with matching device name */
676 			RTE_ETH_FOREACH_DEV_OF(port_id, dev) {
677 				printf("\n\tPort id: %-2d", port_id);
678 				if (eth_macaddr_get_print_err(port_id,
679 							      &mac_addr) == 0)
680 					print_ethaddr("\n\tMAC address: ",
681 						      &mac_addr);
682 				rte_eth_dev_get_name_by_port(port_id, name);
683 				printf("\n\tDevice name: %s", name);
684 				if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
685 					device_infos_display_speeds(dev_info.speed_capa);
686 				printf("\n");
687 			}
688 		}
689 	};
690 	rte_devargs_reset(&da);
691 }
692 
693 static void
694 print_dev_capabilities(uint64_t capabilities)
695 {
696 	uint64_t single_capa;
697 	int begin;
698 	int end;
699 	int bit;
700 
701 	if (capabilities == 0)
702 		return;
703 
704 	begin = rte_ctz64(capabilities);
705 	end = sizeof(capabilities) * CHAR_BIT - rte_clz64(capabilities);
706 
707 	single_capa = 1ULL << begin;
708 	for (bit = begin; bit < end; bit++) {
709 		if (capabilities & single_capa)
710 			printf(" %s",
711 			       rte_eth_dev_capability_name(single_capa));
712 		single_capa <<= 1;
713 	}
714 }
715 
716 uint64_t
717 str_to_rsstypes(const char *str)
718 {
719 	uint16_t i;
720 
721 	for (i = 0; rss_type_table[i].str != NULL; i++) {
722 		if (strcmp(rss_type_table[i].str, str) == 0)
723 			return rss_type_table[i].rss_type;
724 	}
725 
726 	return 0;
727 }
728 
729 const char *
730 rsstypes_to_str(uint64_t rss_type)
731 {
732 	uint16_t i;
733 
734 	for (i = 0; rss_type_table[i].str != NULL; i++) {
735 		if (rss_type_table[i].rss_type == rss_type)
736 			return rss_type_table[i].str;
737 	}
738 
739 	return NULL;
740 }
741 
742 static void
743 rss_offload_types_display(uint64_t offload_types, uint16_t char_num_per_line)
744 {
745 	uint16_t user_defined_str_len;
746 	uint16_t total_len = 0;
747 	uint16_t str_len = 0;
748 	uint64_t rss_offload;
749 	uint16_t i;
750 
751 	for (i = 0; i < sizeof(offload_types) * CHAR_BIT; i++) {
752 		rss_offload = RTE_BIT64(i);
753 		if ((offload_types & rss_offload) != 0) {
754 			const char *p = rsstypes_to_str(rss_offload);
755 
756 			user_defined_str_len =
757 				strlen("user-defined-") + (i / 10 + 1);
758 			str_len = p ? strlen(p) : user_defined_str_len;
759 			str_len += 2; /* add two spaces */
760 			if (total_len + str_len >= char_num_per_line) {
761 				total_len = 0;
762 				printf("\n");
763 			}
764 
765 			if (p)
766 				printf("  %s", p);
767 			else
768 				printf("  user-defined-%u", i);
769 			total_len += str_len;
770 		}
771 	}
772 	printf("\n");
773 }
774 
775 void
776 port_infos_display(portid_t port_id)
777 {
778 	struct rte_port *port;
779 	struct rte_ether_addr mac_addr;
780 	struct rte_eth_link link;
781 	struct rte_eth_dev_info dev_info;
782 	int vlan_offload;
783 	struct rte_mempool * mp;
784 	static const char *info_border = "*********************";
785 	uint16_t mtu;
786 	char name[RTE_ETH_NAME_MAX_LEN];
787 	int ret;
788 	char fw_version[ETHDEV_FWVERS_LEN];
789 
790 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
791 		print_valid_ports();
792 		return;
793 	}
794 	port = &ports[port_id];
795 	ret = eth_link_get_nowait_print_err(port_id, &link);
796 	if (ret < 0)
797 		return;
798 
799 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
800 	if (ret != 0)
801 		return;
802 
803 	printf("\n%s Infos for port %-2d %s\n",
804 	       info_border, port_id, info_border);
805 	if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0)
806 		print_ethaddr("MAC address: ", &mac_addr);
807 	rte_eth_dev_get_name_by_port(port_id, name);
808 	printf("\nDevice name: %s", name);
809 	printf("\nDriver name: %s", dev_info.driver_name);
810 
811 	if (rte_eth_dev_fw_version_get(port_id, fw_version,
812 						ETHDEV_FWVERS_LEN) == 0)
813 		printf("\nFirmware-version: %s", fw_version);
814 	else
815 		printf("\nFirmware-version: %s", "not available");
816 
817 	if (rte_dev_devargs(dev_info.device) && rte_dev_devargs(dev_info.device)->args)
818 		printf("\nDevargs: %s", rte_dev_devargs(dev_info.device)->args);
819 	printf("\nConnect to socket: %u", port->socket_id);
820 
821 	if (port_numa[port_id] != NUMA_NO_CONFIG) {
822 		mp = mbuf_pool_find(port_numa[port_id], 0);
823 		if (mp)
824 			printf("\nmemory allocation on the socket: %d",
825 							port_numa[port_id]);
826 	} else
827 		printf("\nmemory allocation on the socket: %u",port->socket_id);
828 
829 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
830 	printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
831 	printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
832 	       ("full-duplex") : ("half-duplex"));
833 	printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
834 	       ("On") : ("Off"));
835 
836 	if (!rte_eth_dev_get_mtu(port_id, &mtu))
837 		printf("MTU: %u\n", mtu);
838 
839 	printf("Promiscuous mode: %s\n",
840 	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
841 	printf("Allmulticast mode: %s\n",
842 	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
843 	printf("Maximum number of MAC addresses: %u\n",
844 	       (unsigned int)(port->dev_info.max_mac_addrs));
845 	printf("Maximum number of MAC addresses of hash filtering: %u\n",
846 	       (unsigned int)(port->dev_info.max_hash_mac_addrs));
847 
848 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
849 	if (vlan_offload >= 0){
850 		printf("VLAN offload: \n");
851 		if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
852 			printf("  strip on, ");
853 		else
854 			printf("  strip off, ");
855 
856 		if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
857 			printf("filter on, ");
858 		else
859 			printf("filter off, ");
860 
861 		if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
862 			printf("extend on, ");
863 		else
864 			printf("extend off, ");
865 
866 		if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
867 			printf("qinq strip on\n");
868 		else
869 			printf("qinq strip off\n");
870 	}
871 
872 	if (dev_info.hash_key_size > 0)
873 		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
874 	if (dev_info.reta_size > 0)
875 		printf("Redirection table size: %u\n", dev_info.reta_size);
876 	if (!dev_info.flow_type_rss_offloads)
877 		printf("No RSS offload flow type is supported.\n");
878 	else {
879 		printf("Supported RSS offload flow types:\n");
880 		rss_offload_types_display(dev_info.flow_type_rss_offloads,
881 				TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
882 	}
883 
884 	printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize);
885 	if (dev_info.max_rx_bufsize != UINT32_MAX)
886 		printf("Maximum size of RX buffer: %u\n", dev_info.max_rx_bufsize);
887 	printf("Maximum configurable length of RX packet: %u\n",
888 		dev_info.max_rx_pktlen);
889 	printf("Maximum configurable size of LRO aggregated packet: %u\n",
890 		dev_info.max_lro_pkt_size);
891 	if (dev_info.max_vfs)
892 		printf("Maximum number of VFs: %u\n", dev_info.max_vfs);
893 	if (dev_info.max_vmdq_pools)
894 		printf("Maximum number of VMDq pools: %u\n",
895 			dev_info.max_vmdq_pools);
896 
897 	printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues);
898 	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
899 	printf("Max possible number of RXDs per queue: %hu\n",
900 		dev_info.rx_desc_lim.nb_max);
901 	printf("Min possible number of RXDs per queue: %hu\n",
902 		dev_info.rx_desc_lim.nb_min);
903 	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
904 
905 	printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues);
906 	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
907 	printf("Max possible number of TXDs per queue: %hu\n",
908 		dev_info.tx_desc_lim.nb_max);
909 	printf("Min possible number of TXDs per queue: %hu\n",
910 		dev_info.tx_desc_lim.nb_min);
911 	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
912 	printf("Max segment number per packet: %hu\n",
913 		dev_info.tx_desc_lim.nb_seg_max);
914 	printf("Max segment number per MTU/TSO: %hu\n",
915 		dev_info.tx_desc_lim.nb_mtu_seg_max);
916 
917 	printf("Device capabilities: 0x%"PRIx64"(", dev_info.dev_capa);
918 	print_dev_capabilities(dev_info.dev_capa);
919 	printf(" )\n");
920 	/* Show switch info only if valid switch domain and port id is set */
921 	if (dev_info.switch_info.domain_id !=
922 		RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
923 		if (dev_info.switch_info.name)
924 			printf("Switch name: %s\n", dev_info.switch_info.name);
925 
926 		printf("Switch domain Id: %u\n",
927 			dev_info.switch_info.domain_id);
928 		printf("Switch Port Id: %u\n",
929 			dev_info.switch_info.port_id);
930 		if ((dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) != 0)
931 			printf("Switch Rx domain: %u\n",
932 			       dev_info.switch_info.rx_domain);
933 	}
934 	printf("Device error handling mode: ");
935 	switch (dev_info.err_handle_mode) {
936 	case RTE_ETH_ERROR_HANDLE_MODE_NONE:
937 		printf("none\n");
938 		break;
939 	case RTE_ETH_ERROR_HANDLE_MODE_PASSIVE:
940 		printf("passive\n");
941 		break;
942 	case RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE:
943 		printf("proactive\n");
944 		break;
945 	default:
946 		printf("unknown\n");
947 		break;
948 	}
949 	printf("Device private info:\n");
950 	ret = rte_eth_dev_priv_dump(port_id, stdout);
951 	if (ret == -ENOTSUP)
952 		printf("  none\n");
953 	else if (ret < 0)
954 		fprintf(stderr, "  Failed to dump private info with error (%d): %s\n",
955 			ret, strerror(-ret));
956 }
957 
958 void
959 port_summary_header_display(void)
960 {
961 	uint16_t port_number;
962 
963 	port_number = rte_eth_dev_count_avail();
964 	printf("Number of available ports: %i\n", port_number);
965 	printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name",
966 			"Driver", "Status", "Link");
967 }
968 
969 void
970 port_summary_display(portid_t port_id)
971 {
972 	struct rte_ether_addr mac_addr;
973 	struct rte_eth_link link;
974 	struct rte_eth_dev_info dev_info;
975 	char name[RTE_ETH_NAME_MAX_LEN];
976 	int ret;
977 
978 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
979 		print_valid_ports();
980 		return;
981 	}
982 
983 	ret = eth_link_get_nowait_print_err(port_id, &link);
984 	if (ret < 0)
985 		return;
986 
987 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
988 	if (ret != 0)
989 		return;
990 
991 	rte_eth_dev_get_name_by_port(port_id, name);
992 	ret = eth_macaddr_get_print_err(port_id, &mac_addr);
993 	if (ret != 0)
994 		return;
995 
996 	printf("%-4d " RTE_ETHER_ADDR_PRT_FMT " %-12s %-14s %-8s %s\n",
997 		port_id, RTE_ETHER_ADDR_BYTES(&mac_addr), name,
998 		dev_info.driver_name, (link.link_status) ? ("up") : ("down"),
999 		rte_eth_link_speed_to_str(link.link_speed));
1000 }
1001 
1002 void
1003 port_eeprom_display(portid_t port_id)
1004 {
1005 	struct rte_dev_eeprom_info einfo;
1006 	int ret;
1007 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
1008 		print_valid_ports();
1009 		return;
1010 	}
1011 
1012 	int len_eeprom = rte_eth_dev_get_eeprom_length(port_id);
1013 	if (len_eeprom < 0) {
1014 		switch (len_eeprom) {
1015 		case -ENODEV:
1016 			fprintf(stderr, "port index %d invalid\n", port_id);
1017 			break;
1018 		case -ENOTSUP:
1019 			fprintf(stderr, "operation not supported by device\n");
1020 			break;
1021 		case -EIO:
1022 			fprintf(stderr, "device is removed\n");
1023 			break;
1024 		default:
1025 			fprintf(stderr, "Unable to get EEPROM: %d\n",
1026 				len_eeprom);
1027 			break;
1028 		}
1029 		return;
1030 	}
1031 
1032 	einfo.offset = 0;
1033 	einfo.length = len_eeprom;
1034 	einfo.data = calloc(1, len_eeprom);
1035 	if (!einfo.data) {
1036 		fprintf(stderr,
1037 			"Allocation of port %u eeprom data failed\n",
1038 			port_id);
1039 		return;
1040 	}
1041 
1042 	ret = rte_eth_dev_get_eeprom(port_id, &einfo);
1043 	if (ret != 0) {
1044 		switch (ret) {
1045 		case -ENODEV:
1046 			fprintf(stderr, "port index %d invalid\n", port_id);
1047 			break;
1048 		case -ENOTSUP:
1049 			fprintf(stderr, "operation not supported by device\n");
1050 			break;
1051 		case -EIO:
1052 			fprintf(stderr, "device is removed\n");
1053 			break;
1054 		default:
1055 			fprintf(stderr, "Unable to get EEPROM: %d\n", ret);
1056 			break;
1057 		}
1058 		free(einfo.data);
1059 		return;
1060 	}
1061 	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
1062 	printf("Finish -- Port: %d EEPROM length: %d bytes\n", port_id, len_eeprom);
1063 	free(einfo.data);
1064 }
1065 
1066 void
1067 port_module_eeprom_display(portid_t port_id)
1068 {
1069 	struct rte_eth_dev_module_info minfo;
1070 	struct rte_dev_eeprom_info einfo;
1071 	int ret;
1072 
1073 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
1074 		print_valid_ports();
1075 		return;
1076 	}
1077 
1078 
1079 	ret = rte_eth_dev_get_module_info(port_id, &minfo);
1080 	if (ret != 0) {
1081 		switch (ret) {
1082 		case -ENODEV:
1083 			fprintf(stderr, "port index %d invalid\n", port_id);
1084 			break;
1085 		case -ENOTSUP:
1086 			fprintf(stderr, "operation not supported by device\n");
1087 			break;
1088 		case -EIO:
1089 			fprintf(stderr, "device is removed\n");
1090 			break;
1091 		default:
1092 			fprintf(stderr, "Unable to get module EEPROM: %d\n",
1093 				ret);
1094 			break;
1095 		}
1096 		return;
1097 	}
1098 
1099 	einfo.offset = 0;
1100 	einfo.length = minfo.eeprom_len;
1101 	einfo.data = calloc(1, minfo.eeprom_len);
1102 	if (!einfo.data) {
1103 		fprintf(stderr,
1104 			"Allocation of port %u eeprom data failed\n",
1105 			port_id);
1106 		return;
1107 	}
1108 
1109 	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
1110 	if (ret != 0) {
1111 		switch (ret) {
1112 		case -ENODEV:
1113 			fprintf(stderr, "port index %d invalid\n", port_id);
1114 			break;
1115 		case -ENOTSUP:
1116 			fprintf(stderr, "operation not supported by device\n");
1117 			break;
1118 		case -EIO:
1119 			fprintf(stderr, "device is removed\n");
1120 			break;
1121 		default:
1122 			fprintf(stderr, "Unable to get module EEPROM: %d\n",
1123 				ret);
1124 			break;
1125 		}
1126 		free(einfo.data);
1127 		return;
1128 	}
1129 
1130 	rte_hexdump(stdout, "hexdump", einfo.data, einfo.length);
1131 	printf("Finish -- Port: %d MODULE EEPROM length: %d bytes\n", port_id, einfo.length);
1132 	free(einfo.data);
1133 }
1134 
1135 int
1136 port_id_is_invalid(portid_t port_id, enum print_warning warning)
1137 {
1138 	uint16_t pid;
1139 
1140 	if (port_id == (portid_t)RTE_PORT_ALL)
1141 		return 0;
1142 
1143 	RTE_ETH_FOREACH_DEV(pid)
1144 		if (port_id == pid)
1145 			return 0;
1146 
1147 	if (warning == ENABLED_WARN)
1148 		fprintf(stderr, "Invalid port %d\n", port_id);
1149 
1150 	return 1;
1151 }
1152 
1153 void print_valid_ports(void)
1154 {
1155 	portid_t pid;
1156 
1157 	printf("The valid ports array is [");
1158 	RTE_ETH_FOREACH_DEV(pid) {
1159 		printf(" %d", pid);
1160 	}
1161 	printf(" ]\n");
1162 }
1163 
1164 static int
1165 vlan_id_is_invalid(uint16_t vlan_id)
1166 {
1167 	if (vlan_id < 4096)
1168 		return 0;
1169 	fprintf(stderr, "Invalid vlan_id %d (must be < 4096)\n", vlan_id);
1170 	return 1;
1171 }
1172 
1173 static uint32_t
1174 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
1175 {
1176 	uint32_t overhead_len;
1177 
1178 	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
1179 		overhead_len = max_rx_pktlen - max_mtu;
1180 	else
1181 		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1182 
1183 	return overhead_len;
1184 }
1185 
1186 static int
1187 eth_dev_validate_mtu(uint16_t port_id, uint16_t mtu)
1188 {
1189 	struct rte_eth_dev_info dev_info;
1190 	uint32_t overhead_len;
1191 	uint32_t frame_size;
1192 	int ret;
1193 
1194 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1195 	if (ret != 0)
1196 		return ret;
1197 
1198 	if (mtu < dev_info.min_mtu) {
1199 		fprintf(stderr,
1200 			"MTU (%u) < device min MTU (%u) for port_id %u\n",
1201 			mtu, dev_info.min_mtu, port_id);
1202 		return -EINVAL;
1203 	}
1204 	if (mtu > dev_info.max_mtu) {
1205 		fprintf(stderr,
1206 			"MTU (%u) > device max MTU (%u) for port_id %u\n",
1207 			mtu, dev_info.max_mtu, port_id);
1208 		return -EINVAL;
1209 	}
1210 
1211 	overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1212 			dev_info.max_mtu);
1213 	frame_size = mtu + overhead_len;
1214 	if (frame_size > dev_info.max_rx_pktlen) {
1215 		fprintf(stderr,
1216 			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
1217 			frame_size, dev_info.max_rx_pktlen, port_id);
1218 		return -EINVAL;
1219 	}
1220 
1221 	return 0;
1222 }
1223 
1224 void
1225 port_mtu_set(portid_t port_id, uint16_t mtu)
1226 {
1227 	struct rte_port *port = &ports[port_id];
1228 	int diag;
1229 
1230 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1231 		return;
1232 
1233 	diag = eth_dev_validate_mtu(port_id, mtu);
1234 	if (diag != 0)
1235 		return;
1236 
1237 	if (port->need_reconfig == 0) {
1238 		diag = rte_eth_dev_set_mtu(port_id, mtu);
1239 		if (diag != 0) {
1240 			fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
1241 			return;
1242 		}
1243 	}
1244 
1245 	port->dev_conf.rxmode.mtu = mtu;
1246 }
1247 
1248 /* Generic flow management functions. */
1249 
1250 static struct port_flow_tunnel *
1251 port_flow_locate_tunnel_id(struct rte_port *port, uint32_t port_tunnel_id)
1252 {
1253 	struct port_flow_tunnel *flow_tunnel;
1254 
1255 	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1256 		if (flow_tunnel->id == port_tunnel_id)
1257 			goto out;
1258 	}
1259 	flow_tunnel = NULL;
1260 
1261 out:
1262 	return flow_tunnel;
1263 }
1264 
1265 const char *
1266 port_flow_tunnel_type(struct rte_flow_tunnel *tunnel)
1267 {
1268 	const char *type;
1269 	switch (tunnel->type) {
1270 	default:
1271 		type = "unknown";
1272 		break;
1273 	case RTE_FLOW_ITEM_TYPE_VXLAN:
1274 		type = "vxlan";
1275 		break;
1276 	case RTE_FLOW_ITEM_TYPE_GRE:
1277 		type = "gre";
1278 		break;
1279 	case RTE_FLOW_ITEM_TYPE_NVGRE:
1280 		type = "nvgre";
1281 		break;
1282 	case RTE_FLOW_ITEM_TYPE_GENEVE:
1283 		type = "geneve";
1284 		break;
1285 	}
1286 
1287 	return type;
1288 }
1289 
1290 struct port_flow_tunnel *
1291 port_flow_locate_tunnel(uint16_t port_id, struct rte_flow_tunnel *tun)
1292 {
1293 	struct rte_port *port = &ports[port_id];
1294 	struct port_flow_tunnel *flow_tunnel;
1295 
1296 	LIST_FOREACH(flow_tunnel, &port->flow_tunnel_list, chain) {
1297 		if (!memcmp(&flow_tunnel->tunnel, tun, sizeof(*tun)))
1298 			goto out;
1299 	}
1300 	flow_tunnel = NULL;
1301 
1302 out:
1303 	return flow_tunnel;
1304 }
1305 
1306 void port_flow_tunnel_list(portid_t port_id)
1307 {
1308 	struct rte_port *port = &ports[port_id];
1309 	struct port_flow_tunnel *flt;
1310 
1311 	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1312 		printf("port %u tunnel #%u type=%s",
1313 			port_id, flt->id, port_flow_tunnel_type(&flt->tunnel));
1314 		if (flt->tunnel.tun_id)
1315 			printf(" id=%" PRIu64, flt->tunnel.tun_id);
1316 		printf("\n");
1317 	}
1318 }
1319 
1320 void port_flow_tunnel_destroy(portid_t port_id, uint32_t tunnel_id)
1321 {
1322 	struct rte_port *port = &ports[port_id];
1323 	struct port_flow_tunnel *flt;
1324 
1325 	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1326 		if (flt->id == tunnel_id)
1327 			break;
1328 	}
1329 	if (flt) {
1330 		LIST_REMOVE(flt, chain);
1331 		free(flt);
1332 		printf("port %u: flow tunnel #%u destroyed\n",
1333 			port_id, tunnel_id);
1334 	}
1335 }
1336 
1337 void port_flow_tunnel_create(portid_t port_id, const struct tunnel_ops *ops)
1338 {
1339 	struct rte_port *port = &ports[port_id];
1340 	enum rte_flow_item_type	type;
1341 	struct port_flow_tunnel *flt;
1342 
1343 	if (!strcmp(ops->type, "vxlan"))
1344 		type = RTE_FLOW_ITEM_TYPE_VXLAN;
1345 	else if (!strcmp(ops->type, "gre"))
1346 		type = RTE_FLOW_ITEM_TYPE_GRE;
1347 	else if (!strcmp(ops->type, "nvgre"))
1348 		type = RTE_FLOW_ITEM_TYPE_NVGRE;
1349 	else if (!strcmp(ops->type, "geneve"))
1350 		type = RTE_FLOW_ITEM_TYPE_GENEVE;
1351 	else {
1352 		fprintf(stderr, "cannot offload \"%s\" tunnel type\n",
1353 			ops->type);
1354 		return;
1355 	}
1356 	LIST_FOREACH(flt, &port->flow_tunnel_list, chain) {
1357 		if (flt->tunnel.type == type)
1358 			break;
1359 	}
1360 	if (!flt) {
1361 		flt = calloc(1, sizeof(*flt));
1362 		if (!flt) {
1363 			fprintf(stderr, "failed to allocate port flt object\n");
1364 			return;
1365 		}
1366 		flt->tunnel.type = type;
1367 		flt->id = LIST_EMPTY(&port->flow_tunnel_list) ? 1 :
1368 				  LIST_FIRST(&port->flow_tunnel_list)->id + 1;
1369 		LIST_INSERT_HEAD(&port->flow_tunnel_list, flt, chain);
1370 	}
1371 	printf("port %d: flow tunnel #%u type %s\n",
1372 		port_id, flt->id, ops->type);
1373 }
1374 
1375 /** Generate a port_flow entry from attributes/pattern/actions. */
1376 static struct port_flow *
1377 port_flow_new(const struct rte_flow_attr *attr,
1378 	      const struct rte_flow_item *pattern,
1379 	      const struct rte_flow_action *actions,
1380 	      struct rte_flow_error *error)
1381 {
1382 	const struct rte_flow_conv_rule rule = {
1383 		.attr_ro = attr,
1384 		.pattern_ro = pattern,
1385 		.actions_ro = actions,
1386 	};
1387 	struct port_flow *pf;
1388 	int ret;
1389 
1390 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
1391 	if (ret < 0)
1392 		return NULL;
1393 	pf = calloc(1, offsetof(struct port_flow, rule) + ret);
1394 	if (!pf) {
1395 		rte_flow_error_set
1396 			(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1397 			 "calloc() failed");
1398 		return NULL;
1399 	}
1400 	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule,
1401 			  error) >= 0)
1402 		return pf;
1403 	free(pf);
1404 	return NULL;
1405 }
1406 
1407 static struct port_flow *
1408 port_flow_locate(struct port_flow *flows_list, uint32_t flow_id)
1409 {
1410 	struct port_flow *pf = flows_list;
1411 
1412 	while (pf) {
1413 		if (pf->id == flow_id)
1414 			break;
1415 		pf = pf->next;
1416 	}
1417 	return pf;
1418 }
1419 
1420 /** Print a message out of a flow error. */
1421 static int
1422 port_flow_complain(struct rte_flow_error *error)
1423 {
1424 	static const char *const errstrlist[] = {
1425 		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1426 		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1427 		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1428 		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1429 		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1430 		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1431 		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1432 		[RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
1433 		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1434 		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1435 		[RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
1436 		[RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
1437 		[RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
1438 		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1439 		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1440 		[RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
1441 		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1442 	};
1443 	const char *errstr;
1444 	char buf[32];
1445 	int err = rte_errno;
1446 
1447 	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1448 	    !errstrlist[error->type])
1449 		errstr = "unknown type";
1450 	else
1451 		errstr = errstrlist[error->type];
1452 	fprintf(stderr, "%s(): Caught PMD error type %d (%s): %s%s: %s\n",
1453 		__func__, error->type, errstr,
1454 		error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1455 					 error->cause), buf) : "",
1456 		error->message ? error->message : "(no stated reason)",
1457 		rte_strerror(err));
1458 
1459 	switch (error->type) {
1460 	case RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER:
1461 		fprintf(stderr, "The status suggests the use of \"transfer\" "
1462 				"as the possible cause of the failure. Make "
1463 				"sure that the flow in question and its "
1464 				"indirect components (if any) are managed "
1465 				"via \"transfer\" proxy port. Use command "
1466 				"\"show port (port_id) flow transfer proxy\" "
1467 				"to figure out the proxy port ID\n");
1468 		break;
1469 	default:
1470 		break;
1471 	}
1472 
1473 	return -err;
1474 }
1475 
1476 static void
1477 rss_types_display(uint64_t rss_types, uint16_t char_num_per_line)
1478 {
1479 	uint16_t total_len = 0;
1480 	uint16_t str_len;
1481 	uint16_t i;
1482 
1483 	if (rss_types == 0)
1484 		return;
1485 
1486 	for (i = 0; rss_type_table[i].str; i++) {
1487 		if (rss_type_table[i].rss_type == 0)
1488 			continue;
1489 
1490 		if ((rss_types & rss_type_table[i].rss_type) ==
1491 					rss_type_table[i].rss_type) {
1492 			/* Contain two spaces */
1493 			str_len = strlen(rss_type_table[i].str) + 2;
1494 			if (total_len + str_len > char_num_per_line) {
1495 				printf("\n");
1496 				total_len = 0;
1497 			}
1498 			printf("  %s", rss_type_table[i].str);
1499 			total_len += str_len;
1500 		}
1501 	}
1502 	printf("\n");
1503 }
1504 
1505 static void
1506 rss_config_display(struct rte_flow_action_rss *rss_conf)
1507 {
1508 	uint8_t i;
1509 
1510 	if (rss_conf == NULL) {
1511 		fprintf(stderr, "Invalid rule\n");
1512 		return;
1513 	}
1514 
1515 	printf("RSS:\n"
1516 	       " queues:");
1517 	if (rss_conf->queue_num == 0)
1518 		printf(" none");
1519 	for (i = 0; i < rss_conf->queue_num; i++)
1520 		printf(" %d", rss_conf->queue[i]);
1521 	printf("\n");
1522 
1523 	printf(" function: %s\n", rte_eth_dev_rss_algo_name(rss_conf->func));
1524 
1525 	printf(" RSS key:\n");
1526 	if (rss_conf->key_len == 0) {
1527 		printf("  none");
1528 	} else {
1529 		printf("  key_len: %u\n", rss_conf->key_len);
1530 		printf("  key: ");
1531 		if (rss_conf->key == NULL) {
1532 			printf("none");
1533 		} else {
1534 			for (i = 0; i < rss_conf->key_len; i++)
1535 				printf("%02X", rss_conf->key[i]);
1536 		}
1537 	}
1538 	printf("\n");
1539 
1540 	printf(" types:\n");
1541 	if (rss_conf->types == 0) {
1542 		printf("  none\n");
1543 		return;
1544 	}
1545 	rss_types_display(rss_conf->types, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
1546 }
1547 
1548 static struct port_indirect_action *
1549 action_get_by_id(portid_t port_id, uint32_t id)
1550 {
1551 	struct rte_port *port;
1552 	struct port_indirect_action **ppia;
1553 	struct port_indirect_action *pia = NULL;
1554 
1555 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1556 	    port_id == (portid_t)RTE_PORT_ALL)
1557 		return NULL;
1558 	port = &ports[port_id];
1559 	ppia = &port->actions_list;
1560 	while (*ppia) {
1561 		if ((*ppia)->id == id) {
1562 			pia = *ppia;
1563 			break;
1564 		}
1565 		ppia = &(*ppia)->next;
1566 	}
1567 	if (!pia)
1568 		fprintf(stderr,
1569 			"Failed to find indirect action #%u on port %u\n",
1570 			id, port_id);
1571 	return pia;
1572 }
1573 
1574 static int
1575 action_alloc(portid_t port_id, uint32_t id,
1576 	     struct port_indirect_action **action)
1577 {
1578 	struct rte_port *port;
1579 	struct port_indirect_action **ppia;
1580 	struct port_indirect_action *pia = NULL;
1581 
1582 	*action = NULL;
1583 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1584 	    port_id == (portid_t)RTE_PORT_ALL)
1585 		return -EINVAL;
1586 	port = &ports[port_id];
1587 	if (id == UINT32_MAX) {
1588 		/* taking first available ID */
1589 		if (port->actions_list) {
1590 			if (port->actions_list->id == UINT32_MAX - 1) {
1591 				fprintf(stderr,
1592 					"Highest indirect action ID is already assigned, delete it first\n");
1593 				return -ENOMEM;
1594 			}
1595 			id = port->actions_list->id + 1;
1596 		} else {
1597 			id = 0;
1598 		}
1599 	}
1600 	pia = calloc(1, sizeof(*pia));
1601 	if (!pia) {
1602 		fprintf(stderr,
1603 			"Allocation of port %u indirect action failed\n",
1604 			port_id);
1605 		return -ENOMEM;
1606 	}
1607 	ppia = &port->actions_list;
1608 	while (*ppia && (*ppia)->id > id)
1609 		ppia = &(*ppia)->next;
1610 	if (*ppia && (*ppia)->id == id) {
1611 		fprintf(stderr,
1612 			"Indirect action #%u is already assigned, delete it first\n",
1613 			id);
1614 		free(pia);
1615 		return -EINVAL;
1616 	}
1617 	pia->next = *ppia;
1618 	pia->id = id;
1619 	*ppia = pia;
1620 	*action = pia;
1621 	return 0;
1622 }
1623 
1624 static int
1625 template_alloc(uint32_t id, struct port_template **template,
1626 	       struct port_template **list)
1627 {
1628 	struct port_template *lst = *list;
1629 	struct port_template **ppt;
1630 	struct port_template *pt = NULL;
1631 
1632 	*template = NULL;
1633 	if (id == UINT32_MAX) {
1634 		/* taking first available ID */
1635 		if (lst) {
1636 			if (lst->id == UINT32_MAX - 1) {
1637 				printf("Highest template ID is already"
1638 				" assigned, delete it first\n");
1639 				return -ENOMEM;
1640 			}
1641 			id = lst->id + 1;
1642 		} else {
1643 			id = 0;
1644 		}
1645 	}
1646 	pt = calloc(1, sizeof(*pt));
1647 	if (!pt) {
1648 		printf("Allocation of port template failed\n");
1649 		return -ENOMEM;
1650 	}
1651 	ppt = list;
1652 	while (*ppt && (*ppt)->id > id)
1653 		ppt = &(*ppt)->next;
1654 	if (*ppt && (*ppt)->id == id) {
1655 		printf("Template #%u is already assigned,"
1656 			" delete it first\n", id);
1657 		free(pt);
1658 		return -EINVAL;
1659 	}
1660 	pt->next = *ppt;
1661 	pt->id = id;
1662 	*ppt = pt;
1663 	*template = pt;
1664 	return 0;
1665 }
1666 
1667 static int
1668 table_alloc(uint32_t id, struct port_table **table,
1669 	    struct port_table **list)
1670 {
1671 	struct port_table *lst = *list;
1672 	struct port_table **ppt;
1673 	struct port_table *pt = NULL;
1674 
1675 	*table = NULL;
1676 	if (id == UINT32_MAX) {
1677 		/* taking first available ID */
1678 		if (lst) {
1679 			if (lst->id == UINT32_MAX - 1) {
1680 				printf("Highest table ID is already"
1681 				" assigned, delete it first\n");
1682 				return -ENOMEM;
1683 			}
1684 			id = lst->id + 1;
1685 		} else {
1686 			id = 0;
1687 		}
1688 	}
1689 	pt = calloc(1, sizeof(*pt));
1690 	if (!pt) {
1691 		printf("Allocation of table failed\n");
1692 		return -ENOMEM;
1693 	}
1694 	ppt = list;
1695 	while (*ppt && (*ppt)->id > id)
1696 		ppt = &(*ppt)->next;
1697 	if (*ppt && (*ppt)->id == id) {
1698 		printf("Table #%u is already assigned,"
1699 			" delete it first\n", id);
1700 		free(pt);
1701 		return -EINVAL;
1702 	}
1703 	pt->next = *ppt;
1704 	pt->id = id;
1705 	*ppt = pt;
1706 	*table = pt;
1707 	return 0;
1708 }
1709 
1710 static struct port_table *
1711 port_table_locate(struct port_table *tables_list, uint32_t table_id)
1712 {
1713 	struct port_table *pt = tables_list;
1714 
1715 	while (pt) {
1716 		if (pt->id == table_id)
1717 			break;
1718 		pt = pt->next;
1719 	}
1720 	return pt;
1721 }
1722 
1723 /** Get info about flow management resources. */
1724 int
1725 port_flow_get_info(portid_t port_id)
1726 {
1727 	struct rte_flow_port_info port_info;
1728 	struct rte_flow_queue_info queue_info;
1729 	struct rte_flow_error error;
1730 
1731 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1732 	    port_id == (portid_t)RTE_PORT_ALL)
1733 		return -EINVAL;
1734 	/* Poisoning to make sure PMDs update it in case of error. */
1735 	memset(&error, 0x99, sizeof(error));
1736 	memset(&port_info, 0, sizeof(port_info));
1737 	memset(&queue_info, 0, sizeof(queue_info));
1738 	if (rte_flow_info_get(port_id, &port_info, &queue_info, &error))
1739 		return port_flow_complain(&error);
1740 	printf("Flow engine resources on port %u:\n"
1741 	       "Number of queues: %d\n"
1742 		   "Size of queues: %d\n"
1743 	       "Number of counters: %d\n"
1744 	       "Number of aging objects: %d\n"
1745 	       "Number of meter actions: %d\n",
1746 	       port_id, port_info.max_nb_queues,
1747 		   queue_info.max_size,
1748 	       port_info.max_nb_counters,
1749 	       port_info.max_nb_aging_objects,
1750 	       port_info.max_nb_meters);
1751 	return 0;
1752 }
1753 
1754 /** Configure flow management resources. */
1755 int
1756 port_flow_configure(portid_t port_id,
1757 	const struct rte_flow_port_attr *port_attr,
1758 	uint16_t nb_queue,
1759 	const struct rte_flow_queue_attr *queue_attr)
1760 {
1761 	struct rte_port *port;
1762 	struct rte_flow_error error;
1763 	const struct rte_flow_queue_attr *attr_list[nb_queue];
1764 	int std_queue;
1765 
1766 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1767 	    port_id == (portid_t)RTE_PORT_ALL)
1768 		return -EINVAL;
1769 	port = &ports[port_id];
1770 	port->queue_nb = nb_queue;
1771 	port->queue_sz = queue_attr->size;
1772 	for (std_queue = 0; std_queue < nb_queue; std_queue++)
1773 		attr_list[std_queue] = queue_attr;
1774 	/* Poisoning to make sure PMDs update it in case of error. */
1775 	memset(&error, 0x66, sizeof(error));
1776 	if (rte_flow_configure(port_id, port_attr, nb_queue, attr_list, &error))
1777 		return port_flow_complain(&error);
1778 	printf("Configure flows on port %u: "
1779 	       "number of queues %d with %d elements\n",
1780 	       port_id, nb_queue, queue_attr->size);
1781 	return 0;
1782 }
1783 
1784 static int
1785 action_handle_create(portid_t port_id,
1786 		     struct port_indirect_action *pia,
1787 		     const struct rte_flow_indir_action_conf *conf,
1788 		     const struct rte_flow_action *action,
1789 		     struct rte_flow_error *error)
1790 {
1791 	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
1792 		struct rte_flow_action_age *age =
1793 			(struct rte_flow_action_age *)(uintptr_t)(action->conf);
1794 
1795 		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
1796 		age->context = &pia->age_type;
1797 	} else if (action->type == RTE_FLOW_ACTION_TYPE_CONNTRACK) {
1798 		struct rte_flow_action_conntrack *ct =
1799 			(struct rte_flow_action_conntrack *)(uintptr_t)(action->conf);
1800 
1801 		memcpy(ct, &conntrack_context, sizeof(*ct));
1802 	}
1803 	pia->type = action->type;
1804 	pia->handle = rte_flow_action_handle_create(port_id, conf, action,
1805 						    error);
1806 	return pia->handle ? 0 : -1;
1807 }
1808 
1809 static int
1810 action_list_handle_create(portid_t port_id,
1811 			  struct port_indirect_action *pia,
1812 			  const struct rte_flow_indir_action_conf *conf,
1813 			  const struct rte_flow_action *actions,
1814 			  struct rte_flow_error *error)
1815 {
1816 	pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST;
1817 	pia->list_handle =
1818 		rte_flow_action_list_handle_create(port_id, conf,
1819 						   actions, error);
1820 	return pia->list_handle ? 0 : -1;
1821 }
1822 /** Create indirect action */
1823 int
1824 port_action_handle_create(portid_t port_id, uint32_t id, bool indirect_list,
1825 			  const struct rte_flow_indir_action_conf *conf,
1826 			  const struct rte_flow_action *action)
1827 {
1828 	struct port_indirect_action *pia;
1829 	int ret;
1830 	struct rte_flow_error error;
1831 
1832 	ret = action_alloc(port_id, id, &pia);
1833 	if (ret)
1834 		return ret;
1835 	/* Poisoning to make sure PMDs update it in case of error. */
1836 	memset(&error, 0x22, sizeof(error));
1837 	ret = indirect_list ?
1838 	       action_list_handle_create(port_id, pia, conf, action, &error) :
1839 	       action_handle_create(port_id, pia, conf, action, &error);
1840 	if (ret) {
1841 		uint32_t destroy_id = pia->id;
1842 		port_action_handle_destroy(port_id, 1, &destroy_id);
1843 		return port_flow_complain(&error);
1844 	}
1845 	printf("Indirect action #%u created\n", pia->id);
1846 	return 0;
1847 }
1848 
1849 /** Destroy indirect action */
1850 int
1851 port_action_handle_destroy(portid_t port_id,
1852 			   uint32_t n,
1853 			   const uint32_t *actions)
1854 {
1855 	struct rte_port *port;
1856 	struct port_indirect_action **tmp;
1857 	int ret = 0;
1858 
1859 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1860 	    port_id == (portid_t)RTE_PORT_ALL)
1861 		return -EINVAL;
1862 	port = &ports[port_id];
1863 	tmp = &port->actions_list;
1864 	while (*tmp) {
1865 		uint32_t i;
1866 
1867 		for (i = 0; i != n; ++i) {
1868 			struct rte_flow_error error;
1869 			struct port_indirect_action *pia = *tmp;
1870 
1871 			if (actions[i] != pia->id)
1872 				continue;
1873 			/*
1874 			 * Poisoning to make sure PMDs update it in case
1875 			 * of error.
1876 			 */
1877 			memset(&error, 0x33, sizeof(error));
1878 
1879 			if (pia->handle) {
1880 				ret = pia->type ==
1881 				      RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ?
1882 					rte_flow_action_list_handle_destroy
1883 					(port_id, pia->list_handle, &error) :
1884 					rte_flow_action_handle_destroy
1885 					(port_id, pia->handle, &error);
1886 				if (ret) {
1887 					ret = port_flow_complain(&error);
1888 					continue;
1889 				}
1890 			}
1891 			*tmp = pia->next;
1892 			printf("Indirect action #%u destroyed\n", pia->id);
1893 			free(pia);
1894 			break;
1895 		}
1896 		if (i == n)
1897 			tmp = &(*tmp)->next;
1898 	}
1899 	return ret;
1900 }
1901 
1902 int
1903 port_action_handle_flush(portid_t port_id)
1904 {
1905 	struct rte_port *port;
1906 	struct port_indirect_action **tmp;
1907 	int ret = 0;
1908 
1909 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
1910 	    port_id == (portid_t)RTE_PORT_ALL)
1911 		return -EINVAL;
1912 	port = &ports[port_id];
1913 	tmp = &port->actions_list;
1914 	while (*tmp != NULL) {
1915 		struct rte_flow_error error;
1916 		struct port_indirect_action *pia = *tmp;
1917 
1918 		/* Poisoning to make sure PMDs update it in case of error. */
1919 		memset(&error, 0x44, sizeof(error));
1920 		if (pia->handle != NULL) {
1921 			ret = pia->type ==
1922 			      RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ?
1923 			      rte_flow_action_list_handle_destroy
1924 				      (port_id, pia->list_handle, &error) :
1925 			      rte_flow_action_handle_destroy
1926 				      (port_id, pia->handle, &error);
1927 			if (ret) {
1928 				printf("Indirect action #%u not destroyed\n",
1929 				       pia->id);
1930 				ret = port_flow_complain(&error);
1931 			}
1932 			tmp = &pia->next;
1933 		} else {
1934 			*tmp = pia->next;
1935 			free(pia);
1936 		}
1937 	}
1938 	return ret;
1939 }
1940 
1941 /** Get indirect action by port + id */
1942 struct rte_flow_action_handle *
1943 port_action_handle_get_by_id(portid_t port_id, uint32_t id)
1944 {
1945 
1946 	struct port_indirect_action *pia = action_get_by_id(port_id, id);
1947 
1948 	return (pia) ? pia->handle : NULL;
1949 }
1950 
1951 /** Update indirect action */
1952 int
1953 port_action_handle_update(portid_t port_id, uint32_t id,
1954 			  const struct rte_flow_action *action)
1955 {
1956 	struct rte_flow_error error;
1957 	struct rte_flow_action_handle *action_handle;
1958 	struct port_indirect_action *pia;
1959 	struct rte_flow_update_meter_mark mtr_update;
1960 	const void *update;
1961 
1962 	action_handle = port_action_handle_get_by_id(port_id, id);
1963 	if (!action_handle)
1964 		return -EINVAL;
1965 	pia = action_get_by_id(port_id, id);
1966 	if (!pia)
1967 		return -EINVAL;
1968 	switch (pia->type) {
1969 	case RTE_FLOW_ACTION_TYPE_AGE:
1970 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
1971 		update = action->conf;
1972 		break;
1973 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
1974 		memcpy(&mtr_update.meter_mark, action->conf,
1975 		       sizeof(struct rte_flow_action_meter_mark));
1976 		if (mtr_update.meter_mark.profile)
1977 			mtr_update.profile_valid = 1;
1978 		if (mtr_update.meter_mark.policy)
1979 			mtr_update.policy_valid = 1;
1980 		mtr_update.color_mode_valid = 1;
1981 		mtr_update.state_valid = 1;
1982 		update = &mtr_update;
1983 		break;
1984 	default:
1985 		update = action;
1986 		break;
1987 	}
1988 	if (rte_flow_action_handle_update(port_id, action_handle, update,
1989 					  &error)) {
1990 		return port_flow_complain(&error);
1991 	}
1992 	printf("Indirect action #%u updated\n", id);
1993 	return 0;
1994 }
1995 
1996 static void
1997 port_action_handle_query_dump(portid_t port_id,
1998 			      const struct port_indirect_action *pia,
1999 			      union port_action_query *query)
2000 {
2001 	if (!pia || !query)
2002 		return;
2003 	switch (pia->type) {
2004 	case RTE_FLOW_ACTION_TYPE_AGE:
2005 		printf("Indirect AGE action:\n"
2006 		       " aged: %u\n"
2007 		       " sec_since_last_hit_valid: %u\n"
2008 		       " sec_since_last_hit: %" PRIu32 "\n",
2009 		       query->age.aged,
2010 		       query->age.sec_since_last_hit_valid,
2011 		       query->age.sec_since_last_hit);
2012 		break;
2013 	case RTE_FLOW_ACTION_TYPE_COUNT:
2014 		printf("Indirect COUNT action:\n"
2015 		       " hits_set: %u\n"
2016 		       " bytes_set: %u\n"
2017 		       " hits: %" PRIu64 "\n"
2018 		       " bytes: %" PRIu64 "\n",
2019 		       query->count.hits_set,
2020 		       query->count.bytes_set,
2021 		       query->count.hits,
2022 		       query->count.bytes);
2023 		break;
2024 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
2025 		printf("Conntrack Context:\n"
2026 		       "  Peer: %u, Flow dir: %s, Enable: %u\n"
2027 		       "  Live: %u, SACK: %u, CACK: %u\n"
2028 		       "  Packet dir: %s, Liberal: %u, State: %u\n"
2029 		       "  Factor: %u, Retrans: %u, TCP flags: %u\n"
2030 		       "  Last Seq: %u, Last ACK: %u\n"
2031 		       "  Last Win: %u, Last End: %u\n",
2032 		       query->ct.peer_port,
2033 		       query->ct.is_original_dir ? "Original" : "Reply",
2034 		       query->ct.enable, query->ct.live_connection,
2035 		       query->ct.selective_ack, query->ct.challenge_ack_passed,
2036 		       query->ct.last_direction ? "Original" : "Reply",
2037 		       query->ct.liberal_mode, query->ct.state,
2038 		       query->ct.max_ack_window, query->ct.retransmission_limit,
2039 		       query->ct.last_index, query->ct.last_seq,
2040 		       query->ct.last_ack, query->ct.last_window,
2041 		       query->ct.last_end);
2042 		printf("  Original Dir:\n"
2043 		       "    scale: %u, fin: %u, ack seen: %u\n"
2044 		       " unacked data: %u\n    Sent end: %u,"
2045 		       "    Reply end: %u, Max win: %u, Max ACK: %u\n",
2046 		       query->ct.original_dir.scale,
2047 		       query->ct.original_dir.close_initiated,
2048 		       query->ct.original_dir.last_ack_seen,
2049 		       query->ct.original_dir.data_unacked,
2050 		       query->ct.original_dir.sent_end,
2051 		       query->ct.original_dir.reply_end,
2052 		       query->ct.original_dir.max_win,
2053 		       query->ct.original_dir.max_ack);
2054 		printf("  Reply Dir:\n"
2055 		       "    scale: %u, fin: %u, ack seen: %u\n"
2056 		       " unacked data: %u\n    Sent end: %u,"
2057 		       "    Reply end: %u, Max win: %u, Max ACK: %u\n",
2058 		       query->ct.reply_dir.scale,
2059 		       query->ct.reply_dir.close_initiated,
2060 		       query->ct.reply_dir.last_ack_seen,
2061 		       query->ct.reply_dir.data_unacked,
2062 		       query->ct.reply_dir.sent_end,
2063 		       query->ct.reply_dir.reply_end,
2064 		       query->ct.reply_dir.max_win,
2065 		       query->ct.reply_dir.max_ack);
2066 		break;
2067 	case RTE_FLOW_ACTION_TYPE_QUOTA:
2068 		printf("Indirect QUOTA action %u\n"
2069 		       " unused quota: %" PRId64 "\n",
2070 		       pia->id, query->quota.quota);
2071 		break;
2072 	default:
2073 		printf("port-%u: indirect action %u (type: %d) doesn't support query\n",
2074 		       pia->type, pia->id, port_id);
2075 		break;
2076 	}
2077 
2078 }
2079 
2080 void
2081 port_action_handle_query_update(portid_t port_id, uint32_t id,
2082 				enum rte_flow_query_update_mode qu_mode,
2083 				const struct rte_flow_action *action)
2084 {
2085 	int ret;
2086 	struct rte_flow_error error;
2087 	struct port_indirect_action *pia;
2088 	union port_action_query query;
2089 
2090 	pia = action_get_by_id(port_id, id);
2091 	if (!pia || !pia->handle)
2092 		return;
2093 	ret = rte_flow_action_handle_query_update(port_id, pia->handle, action,
2094 						  &query, qu_mode, &error);
2095 	if (ret)
2096 		port_flow_complain(&error);
2097 	else
2098 		port_action_handle_query_dump(port_id, pia, &query);
2099 
2100 }
2101 
2102 int
2103 port_action_handle_query(portid_t port_id, uint32_t id)
2104 {
2105 	struct rte_flow_error error;
2106 	struct port_indirect_action *pia;
2107 	union port_action_query query;
2108 
2109 	pia = action_get_by_id(port_id, id);
2110 	if (!pia)
2111 		return -EINVAL;
2112 	switch (pia->type) {
2113 	case RTE_FLOW_ACTION_TYPE_AGE:
2114 	case RTE_FLOW_ACTION_TYPE_COUNT:
2115 	case RTE_FLOW_ACTION_TYPE_QUOTA:
2116 		break;
2117 	default:
2118 		fprintf(stderr,
2119 			"Indirect action %u (type: %d) on port %u doesn't support query\n",
2120 			id, pia->type, port_id);
2121 		return -ENOTSUP;
2122 	}
2123 	/* Poisoning to make sure PMDs update it in case of error. */
2124 	memset(&error, 0x55, sizeof(error));
2125 	memset(&query, 0, sizeof(query));
2126 	if (rte_flow_action_handle_query(port_id, pia->handle, &query, &error))
2127 		return port_flow_complain(&error);
2128 	port_action_handle_query_dump(port_id, pia, &query);
2129 	return 0;
2130 }
2131 
2132 static struct port_flow_tunnel *
2133 port_flow_tunnel_offload_cmd_prep(portid_t port_id,
2134 				  const struct rte_flow_item *pattern,
2135 				  const struct rte_flow_action *actions,
2136 				  const struct tunnel_ops *tunnel_ops)
2137 {
2138 	int ret;
2139 	struct rte_port *port;
2140 	struct port_flow_tunnel *pft;
2141 	struct rte_flow_error error;
2142 
2143 	port = &ports[port_id];
2144 	pft = port_flow_locate_tunnel_id(port, tunnel_ops->id);
2145 	if (!pft) {
2146 		fprintf(stderr, "failed to locate port flow tunnel #%u\n",
2147 			tunnel_ops->id);
2148 		return NULL;
2149 	}
2150 	if (tunnel_ops->actions) {
2151 		uint32_t num_actions;
2152 		const struct rte_flow_action *aptr;
2153 
2154 		ret = rte_flow_tunnel_decap_set(port_id, &pft->tunnel,
2155 						&pft->pmd_actions,
2156 						&pft->num_pmd_actions,
2157 						&error);
2158 		if (ret) {
2159 			port_flow_complain(&error);
2160 			return NULL;
2161 		}
2162 		for (aptr = actions, num_actions = 1;
2163 		     aptr->type != RTE_FLOW_ACTION_TYPE_END;
2164 		     aptr++, num_actions++);
2165 		pft->actions = malloc(
2166 				(num_actions +  pft->num_pmd_actions) *
2167 				sizeof(actions[0]));
2168 		if (!pft->actions) {
2169 			rte_flow_tunnel_action_decap_release(
2170 					port_id, pft->actions,
2171 					pft->num_pmd_actions, &error);
2172 			return NULL;
2173 		}
2174 		rte_memcpy(pft->actions, pft->pmd_actions,
2175 			   pft->num_pmd_actions * sizeof(actions[0]));
2176 		rte_memcpy(pft->actions + pft->num_pmd_actions, actions,
2177 			   num_actions * sizeof(actions[0]));
2178 	}
2179 	if (tunnel_ops->items) {
2180 		uint32_t num_items;
2181 		const struct rte_flow_item *iptr;
2182 
2183 		ret = rte_flow_tunnel_match(port_id, &pft->tunnel,
2184 					    &pft->pmd_items,
2185 					    &pft->num_pmd_items,
2186 					    &error);
2187 		if (ret) {
2188 			port_flow_complain(&error);
2189 			return NULL;
2190 		}
2191 		for (iptr = pattern, num_items = 1;
2192 		     iptr->type != RTE_FLOW_ITEM_TYPE_END;
2193 		     iptr++, num_items++);
2194 		pft->items = malloc((num_items + pft->num_pmd_items) *
2195 				    sizeof(pattern[0]));
2196 		if (!pft->items) {
2197 			rte_flow_tunnel_item_release(
2198 					port_id, pft->pmd_items,
2199 					pft->num_pmd_items, &error);
2200 			return NULL;
2201 		}
2202 		rte_memcpy(pft->items, pft->pmd_items,
2203 			   pft->num_pmd_items * sizeof(pattern[0]));
2204 		rte_memcpy(pft->items + pft->num_pmd_items, pattern,
2205 			   num_items * sizeof(pattern[0]));
2206 	}
2207 
2208 	return pft;
2209 }
2210 
2211 static void
2212 port_flow_tunnel_offload_cmd_release(portid_t port_id,
2213 				     const struct tunnel_ops *tunnel_ops,
2214 				     struct port_flow_tunnel *pft)
2215 {
2216 	struct rte_flow_error error;
2217 
2218 	if (tunnel_ops->actions) {
2219 		free(pft->actions);
2220 		rte_flow_tunnel_action_decap_release(
2221 			port_id, pft->pmd_actions,
2222 			pft->num_pmd_actions, &error);
2223 		pft->actions = NULL;
2224 		pft->pmd_actions = NULL;
2225 	}
2226 	if (tunnel_ops->items) {
2227 		free(pft->items);
2228 		rte_flow_tunnel_item_release(port_id, pft->pmd_items,
2229 					     pft->num_pmd_items,
2230 					     &error);
2231 		pft->items = NULL;
2232 		pft->pmd_items = NULL;
2233 	}
2234 }
2235 
2236 /** Add port meter policy */
2237 int
2238 port_meter_policy_add(portid_t port_id, uint32_t policy_id,
2239 			const struct rte_flow_action *actions)
2240 {
2241 	struct rte_mtr_error error;
2242 	const struct rte_flow_action *act = actions;
2243 	const struct rte_flow_action *start;
2244 	struct rte_mtr_meter_policy_params policy;
2245 	uint32_t i = 0, act_n;
2246 	int ret;
2247 
2248 	for (i = 0; i < RTE_COLORS; i++) {
2249 		for (act_n = 0, start = act;
2250 			act->type != RTE_FLOW_ACTION_TYPE_END; act++)
2251 			act_n++;
2252 		if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
2253 			policy.actions[i] = start;
2254 		else
2255 			policy.actions[i] = NULL;
2256 		act++;
2257 	}
2258 	ret = rte_mtr_meter_policy_add(port_id,
2259 			policy_id,
2260 			&policy, &error);
2261 	if (ret)
2262 		print_mtr_err_msg(&error);
2263 	return ret;
2264 }
2265 
2266 struct rte_flow_meter_profile *
2267 port_meter_profile_get_by_id(portid_t port_id, uint32_t id)
2268 {
2269 	struct rte_mtr_error error;
2270 	struct rte_flow_meter_profile *profile;
2271 
2272 	profile = rte_mtr_meter_profile_get(port_id, id, &error);
2273 	if (!profile)
2274 		print_mtr_err_msg(&error);
2275 	return profile;
2276 }
2277 struct rte_flow_meter_policy *
2278 port_meter_policy_get_by_id(portid_t port_id, uint32_t id)
2279 {
2280 	struct rte_mtr_error error;
2281 	struct rte_flow_meter_policy *policy;
2282 
2283 	policy = rte_mtr_meter_policy_get(port_id, id, &error);
2284 	if (!policy)
2285 		print_mtr_err_msg(&error);
2286 	return policy;
2287 }
2288 
2289 /** Validate flow rule. */
2290 int
2291 port_flow_validate(portid_t port_id,
2292 		   const struct rte_flow_attr *attr,
2293 		   const struct rte_flow_item *pattern,
2294 		   const struct rte_flow_action *actions,
2295 		   const struct tunnel_ops *tunnel_ops)
2296 {
2297 	struct rte_flow_error error;
2298 	struct port_flow_tunnel *pft = NULL;
2299 	int ret;
2300 
2301 	/* Poisoning to make sure PMDs update it in case of error. */
2302 	memset(&error, 0x11, sizeof(error));
2303 	if (tunnel_ops->enabled) {
2304 		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
2305 							actions, tunnel_ops);
2306 		if (!pft)
2307 			return -ENOENT;
2308 		if (pft->items)
2309 			pattern = pft->items;
2310 		if (pft->actions)
2311 			actions = pft->actions;
2312 	}
2313 	ret = rte_flow_validate(port_id, attr, pattern, actions, &error);
2314 	if (tunnel_ops->enabled)
2315 		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
2316 	if (ret)
2317 		return port_flow_complain(&error);
2318 	printf("Flow rule validated\n");
2319 	return 0;
2320 }
2321 
2322 /** Return age action structure if exists, otherwise NULL. */
2323 static struct rte_flow_action_age *
2324 age_action_get(const struct rte_flow_action *actions)
2325 {
2326 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2327 		switch (actions->type) {
2328 		case RTE_FLOW_ACTION_TYPE_AGE:
2329 			return (struct rte_flow_action_age *)
2330 				(uintptr_t)actions->conf;
2331 		default:
2332 			break;
2333 		}
2334 	}
2335 	return NULL;
2336 }
2337 
2338 /** Create pattern template */
2339 int
2340 port_flow_pattern_template_create(portid_t port_id, uint32_t id,
2341 				  const struct rte_flow_pattern_template_attr *attr,
2342 				  const struct rte_flow_item *pattern)
2343 {
2344 	struct rte_port *port;
2345 	struct port_template *pit;
2346 	int ret;
2347 	struct rte_flow_error error;
2348 
2349 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2350 	    port_id == (portid_t)RTE_PORT_ALL)
2351 		return -EINVAL;
2352 	port = &ports[port_id];
2353 	ret = template_alloc(id, &pit, &port->pattern_templ_list);
2354 	if (ret)
2355 		return ret;
2356 	/* Poisoning to make sure PMDs update it in case of error. */
2357 	memset(&error, 0x22, sizeof(error));
2358 	pit->template.pattern_template = rte_flow_pattern_template_create(port_id,
2359 						attr, pattern, &error);
2360 	if (!pit->template.pattern_template) {
2361 		uint32_t destroy_id = pit->id;
2362 		port_flow_pattern_template_destroy(port_id, 1, &destroy_id);
2363 		return port_flow_complain(&error);
2364 	}
2365 	printf("Pattern template #%u created\n", pit->id);
2366 	return 0;
2367 }
2368 
2369 /** Destroy pattern template */
2370 int
2371 port_flow_pattern_template_destroy(portid_t port_id, uint32_t n,
2372 				   const uint32_t *template)
2373 {
2374 	struct rte_port *port;
2375 	struct port_template **tmp;
2376 	int ret = 0;
2377 
2378 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2379 	    port_id == (portid_t)RTE_PORT_ALL)
2380 		return -EINVAL;
2381 	port = &ports[port_id];
2382 	tmp = &port->pattern_templ_list;
2383 	while (*tmp) {
2384 		uint32_t i;
2385 
2386 		for (i = 0; i != n; ++i) {
2387 			struct rte_flow_error error;
2388 			struct port_template *pit = *tmp;
2389 
2390 			if (template[i] != pit->id)
2391 				continue;
2392 			/*
2393 			 * Poisoning to make sure PMDs update it in case
2394 			 * of error.
2395 			 */
2396 			memset(&error, 0x33, sizeof(error));
2397 
2398 			if (pit->template.pattern_template &&
2399 			    rte_flow_pattern_template_destroy(port_id,
2400 							   pit->template.pattern_template,
2401 							   &error)) {
2402 				ret = port_flow_complain(&error);
2403 				continue;
2404 			}
2405 			*tmp = pit->next;
2406 			printf("Pattern template #%u destroyed\n", pit->id);
2407 			free(pit);
2408 			break;
2409 		}
2410 		if (i == n)
2411 			tmp = &(*tmp)->next;
2412 	}
2413 	return ret;
2414 }
2415 
2416 /** Flush pattern template */
2417 int
2418 port_flow_pattern_template_flush(portid_t port_id)
2419 {
2420 	struct rte_port *port;
2421 	struct port_template **tmp;
2422 	int ret = 0;
2423 
2424 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2425 	    port_id == (portid_t)RTE_PORT_ALL)
2426 		return -EINVAL;
2427 	port = &ports[port_id];
2428 	tmp = &port->pattern_templ_list;
2429 	while (*tmp) {
2430 		struct rte_flow_error error;
2431 		struct port_template *pit = *tmp;
2432 
2433 		/*
2434 		 * Poisoning to make sure PMDs update it in case
2435 		 * of error.
2436 		 */
2437 		memset(&error, 0x33, sizeof(error));
2438 		if (pit->template.pattern_template &&
2439 		    rte_flow_pattern_template_destroy(port_id,
2440 			pit->template.pattern_template, &error)) {
2441 			printf("Pattern template #%u not destroyed\n", pit->id);
2442 			ret = port_flow_complain(&error);
2443 			tmp = &pit->next;
2444 		} else {
2445 			*tmp = pit->next;
2446 			free(pit);
2447 		}
2448 	}
2449 	return ret;
2450 }
2451 
2452 /** Create actions template */
2453 int
2454 port_flow_actions_template_create(portid_t port_id, uint32_t id,
2455 				  const struct rte_flow_actions_template_attr *attr,
2456 				  const struct rte_flow_action *actions,
2457 				  const struct rte_flow_action *masks)
2458 {
2459 	struct rte_port *port;
2460 	struct port_template *pat;
2461 	int ret;
2462 	struct rte_flow_error error;
2463 
2464 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2465 	    port_id == (portid_t)RTE_PORT_ALL)
2466 		return -EINVAL;
2467 	port = &ports[port_id];
2468 	ret = template_alloc(id, &pat, &port->actions_templ_list);
2469 	if (ret)
2470 		return ret;
2471 	/* Poisoning to make sure PMDs update it in case of error. */
2472 	memset(&error, 0x22, sizeof(error));
2473 	pat->template.actions_template = rte_flow_actions_template_create(port_id,
2474 						attr, actions, masks, &error);
2475 	if (!pat->template.actions_template) {
2476 		uint32_t destroy_id = pat->id;
2477 		port_flow_actions_template_destroy(port_id, 1, &destroy_id);
2478 		return port_flow_complain(&error);
2479 	}
2480 	printf("Actions template #%u created\n", pat->id);
2481 	return 0;
2482 }
2483 
2484 /** Destroy actions template */
2485 int
2486 port_flow_actions_template_destroy(portid_t port_id, uint32_t n,
2487 				   const uint32_t *template)
2488 {
2489 	struct rte_port *port;
2490 	struct port_template **tmp;
2491 	int ret = 0;
2492 
2493 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2494 	    port_id == (portid_t)RTE_PORT_ALL)
2495 		return -EINVAL;
2496 	port = &ports[port_id];
2497 	tmp = &port->actions_templ_list;
2498 	while (*tmp) {
2499 		uint32_t i;
2500 
2501 		for (i = 0; i != n; ++i) {
2502 			struct rte_flow_error error;
2503 			struct port_template *pat = *tmp;
2504 
2505 			if (template[i] != pat->id)
2506 				continue;
2507 			/*
2508 			 * Poisoning to make sure PMDs update it in case
2509 			 * of error.
2510 			 */
2511 			memset(&error, 0x33, sizeof(error));
2512 
2513 			if (pat->template.actions_template &&
2514 			    rte_flow_actions_template_destroy(port_id,
2515 					pat->template.actions_template, &error)) {
2516 				ret = port_flow_complain(&error);
2517 				continue;
2518 			}
2519 			*tmp = pat->next;
2520 			printf("Actions template #%u destroyed\n", pat->id);
2521 			free(pat);
2522 			break;
2523 		}
2524 		if (i == n)
2525 			tmp = &(*tmp)->next;
2526 	}
2527 	return ret;
2528 }
2529 
2530 /** Flush actions template */
2531 int
2532 port_flow_actions_template_flush(portid_t port_id)
2533 {
2534 	struct rte_port *port;
2535 	struct port_template **tmp;
2536 	int ret = 0;
2537 
2538 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2539 	    port_id == (portid_t)RTE_PORT_ALL)
2540 		return -EINVAL;
2541 	port = &ports[port_id];
2542 	tmp = &port->actions_templ_list;
2543 	while (*tmp) {
2544 		struct rte_flow_error error;
2545 		struct port_template *pat = *tmp;
2546 
2547 		/*
2548 		 * Poisoning to make sure PMDs update it in case
2549 		 * of error.
2550 		 */
2551 		memset(&error, 0x33, sizeof(error));
2552 
2553 		if (pat->template.actions_template &&
2554 		    rte_flow_actions_template_destroy(port_id,
2555 			pat->template.actions_template, &error)) {
2556 			ret = port_flow_complain(&error);
2557 			printf("Actions template #%u not destroyed\n", pat->id);
2558 			tmp = &pat->next;
2559 		} else {
2560 			*tmp = pat->next;
2561 			free(pat);
2562 		}
2563 	}
2564 	return ret;
2565 }
2566 
2567 /** Create table */
2568 int
2569 port_flow_template_table_create(portid_t port_id, uint32_t id,
2570 		const struct rte_flow_template_table_attr *table_attr,
2571 		uint32_t nb_pattern_templates, uint32_t *pattern_templates,
2572 		uint32_t nb_actions_templates, uint32_t *actions_templates)
2573 {
2574 	struct rte_port *port;
2575 	struct port_table *pt;
2576 	struct port_template *temp = NULL;
2577 	int ret;
2578 	uint32_t i;
2579 	struct rte_flow_error error;
2580 	struct rte_flow_pattern_template
2581 			*flow_pattern_templates[nb_pattern_templates];
2582 	struct rte_flow_actions_template
2583 			*flow_actions_templates[nb_actions_templates];
2584 
2585 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2586 	    port_id == (portid_t)RTE_PORT_ALL)
2587 		return -EINVAL;
2588 	port = &ports[port_id];
2589 	for (i = 0; i < nb_pattern_templates; ++i) {
2590 		bool found = false;
2591 		temp = port->pattern_templ_list;
2592 		while (temp) {
2593 			if (pattern_templates[i] == temp->id) {
2594 				flow_pattern_templates[i] =
2595 					temp->template.pattern_template;
2596 				found = true;
2597 				break;
2598 			}
2599 			temp = temp->next;
2600 		}
2601 		if (!found) {
2602 			printf("Pattern template #%u is invalid\n",
2603 			       pattern_templates[i]);
2604 			return -EINVAL;
2605 		}
2606 	}
2607 	for (i = 0; i < nb_actions_templates; ++i) {
2608 		bool found = false;
2609 		temp = port->actions_templ_list;
2610 		while (temp) {
2611 			if (actions_templates[i] == temp->id) {
2612 				flow_actions_templates[i] =
2613 					temp->template.actions_template;
2614 				found = true;
2615 				break;
2616 			}
2617 			temp = temp->next;
2618 		}
2619 		if (!found) {
2620 			printf("Actions template #%u is invalid\n",
2621 			       actions_templates[i]);
2622 			return -EINVAL;
2623 		}
2624 	}
2625 	ret = table_alloc(id, &pt, &port->table_list);
2626 	if (ret)
2627 		return ret;
2628 	/* Poisoning to make sure PMDs update it in case of error. */
2629 	memset(&error, 0x22, sizeof(error));
2630 	pt->table = rte_flow_template_table_create(port_id, table_attr,
2631 		      flow_pattern_templates, nb_pattern_templates,
2632 		      flow_actions_templates, nb_actions_templates,
2633 		      &error);
2634 
2635 	if (!pt->table) {
2636 		uint32_t destroy_id = pt->id;
2637 		port_flow_template_table_destroy(port_id, 1, &destroy_id);
2638 		return port_flow_complain(&error);
2639 	}
2640 	pt->nb_pattern_templates = nb_pattern_templates;
2641 	pt->nb_actions_templates = nb_actions_templates;
2642 	rte_memcpy(&pt->flow_attr, &table_attr->flow_attr,
2643 		   sizeof(struct rte_flow_attr));
2644 	printf("Template table #%u created\n", pt->id);
2645 	return 0;
2646 }
2647 
2648 /** Destroy table */
2649 int
2650 port_flow_template_table_destroy(portid_t port_id,
2651 				 uint32_t n, const uint32_t *table)
2652 {
2653 	struct rte_port *port;
2654 	struct port_table **tmp;
2655 	int ret = 0;
2656 
2657 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2658 	    port_id == (portid_t)RTE_PORT_ALL)
2659 		return -EINVAL;
2660 	port = &ports[port_id];
2661 	tmp = &port->table_list;
2662 	while (*tmp) {
2663 		uint32_t i;
2664 
2665 		for (i = 0; i != n; ++i) {
2666 			struct rte_flow_error error;
2667 			struct port_table *pt = *tmp;
2668 
2669 			if (table[i] != pt->id)
2670 				continue;
2671 			/*
2672 			 * Poisoning to make sure PMDs update it in case
2673 			 * of error.
2674 			 */
2675 			memset(&error, 0x33, sizeof(error));
2676 
2677 			if (pt->table &&
2678 			    rte_flow_template_table_destroy(port_id,
2679 							    pt->table,
2680 							    &error)) {
2681 				ret = port_flow_complain(&error);
2682 				continue;
2683 			}
2684 			*tmp = pt->next;
2685 			printf("Template table #%u destroyed\n", pt->id);
2686 			free(pt);
2687 			break;
2688 		}
2689 		if (i == n)
2690 			tmp = &(*tmp)->next;
2691 	}
2692 	return ret;
2693 }
2694 
2695 int
2696 port_flow_template_table_resize_complete(portid_t port_id, uint32_t table_id)
2697 {
2698 	struct rte_port *port;
2699 	struct port_table *pt;
2700 	struct rte_flow_error error = { 0, };
2701 	int ret;
2702 
2703 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2704 		return -EINVAL;
2705 	port = &ports[port_id];
2706 	pt = port_table_locate(port->table_list, table_id);
2707 	if (!pt)
2708 		return -EINVAL;
2709 	ret = rte_flow_template_table_resize_complete(port_id,
2710 						      pt->table, &error);
2711 	return !ret ? 0 : port_flow_complain(&error);
2712 }
2713 
2714 int
2715 port_flow_template_table_resize(portid_t port_id,
2716 				uint32_t table_id, uint32_t flows_num)
2717 {
2718 	struct rte_port *port;
2719 	struct port_table *pt;
2720 	struct rte_flow_error error = { 0, };
2721 	int ret;
2722 
2723 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2724 		return -EINVAL;
2725 	port = &ports[port_id];
2726 	pt = port_table_locate(port->table_list, table_id);
2727 	if (!pt)
2728 		return -EINVAL;
2729 	ret = rte_flow_template_table_resize(port_id, pt->table, flows_num, &error);
2730 	if (ret)
2731 		return port_flow_complain(&error);
2732 	return 0;
2733 }
2734 
2735 /** Flush table */
2736 int
2737 port_flow_template_table_flush(portid_t port_id)
2738 {
2739 	struct rte_port *port;
2740 	struct port_table **tmp;
2741 	int ret = 0;
2742 
2743 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2744 	    port_id == (portid_t)RTE_PORT_ALL)
2745 		return -EINVAL;
2746 	port = &ports[port_id];
2747 	tmp = &port->table_list;
2748 	while (*tmp) {
2749 		struct rte_flow_error error;
2750 		struct port_table *pt = *tmp;
2751 
2752 		/*
2753 		 * Poisoning to make sure PMDs update it in case
2754 		 * of error.
2755 		 */
2756 		memset(&error, 0x33, sizeof(error));
2757 
2758 		if (pt->table &&
2759 		    rte_flow_template_table_destroy(port_id,
2760 						   pt->table,
2761 						   &error)) {
2762 			ret = port_flow_complain(&error);
2763 			printf("Template table #%u not destroyed\n", pt->id);
2764 			tmp = &pt->next;
2765 		} else {
2766 			*tmp = pt->next;
2767 			free(pt);
2768 		}
2769 	}
2770 	return ret;
2771 }
2772 
2773 /** Enqueue create flow rule operation. */
2774 int
2775 port_queue_flow_create(portid_t port_id, queueid_t queue_id,
2776 		       bool postpone, uint32_t table_id, uint32_t rule_idx,
2777 		       uint32_t pattern_idx, uint32_t actions_idx,
2778 		       const struct rte_flow_item *pattern,
2779 		       const struct rte_flow_action *actions)
2780 {
2781 	struct rte_flow_op_attr op_attr = { .postpone = postpone };
2782 	struct rte_flow *flow;
2783 	struct rte_port *port;
2784 	struct port_flow *pf;
2785 	struct port_table *pt;
2786 	uint32_t id = 0;
2787 	bool found;
2788 	struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL };
2789 	struct rte_flow_action_age *age = age_action_get(actions);
2790 	struct queue_job *job;
2791 
2792 	port = &ports[port_id];
2793 	if (port->flow_list) {
2794 		if (port->flow_list->id == UINT32_MAX) {
2795 			printf("Highest rule ID is already assigned,"
2796 			       " delete it first");
2797 			return -ENOMEM;
2798 		}
2799 		id = port->flow_list->id + 1;
2800 	}
2801 
2802 	if (queue_id >= port->queue_nb) {
2803 		printf("Queue #%u is invalid\n", queue_id);
2804 		return -EINVAL;
2805 	}
2806 
2807 	found = false;
2808 	pt = port->table_list;
2809 	while (pt) {
2810 		if (table_id == pt->id) {
2811 			found = true;
2812 			break;
2813 		}
2814 		pt = pt->next;
2815 	}
2816 	if (!found) {
2817 		printf("Table #%u is invalid\n", table_id);
2818 		return -EINVAL;
2819 	}
2820 
2821 	if (pattern_idx >= pt->nb_pattern_templates) {
2822 		printf("Pattern template index #%u is invalid,"
2823 		       " %u templates present in the table\n",
2824 		       pattern_idx, pt->nb_pattern_templates);
2825 		return -EINVAL;
2826 	}
2827 	if (actions_idx >= pt->nb_actions_templates) {
2828 		printf("Actions template index #%u is invalid,"
2829 		       " %u templates present in the table\n",
2830 		       actions_idx, pt->nb_actions_templates);
2831 		return -EINVAL;
2832 	}
2833 
2834 	job = calloc(1, sizeof(*job));
2835 	if (!job) {
2836 		printf("Queue flow create job allocate failed\n");
2837 		return -ENOMEM;
2838 	}
2839 	job->type = QUEUE_JOB_TYPE_FLOW_CREATE;
2840 
2841 	pf = port_flow_new(&pt->flow_attr, pattern, actions, &error);
2842 	if (!pf) {
2843 		free(job);
2844 		return port_flow_complain(&error);
2845 	}
2846 	if (age) {
2847 		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
2848 		age->context = &pf->age_type;
2849 	}
2850 	/* Poisoning to make sure PMDs update it in case of error. */
2851 	memset(&error, 0x11, sizeof(error));
2852 	if (rule_idx == UINT32_MAX)
2853 		flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table,
2854 			pattern, pattern_idx, actions, actions_idx, job, &error);
2855 	else
2856 		flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table,
2857 			rule_idx, actions, actions_idx, job, &error);
2858 	if (!flow) {
2859 		uint64_t flow_id = pf->id;
2860 		port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id);
2861 		free(job);
2862 		return port_flow_complain(&error);
2863 	}
2864 
2865 	pf->next = port->flow_list;
2866 	pf->id = id;
2867 	pf->table = pt;
2868 	pf->flow = flow;
2869 	job->pf = pf;
2870 	port->flow_list = pf;
2871 	printf("Flow rule #%"PRIu64" creation enqueued\n", pf->id);
2872 	return 0;
2873 }
2874 
2875 int
2876 port_queue_flow_update_resized(portid_t port_id, queueid_t queue_id,
2877 			       bool postpone, uint32_t flow_id)
2878 {
2879 	const struct rte_flow_op_attr op_attr = { .postpone = postpone };
2880 	struct rte_flow_error error = { 0, };
2881 	struct port_flow *pf;
2882 	struct rte_port *port;
2883 	struct queue_job *job;
2884 	int ret;
2885 
2886 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2887 	    port_id == (portid_t)RTE_PORT_ALL)
2888 		return -EINVAL;
2889 	port = &ports[port_id];
2890 	if (queue_id >= port->queue_nb) {
2891 		printf("Queue #%u is invalid\n", queue_id);
2892 		return -EINVAL;
2893 	}
2894 	pf = port_flow_locate(port->flow_list, flow_id);
2895 	if (!pf)
2896 		return -EINVAL;
2897 	job = calloc(1, sizeof(*job));
2898 	if (!job)
2899 		return -ENOMEM;
2900 	job->type = QUEUE_JOB_TYPE_FLOW_TRANSFER;
2901 	job->pf = pf;
2902 	ret = rte_flow_async_update_resized(port_id, queue_id, &op_attr,
2903 					    pf->flow, job, &error);
2904 	if (ret) {
2905 		free(job);
2906 		return port_flow_complain(&error);
2907 	}
2908 	return 0;
2909 }
2910 
2911 /** Enqueue number of destroy flow rules operations. */
2912 int
2913 port_queue_flow_destroy(portid_t port_id, queueid_t queue_id,
2914 			bool postpone, uint32_t n, const uint64_t *rule)
2915 {
2916 	struct rte_flow_op_attr op_attr = { .postpone = postpone };
2917 	struct rte_port *port;
2918 	struct port_flow **tmp;
2919 	int ret = 0;
2920 	struct queue_job *job;
2921 
2922 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
2923 	    port_id == (portid_t)RTE_PORT_ALL)
2924 		return -EINVAL;
2925 	port = &ports[port_id];
2926 
2927 	if (queue_id >= port->queue_nb) {
2928 		printf("Queue #%u is invalid\n", queue_id);
2929 		return -EINVAL;
2930 	}
2931 
2932 	tmp = &port->flow_list;
2933 	while (*tmp) {
2934 		uint32_t i;
2935 
2936 		for (i = 0; i != n; ++i) {
2937 			struct rte_flow_error error;
2938 			struct port_flow *pf = *tmp;
2939 
2940 			if (rule[i] != pf->id)
2941 				continue;
2942 			/*
2943 			 * Poisoning to make sure PMD
2944 			 * update it in case of error.
2945 			 */
2946 			memset(&error, 0x33, sizeof(error));
2947 			job = calloc(1, sizeof(*job));
2948 			if (!job) {
2949 				printf("Queue flow destroy job allocate failed\n");
2950 				return -ENOMEM;
2951 			}
2952 			job->type = QUEUE_JOB_TYPE_FLOW_DESTROY;
2953 			job->pf = pf;
2954 
2955 			if (rte_flow_async_destroy(port_id, queue_id, &op_attr,
2956 						   pf->flow, job, &error)) {
2957 				free(job);
2958 				ret = port_flow_complain(&error);
2959 				continue;
2960 			}
2961 			printf("Flow rule #%"PRIu64" destruction enqueued\n",
2962 			       pf->id);
2963 			*tmp = pf->next;
2964 			break;
2965 		}
2966 		if (i == n)
2967 			tmp = &(*tmp)->next;
2968 	}
2969 	return ret;
2970 }
2971 
2972 static void
2973 queue_action_handle_create(portid_t port_id, uint32_t queue_id,
2974 			   struct port_indirect_action *pia,
2975 			   struct queue_job *job,
2976 			   const struct rte_flow_op_attr *attr,
2977 			   const struct rte_flow_indir_action_conf *conf,
2978 			   const struct rte_flow_action *action,
2979 			   struct rte_flow_error *error)
2980 {
2981 	if (action->type == RTE_FLOW_ACTION_TYPE_AGE) {
2982 		struct rte_flow_action_age *age =
2983 			(struct rte_flow_action_age *)(uintptr_t)(action->conf);
2984 
2985 		pia->age_type = ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION;
2986 		age->context = &pia->age_type;
2987 	}
2988 	/* Poisoning to make sure PMDs update it in case of error. */
2989 	pia->handle = rte_flow_async_action_handle_create(port_id, queue_id,
2990 							  attr, conf, action,
2991 							  job, error);
2992 	pia->type = action->type;
2993 }
2994 
2995 static void
2996 queue_action_list_handle_create(portid_t port_id, uint32_t queue_id,
2997 				struct port_indirect_action *pia,
2998 				struct queue_job *job,
2999 				const struct rte_flow_op_attr *attr,
3000 				const struct rte_flow_indir_action_conf *conf,
3001 				const struct rte_flow_action *action,
3002 				struct rte_flow_error *error)
3003 {
3004 	/* Poisoning to make sure PMDs update it in case of error. */
3005 	pia->type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST;
3006 	pia->list_handle = rte_flow_async_action_list_handle_create
3007 		(port_id, queue_id, attr, conf, action,
3008 		 job, error);
3009 }
3010 
3011 /** Enqueue update flow rule operation. */
3012 int
3013 port_queue_flow_update(portid_t port_id, queueid_t queue_id,
3014 		       bool postpone, uint32_t rule_idx, uint32_t actions_idx,
3015 		       const struct rte_flow_action *actions)
3016 {
3017 	struct rte_flow_op_attr op_attr = { .postpone = postpone };
3018 	struct rte_port *port;
3019 	struct port_flow *pf, *uf;
3020 	struct port_flow **tmp;
3021 	struct port_table *pt;
3022 	bool found;
3023 	struct rte_flow_error error = { RTE_FLOW_ERROR_TYPE_NONE, NULL, NULL };
3024 	struct rte_flow_action_age *age = age_action_get(actions);
3025 	struct queue_job *job;
3026 
3027 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3028 	    port_id == (portid_t)RTE_PORT_ALL)
3029 		return -EINVAL;
3030 	port = &ports[port_id];
3031 
3032 	if (queue_id >= port->queue_nb) {
3033 		printf("Queue #%u is invalid\n", queue_id);
3034 		return -EINVAL;
3035 	}
3036 
3037 	found = false;
3038 	tmp = &port->flow_list;
3039 	while (*tmp) {
3040 		pf = *tmp;
3041 		if (rule_idx == pf->id) {
3042 			found = true;
3043 			break;
3044 		}
3045 		tmp = &(*tmp)->next;
3046 	}
3047 	if (!found) {
3048 		printf("Flow rule #%u is invalid\n", rule_idx);
3049 		return -EINVAL;
3050 	}
3051 
3052 	pt = pf->table;
3053 	if (actions_idx >= pt->nb_actions_templates) {
3054 		printf("Actions template index #%u is invalid,"
3055 		       " %u templates present in the table\n",
3056 		       actions_idx, pt->nb_actions_templates);
3057 		return -EINVAL;
3058 	}
3059 
3060 	job = calloc(1, sizeof(*job));
3061 	if (!job) {
3062 		printf("Queue flow create job allocate failed\n");
3063 		return -ENOMEM;
3064 	}
3065 	job->type = QUEUE_JOB_TYPE_FLOW_UPDATE;
3066 
3067 	uf = port_flow_new(&pt->flow_attr, pf->rule.pattern_ro, actions, &error);
3068 	if (!uf) {
3069 		free(job);
3070 		return port_flow_complain(&error);
3071 	}
3072 
3073 	if (age) {
3074 		uf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
3075 		age->context = &uf->age_type;
3076 	}
3077 
3078 	/*
3079 	 * Poisoning to make sure PMD update it in case of error.
3080 	 */
3081 	memset(&error, 0x44, sizeof(error));
3082 	if (rte_flow_async_actions_update(port_id, queue_id, &op_attr, pf->flow,
3083 					  actions, actions_idx, job, &error)) {
3084 		free(uf);
3085 		free(job);
3086 		return port_flow_complain(&error);
3087 	}
3088 	uf->next = pf->next;
3089 	uf->id = pf->id;
3090 	uf->table = pt;
3091 	uf->flow = pf->flow;
3092 	*tmp = uf;
3093 	job->pf = pf;
3094 
3095 	printf("Flow rule #%"PRIu64" update enqueued\n", pf->id);
3096 	return 0;
3097 }
3098 
3099 /** Enqueue indirect action create operation. */
3100 int
3101 port_queue_action_handle_create(portid_t port_id, uint32_t queue_id,
3102 				bool postpone, uint32_t id,
3103 				const struct rte_flow_indir_action_conf *conf,
3104 				const struct rte_flow_action *action)
3105 {
3106 	const struct rte_flow_op_attr attr = { .postpone = postpone};
3107 	struct rte_port *port;
3108 	struct port_indirect_action *pia;
3109 	int ret;
3110 	struct rte_flow_error error;
3111 	struct queue_job *job;
3112 	bool is_indirect_list = action[1].type != RTE_FLOW_ACTION_TYPE_END;
3113 
3114 
3115 	ret = action_alloc(port_id, id, &pia);
3116 	if (ret)
3117 		return ret;
3118 
3119 	port = &ports[port_id];
3120 	if (queue_id >= port->queue_nb) {
3121 		printf("Queue #%u is invalid\n", queue_id);
3122 		return -EINVAL;
3123 	}
3124 	job = calloc(1, sizeof(*job));
3125 	if (!job) {
3126 		printf("Queue action create job allocate failed\n");
3127 		return -ENOMEM;
3128 	}
3129 	job->type = QUEUE_JOB_TYPE_ACTION_CREATE;
3130 	job->pia = pia;
3131 
3132 	/* Poisoning to make sure PMDs update it in case of error. */
3133 	memset(&error, 0x88, sizeof(error));
3134 
3135 	if (is_indirect_list)
3136 		queue_action_list_handle_create(port_id, queue_id, pia, job,
3137 						&attr, conf, action, &error);
3138 	else
3139 		queue_action_handle_create(port_id, queue_id, pia, job, &attr,
3140 					   conf, action, &error);
3141 
3142 	if (!pia->handle) {
3143 		uint32_t destroy_id = pia->id;
3144 		port_queue_action_handle_destroy(port_id, queue_id,
3145 						 postpone, 1, &destroy_id);
3146 		free(job);
3147 		return port_flow_complain(&error);
3148 	}
3149 	printf("Indirect action #%u creation queued\n", pia->id);
3150 	return 0;
3151 }
3152 
3153 /** Enqueue indirect action destroy operation. */
3154 int
3155 port_queue_action_handle_destroy(portid_t port_id,
3156 				 uint32_t queue_id, bool postpone,
3157 				 uint32_t n, const uint32_t *actions)
3158 {
3159 	const struct rte_flow_op_attr attr = { .postpone = postpone};
3160 	struct rte_port *port;
3161 	struct port_indirect_action **tmp;
3162 	int ret = 0;
3163 	struct queue_job *job;
3164 
3165 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3166 	    port_id == (portid_t)RTE_PORT_ALL)
3167 		return -EINVAL;
3168 	port = &ports[port_id];
3169 
3170 	if (queue_id >= port->queue_nb) {
3171 		printf("Queue #%u is invalid\n", queue_id);
3172 		return -EINVAL;
3173 	}
3174 
3175 	tmp = &port->actions_list;
3176 	while (*tmp) {
3177 		uint32_t i;
3178 
3179 		for (i = 0; i != n; ++i) {
3180 			struct rte_flow_error error;
3181 			struct port_indirect_action *pia = *tmp;
3182 
3183 			if (actions[i] != pia->id)
3184 				continue;
3185 			/*
3186 			 * Poisoning to make sure PMDs update it in case
3187 			 * of error.
3188 			 */
3189 			memset(&error, 0x99, sizeof(error));
3190 			job = calloc(1, sizeof(*job));
3191 			if (!job) {
3192 				printf("Queue action destroy job allocate failed\n");
3193 				return -ENOMEM;
3194 			}
3195 			job->type = QUEUE_JOB_TYPE_ACTION_DESTROY;
3196 			job->pia = pia;
3197 			ret = pia->type == RTE_FLOW_ACTION_TYPE_INDIRECT_LIST ?
3198 			      rte_flow_async_action_list_handle_destroy
3199 				      (port_id, queue_id,
3200 				       &attr, pia->list_handle,
3201 				       job, &error) :
3202 			      rte_flow_async_action_handle_destroy
3203 				      (port_id, queue_id, &attr, pia->handle,
3204 				       job, &error);
3205 			if (ret) {
3206 				free(job);
3207 				ret = port_flow_complain(&error);
3208 				continue;
3209 			}
3210 			*tmp = pia->next;
3211 			printf("Indirect action #%u destruction queued\n",
3212 			       pia->id);
3213 			break;
3214 		}
3215 		if (i == n)
3216 			tmp = &(*tmp)->next;
3217 	}
3218 	return ret;
3219 }
3220 
3221 /** Enqueue indirect action update operation. */
3222 int
3223 port_queue_action_handle_update(portid_t port_id,
3224 				uint32_t queue_id, bool postpone, uint32_t id,
3225 				const struct rte_flow_action *action)
3226 {
3227 	const struct rte_flow_op_attr attr = { .postpone = postpone};
3228 	struct rte_port *port;
3229 	struct rte_flow_error error;
3230 	struct rte_flow_action_handle *action_handle;
3231 	struct queue_job *job;
3232 	struct port_indirect_action *pia;
3233 	struct rte_flow_update_meter_mark mtr_update;
3234 	const void *update;
3235 
3236 	action_handle = port_action_handle_get_by_id(port_id, id);
3237 	if (!action_handle)
3238 		return -EINVAL;
3239 
3240 	port = &ports[port_id];
3241 	if (queue_id >= port->queue_nb) {
3242 		printf("Queue #%u is invalid\n", queue_id);
3243 		return -EINVAL;
3244 	}
3245 
3246 	job = calloc(1, sizeof(*job));
3247 	if (!job) {
3248 		printf("Queue action update job allocate failed\n");
3249 		return -ENOMEM;
3250 	}
3251 	job->type = QUEUE_JOB_TYPE_ACTION_UPDATE;
3252 
3253 	pia = action_get_by_id(port_id, id);
3254 	if (!pia) {
3255 		free(job);
3256 		return -EINVAL;
3257 	}
3258 
3259 	switch (pia->type) {
3260 	case RTE_FLOW_ACTION_TYPE_AGE:
3261 		update = action->conf;
3262 		break;
3263 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
3264 		rte_memcpy(&mtr_update.meter_mark, action->conf,
3265 			sizeof(struct rte_flow_action_meter_mark));
3266 		if (mtr_update.meter_mark.profile)
3267 			mtr_update.profile_valid = 1;
3268 		if (mtr_update.meter_mark.policy)
3269 			mtr_update.policy_valid = 1;
3270 		mtr_update.color_mode_valid = 1;
3271 		mtr_update.state_valid = 1;
3272 		update = &mtr_update;
3273 		break;
3274 	default:
3275 		update = action;
3276 		break;
3277 	}
3278 
3279 	if (rte_flow_async_action_handle_update(port_id, queue_id, &attr,
3280 				    action_handle, update, job, &error)) {
3281 		free(job);
3282 		return port_flow_complain(&error);
3283 	}
3284 	printf("Indirect action #%u update queued\n", id);
3285 	return 0;
3286 }
3287 
3288 void
3289 port_queue_action_handle_query_update(portid_t port_id,
3290 				      uint32_t queue_id, bool postpone,
3291 				      uint32_t id,
3292 				      enum rte_flow_query_update_mode qu_mode,
3293 				      const struct rte_flow_action *action)
3294 {
3295 	int ret;
3296 	struct rte_flow_error error;
3297 	struct port_indirect_action *pia = action_get_by_id(port_id, id);
3298 	const struct rte_flow_op_attr attr = { .postpone = postpone};
3299 	struct queue_job *job;
3300 
3301 	if (!pia || !pia->handle)
3302 		return;
3303 	job = calloc(1, sizeof(*job));
3304 	if (!job)
3305 		return;
3306 	job->type = QUEUE_JOB_TYPE_ACTION_QUERY;
3307 	job->pia = pia;
3308 
3309 	ret = rte_flow_async_action_handle_query_update(port_id, queue_id,
3310 							&attr, pia->handle,
3311 							action,
3312 							&job->query,
3313 							qu_mode, job,
3314 							&error);
3315 	if (ret) {
3316 		port_flow_complain(&error);
3317 		free(job);
3318 	} else {
3319 		printf("port-%u: indirect action #%u update-and-query queued\n",
3320 		       port_id, id);
3321 	}
3322 }
3323 
3324 /** Enqueue indirect action query operation. */
3325 int
3326 port_queue_action_handle_query(portid_t port_id,
3327 			       uint32_t queue_id, bool postpone, uint32_t id)
3328 {
3329 	const struct rte_flow_op_attr attr = { .postpone = postpone};
3330 	struct rte_port *port;
3331 	struct rte_flow_error error;
3332 	struct rte_flow_action_handle *action_handle;
3333 	struct port_indirect_action *pia;
3334 	struct queue_job *job;
3335 
3336 	pia = action_get_by_id(port_id, id);
3337 	action_handle = pia ? pia->handle : NULL;
3338 	if (!action_handle)
3339 		return -EINVAL;
3340 
3341 	port = &ports[port_id];
3342 	if (queue_id >= port->queue_nb) {
3343 		printf("Queue #%u is invalid\n", queue_id);
3344 		return -EINVAL;
3345 	}
3346 
3347 	job = calloc(1, sizeof(*job));
3348 	if (!job) {
3349 		printf("Queue action update job allocate failed\n");
3350 		return -ENOMEM;
3351 	}
3352 	job->type = QUEUE_JOB_TYPE_ACTION_QUERY;
3353 	job->pia = pia;
3354 
3355 	if (rte_flow_async_action_handle_query(port_id, queue_id, &attr,
3356 				    action_handle, &job->query, job, &error)) {
3357 		free(job);
3358 		return port_flow_complain(&error);
3359 	}
3360 	printf("Indirect action #%u update queued\n", id);
3361 	return 0;
3362 }
3363 
3364 /** Push all the queue operations in the queue to the NIC. */
3365 int
3366 port_queue_flow_push(portid_t port_id, queueid_t queue_id)
3367 {
3368 	struct rte_port *port;
3369 	struct rte_flow_error error;
3370 	int ret = 0;
3371 
3372 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3373 	    port_id == (portid_t)RTE_PORT_ALL)
3374 		return -EINVAL;
3375 	port = &ports[port_id];
3376 
3377 	if (queue_id >= port->queue_nb) {
3378 		printf("Queue #%u is invalid\n", queue_id);
3379 		return -EINVAL;
3380 	}
3381 
3382 	memset(&error, 0x55, sizeof(error));
3383 	ret = rte_flow_push(port_id, queue_id, &error);
3384 	if (ret < 0) {
3385 		printf("Failed to push operations in the queue\n");
3386 		return -EINVAL;
3387 	}
3388 	printf("Queue #%u operations pushed\n", queue_id);
3389 	return ret;
3390 }
3391 
3392 /** Calculate the hash result for a given pattern in a given table. */
3393 int
3394 port_flow_hash_calc(portid_t port_id, uint32_t table_id,
3395 		    uint8_t pattern_template_index, const struct rte_flow_item pattern[])
3396 {
3397 	uint32_t hash;
3398 	bool found;
3399 	struct port_table *pt;
3400 	struct rte_port *port;
3401 	struct rte_flow_error error;
3402 	int ret = 0;
3403 
3404 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3405 	    port_id == (portid_t)RTE_PORT_ALL)
3406 		return -EINVAL;
3407 	port = &ports[port_id];
3408 
3409 	found = false;
3410 	pt = port->table_list;
3411 	while (pt) {
3412 		if (table_id == pt->id) {
3413 			found = true;
3414 			break;
3415 		}
3416 		pt = pt->next;
3417 	}
3418 	if (!found) {
3419 		printf("Table #%u is invalid\n", table_id);
3420 		return -EINVAL;
3421 	}
3422 
3423 	memset(&error, 0x55, sizeof(error));
3424 	ret = rte_flow_calc_table_hash(port_id, pt->table, pattern,
3425 				       pattern_template_index, &hash, &error);
3426 	if (ret < 0) {
3427 		printf("Failed to calculate hash ");
3428 		switch (abs(ret)) {
3429 		case ENODEV:
3430 			printf("no such device\n");
3431 			break;
3432 		case ENOTSUP:
3433 			printf("device doesn't support this operation\n");
3434 			break;
3435 		default:
3436 			printf("\n");
3437 			break;
3438 		}
3439 		return ret;
3440 	}
3441 	printf("Hash results 0x%x\n", hash);
3442 	return 0;
3443 }
3444 
3445 /** Calculate the encap hash result for a given pattern. */
3446 int
3447 port_flow_hash_calc_encap(portid_t port_id,
3448 			  enum rte_flow_encap_hash_field encap_hash_field,
3449 			  const struct rte_flow_item pattern[])
3450 {
3451 	struct rte_flow_error error;
3452 	int ret = 0;
3453 	uint16_t hash = 0;
3454 	uint8_t len = encap_hash_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT ? 2 : 1;
3455 
3456 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3457 	    port_id == (portid_t)RTE_PORT_ALL) {
3458 		printf("Failed to calculate encap hash - not a valid port");
3459 		return -EINVAL;
3460 	}
3461 
3462 	ret = rte_flow_calc_encap_hash(port_id, pattern, encap_hash_field, len,
3463 				       (uint8_t *)&hash, &error);
3464 	if (ret < 0) {
3465 		printf("Failed to calculate encap hash");
3466 		return ret;
3467 	}
3468 	if (encap_hash_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT)
3469 		printf("encap hash result %#x\n", hash);
3470 	else
3471 		printf("encap hash result %#x\n", *(uint8_t *)&hash);
3472 	return 0;
3473 }
3474 
3475 /** Pull queue operation results from the queue. */
3476 static int
3477 port_queue_aged_flow_destroy(portid_t port_id, queueid_t queue_id,
3478 			     const uint64_t *rule, int nb_flows)
3479 {
3480 	struct rte_port *port = &ports[port_id];
3481 	struct rte_flow_op_result *res;
3482 	struct rte_flow_error error;
3483 	uint32_t n = nb_flows;
3484 	int ret = 0;
3485 	int i;
3486 
3487 	res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result));
3488 	if (!res) {
3489 		printf("Failed to allocate memory for pulled results\n");
3490 		return -ENOMEM;
3491 	}
3492 
3493 	memset(&error, 0x66, sizeof(error));
3494 	while (nb_flows > 0) {
3495 		int success = 0;
3496 
3497 		if (n > port->queue_sz)
3498 			n = port->queue_sz;
3499 		ret = port_queue_flow_destroy(port_id, queue_id, true, n, rule);
3500 		if (ret < 0) {
3501 			free(res);
3502 			return ret;
3503 		}
3504 		ret = rte_flow_push(port_id, queue_id, &error);
3505 		if (ret < 0) {
3506 			printf("Failed to push operations in the queue: %s\n",
3507 			       strerror(-ret));
3508 			free(res);
3509 			return ret;
3510 		}
3511 		while (success < nb_flows) {
3512 			ret = rte_flow_pull(port_id, queue_id, res,
3513 					    port->queue_sz, &error);
3514 			if (ret < 0) {
3515 				printf("Failed to pull a operation results: %s\n",
3516 				       strerror(-ret));
3517 				free(res);
3518 				return ret;
3519 			}
3520 
3521 			for (i = 0; i < ret; i++) {
3522 				if (res[i].status == RTE_FLOW_OP_SUCCESS)
3523 					success++;
3524 			}
3525 		}
3526 		rule += n;
3527 		nb_flows -= n;
3528 		n = nb_flows;
3529 	}
3530 
3531 	free(res);
3532 	return ret;
3533 }
3534 
3535 /** List simply and destroy all aged flows per queue. */
3536 void
3537 port_queue_flow_aged(portid_t port_id, uint32_t queue_id, uint8_t destroy)
3538 {
3539 	void **contexts;
3540 	int nb_context, total = 0, idx;
3541 	uint64_t *rules = NULL;
3542 	struct rte_port *port;
3543 	struct rte_flow_error error;
3544 	enum age_action_context_type *type;
3545 	union {
3546 		struct port_flow *pf;
3547 		struct port_indirect_action *pia;
3548 	} ctx;
3549 
3550 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3551 	    port_id == (portid_t)RTE_PORT_ALL)
3552 		return;
3553 	port = &ports[port_id];
3554 	if (queue_id >= port->queue_nb) {
3555 		printf("Error: queue #%u is invalid\n", queue_id);
3556 		return;
3557 	}
3558 	total = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error);
3559 	if (total < 0) {
3560 		port_flow_complain(&error);
3561 		return;
3562 	}
3563 	printf("Port %u queue %u total aged flows: %d\n",
3564 	       port_id, queue_id, total);
3565 	if (total == 0)
3566 		return;
3567 	contexts = calloc(total, sizeof(void *));
3568 	if (contexts == NULL) {
3569 		printf("Cannot allocate contexts for aged flow\n");
3570 		return;
3571 	}
3572 	printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
3573 	nb_context = rte_flow_get_q_aged_flows(port_id, queue_id, contexts,
3574 					       total, &error);
3575 	if (nb_context > total) {
3576 		printf("Port %u queue %u get aged flows count(%d) > total(%d)\n",
3577 		       port_id, queue_id, nb_context, total);
3578 		free(contexts);
3579 		return;
3580 	}
3581 	if (destroy) {
3582 		rules = malloc(sizeof(uint32_t) * nb_context);
3583 		if (rules == NULL)
3584 			printf("Cannot allocate memory for destroy aged flow\n");
3585 	}
3586 	total = 0;
3587 	for (idx = 0; idx < nb_context; idx++) {
3588 		if (!contexts[idx]) {
3589 			printf("Error: get Null context in port %u queue %u\n",
3590 			       port_id, queue_id);
3591 			continue;
3592 		}
3593 		type = (enum age_action_context_type *)contexts[idx];
3594 		switch (*type) {
3595 		case ACTION_AGE_CONTEXT_TYPE_FLOW:
3596 			ctx.pf = container_of(type, struct port_flow, age_type);
3597 			printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32
3598 								 "\t%c%c%c\t\n",
3599 			       "Flow",
3600 			       ctx.pf->id,
3601 			       ctx.pf->rule.attr->group,
3602 			       ctx.pf->rule.attr->priority,
3603 			       ctx.pf->rule.attr->ingress ? 'i' : '-',
3604 			       ctx.pf->rule.attr->egress ? 'e' : '-',
3605 			       ctx.pf->rule.attr->transfer ? 't' : '-');
3606 			if (rules != NULL) {
3607 				rules[total] = ctx.pf->id;
3608 				total++;
3609 			}
3610 			break;
3611 		case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
3612 			ctx.pia = container_of(type,
3613 					       struct port_indirect_action,
3614 					       age_type);
3615 			printf("%-20s\t%" PRIu32 "\n", "Indirect action",
3616 			       ctx.pia->id);
3617 			break;
3618 		default:
3619 			printf("Error: invalid context type %u\n", port_id);
3620 			break;
3621 		}
3622 	}
3623 	if (rules != NULL) {
3624 		port_queue_aged_flow_destroy(port_id, queue_id, rules, total);
3625 		free(rules);
3626 	}
3627 	printf("\n%d flows destroyed\n", total);
3628 	free(contexts);
3629 }
3630 
3631 /** Pull queue operation results from the queue. */
3632 int
3633 port_queue_flow_pull(portid_t port_id, queueid_t queue_id)
3634 {
3635 	struct rte_port *port;
3636 	struct rte_flow_op_result *res;
3637 	struct rte_flow_error error;
3638 	int ret = 0;
3639 	int success = 0;
3640 	int i;
3641 	struct queue_job *job;
3642 
3643 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3644 	    port_id == (portid_t)RTE_PORT_ALL)
3645 		return -EINVAL;
3646 	port = &ports[port_id];
3647 
3648 	if (queue_id >= port->queue_nb) {
3649 		printf("Queue #%u is invalid\n", queue_id);
3650 		return -EINVAL;
3651 	}
3652 
3653 	res = calloc(port->queue_sz, sizeof(struct rte_flow_op_result));
3654 	if (!res) {
3655 		printf("Failed to allocate memory for pulled results\n");
3656 		return -ENOMEM;
3657 	}
3658 
3659 	memset(&error, 0x66, sizeof(error));
3660 	ret = rte_flow_pull(port_id, queue_id, res,
3661 				 port->queue_sz, &error);
3662 	if (ret < 0) {
3663 		printf("Failed to pull a operation results\n");
3664 		free(res);
3665 		return -EINVAL;
3666 	}
3667 
3668 	for (i = 0; i < ret; i++) {
3669 		if (res[i].status == RTE_FLOW_OP_SUCCESS)
3670 			success++;
3671 		job = (struct queue_job *)res[i].user_data;
3672 		if (job->type == QUEUE_JOB_TYPE_FLOW_DESTROY ||
3673 		    job->type == QUEUE_JOB_TYPE_FLOW_UPDATE)
3674 			free(job->pf);
3675 		else if (job->type == QUEUE_JOB_TYPE_ACTION_DESTROY)
3676 			free(job->pia);
3677 		else if (job->type == QUEUE_JOB_TYPE_ACTION_QUERY)
3678 			port_action_handle_query_dump(port_id, job->pia,
3679 						      &job->query);
3680 		free(job);
3681 	}
3682 	printf("Queue #%u pulled %u operations (%u failed, %u succeeded)\n",
3683 	       queue_id, ret, ret - success, success);
3684 	free(res);
3685 	return ret;
3686 }
3687 
3688 /* Set group miss actions */
3689 int
3690 port_queue_group_set_miss_actions(portid_t port_id, const struct rte_flow_attr *attr,
3691 				  const struct rte_flow_action *actions)
3692 {
3693 	struct rte_flow_group_attr gattr = {
3694 		.ingress = attr->ingress,
3695 		.egress = attr->egress,
3696 		.transfer = attr->transfer,
3697 	};
3698 	struct rte_flow_error error;
3699 	int ret = 0;
3700 
3701 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3702 	    port_id == (portid_t)RTE_PORT_ALL)
3703 		return -EINVAL;
3704 
3705 	memset(&error, 0x66, sizeof(error));
3706 	ret = rte_flow_group_set_miss_actions(port_id, attr->group, &gattr, actions, &error);
3707 
3708 	if (ret < 0)
3709 		return port_flow_complain(&error);
3710 
3711 	printf("Group #%u set miss actions succeeded\n", attr->group);
3712 	return ret;
3713 }
3714 
3715 /** Create flow rule. */
3716 int
3717 port_flow_create(portid_t port_id,
3718 		 const struct rte_flow_attr *attr,
3719 		 const struct rte_flow_item *pattern,
3720 		 const struct rte_flow_action *actions,
3721 		 const struct tunnel_ops *tunnel_ops,
3722 		 uintptr_t user_id)
3723 {
3724 	struct rte_flow *flow;
3725 	struct rte_port *port;
3726 	struct port_flow *pf;
3727 	uint32_t id = 0;
3728 	struct rte_flow_error error;
3729 	struct port_flow_tunnel *pft = NULL;
3730 	struct rte_flow_action_age *age = age_action_get(actions);
3731 
3732 	port = &ports[port_id];
3733 	if (port->flow_list) {
3734 		if (port->flow_list->id == UINT32_MAX) {
3735 			fprintf(stderr,
3736 				"Highest rule ID is already assigned, delete it first");
3737 			return -ENOMEM;
3738 		}
3739 		id = port->flow_list->id + 1;
3740 	}
3741 	if (tunnel_ops->enabled) {
3742 		pft = port_flow_tunnel_offload_cmd_prep(port_id, pattern,
3743 							actions, tunnel_ops);
3744 		if (!pft)
3745 			return -ENOENT;
3746 		if (pft->items)
3747 			pattern = pft->items;
3748 		if (pft->actions)
3749 			actions = pft->actions;
3750 	}
3751 	pf = port_flow_new(attr, pattern, actions, &error);
3752 	if (!pf)
3753 		return port_flow_complain(&error);
3754 	if (age) {
3755 		pf->age_type = ACTION_AGE_CONTEXT_TYPE_FLOW;
3756 		age->context = &pf->age_type;
3757 	}
3758 	/* Poisoning to make sure PMDs update it in case of error. */
3759 	memset(&error, 0x22, sizeof(error));
3760 	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
3761 	if (!flow) {
3762 		if (tunnel_ops->enabled)
3763 			port_flow_tunnel_offload_cmd_release(port_id,
3764 							     tunnel_ops, pft);
3765 		free(pf);
3766 		return port_flow_complain(&error);
3767 	}
3768 	pf->next = port->flow_list;
3769 	pf->id = id;
3770 	pf->user_id = user_id;
3771 	pf->flow = flow;
3772 	port->flow_list = pf;
3773 	if (tunnel_ops->enabled)
3774 		port_flow_tunnel_offload_cmd_release(port_id, tunnel_ops, pft);
3775 	if (user_id)
3776 		printf("Flow rule #%"PRIu64" created, user-id 0x%"PRIx64"\n",
3777 		       pf->id, pf->user_id);
3778 	else
3779 		printf("Flow rule #%"PRIu64" created\n", pf->id);
3780 	return 0;
3781 }
3782 
3783 /** Destroy a number of flow rules. */
3784 int
3785 port_flow_destroy(portid_t port_id, uint32_t n, const uint64_t *rule,
3786 		  bool is_user_id)
3787 {
3788 	struct rte_port *port;
3789 	struct port_flow **tmp;
3790 	int ret = 0;
3791 
3792 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3793 	    port_id == (portid_t)RTE_PORT_ALL)
3794 		return -EINVAL;
3795 	port = &ports[port_id];
3796 	tmp = &port->flow_list;
3797 	while (*tmp) {
3798 		uint32_t i;
3799 
3800 		for (i = 0; i != n; ++i) {
3801 			struct rte_flow_error error;
3802 			struct port_flow *pf = *tmp;
3803 
3804 			if (rule[i] != (is_user_id ? pf->user_id : pf->id))
3805 				continue;
3806 			/*
3807 			 * Poisoning to make sure PMDs update it in case
3808 			 * of error.
3809 			 */
3810 			memset(&error, 0x33, sizeof(error));
3811 			if (rte_flow_destroy(port_id, pf->flow, &error)) {
3812 				ret = port_flow_complain(&error);
3813 				continue;
3814 			}
3815 			if (is_user_id)
3816 				printf("Flow rule #%"PRIu64" destroyed, "
3817 				       "user-id 0x%"PRIx64"\n",
3818 				       pf->id, pf->user_id);
3819 			else
3820 				printf("Flow rule #%"PRIu64" destroyed\n",
3821 				       pf->id);
3822 			*tmp = pf->next;
3823 			free(pf);
3824 			break;
3825 		}
3826 		if (i == n)
3827 			tmp = &(*tmp)->next;
3828 	}
3829 	return ret;
3830 }
3831 
3832 /** Remove all flow rules. */
3833 int
3834 port_flow_flush(portid_t port_id)
3835 {
3836 	struct rte_flow_error error;
3837 	struct rte_port *port;
3838 	int ret = 0;
3839 
3840 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3841 		port_id == (portid_t)RTE_PORT_ALL)
3842 		return -EINVAL;
3843 
3844 	port = &ports[port_id];
3845 
3846 	if (port->flow_list == NULL)
3847 		return ret;
3848 
3849 	/* Poisoning to make sure PMDs update it in case of error. */
3850 	memset(&error, 0x44, sizeof(error));
3851 	if (rte_flow_flush(port_id, &error)) {
3852 		port_flow_complain(&error);
3853 	}
3854 
3855 	while (port->flow_list) {
3856 		struct port_flow *pf = port->flow_list->next;
3857 
3858 		free(port->flow_list);
3859 		port->flow_list = pf;
3860 	}
3861 	return ret;
3862 }
3863 
3864 /** Dump flow rules. */
3865 int
3866 port_flow_dump(portid_t port_id, bool dump_all, uint64_t rule_id,
3867 		const char *file_name, bool is_user_id)
3868 {
3869 	int ret = 0;
3870 	FILE *file = stdout;
3871 	struct rte_flow_error error;
3872 	struct rte_port *port;
3873 	struct port_flow *pflow;
3874 	struct rte_flow *tmpFlow = NULL;
3875 	bool found = false;
3876 
3877 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3878 		port_id == (portid_t)RTE_PORT_ALL)
3879 		return -EINVAL;
3880 
3881 	if (!dump_all) {
3882 		port = &ports[port_id];
3883 		pflow = port->flow_list;
3884 		while (pflow) {
3885 			if (rule_id !=
3886 			    (is_user_id ? pflow->user_id : pflow->id)) {
3887 				pflow = pflow->next;
3888 			} else {
3889 				tmpFlow = pflow->flow;
3890 				if (tmpFlow)
3891 					found = true;
3892 				break;
3893 			}
3894 		}
3895 		if (found == false) {
3896 			fprintf(stderr, "Failed to dump to flow %"PRIu64"\n",
3897 				rule_id);
3898 			return -EINVAL;
3899 		}
3900 	}
3901 
3902 	if (file_name && strlen(file_name)) {
3903 		file = fopen(file_name, "w");
3904 		if (!file) {
3905 			fprintf(stderr, "Failed to create file %s: %s\n",
3906 				file_name, strerror(errno));
3907 			return -errno;
3908 		}
3909 	}
3910 
3911 	if (!dump_all)
3912 		ret = rte_flow_dev_dump(port_id, tmpFlow, file, &error);
3913 	else
3914 		ret = rte_flow_dev_dump(port_id, NULL, file, &error);
3915 	if (ret) {
3916 		port_flow_complain(&error);
3917 		fprintf(stderr, "Failed to dump flow: %s\n", strerror(-ret));
3918 	} else
3919 		printf("Flow dump finished\n");
3920 	if (file_name && strlen(file_name))
3921 		fclose(file);
3922 	return ret;
3923 }
3924 
3925 /** Query a flow rule. */
3926 int
3927 port_flow_query(portid_t port_id, uint64_t rule,
3928 		const struct rte_flow_action *action, bool is_user_id)
3929 {
3930 	struct rte_flow_error error;
3931 	struct rte_port *port;
3932 	struct port_flow *pf;
3933 	const char *name;
3934 	union {
3935 		struct rte_flow_query_count count;
3936 		struct rte_flow_action_rss rss_conf;
3937 		struct rte_flow_query_age age;
3938 	} query;
3939 	int ret;
3940 
3941 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
3942 	    port_id == (portid_t)RTE_PORT_ALL)
3943 		return -EINVAL;
3944 	port = &ports[port_id];
3945 	for (pf = port->flow_list; pf; pf = pf->next)
3946 		if ((is_user_id ? pf->user_id : pf->id) == rule)
3947 			break;
3948 	if (!pf) {
3949 		fprintf(stderr, "Flow rule #%"PRIu64" not found\n", rule);
3950 		return -ENOENT;
3951 	}
3952 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
3953 			    &name, sizeof(name),
3954 			    (void *)(uintptr_t)action->type, &error);
3955 	if (ret < 0)
3956 		return port_flow_complain(&error);
3957 	switch (action->type) {
3958 	case RTE_FLOW_ACTION_TYPE_COUNT:
3959 	case RTE_FLOW_ACTION_TYPE_RSS:
3960 	case RTE_FLOW_ACTION_TYPE_AGE:
3961 		break;
3962 	default:
3963 		fprintf(stderr, "Cannot query action type %d (%s)\n",
3964 			action->type, name);
3965 		return -ENOTSUP;
3966 	}
3967 	/* Poisoning to make sure PMDs update it in case of error. */
3968 	memset(&error, 0x55, sizeof(error));
3969 	memset(&query, 0, sizeof(query));
3970 	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
3971 		return port_flow_complain(&error);
3972 	switch (action->type) {
3973 	case RTE_FLOW_ACTION_TYPE_COUNT:
3974 		printf("%s:\n"
3975 		       " hits_set: %u\n"
3976 		       " bytes_set: %u\n"
3977 		       " hits: %" PRIu64 "\n"
3978 		       " bytes: %" PRIu64 "\n",
3979 		       name,
3980 		       query.count.hits_set,
3981 		       query.count.bytes_set,
3982 		       query.count.hits,
3983 		       query.count.bytes);
3984 		break;
3985 	case RTE_FLOW_ACTION_TYPE_RSS:
3986 		rss_config_display(&query.rss_conf);
3987 		break;
3988 	case RTE_FLOW_ACTION_TYPE_AGE:
3989 		printf("%s:\n"
3990 		       " aged: %u\n"
3991 		       " sec_since_last_hit_valid: %u\n"
3992 		       " sec_since_last_hit: %" PRIu32 "\n",
3993 		       name,
3994 		       query.age.aged,
3995 		       query.age.sec_since_last_hit_valid,
3996 		       query.age.sec_since_last_hit);
3997 		break;
3998 	default:
3999 		fprintf(stderr,
4000 			"Cannot display result for action type %d (%s)\n",
4001 			action->type, name);
4002 		break;
4003 	}
4004 	return 0;
4005 }
4006 
4007 /** List simply and destroy all aged flows. */
4008 void
4009 port_flow_aged(portid_t port_id, uint8_t destroy)
4010 {
4011 	void **contexts;
4012 	int nb_context, total = 0, idx;
4013 	struct rte_flow_error error;
4014 	enum age_action_context_type *type;
4015 	union {
4016 		struct port_flow *pf;
4017 		struct port_indirect_action *pia;
4018 	} ctx;
4019 
4020 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
4021 	    port_id == (portid_t)RTE_PORT_ALL)
4022 		return;
4023 	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
4024 	printf("Port %u total aged flows: %d\n", port_id, total);
4025 	if (total < 0) {
4026 		port_flow_complain(&error);
4027 		return;
4028 	}
4029 	if (total == 0)
4030 		return;
4031 	contexts = malloc(sizeof(void *) * total);
4032 	if (contexts == NULL) {
4033 		fprintf(stderr, "Cannot allocate contexts for aged flow\n");
4034 		return;
4035 	}
4036 	printf("%-20s\tID\tGroup\tPrio\tAttr\n", "Type");
4037 	nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error);
4038 	if (nb_context != total) {
4039 		fprintf(stderr,
4040 			"Port:%d get aged flows count(%d) != total(%d)\n",
4041 			port_id, nb_context, total);
4042 		free(contexts);
4043 		return;
4044 	}
4045 	total = 0;
4046 	for (idx = 0; idx < nb_context; idx++) {
4047 		if (!contexts[idx]) {
4048 			fprintf(stderr, "Error: get Null context in port %u\n",
4049 				port_id);
4050 			continue;
4051 		}
4052 		type = (enum age_action_context_type *)contexts[idx];
4053 		switch (*type) {
4054 		case ACTION_AGE_CONTEXT_TYPE_FLOW:
4055 			ctx.pf = container_of(type, struct port_flow, age_type);
4056 			printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32
4057 								 "\t%c%c%c\t\n",
4058 			       "Flow",
4059 			       ctx.pf->id,
4060 			       ctx.pf->rule.attr->group,
4061 			       ctx.pf->rule.attr->priority,
4062 			       ctx.pf->rule.attr->ingress ? 'i' : '-',
4063 			       ctx.pf->rule.attr->egress ? 'e' : '-',
4064 			       ctx.pf->rule.attr->transfer ? 't' : '-');
4065 			if (destroy && !port_flow_destroy(port_id, 1,
4066 							  &ctx.pf->id, false))
4067 				total++;
4068 			break;
4069 		case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION:
4070 			ctx.pia = container_of(type,
4071 					struct port_indirect_action, age_type);
4072 			printf("%-20s\t%" PRIu32 "\n", "Indirect action",
4073 			       ctx.pia->id);
4074 			break;
4075 		default:
4076 			fprintf(stderr, "Error: invalid context type %u\n",
4077 				port_id);
4078 			break;
4079 		}
4080 	}
4081 	printf("\n%d flows destroyed\n", total);
4082 	free(contexts);
4083 }
4084 
4085 /** List flow rules. */
4086 void
4087 port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
4088 {
4089 	struct rte_port *port;
4090 	struct port_flow *pf;
4091 	struct port_flow *list = NULL;
4092 	uint32_t i;
4093 
4094 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
4095 	    port_id == (portid_t)RTE_PORT_ALL)
4096 		return;
4097 	port = &ports[port_id];
4098 	if (!port->flow_list)
4099 		return;
4100 	/* Sort flows by group, priority and ID. */
4101 	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
4102 		struct port_flow **tmp;
4103 		const struct rte_flow_attr *curr = pf->rule.attr;
4104 
4105 		if (n) {
4106 			/* Filter out unwanted groups. */
4107 			for (i = 0; i != n; ++i)
4108 				if (curr->group == group[i])
4109 					break;
4110 			if (i == n)
4111 				continue;
4112 		}
4113 		for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) {
4114 			const struct rte_flow_attr *comp = (*tmp)->rule.attr;
4115 
4116 			if (curr->group > comp->group ||
4117 			    (curr->group == comp->group &&
4118 			     curr->priority > comp->priority) ||
4119 			    (curr->group == comp->group &&
4120 			     curr->priority == comp->priority &&
4121 			     pf->id > (*tmp)->id))
4122 				continue;
4123 			break;
4124 		}
4125 		pf->tmp = *tmp;
4126 		*tmp = pf;
4127 	}
4128 	printf("ID\tGroup\tPrio\tAttr\tRule\n");
4129 	for (pf = list; pf != NULL; pf = pf->tmp) {
4130 		const struct rte_flow_item *item = pf->rule.pattern;
4131 		const struct rte_flow_action *action = pf->rule.actions;
4132 		const char *name;
4133 
4134 		printf("%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
4135 		       pf->id,
4136 		       pf->rule.attr->group,
4137 		       pf->rule.attr->priority,
4138 		       pf->rule.attr->ingress ? 'i' : '-',
4139 		       pf->rule.attr->egress ? 'e' : '-',
4140 		       pf->rule.attr->transfer ? 't' : '-');
4141 		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
4142 			if ((uint32_t)item->type > INT_MAX)
4143 				name = "PMD_INTERNAL";
4144 			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
4145 					  &name, sizeof(name),
4146 					  (void *)(uintptr_t)item->type,
4147 					  NULL) <= 0)
4148 				name = "[UNKNOWN]";
4149 			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
4150 				printf("%s ", name);
4151 			++item;
4152 		}
4153 		printf("=>");
4154 		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
4155 			if ((uint32_t)action->type > INT_MAX)
4156 				name = "PMD_INTERNAL";
4157 			else if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
4158 					  &name, sizeof(name),
4159 					  (void *)(uintptr_t)action->type,
4160 					  NULL) <= 0)
4161 				name = "[UNKNOWN]";
4162 			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
4163 				printf(" %s", name);
4164 			++action;
4165 		}
4166 		printf("\n");
4167 	}
4168 }
4169 
4170 /** Restrict ingress traffic to the defined flow rules. */
4171 int
4172 port_flow_isolate(portid_t port_id, int set)
4173 {
4174 	struct rte_flow_error error;
4175 
4176 	/* Poisoning to make sure PMDs update it in case of error. */
4177 	memset(&error, 0x66, sizeof(error));
4178 	if (rte_flow_isolate(port_id, set, &error))
4179 		return port_flow_complain(&error);
4180 	printf("Ingress traffic on port %u is %s to the defined flow rules\n",
4181 	       port_id,
4182 	       set ? "now restricted" : "not restricted anymore");
4183 	return 0;
4184 }
4185 
4186 /*
4187  * RX/TX ring descriptors display functions.
4188  */
4189 int
4190 rx_queue_id_is_invalid(queueid_t rxq_id)
4191 {
4192 	if (rxq_id < nb_rxq)
4193 		return 0;
4194 	fprintf(stderr, "Invalid RX queue %d (must be < nb_rxq=%d)\n",
4195 		rxq_id, nb_rxq);
4196 	return 1;
4197 }
4198 
4199 int
4200 tx_queue_id_is_invalid(queueid_t txq_id)
4201 {
4202 	if (txq_id < nb_txq)
4203 		return 0;
4204 	fprintf(stderr, "Invalid TX queue %d (must be < nb_txq=%d)\n",
4205 		txq_id, nb_txq);
4206 	return 1;
4207 }
4208 
4209 static int
4210 get_rx_ring_size(portid_t port_id, queueid_t rxq_id, uint16_t *ring_size)
4211 {
4212 	struct rte_port *port = &ports[port_id];
4213 	struct rte_eth_rxq_info rx_qinfo;
4214 	int ret;
4215 
4216 	ret = rte_eth_rx_queue_info_get(port_id, rxq_id, &rx_qinfo);
4217 	if (ret == 0) {
4218 		*ring_size = rx_qinfo.nb_desc;
4219 		return ret;
4220 	}
4221 
4222 	if (ret != -ENOTSUP)
4223 		return ret;
4224 	/*
4225 	 * If the rte_eth_rx_queue_info_get is not support for this PMD,
4226 	 * ring_size stored in testpmd will be used for validity verification.
4227 	 * When configure the rxq by rte_eth_rx_queue_setup with nb_rx_desc
4228 	 * being 0, it will use a default value provided by PMDs to setup this
4229 	 * rxq. If the default value is 0, it will use the
4230 	 * RTE_ETH_DEV_FALLBACK_RX_RINGSIZE to setup this rxq.
4231 	 */
4232 	if (port->nb_rx_desc[rxq_id])
4233 		*ring_size = port->nb_rx_desc[rxq_id];
4234 	else if (port->dev_info.default_rxportconf.ring_size)
4235 		*ring_size = port->dev_info.default_rxportconf.ring_size;
4236 	else
4237 		*ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
4238 	return 0;
4239 }
4240 
4241 static int
4242 get_tx_ring_size(portid_t port_id, queueid_t txq_id, uint16_t *ring_size)
4243 {
4244 	struct rte_port *port = &ports[port_id];
4245 	struct rte_eth_txq_info tx_qinfo;
4246 	int ret;
4247 
4248 	ret = rte_eth_tx_queue_info_get(port_id, txq_id, &tx_qinfo);
4249 	if (ret == 0) {
4250 		*ring_size = tx_qinfo.nb_desc;
4251 		return ret;
4252 	}
4253 
4254 	if (ret != -ENOTSUP)
4255 		return ret;
4256 	/*
4257 	 * If the rte_eth_tx_queue_info_get is not support for this PMD,
4258 	 * ring_size stored in testpmd will be used for validity verification.
4259 	 * When configure the txq by rte_eth_tx_queue_setup with nb_tx_desc
4260 	 * being 0, it will use a default value provided by PMDs to setup this
4261 	 * txq. If the default value is 0, it will use the
4262 	 * RTE_ETH_DEV_FALLBACK_TX_RINGSIZE to setup this txq.
4263 	 */
4264 	if (port->nb_tx_desc[txq_id])
4265 		*ring_size = port->nb_tx_desc[txq_id];
4266 	else if (port->dev_info.default_txportconf.ring_size)
4267 		*ring_size = port->dev_info.default_txportconf.ring_size;
4268 	else
4269 		*ring_size = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
4270 	return 0;
4271 }
4272 
4273 static int
4274 rx_desc_id_is_invalid(portid_t port_id, queueid_t rxq_id, uint16_t rxdesc_id)
4275 {
4276 	uint16_t ring_size;
4277 	int ret;
4278 
4279 	ret = get_rx_ring_size(port_id, rxq_id, &ring_size);
4280 	if (ret)
4281 		return 1;
4282 
4283 	if (rxdesc_id < ring_size)
4284 		return 0;
4285 
4286 	fprintf(stderr, "Invalid RX descriptor %u (must be < ring_size=%u)\n",
4287 		rxdesc_id, ring_size);
4288 	return 1;
4289 }
4290 
4291 static int
4292 tx_desc_id_is_invalid(portid_t port_id, queueid_t txq_id, uint16_t txdesc_id)
4293 {
4294 	uint16_t ring_size;
4295 	int ret;
4296 
4297 	ret = get_tx_ring_size(port_id, txq_id, &ring_size);
4298 	if (ret)
4299 		return 1;
4300 
4301 	if (txdesc_id < ring_size)
4302 		return 0;
4303 
4304 	fprintf(stderr, "Invalid TX descriptor %u (must be < ring_size=%u)\n",
4305 		txdesc_id, ring_size);
4306 	return 1;
4307 }
4308 
4309 static const struct rte_memzone *
4310 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
4311 {
4312 	char mz_name[RTE_MEMZONE_NAMESIZE];
4313 	const struct rte_memzone *mz;
4314 
4315 	snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s",
4316 			port_id, q_id, ring_name);
4317 	mz = rte_memzone_lookup(mz_name);
4318 	if (mz == NULL)
4319 		fprintf(stderr,
4320 			"%s ring memory zoneof (port %d, queue %d) not found (zone name = %s\n",
4321 			ring_name, port_id, q_id, mz_name);
4322 	return mz;
4323 }
4324 
4325 union igb_ring_dword {
4326 	uint64_t dword;
4327 	struct {
4328 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
4329 		uint32_t lo;
4330 		uint32_t hi;
4331 #else
4332 		uint32_t hi;
4333 		uint32_t lo;
4334 #endif
4335 	} words;
4336 };
4337 
4338 struct igb_ring_desc_32_bytes {
4339 	union igb_ring_dword lo_dword;
4340 	union igb_ring_dword hi_dword;
4341 	union igb_ring_dword resv1;
4342 	union igb_ring_dword resv2;
4343 };
4344 
4345 struct igb_ring_desc_16_bytes {
4346 	union igb_ring_dword lo_dword;
4347 	union igb_ring_dword hi_dword;
4348 };
4349 
4350 static void
4351 ring_rxd_display_dword(union igb_ring_dword dword)
4352 {
4353 	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
4354 					(unsigned)dword.words.hi);
4355 }
4356 
4357 static void
4358 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
4359 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
4360 			   portid_t port_id,
4361 #else
4362 			   __rte_unused portid_t port_id,
4363 #endif
4364 			   uint16_t desc_id)
4365 {
4366 	struct igb_ring_desc_16_bytes *ring =
4367 		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
4368 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
4369 	int ret;
4370 	struct rte_eth_dev_info dev_info;
4371 
4372 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
4373 	if (ret != 0)
4374 		return;
4375 
4376 	if (strstr(dev_info.driver_name, "i40e") != NULL) {
4377 		/* 32 bytes RX descriptor, i40e only */
4378 		struct igb_ring_desc_32_bytes *ring =
4379 			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
4380 		ring[desc_id].lo_dword.dword =
4381 			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
4382 		ring_rxd_display_dword(ring[desc_id].lo_dword);
4383 		ring[desc_id].hi_dword.dword =
4384 			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
4385 		ring_rxd_display_dword(ring[desc_id].hi_dword);
4386 		ring[desc_id].resv1.dword =
4387 			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
4388 		ring_rxd_display_dword(ring[desc_id].resv1);
4389 		ring[desc_id].resv2.dword =
4390 			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
4391 		ring_rxd_display_dword(ring[desc_id].resv2);
4392 
4393 		return;
4394 	}
4395 #endif
4396 	/* 16 bytes RX descriptor */
4397 	ring[desc_id].lo_dword.dword =
4398 		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
4399 	ring_rxd_display_dword(ring[desc_id].lo_dword);
4400 	ring[desc_id].hi_dword.dword =
4401 		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
4402 	ring_rxd_display_dword(ring[desc_id].hi_dword);
4403 }
4404 
4405 static void
4406 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
4407 {
4408 	struct igb_ring_desc_16_bytes *ring;
4409 	struct igb_ring_desc_16_bytes txd;
4410 
4411 	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
4412 	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
4413 	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
4414 	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
4415 			(unsigned)txd.lo_dword.words.lo,
4416 			(unsigned)txd.lo_dword.words.hi,
4417 			(unsigned)txd.hi_dword.words.lo,
4418 			(unsigned)txd.hi_dword.words.hi);
4419 }
4420 
4421 void
4422 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
4423 {
4424 	const struct rte_memzone *rx_mz;
4425 
4426 	if (rx_desc_id_is_invalid(port_id, rxq_id, rxd_id))
4427 		return;
4428 	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
4429 	if (rx_mz == NULL)
4430 		return;
4431 	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
4432 }
4433 
4434 void
4435 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
4436 {
4437 	const struct rte_memzone *tx_mz;
4438 
4439 	if (tx_desc_id_is_invalid(port_id, txq_id, txd_id))
4440 		return;
4441 	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
4442 	if (tx_mz == NULL)
4443 		return;
4444 	ring_tx_descriptor_display(tx_mz, txd_id);
4445 }
4446 
4447 void
4448 fwd_lcores_config_display(void)
4449 {
4450 	lcoreid_t lc_id;
4451 
4452 	printf("List of forwarding lcores:");
4453 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
4454 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
4455 	printf("\n");
4456 }
4457 void
4458 rxtx_config_display(void)
4459 {
4460 	portid_t pid;
4461 	queueid_t qid;
4462 
4463 	printf("  %s%s%s packet forwarding%s packets/burst=%d\n",
4464 	       cur_fwd_eng->fwd_mode_name,
4465 	       cur_fwd_eng->status ? "-" : "",
4466 	       cur_fwd_eng->status ? cur_fwd_eng->status : "",
4467 	       retry_enabled == 0 ? "" : " with retry",
4468 	       nb_pkt_per_burst);
4469 
4470 	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
4471 		printf("  packet len=%u - nb packet segments=%d\n",
4472 				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
4473 
4474 	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
4475 	       nb_fwd_lcores, nb_fwd_ports);
4476 
4477 	RTE_ETH_FOREACH_DEV(pid) {
4478 		struct rte_eth_rxconf *rx_conf = &ports[pid].rxq[0].conf;
4479 		struct rte_eth_txconf *tx_conf = &ports[pid].txq[0].conf;
4480 		uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
4481 		uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
4482 		struct rte_eth_rxq_info rx_qinfo;
4483 		struct rte_eth_txq_info tx_qinfo;
4484 		uint16_t rx_free_thresh_tmp;
4485 		uint16_t tx_free_thresh_tmp;
4486 		uint16_t tx_rs_thresh_tmp;
4487 		uint16_t nb_rx_desc_tmp;
4488 		uint16_t nb_tx_desc_tmp;
4489 		uint64_t offloads_tmp;
4490 		uint8_t pthresh_tmp;
4491 		uint8_t hthresh_tmp;
4492 		uint8_t wthresh_tmp;
4493 		int32_t rc;
4494 
4495 		/* per port config */
4496 		printf("  port %d: RX queue number: %d Tx queue number: %d\n",
4497 				(unsigned int)pid, nb_rxq, nb_txq);
4498 
4499 		printf("    Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
4500 				ports[pid].dev_conf.rxmode.offloads,
4501 				ports[pid].dev_conf.txmode.offloads);
4502 
4503 		/* per rx queue config only for first queue to be less verbose */
4504 		for (qid = 0; qid < 1; qid++) {
4505 			rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
4506 			if (rc) {
4507 				nb_rx_desc_tmp = nb_rx_desc[qid];
4508 				rx_free_thresh_tmp =
4509 					rx_conf[qid].rx_free_thresh;
4510 				pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
4511 				hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
4512 				wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
4513 				offloads_tmp = rx_conf[qid].offloads;
4514 			} else {
4515 				nb_rx_desc_tmp = rx_qinfo.nb_desc;
4516 				rx_free_thresh_tmp =
4517 						rx_qinfo.conf.rx_free_thresh;
4518 				pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
4519 				hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
4520 				wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
4521 				offloads_tmp = rx_qinfo.conf.offloads;
4522 			}
4523 
4524 			printf("    RX queue: %d\n", qid);
4525 			printf("      RX desc=%d - RX free threshold=%d\n",
4526 				nb_rx_desc_tmp, rx_free_thresh_tmp);
4527 			printf("      RX threshold registers: pthresh=%d hthresh=%d "
4528 				" wthresh=%d\n",
4529 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
4530 			printf("      RX Offloads=0x%"PRIx64, offloads_tmp);
4531 			if (rx_conf->share_group > 0)
4532 				printf(" share_group=%u share_qid=%u",
4533 				       rx_conf->share_group,
4534 				       rx_conf->share_qid);
4535 			printf("\n");
4536 		}
4537 
4538 		/* per tx queue config only for first queue to be less verbose */
4539 		for (qid = 0; qid < 1; qid++) {
4540 			rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
4541 			if (rc) {
4542 				nb_tx_desc_tmp = nb_tx_desc[qid];
4543 				tx_free_thresh_tmp =
4544 					tx_conf[qid].tx_free_thresh;
4545 				pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
4546 				hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
4547 				wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
4548 				offloads_tmp = tx_conf[qid].offloads;
4549 				tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
4550 			} else {
4551 				nb_tx_desc_tmp = tx_qinfo.nb_desc;
4552 				tx_free_thresh_tmp =
4553 						tx_qinfo.conf.tx_free_thresh;
4554 				pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
4555 				hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
4556 				wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
4557 				offloads_tmp = tx_qinfo.conf.offloads;
4558 				tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
4559 			}
4560 
4561 			printf("    TX queue: %d\n", qid);
4562 			printf("      TX desc=%d - TX free threshold=%d\n",
4563 				nb_tx_desc_tmp, tx_free_thresh_tmp);
4564 			printf("      TX threshold registers: pthresh=%d hthresh=%d "
4565 				" wthresh=%d\n",
4566 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
4567 			printf("      TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
4568 				offloads_tmp, tx_rs_thresh_tmp);
4569 		}
4570 	}
4571 }
4572 
4573 void
4574 port_rss_reta_info(portid_t port_id,
4575 		   struct rte_eth_rss_reta_entry64 *reta_conf,
4576 		   uint16_t nb_entries)
4577 {
4578 	uint16_t i, idx, shift;
4579 	int ret;
4580 
4581 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4582 		return;
4583 
4584 	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
4585 	if (ret != 0) {
4586 		fprintf(stderr,
4587 			"Failed to get RSS RETA info, return code = %d\n",
4588 			ret);
4589 		return;
4590 	}
4591 
4592 	for (i = 0; i < nb_entries; i++) {
4593 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4594 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4595 		if (!(reta_conf[idx].mask & (1ULL << shift)))
4596 			continue;
4597 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
4598 					i, reta_conf[idx].reta[shift]);
4599 	}
4600 }
4601 
4602 /*
4603  * Displays the RSS hash functions of a port, and, optionally, the RSS hash
4604  * key of the port.
4605  */
4606 void
4607 port_rss_hash_conf_show(portid_t port_id, int show_rss_key, int show_rss_algo)
4608 {
4609 	struct rte_eth_rss_conf rss_conf = {0};
4610 	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
4611 	uint64_t rss_hf;
4612 	uint8_t i;
4613 	int diag;
4614 	struct rte_eth_dev_info dev_info;
4615 	uint8_t hash_key_size;
4616 	int ret;
4617 
4618 	if (port_id_is_invalid(port_id, ENABLED_WARN))
4619 		return;
4620 
4621 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
4622 	if (ret != 0)
4623 		return;
4624 
4625 	if (dev_info.hash_key_size > 0 &&
4626 			dev_info.hash_key_size <= sizeof(rss_key))
4627 		hash_key_size = dev_info.hash_key_size;
4628 	else {
4629 		fprintf(stderr,
4630 			"dev_info did not provide a valid hash key size\n");
4631 		return;
4632 	}
4633 
4634 	/* Get RSS hash key if asked to display it */
4635 	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
4636 	rss_conf.rss_key_len = hash_key_size;
4637 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
4638 	if (diag != 0) {
4639 		switch (diag) {
4640 		case -ENODEV:
4641 			fprintf(stderr, "port index %d invalid\n", port_id);
4642 			break;
4643 		case -ENOTSUP:
4644 			fprintf(stderr, "operation not supported by device\n");
4645 			break;
4646 		default:
4647 			fprintf(stderr, "operation failed - diag=%d\n", diag);
4648 			break;
4649 		}
4650 		return;
4651 	}
4652 	rss_hf = rss_conf.rss_hf;
4653 	if (rss_hf == 0) {
4654 		printf("RSS disabled\n");
4655 		return;
4656 	}
4657 
4658 	if (show_rss_algo) {
4659 		printf("RSS algorithm:\n  %s\n",
4660 			rte_eth_dev_rss_algo_name(rss_conf.algorithm));
4661 		return;
4662 	}
4663 
4664 	printf("RSS functions:\n");
4665 	rss_types_display(rss_hf, TESTPMD_RSS_TYPES_CHAR_NUM_PER_LINE);
4666 
4667 	if (!show_rss_key)
4668 		return;
4669 	printf("RSS key:\n");
4670 	for (i = 0; i < hash_key_size; i++)
4671 		printf("%02X", rss_key[i]);
4672 	printf("\n");
4673 }
4674 
4675 void
4676 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
4677 			 uint8_t hash_key_len)
4678 {
4679 	struct rte_eth_rss_conf rss_conf;
4680 	int diag;
4681 
4682 	rss_conf.rss_key = NULL;
4683 	rss_conf.rss_key_len = 0;
4684 	rss_conf.rss_hf = str_to_rsstypes(rss_type);
4685 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
4686 	if (diag == 0) {
4687 		rss_conf.rss_key = hash_key;
4688 		rss_conf.rss_key_len = hash_key_len;
4689 		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
4690 	}
4691 	if (diag == 0)
4692 		return;
4693 
4694 	switch (diag) {
4695 	case -ENODEV:
4696 		fprintf(stderr, "port index %d invalid\n", port_id);
4697 		break;
4698 	case -ENOTSUP:
4699 		fprintf(stderr, "operation not supported by device\n");
4700 		break;
4701 	default:
4702 		fprintf(stderr, "operation failed - diag=%d\n", diag);
4703 		break;
4704 	}
4705 }
4706 
4707 /*
4708  * Check whether a shared rxq scheduled on other lcores.
4709  */
4710 static bool
4711 fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
4712 			   portid_t src_port, queueid_t src_rxq,
4713 			   uint32_t share_group, queueid_t share_rxq)
4714 {
4715 	streamid_t sm_id;
4716 	streamid_t nb_fs_per_lcore;
4717 	lcoreid_t  nb_fc;
4718 	lcoreid_t  lc_id;
4719 	struct fwd_stream *fs;
4720 	struct rte_port *port;
4721 	struct rte_eth_dev_info *dev_info;
4722 	struct rte_eth_rxconf *rxq_conf;
4723 
4724 	nb_fc = cur_fwd_config.nb_fwd_lcores;
4725 	/* Check remaining cores. */
4726 	for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
4727 		sm_id = fwd_lcores[lc_id]->stream_idx;
4728 		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
4729 		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
4730 		     sm_id++) {
4731 			fs = fwd_streams[sm_id];
4732 			port = &ports[fs->rx_port];
4733 			dev_info = &port->dev_info;
4734 			rxq_conf = &port->rxq[fs->rx_queue].conf;
4735 			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
4736 			    == 0 || rxq_conf->share_group == 0)
4737 				/* Not shared rxq. */
4738 				continue;
4739 			if (domain_id != port->dev_info.switch_info.domain_id)
4740 				continue;
4741 			if (rxq_conf->share_group != share_group)
4742 				continue;
4743 			if (rxq_conf->share_qid != share_rxq)
4744 				continue;
4745 			printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
4746 			       share_group, share_rxq);
4747 			printf("  lcore %hhu Port %hu queue %hu\n",
4748 			       src_lc, src_port, src_rxq);
4749 			printf("  lcore %hhu Port %hu queue %hu\n",
4750 			       lc_id, fs->rx_port, fs->rx_queue);
4751 			printf("Please use --nb-cores=%hu to limit number of forwarding cores\n",
4752 			       nb_rxq);
4753 			return true;
4754 		}
4755 	}
4756 	return false;
4757 }
4758 
4759 /*
4760  * Check shared rxq configuration.
4761  *
4762  * Shared group must not being scheduled on different core.
4763  */
4764 bool
4765 pkt_fwd_shared_rxq_check(void)
4766 {
4767 	streamid_t sm_id;
4768 	streamid_t nb_fs_per_lcore;
4769 	lcoreid_t  nb_fc;
4770 	lcoreid_t  lc_id;
4771 	struct fwd_stream *fs;
4772 	uint16_t domain_id;
4773 	struct rte_port *port;
4774 	struct rte_eth_dev_info *dev_info;
4775 	struct rte_eth_rxconf *rxq_conf;
4776 
4777 	if (rxq_share == 0)
4778 		return true;
4779 	nb_fc = cur_fwd_config.nb_fwd_lcores;
4780 	/*
4781 	 * Check streams on each core, make sure the same switch domain +
4782 	 * group + queue doesn't get scheduled on other cores.
4783 	 */
4784 	for (lc_id = 0; lc_id < nb_fc; lc_id++) {
4785 		sm_id = fwd_lcores[lc_id]->stream_idx;
4786 		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
4787 		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
4788 		     sm_id++) {
4789 			fs = fwd_streams[sm_id];
4790 			/* Update lcore info stream being scheduled. */
4791 			fs->lcore = fwd_lcores[lc_id];
4792 			port = &ports[fs->rx_port];
4793 			dev_info = &port->dev_info;
4794 			rxq_conf = &port->rxq[fs->rx_queue].conf;
4795 			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
4796 			    == 0 || rxq_conf->share_group == 0)
4797 				/* Not shared rxq. */
4798 				continue;
4799 			/* Check shared rxq not scheduled on remaining cores. */
4800 			domain_id = port->dev_info.switch_info.domain_id;
4801 			if (fwd_stream_on_other_lcores(domain_id, lc_id,
4802 						       fs->rx_port,
4803 						       fs->rx_queue,
4804 						       rxq_conf->share_group,
4805 						       rxq_conf->share_qid))
4806 				return false;
4807 		}
4808 	}
4809 	return true;
4810 }
4811 
4812 /*
4813  * Setup forwarding configuration for each logical core.
4814  */
4815 static void
4816 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
4817 {
4818 	streamid_t nb_fs_per_lcore;
4819 	streamid_t nb_fs;
4820 	streamid_t sm_id;
4821 	lcoreid_t  nb_extra;
4822 	lcoreid_t  nb_fc;
4823 	lcoreid_t  nb_lc;
4824 	lcoreid_t  lc_id;
4825 
4826 	nb_fs = cfg->nb_fwd_streams;
4827 	nb_fc = cfg->nb_fwd_lcores;
4828 	if (nb_fs <= nb_fc) {
4829 		nb_fs_per_lcore = 1;
4830 		nb_extra = 0;
4831 	} else {
4832 		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
4833 		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
4834 	}
4835 
4836 	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
4837 	sm_id = 0;
4838 	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
4839 		fwd_lcores[lc_id]->stream_idx = sm_id;
4840 		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
4841 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
4842 	}
4843 
4844 	/*
4845 	 * Assign extra remaining streams, if any.
4846 	 */
4847 	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
4848 	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
4849 		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
4850 		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
4851 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
4852 	}
4853 }
4854 
4855 static portid_t
4856 fwd_topology_tx_port_get(portid_t rxp)
4857 {
4858 	static int warning_once = 1;
4859 
4860 	RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
4861 
4862 	switch (port_topology) {
4863 	default:
4864 	case PORT_TOPOLOGY_PAIRED:
4865 		if ((rxp & 0x1) == 0) {
4866 			if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
4867 				return rxp + 1;
4868 			if (warning_once) {
4869 				fprintf(stderr,
4870 					"\nWarning! port-topology=paired and odd forward ports number, the last port will pair with itself.\n\n");
4871 				warning_once = 0;
4872 			}
4873 			return rxp;
4874 		}
4875 		return rxp - 1;
4876 	case PORT_TOPOLOGY_CHAINED:
4877 		return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
4878 	case PORT_TOPOLOGY_LOOP:
4879 		return rxp;
4880 	}
4881 }
4882 
4883 static void
4884 simple_fwd_config_setup(void)
4885 {
4886 	portid_t i;
4887 
4888 	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
4889 	cur_fwd_config.nb_fwd_streams =
4890 		(streamid_t) cur_fwd_config.nb_fwd_ports;
4891 
4892 	/* reinitialize forwarding streams */
4893 	init_fwd_streams();
4894 
4895 	/*
4896 	 * In the simple forwarding test, the number of forwarding cores
4897 	 * must be lower or equal to the number of forwarding ports.
4898 	 */
4899 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4900 	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
4901 		cur_fwd_config.nb_fwd_lcores =
4902 			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
4903 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
4904 
4905 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
4906 		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
4907 		fwd_streams[i]->rx_queue  = 0;
4908 		fwd_streams[i]->tx_port   =
4909 				fwd_ports_ids[fwd_topology_tx_port_get(i)];
4910 		fwd_streams[i]->tx_queue  = 0;
4911 		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
4912 		fwd_streams[i]->retry_enabled = retry_enabled;
4913 	}
4914 }
4915 
4916 /**
4917  * For the RSS forwarding test all streams distributed over lcores. Each stream
4918  * being composed of a RX queue to poll on a RX port for input messages,
4919  * associated with a TX queue of a TX port where to send forwarded packets.
4920  */
4921 static void
4922 rss_fwd_config_setup(void)
4923 {
4924 	portid_t   rxp;
4925 	portid_t   txp;
4926 	queueid_t  rxq;
4927 	queueid_t  nb_q;
4928 	streamid_t  sm_id;
4929 	int start;
4930 
4931 	nb_q = nb_rxq;
4932 	if (nb_q > nb_txq)
4933 		nb_q = nb_txq;
4934 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
4935 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
4936 	cur_fwd_config.nb_fwd_streams =
4937 		(streamid_t) (nb_q / num_procs * cur_fwd_config.nb_fwd_ports);
4938 
4939 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
4940 		cur_fwd_config.nb_fwd_lcores =
4941 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
4942 
4943 	/* reinitialize forwarding streams */
4944 	init_fwd_streams();
4945 
4946 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
4947 
4948 	if (proc_id > 0 && nb_q % num_procs != 0)
4949 		printf("Warning! queue numbers should be multiple of processes, or packet loss will happen.\n");
4950 
4951 	/**
4952 	 * In multi-process, All queues are allocated to different
4953 	 * processes based on num_procs and proc_id. For example:
4954 	 * if supports 4 queues(nb_q), 2 processes(num_procs),
4955 	 * the 0~1 queue for primary process.
4956 	 * the 2~3 queue for secondary process.
4957 	 */
4958 	start = proc_id * nb_q / num_procs;
4959 	rxp = 0;
4960 	rxq = start;
4961 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
4962 		struct fwd_stream *fs;
4963 
4964 		fs = fwd_streams[sm_id];
4965 		txp = fwd_topology_tx_port_get(rxp);
4966 		fs->rx_port = fwd_ports_ids[rxp];
4967 		fs->rx_queue = rxq;
4968 		fs->tx_port = fwd_ports_ids[txp];
4969 		fs->tx_queue = rxq;
4970 		fs->peer_addr = fs->tx_port;
4971 		fs->retry_enabled = retry_enabled;
4972 		rxp++;
4973 		if (rxp < nb_fwd_ports)
4974 			continue;
4975 		rxp = 0;
4976 		rxq++;
4977 	}
4978 }
4979 
4980 static uint16_t
4981 get_fwd_port_total_tc_num(void)
4982 {
4983 	struct rte_eth_dcb_info dcb_info;
4984 	uint16_t total_tc_num = 0;
4985 	unsigned int i;
4986 
4987 	for (i = 0; i < nb_fwd_ports; i++) {
4988 		(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[i], &dcb_info);
4989 		total_tc_num += dcb_info.nb_tcs;
4990 	}
4991 
4992 	return total_tc_num;
4993 }
4994 
4995 /**
4996  * For the DCB forwarding test, each core is assigned on each traffic class.
4997  *
4998  * Each core is assigned a multi-stream, each stream being composed of
4999  * a RX queue to poll on a RX port for input messages, associated with
5000  * a TX queue of a TX port where to send forwarded packets. All RX and
5001  * TX queues are mapping to the same traffic class.
5002  * If VMDQ and DCB co-exist, each traffic class on different POOLs share
5003  * the same core
5004  */
5005 static void
5006 dcb_fwd_config_setup(void)
5007 {
5008 	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
5009 	portid_t txp, rxp = 0;
5010 	queueid_t txq, rxq = 0;
5011 	lcoreid_t  lc_id;
5012 	uint16_t nb_rx_queue, nb_tx_queue;
5013 	uint16_t i, j, k, sm_id = 0;
5014 	uint16_t total_tc_num;
5015 	struct rte_port *port;
5016 	uint8_t tc = 0;
5017 	portid_t pid;
5018 	int ret;
5019 
5020 	/*
5021 	 * The fwd_config_setup() is called when the port is RTE_PORT_STARTED
5022 	 * or RTE_PORT_STOPPED.
5023 	 *
5024 	 * Re-configure ports to get updated mapping between tc and queue in
5025 	 * case the queue number of the port is changed. Skip for started ports
5026 	 * since modifying queue number and calling dev_configure need to stop
5027 	 * ports first.
5028 	 */
5029 	for (pid = 0; pid < nb_fwd_ports; pid++) {
5030 		if (port_is_started(pid) == 1)
5031 			continue;
5032 
5033 		port = &ports[pid];
5034 		ret = rte_eth_dev_configure(pid, nb_rxq, nb_txq,
5035 					    &port->dev_conf);
5036 		if (ret < 0) {
5037 			fprintf(stderr,
5038 				"Failed to re-configure port %d, ret = %d.\n",
5039 				pid, ret);
5040 			return;
5041 		}
5042 	}
5043 
5044 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
5045 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
5046 	cur_fwd_config.nb_fwd_streams =
5047 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
5048 	total_tc_num = get_fwd_port_total_tc_num();
5049 	if (cur_fwd_config.nb_fwd_lcores > total_tc_num)
5050 		cur_fwd_config.nb_fwd_lcores = total_tc_num;
5051 
5052 	/* reinitialize forwarding streams */
5053 	init_fwd_streams();
5054 	sm_id = 0;
5055 	txp = 1;
5056 	/* get the dcb info on the first RX and TX ports */
5057 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
5058 	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
5059 
5060 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
5061 		fwd_lcores[lc_id]->stream_nb = 0;
5062 		fwd_lcores[lc_id]->stream_idx = sm_id;
5063 		for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
5064 			/* if the nb_queue is zero, means this tc is
5065 			 * not enabled on the POOL
5066 			 */
5067 			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
5068 				break;
5069 			k = fwd_lcores[lc_id]->stream_nb +
5070 				fwd_lcores[lc_id]->stream_idx;
5071 			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
5072 			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
5073 			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
5074 			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
5075 			for (j = 0; j < nb_rx_queue; j++) {
5076 				struct fwd_stream *fs;
5077 
5078 				fs = fwd_streams[k + j];
5079 				fs->rx_port = fwd_ports_ids[rxp];
5080 				fs->rx_queue = rxq + j;
5081 				fs->tx_port = fwd_ports_ids[txp];
5082 				fs->tx_queue = txq + j % nb_tx_queue;
5083 				fs->peer_addr = fs->tx_port;
5084 				fs->retry_enabled = retry_enabled;
5085 			}
5086 			fwd_lcores[lc_id]->stream_nb +=
5087 				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
5088 		}
5089 		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
5090 
5091 		tc++;
5092 		if (tc < rxp_dcb_info.nb_tcs)
5093 			continue;
5094 		/* Restart from TC 0 on next RX port */
5095 		tc = 0;
5096 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
5097 			rxp = (portid_t)
5098 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
5099 		else
5100 			rxp++;
5101 		if (rxp >= nb_fwd_ports)
5102 			return;
5103 		/* get the dcb information on next RX and TX ports */
5104 		if ((rxp & 0x1) == 0)
5105 			txp = (portid_t) (rxp + 1);
5106 		else
5107 			txp = (portid_t) (rxp - 1);
5108 		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
5109 		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
5110 	}
5111 }
5112 
5113 static void
5114 icmp_echo_config_setup(void)
5115 {
5116 	portid_t  rxp;
5117 	queueid_t rxq;
5118 	lcoreid_t lc_id;
5119 	uint16_t  sm_id;
5120 
5121 	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
5122 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
5123 			(nb_txq * nb_fwd_ports);
5124 	else
5125 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
5126 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
5127 	cur_fwd_config.nb_fwd_streams =
5128 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
5129 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
5130 		cur_fwd_config.nb_fwd_lcores =
5131 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
5132 	if (verbose_level > 0) {
5133 		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
5134 		       __FUNCTION__,
5135 		       cur_fwd_config.nb_fwd_lcores,
5136 		       cur_fwd_config.nb_fwd_ports,
5137 		       cur_fwd_config.nb_fwd_streams);
5138 	}
5139 
5140 	/* reinitialize forwarding streams */
5141 	init_fwd_streams();
5142 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
5143 	rxp = 0; rxq = 0;
5144 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
5145 		if (verbose_level > 0)
5146 			printf("  core=%d: \n", lc_id);
5147 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
5148 			struct fwd_stream *fs;
5149 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
5150 			fs->rx_port = fwd_ports_ids[rxp];
5151 			fs->rx_queue = rxq;
5152 			fs->tx_port = fs->rx_port;
5153 			fs->tx_queue = rxq;
5154 			fs->peer_addr = fs->tx_port;
5155 			fs->retry_enabled = retry_enabled;
5156 			if (verbose_level > 0)
5157 				printf("  stream=%d port=%d rxq=%d txq=%d\n",
5158 				       sm_id, fs->rx_port, fs->rx_queue,
5159 				       fs->tx_queue);
5160 			rxq = (queueid_t) (rxq + 1);
5161 			if (rxq == nb_rxq) {
5162 				rxq = 0;
5163 				rxp = (portid_t) (rxp + 1);
5164 			}
5165 		}
5166 	}
5167 }
5168 
5169 void
5170 fwd_config_setup(void)
5171 {
5172 	struct rte_port *port;
5173 	portid_t pt_id;
5174 	unsigned int i;
5175 
5176 	cur_fwd_config.fwd_eng = cur_fwd_eng;
5177 	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
5178 		icmp_echo_config_setup();
5179 		return;
5180 	}
5181 
5182 	if ((nb_rxq > 1) && (nb_txq > 1)){
5183 		if (dcb_config) {
5184 			for (i = 0; i < nb_fwd_ports; i++) {
5185 				pt_id = fwd_ports_ids[i];
5186 				port = &ports[pt_id];
5187 				if (!port->dcb_flag) {
5188 					fprintf(stderr,
5189 						"In DCB mode, all forwarding ports must be configured in this mode.\n");
5190 					return;
5191 				}
5192 			}
5193 			if (nb_fwd_lcores == 1) {
5194 				fprintf(stderr,
5195 					"In DCB mode,the nb forwarding cores should be larger than 1.\n");
5196 				return;
5197 			}
5198 
5199 			dcb_fwd_config_setup();
5200 		} else
5201 			rss_fwd_config_setup();
5202 	}
5203 	else
5204 		simple_fwd_config_setup();
5205 }
5206 
5207 static const char *
5208 mp_alloc_to_str(uint8_t mode)
5209 {
5210 	switch (mode) {
5211 	case MP_ALLOC_NATIVE:
5212 		return "native";
5213 	case MP_ALLOC_ANON:
5214 		return "anon";
5215 	case MP_ALLOC_XMEM:
5216 		return "xmem";
5217 	case MP_ALLOC_XMEM_HUGE:
5218 		return "xmemhuge";
5219 	case MP_ALLOC_XBUF:
5220 		return "xbuf";
5221 	default:
5222 		return "invalid";
5223 	}
5224 }
5225 
5226 void
5227 pkt_fwd_config_display(struct fwd_config *cfg)
5228 {
5229 	struct fwd_stream *fs;
5230 	lcoreid_t  lc_id;
5231 	streamid_t sm_id;
5232 
5233 	printf("%s%s%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
5234 		"NUMA support %s, MP allocation mode: %s\n",
5235 		cfg->fwd_eng->fwd_mode_name,
5236 		cfg->fwd_eng->status ? "-" : "",
5237 		cfg->fwd_eng->status ? cfg->fwd_eng->status : "",
5238 		retry_enabled == 0 ? "" : " with retry",
5239 		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
5240 		numa_support == 1 ? "enabled" : "disabled",
5241 		mp_alloc_to_str(mp_alloc_type));
5242 
5243 	if (retry_enabled)
5244 		printf("TX retry num: %u, delay between TX retries: %uus\n",
5245 			burst_tx_retry_num, burst_tx_delay_time);
5246 	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
5247 		printf("Logical Core %u (socket %u) forwards packets on "
5248 		       "%d streams:",
5249 		       fwd_lcores_cpuids[lc_id],
5250 		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
5251 		       fwd_lcores[lc_id]->stream_nb);
5252 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
5253 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
5254 			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
5255 			       "P=%d/Q=%d (socket %u) ",
5256 			       fs->rx_port, fs->rx_queue,
5257 			       ports[fs->rx_port].socket_id,
5258 			       fs->tx_port, fs->tx_queue,
5259 			       ports[fs->tx_port].socket_id);
5260 			print_ethaddr("peer=",
5261 				      &peer_eth_addrs[fs->peer_addr]);
5262 		}
5263 		printf("\n");
5264 	}
5265 	printf("\n");
5266 }
5267 
5268 void
5269 set_fwd_eth_peer(portid_t port_id, char *peer_addr)
5270 {
5271 	struct rte_ether_addr new_peer_addr;
5272 	if (!rte_eth_dev_is_valid_port(port_id)) {
5273 		fprintf(stderr, "Error: Invalid port number %i\n", port_id);
5274 		return;
5275 	}
5276 	if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) {
5277 		fprintf(stderr, "Error: Invalid ethernet address: %s\n",
5278 			peer_addr);
5279 		return;
5280 	}
5281 	peer_eth_addrs[port_id] = new_peer_addr;
5282 }
5283 
5284 int
5285 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
5286 {
5287 	unsigned int i;
5288 	unsigned int lcore_cpuid;
5289 	int record_now;
5290 
5291 	record_now = 0;
5292  again:
5293 	for (i = 0; i < nb_lc; i++) {
5294 		lcore_cpuid = lcorelist[i];
5295 		if (! rte_lcore_is_enabled(lcore_cpuid)) {
5296 			fprintf(stderr, "lcore %u not enabled\n", lcore_cpuid);
5297 			return -1;
5298 		}
5299 		if (lcore_cpuid == rte_get_main_lcore()) {
5300 			fprintf(stderr,
5301 				"lcore %u cannot be masked on for running packet forwarding, which is the main lcore and reserved for command line parsing only\n",
5302 				lcore_cpuid);
5303 			return -1;
5304 		}
5305 		if (record_now)
5306 			fwd_lcores_cpuids[i] = lcore_cpuid;
5307 	}
5308 	if (record_now == 0) {
5309 		record_now = 1;
5310 		goto again;
5311 	}
5312 	nb_cfg_lcores = (lcoreid_t) nb_lc;
5313 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
5314 		printf("previous number of forwarding cores %u - changed to "
5315 		       "number of configured cores %u\n",
5316 		       (unsigned int) nb_fwd_lcores, nb_lc);
5317 		nb_fwd_lcores = (lcoreid_t) nb_lc;
5318 	}
5319 
5320 	return 0;
5321 }
5322 
5323 int
5324 set_fwd_lcores_mask(uint64_t lcoremask)
5325 {
5326 	unsigned int lcorelist[64];
5327 	unsigned int nb_lc;
5328 	unsigned int i;
5329 
5330 	if (lcoremask == 0) {
5331 		fprintf(stderr, "Invalid NULL mask of cores\n");
5332 		return -1;
5333 	}
5334 	nb_lc = 0;
5335 	for (i = 0; i < 64; i++) {
5336 		if (! ((uint64_t)(1ULL << i) & lcoremask))
5337 			continue;
5338 		lcorelist[nb_lc++] = i;
5339 	}
5340 	return set_fwd_lcores_list(lcorelist, nb_lc);
5341 }
5342 
5343 void
5344 set_fwd_lcores_number(uint16_t nb_lc)
5345 {
5346 	if (test_done == 0) {
5347 		fprintf(stderr, "Please stop forwarding first\n");
5348 		return;
5349 	}
5350 	if (nb_lc > nb_cfg_lcores) {
5351 		fprintf(stderr,
5352 			"nb fwd cores %u > %u (max. number of configured lcores) - ignored\n",
5353 			(unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
5354 		return;
5355 	}
5356 	nb_fwd_lcores = (lcoreid_t) nb_lc;
5357 	printf("Number of forwarding cores set to %u\n",
5358 	       (unsigned int) nb_fwd_lcores);
5359 }
5360 
5361 void
5362 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
5363 {
5364 	unsigned int i;
5365 	portid_t port_id;
5366 	int record_now;
5367 
5368 	record_now = 0;
5369  again:
5370 	for (i = 0; i < nb_pt; i++) {
5371 		port_id = (portid_t) portlist[i];
5372 		if (port_id_is_invalid(port_id, ENABLED_WARN))
5373 			return;
5374 		if (record_now)
5375 			fwd_ports_ids[i] = port_id;
5376 	}
5377 	if (record_now == 0) {
5378 		record_now = 1;
5379 		goto again;
5380 	}
5381 	nb_cfg_ports = (portid_t) nb_pt;
5382 	if (nb_fwd_ports != (portid_t) nb_pt) {
5383 		printf("previous number of forwarding ports %u - changed to "
5384 		       "number of configured ports %u\n",
5385 		       (unsigned int) nb_fwd_ports, nb_pt);
5386 		nb_fwd_ports = (portid_t) nb_pt;
5387 	}
5388 }
5389 
5390 /**
5391  * Parse the user input and obtain the list of forwarding ports
5392  *
5393  * @param[in] list
5394  *   String containing the user input. User can specify
5395  *   in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6.
5396  *   For example, if the user wants to use all the available
5397  *   4 ports in his system, then the input can be 0-3 or 0,1,2,3.
5398  *   If the user wants to use only the ports 1,2 then the input
5399  *   is 1,2.
5400  *   valid characters are '-' and ','
5401  * @param[out] values
5402  *   This array will be filled with a list of port IDs
5403  *   based on the user input
5404  *   Note that duplicate entries are discarded and only the first
5405  *   count entries in this array are port IDs and all the rest
5406  *   will contain default values
5407  * @param[in] maxsize
5408  *   This parameter denotes 2 things
5409  *   1) Number of elements in the values array
5410  *   2) Maximum value of each element in the values array
5411  * @return
5412  *   On success, returns total count of parsed port IDs
5413  *   On failure, returns 0
5414  */
5415 static unsigned int
5416 parse_port_list(const char *list, unsigned int *values, unsigned int maxsize)
5417 {
5418 	unsigned int count = 0;
5419 	char *end = NULL;
5420 	int min, max;
5421 	int value, i;
5422 	unsigned int marked[maxsize];
5423 
5424 	if (list == NULL || values == NULL)
5425 		return 0;
5426 
5427 	for (i = 0; i < (int)maxsize; i++)
5428 		marked[i] = 0;
5429 
5430 	min = INT_MAX;
5431 
5432 	do {
5433 		/*Remove the blank spaces if any*/
5434 		while (isblank(*list))
5435 			list++;
5436 		if (*list == '\0')
5437 			break;
5438 		errno = 0;
5439 		value = strtol(list, &end, 10);
5440 		if (errno || end == NULL)
5441 			return 0;
5442 		if (value < 0 || value >= (int)maxsize)
5443 			return 0;
5444 		while (isblank(*end))
5445 			end++;
5446 		if (*end == '-' && min == INT_MAX) {
5447 			min = value;
5448 		} else if ((*end == ',') || (*end == '\0')) {
5449 			max = value;
5450 			if (min == INT_MAX)
5451 				min = value;
5452 			for (i = min; i <= max; i++) {
5453 				if (count < maxsize) {
5454 					if (marked[i])
5455 						continue;
5456 					values[count] = i;
5457 					marked[i] = 1;
5458 					count++;
5459 				}
5460 			}
5461 			min = INT_MAX;
5462 		} else
5463 			return 0;
5464 		list = end + 1;
5465 	} while (*end != '\0');
5466 
5467 	return count;
5468 }
5469 
5470 void
5471 parse_fwd_portlist(const char *portlist)
5472 {
5473 	unsigned int portcount;
5474 	unsigned int portindex[RTE_MAX_ETHPORTS];
5475 	unsigned int i, valid_port_count = 0;
5476 
5477 	portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS);
5478 	if (!portcount)
5479 		rte_exit(EXIT_FAILURE, "Invalid fwd port list\n");
5480 
5481 	/*
5482 	 * Here we verify the validity of the ports
5483 	 * and thereby calculate the total number of
5484 	 * valid ports
5485 	 */
5486 	for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) {
5487 		if (rte_eth_dev_is_valid_port(portindex[i])) {
5488 			portindex[valid_port_count] = portindex[i];
5489 			valid_port_count++;
5490 		}
5491 	}
5492 
5493 	set_fwd_ports_list(portindex, valid_port_count);
5494 }
5495 
5496 void
5497 set_fwd_ports_mask(uint64_t portmask)
5498 {
5499 	unsigned int portlist[64];
5500 	unsigned int nb_pt;
5501 	unsigned int i;
5502 
5503 	if (portmask == 0) {
5504 		fprintf(stderr, "Invalid NULL mask of ports\n");
5505 		return;
5506 	}
5507 	nb_pt = 0;
5508 	RTE_ETH_FOREACH_DEV(i) {
5509 		if (! ((uint64_t)(1ULL << i) & portmask))
5510 			continue;
5511 		portlist[nb_pt++] = i;
5512 	}
5513 	set_fwd_ports_list(portlist, nb_pt);
5514 }
5515 
5516 void
5517 set_fwd_ports_number(uint16_t nb_pt)
5518 {
5519 	if (nb_pt > nb_cfg_ports) {
5520 		fprintf(stderr,
5521 			"nb fwd ports %u > %u (number of configured ports) - ignored\n",
5522 			(unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
5523 		return;
5524 	}
5525 	nb_fwd_ports = (portid_t) nb_pt;
5526 	printf("Number of forwarding ports set to %u\n",
5527 	       (unsigned int) nb_fwd_ports);
5528 }
5529 
5530 int
5531 port_is_forwarding(portid_t port_id)
5532 {
5533 	unsigned int i;
5534 
5535 	if (port_id_is_invalid(port_id, ENABLED_WARN))
5536 		return -1;
5537 
5538 	for (i = 0; i < nb_fwd_ports; i++) {
5539 		if (fwd_ports_ids[i] == port_id)
5540 			return 1;
5541 	}
5542 
5543 	return 0;
5544 }
5545 
5546 void
5547 set_nb_pkt_per_burst(uint16_t nb)
5548 {
5549 	if (nb > MAX_PKT_BURST) {
5550 		fprintf(stderr,
5551 			"nb pkt per burst: %u > %u (maximum packet per burst)  ignored\n",
5552 			(unsigned int) nb, (unsigned int) MAX_PKT_BURST);
5553 		return;
5554 	}
5555 	nb_pkt_per_burst = nb;
5556 	printf("Number of packets per burst set to %u\n",
5557 	       (unsigned int) nb_pkt_per_burst);
5558 }
5559 
5560 static const char *
5561 tx_split_get_name(enum tx_pkt_split split)
5562 {
5563 	uint32_t i;
5564 
5565 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
5566 		if (tx_split_name[i].split == split)
5567 			return tx_split_name[i].name;
5568 	}
5569 	return NULL;
5570 }
5571 
5572 void
5573 set_tx_pkt_split(const char *name)
5574 {
5575 	uint32_t i;
5576 
5577 	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
5578 		if (strcmp(tx_split_name[i].name, name) == 0) {
5579 			tx_pkt_split = tx_split_name[i].split;
5580 			return;
5581 		}
5582 	}
5583 	fprintf(stderr, "unknown value: \"%s\"\n", name);
5584 }
5585 
5586 int
5587 parse_fec_mode(const char *name, uint32_t *fec_capa)
5588 {
5589 	uint8_t i;
5590 
5591 	for (i = 0; i < RTE_DIM(fec_mode_name); i++) {
5592 		if (strcmp(fec_mode_name[i].name, name) == 0) {
5593 			*fec_capa =
5594 				RTE_ETH_FEC_MODE_TO_CAPA(fec_mode_name[i].mode);
5595 			return 0;
5596 		}
5597 	}
5598 	return -1;
5599 }
5600 
5601 void
5602 show_fec_capability(unsigned int num, struct rte_eth_fec_capa *speed_fec_capa)
5603 {
5604 	unsigned int i, j;
5605 
5606 	printf("FEC capabilities:\n");
5607 
5608 	for (i = 0; i < num; i++) {
5609 		printf("%s : ",
5610 			rte_eth_link_speed_to_str(speed_fec_capa[i].speed));
5611 
5612 		for (j = 0; j < RTE_DIM(fec_mode_name); j++) {
5613 			if (RTE_ETH_FEC_MODE_TO_CAPA(j) &
5614 						speed_fec_capa[i].capa)
5615 				printf("%s ", fec_mode_name[j].name);
5616 		}
5617 		printf("\n");
5618 	}
5619 }
5620 
5621 void
5622 show_rx_pkt_offsets(void)
5623 {
5624 	uint32_t i, n;
5625 
5626 	n = rx_pkt_nb_offs;
5627 	printf("Number of offsets: %u\n", n);
5628 	if (n) {
5629 		printf("Segment offsets: ");
5630 		for (i = 0; i != n - 1; i++)
5631 			printf("%hu,", rx_pkt_seg_offsets[i]);
5632 		printf("%hu\n", rx_pkt_seg_lengths[i]);
5633 	}
5634 }
5635 
5636 void
5637 set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
5638 {
5639 	unsigned int i;
5640 
5641 	if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
5642 		printf("nb segments per RX packets=%u >= "
5643 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
5644 		return;
5645 	}
5646 
5647 	/*
5648 	 * No extra check here, the segment length will be checked by PMD
5649 	 * in the extended queue setup.
5650 	 */
5651 	for (i = 0; i < nb_offs; i++) {
5652 		if (seg_offsets[i] >= UINT16_MAX) {
5653 			printf("offset[%u]=%u > UINT16_MAX - give up\n",
5654 			       i, seg_offsets[i]);
5655 			return;
5656 		}
5657 	}
5658 
5659 	for (i = 0; i < nb_offs; i++)
5660 		rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];
5661 
5662 	rx_pkt_nb_offs = (uint8_t) nb_offs;
5663 }
5664 
5665 void
5666 show_rx_pkt_segments(void)
5667 {
5668 	uint32_t i, n;
5669 
5670 	n = rx_pkt_nb_segs;
5671 	printf("Number of segments: %u\n", n);
5672 	if (n) {
5673 		printf("Segment sizes: ");
5674 		for (i = 0; i != n - 1; i++)
5675 			printf("%hu,", rx_pkt_seg_lengths[i]);
5676 		printf("%hu\n", rx_pkt_seg_lengths[i]);
5677 	}
5678 }
5679 
5680 static const char *get_ptype_str(uint32_t ptype)
5681 {
5682 	const char *str;
5683 
5684 	switch (ptype) {
5685 	case RTE_PTYPE_L2_ETHER:
5686 		str = "eth";
5687 		break;
5688 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
5689 		str = "ipv4";
5690 		break;
5691 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
5692 		str = "ipv6";
5693 		break;
5694 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP:
5695 		str = "ipv4-tcp";
5696 		break;
5697 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP:
5698 		str = "ipv4-udp";
5699 		break;
5700 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP:
5701 		str = "ipv4-sctp";
5702 		break;
5703 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP:
5704 		str = "ipv6-tcp";
5705 		break;
5706 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP:
5707 		str = "ipv6-udp";
5708 		break;
5709 	case RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_SCTP:
5710 		str = "ipv6-sctp";
5711 		break;
5712 	case RTE_PTYPE_TUNNEL_GRENAT:
5713 		str = "grenat";
5714 		break;
5715 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER:
5716 		str = "inner-eth";
5717 		break;
5718 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER
5719 			| RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN:
5720 		str = "inner-ipv4";
5721 		break;
5722 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER
5723 			| RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN:
5724 		str = "inner-ipv6";
5725 		break;
5726 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5727 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP:
5728 		str = "inner-ipv4-tcp";
5729 		break;
5730 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5731 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP:
5732 		str = "inner-ipv4-udp";
5733 		break;
5734 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5735 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP:
5736 		str = "inner-ipv4-sctp";
5737 		break;
5738 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5739 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP:
5740 		str = "inner-ipv6-tcp";
5741 		break;
5742 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5743 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP:
5744 		str = "inner-ipv6-udp";
5745 		break;
5746 	case RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
5747 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_SCTP:
5748 		str = "inner-ipv6-sctp";
5749 		break;
5750 	default:
5751 		str = "unsupported";
5752 	}
5753 
5754 	return str;
5755 }
5756 
5757 void
5758 show_rx_pkt_hdrs(void)
5759 {
5760 	uint32_t i, n;
5761 
5762 	n = rx_pkt_nb_segs;
5763 	printf("Number of segments: %u\n", n);
5764 	if (n) {
5765 		printf("Packet segs: ");
5766 		for (i = 0; i < n - 1; i++)
5767 			printf("%s, ", get_ptype_str(rx_pkt_hdr_protos[i]));
5768 		printf("payload\n");
5769 	}
5770 }
5771 
5772 void
5773 set_rx_pkt_hdrs(unsigned int *seg_hdrs, unsigned int nb_segs)
5774 {
5775 	unsigned int i;
5776 
5777 	if (nb_segs + 1 > MAX_SEGS_BUFFER_SPLIT) {
5778 		printf("nb segments per RX packets=%u > "
5779 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs + 1);
5780 		return;
5781 	}
5782 
5783 	memset(rx_pkt_hdr_protos, 0, sizeof(rx_pkt_hdr_protos));
5784 
5785 	for (i = 0; i < nb_segs; i++)
5786 		rx_pkt_hdr_protos[i] = (uint32_t)seg_hdrs[i];
5787 	/*
5788 	 * We calculate the number of hdrs, but payload is not included,
5789 	 * so rx_pkt_nb_segs would increase 1.
5790 	 */
5791 	rx_pkt_nb_segs = nb_segs + 1;
5792 }
5793 
5794 void
5795 set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
5796 {
5797 	unsigned int i;
5798 
5799 	if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
5800 		printf("nb segments per RX packets=%u >= "
5801 		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
5802 		return;
5803 	}
5804 
5805 	/*
5806 	 * No extra check here, the segment length will be checked by PMD
5807 	 * in the extended queue setup.
5808 	 */
5809 	for (i = 0; i < nb_segs; i++) {
5810 		if (seg_lengths[i] >= UINT16_MAX) {
5811 			printf("length[%u]=%u > UINT16_MAX - give up\n",
5812 			       i, seg_lengths[i]);
5813 			return;
5814 		}
5815 	}
5816 
5817 	for (i = 0; i < nb_segs; i++)
5818 		rx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
5819 
5820 	rx_pkt_nb_segs = (uint8_t) nb_segs;
5821 }
5822 
5823 void
5824 show_tx_pkt_segments(void)
5825 {
5826 	uint32_t i, n;
5827 	const char *split;
5828 
5829 	n = tx_pkt_nb_segs;
5830 	split = tx_split_get_name(tx_pkt_split);
5831 
5832 	printf("Number of segments: %u\n", n);
5833 	printf("Segment sizes: ");
5834 	for (i = 0; i != n - 1; i++)
5835 		printf("%hu,", tx_pkt_seg_lengths[i]);
5836 	printf("%hu\n", tx_pkt_seg_lengths[i]);
5837 	printf("Split packet: %s\n", split);
5838 }
5839 
5840 static bool
5841 nb_segs_is_invalid(unsigned int nb_segs)
5842 {
5843 	uint16_t ring_size;
5844 	uint16_t queue_id;
5845 	uint16_t port_id;
5846 	int ret;
5847 
5848 	RTE_ETH_FOREACH_DEV(port_id) {
5849 		for (queue_id = 0; queue_id < nb_txq; queue_id++) {
5850 			ret = get_tx_ring_size(port_id, queue_id, &ring_size);
5851 			if (ret) {
5852 				/* Port may not be initialized yet, can't say
5853 				 * the port is invalid in this stage.
5854 				 */
5855 				continue;
5856 			}
5857 			if (ring_size < nb_segs) {
5858 				printf("nb segments per TX packets=%u >= TX "
5859 				       "queue(%u) ring_size=%u - txpkts ignored\n",
5860 				       nb_segs, queue_id, ring_size);
5861 				return true;
5862 			}
5863 		}
5864 	}
5865 
5866 	return false;
5867 }
5868 
5869 void
5870 set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
5871 {
5872 	uint16_t tx_pkt_len;
5873 	unsigned int i;
5874 
5875 	/*
5876 	 * For single segment settings failed check is ignored.
5877 	 * It is a very basic capability to send the single segment
5878 	 * packets, suppose it is always supported.
5879 	 */
5880 	if (nb_segs > 1 && nb_segs_is_invalid(nb_segs)) {
5881 		fprintf(stderr,
5882 			"Tx segment size(%u) is not supported - txpkts ignored\n",
5883 			nb_segs);
5884 		return;
5885 	}
5886 
5887 	if (nb_segs > RTE_MAX_SEGS_PER_PKT) {
5888 		fprintf(stderr,
5889 			"Tx segment size(%u) is bigger than max number of segment(%u)\n",
5890 			nb_segs, RTE_MAX_SEGS_PER_PKT);
5891 		return;
5892 	}
5893 
5894 	/*
5895 	 * Check that each segment length is greater or equal than
5896 	 * the mbuf data size.
5897 	 * Check also that the total packet length is greater or equal than the
5898 	 * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) +
5899 	 * 20 + 8).
5900 	 */
5901 	tx_pkt_len = 0;
5902 	for (i = 0; i < nb_segs; i++) {
5903 		if (seg_lengths[i] > mbuf_data_size[0]) {
5904 			fprintf(stderr,
5905 				"length[%u]=%u > mbuf_data_size=%u - give up\n",
5906 				i, seg_lengths[i], mbuf_data_size[0]);
5907 			return;
5908 		}
5909 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
5910 	}
5911 	if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) {
5912 		fprintf(stderr, "total packet length=%u < %d - give up\n",
5913 				(unsigned) tx_pkt_len,
5914 				(int)(sizeof(struct rte_ether_hdr) + 20 + 8));
5915 		return;
5916 	}
5917 
5918 	for (i = 0; i < nb_segs; i++)
5919 		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
5920 
5921 	tx_pkt_length  = tx_pkt_len;
5922 	tx_pkt_nb_segs = (uint8_t) nb_segs;
5923 }
5924 
5925 void
5926 show_tx_pkt_times(void)
5927 {
5928 	printf("Interburst gap: %u\n", tx_pkt_times_inter);
5929 	printf("Intraburst gap: %u\n", tx_pkt_times_intra);
5930 }
5931 
5932 void
5933 set_tx_pkt_times(unsigned int *tx_times)
5934 {
5935 	tx_pkt_times_inter = tx_times[0];
5936 	tx_pkt_times_intra = tx_times[1];
5937 }
5938 
5939 #ifdef RTE_LIB_GRO
5940 void
5941 setup_gro(const char *onoff, portid_t port_id)
5942 {
5943 	if (!rte_eth_dev_is_valid_port(port_id)) {
5944 		fprintf(stderr, "invalid port id %u\n", port_id);
5945 		return;
5946 	}
5947 	if (test_done == 0) {
5948 		fprintf(stderr,
5949 			"Before enable/disable GRO, please stop forwarding first\n");
5950 		return;
5951 	}
5952 	if (strcmp(onoff, "on") == 0) {
5953 		if (gro_ports[port_id].enable != 0) {
5954 			fprintf(stderr,
5955 				"Port %u has enabled GRO. Please disable GRO first\n",
5956 				port_id);
5957 			return;
5958 		}
5959 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
5960 			gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
5961 			gro_ports[port_id].param.max_flow_num =
5962 				GRO_DEFAULT_FLOW_NUM;
5963 			gro_ports[port_id].param.max_item_per_flow =
5964 				GRO_DEFAULT_ITEM_NUM_PER_FLOW;
5965 		}
5966 		gro_ports[port_id].enable = 1;
5967 	} else {
5968 		if (gro_ports[port_id].enable == 0) {
5969 			fprintf(stderr, "Port %u has disabled GRO\n", port_id);
5970 			return;
5971 		}
5972 		gro_ports[port_id].enable = 0;
5973 	}
5974 }
5975 
5976 void
5977 setup_gro_flush_cycles(uint8_t cycles)
5978 {
5979 	if (test_done == 0) {
5980 		fprintf(stderr,
5981 			"Before change flush interval for GRO, please stop forwarding first.\n");
5982 		return;
5983 	}
5984 
5985 	if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
5986 			GRO_DEFAULT_FLUSH_CYCLES) {
5987 		fprintf(stderr,
5988 			"The flushing cycle be in the range of 1 to %u. Revert to the default value %u.\n",
5989 			GRO_MAX_FLUSH_CYCLES, GRO_DEFAULT_FLUSH_CYCLES);
5990 		cycles = GRO_DEFAULT_FLUSH_CYCLES;
5991 	}
5992 
5993 	gro_flush_cycles = cycles;
5994 }
5995 
5996 void
5997 show_gro(portid_t port_id)
5998 {
5999 	struct rte_gro_param *param;
6000 	uint32_t max_pkts_num;
6001 
6002 	param = &gro_ports[port_id].param;
6003 
6004 	if (!rte_eth_dev_is_valid_port(port_id)) {
6005 		fprintf(stderr, "Invalid port id %u.\n", port_id);
6006 		return;
6007 	}
6008 	if (gro_ports[port_id].enable) {
6009 		printf("GRO type: TCP/IPv4\n");
6010 		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
6011 			max_pkts_num = param->max_flow_num *
6012 				param->max_item_per_flow;
6013 		} else
6014 			max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
6015 		printf("Max number of packets to perform GRO: %u\n",
6016 				max_pkts_num);
6017 		printf("Flushing cycles: %u\n", gro_flush_cycles);
6018 	} else
6019 		printf("Port %u doesn't enable GRO.\n", port_id);
6020 }
6021 #endif /* RTE_LIB_GRO */
6022 
6023 #ifdef RTE_LIB_GSO
6024 void
6025 setup_gso(const char *mode, portid_t port_id)
6026 {
6027 	if (!rte_eth_dev_is_valid_port(port_id)) {
6028 		fprintf(stderr, "invalid port id %u\n", port_id);
6029 		return;
6030 	}
6031 	if (strcmp(mode, "on") == 0) {
6032 		if (test_done == 0) {
6033 			fprintf(stderr,
6034 				"before enabling GSO, please stop forwarding first\n");
6035 			return;
6036 		}
6037 		gso_ports[port_id].enable = 1;
6038 	} else if (strcmp(mode, "off") == 0) {
6039 		if (test_done == 0) {
6040 			fprintf(stderr,
6041 				"before disabling GSO, please stop forwarding first\n");
6042 			return;
6043 		}
6044 		gso_ports[port_id].enable = 0;
6045 	}
6046 }
6047 #endif /* RTE_LIB_GSO */
6048 
6049 char*
6050 list_pkt_forwarding_modes(void)
6051 {
6052 	static char fwd_modes[128] = "";
6053 	const char *separator = "|";
6054 	struct fwd_engine *fwd_eng;
6055 	unsigned i = 0;
6056 
6057 	if (strlen (fwd_modes) == 0) {
6058 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
6059 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
6060 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
6061 			strncat(fwd_modes, separator,
6062 					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
6063 		}
6064 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
6065 	}
6066 
6067 	return fwd_modes;
6068 }
6069 
6070 char*
6071 list_pkt_forwarding_retry_modes(void)
6072 {
6073 	static char fwd_modes[128] = "";
6074 	const char *separator = "|";
6075 	struct fwd_engine *fwd_eng;
6076 	unsigned i = 0;
6077 
6078 	if (strlen(fwd_modes) == 0) {
6079 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
6080 			if (fwd_eng == &rx_only_engine)
6081 				continue;
6082 			strncat(fwd_modes, fwd_eng->fwd_mode_name,
6083 					sizeof(fwd_modes) -
6084 					strlen(fwd_modes) - 1);
6085 			strncat(fwd_modes, separator,
6086 					sizeof(fwd_modes) -
6087 					strlen(fwd_modes) - 1);
6088 		}
6089 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
6090 	}
6091 
6092 	return fwd_modes;
6093 }
6094 
6095 void
6096 set_pkt_forwarding_mode(const char *fwd_mode_name)
6097 {
6098 	struct fwd_engine *fwd_eng;
6099 	unsigned i;
6100 
6101 	i = 0;
6102 	while ((fwd_eng = fwd_engines[i]) != NULL) {
6103 		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
6104 			printf("Set %s packet forwarding mode%s\n",
6105 			       fwd_mode_name,
6106 			       retry_enabled == 0 ? "" : " with retry");
6107 			cur_fwd_eng = fwd_eng;
6108 			return;
6109 		}
6110 		i++;
6111 	}
6112 	fprintf(stderr, "Invalid %s packet forwarding mode\n", fwd_mode_name);
6113 }
6114 
6115 void
6116 add_rx_dump_callbacks(portid_t portid)
6117 {
6118 	struct rte_eth_dev_info dev_info;
6119 	uint16_t queue;
6120 	int ret;
6121 
6122 	if (port_id_is_invalid(portid, ENABLED_WARN))
6123 		return;
6124 
6125 	ret = eth_dev_info_get_print_err(portid, &dev_info);
6126 	if (ret != 0)
6127 		return;
6128 
6129 	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
6130 		if (!ports[portid].rx_dump_cb[queue])
6131 			ports[portid].rx_dump_cb[queue] =
6132 				rte_eth_add_rx_callback(portid, queue,
6133 					dump_rx_pkts, NULL);
6134 }
6135 
6136 void
6137 add_tx_dump_callbacks(portid_t portid)
6138 {
6139 	struct rte_eth_dev_info dev_info;
6140 	uint16_t queue;
6141 	int ret;
6142 
6143 	if (port_id_is_invalid(portid, ENABLED_WARN))
6144 		return;
6145 
6146 	ret = eth_dev_info_get_print_err(portid, &dev_info);
6147 	if (ret != 0)
6148 		return;
6149 
6150 	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
6151 		if (!ports[portid].tx_dump_cb[queue])
6152 			ports[portid].tx_dump_cb[queue] =
6153 				rte_eth_add_tx_callback(portid, queue,
6154 							dump_tx_pkts, NULL);
6155 }
6156 
6157 void
6158 remove_rx_dump_callbacks(portid_t portid)
6159 {
6160 	struct rte_eth_dev_info dev_info;
6161 	uint16_t queue;
6162 	int ret;
6163 
6164 	if (port_id_is_invalid(portid, ENABLED_WARN))
6165 		return;
6166 
6167 	ret = eth_dev_info_get_print_err(portid, &dev_info);
6168 	if (ret != 0)
6169 		return;
6170 
6171 	for (queue = 0; queue < dev_info.nb_rx_queues; queue++)
6172 		if (ports[portid].rx_dump_cb[queue]) {
6173 			rte_eth_remove_rx_callback(portid, queue,
6174 				ports[portid].rx_dump_cb[queue]);
6175 			ports[portid].rx_dump_cb[queue] = NULL;
6176 		}
6177 }
6178 
6179 void
6180 remove_tx_dump_callbacks(portid_t portid)
6181 {
6182 	struct rte_eth_dev_info dev_info;
6183 	uint16_t queue;
6184 	int ret;
6185 
6186 	if (port_id_is_invalid(portid, ENABLED_WARN))
6187 		return;
6188 
6189 	ret = eth_dev_info_get_print_err(portid, &dev_info);
6190 	if (ret != 0)
6191 		return;
6192 
6193 	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
6194 		if (ports[portid].tx_dump_cb[queue]) {
6195 			rte_eth_remove_tx_callback(portid, queue,
6196 				ports[portid].tx_dump_cb[queue]);
6197 			ports[portid].tx_dump_cb[queue] = NULL;
6198 		}
6199 }
6200 
6201 void
6202 configure_rxtx_dump_callbacks(uint16_t verbose)
6203 {
6204 	portid_t portid;
6205 
6206 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
6207 		TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n");
6208 		return;
6209 #endif
6210 
6211 	RTE_ETH_FOREACH_DEV(portid)
6212 	{
6213 		if (verbose == 1 || verbose > 2)
6214 			add_rx_dump_callbacks(portid);
6215 		else
6216 			remove_rx_dump_callbacks(portid);
6217 		if (verbose >= 2)
6218 			add_tx_dump_callbacks(portid);
6219 		else
6220 			remove_tx_dump_callbacks(portid);
6221 	}
6222 }
6223 
6224 void
6225 set_verbose_level(uint16_t vb_level)
6226 {
6227 	printf("Change verbose level from %u to %u\n",
6228 	       (unsigned int) verbose_level, (unsigned int) vb_level);
6229 	verbose_level = vb_level;
6230 	configure_rxtx_dump_callbacks(verbose_level);
6231 }
6232 
6233 void
6234 vlan_extend_set(portid_t port_id, int on)
6235 {
6236 	int diag;
6237 	int vlan_offload;
6238 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
6239 
6240 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6241 		return;
6242 
6243 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
6244 
6245 	if (on) {
6246 		vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
6247 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
6248 	} else {
6249 		vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
6250 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
6251 	}
6252 
6253 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
6254 	if (diag < 0) {
6255 		fprintf(stderr,
6256 			"rx_vlan_extend_set(port_pi=%d, on=%d) failed diag=%d\n",
6257 			port_id, on, diag);
6258 		return;
6259 	}
6260 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
6261 }
6262 
6263 void
6264 rx_vlan_strip_set(portid_t port_id, int on)
6265 {
6266 	int diag;
6267 	int vlan_offload;
6268 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
6269 
6270 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6271 		return;
6272 
6273 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
6274 
6275 	if (on) {
6276 		vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
6277 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
6278 	} else {
6279 		vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
6280 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
6281 	}
6282 
6283 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
6284 	if (diag < 0) {
6285 		fprintf(stderr,
6286 			"%s(port_pi=%d, on=%d) failed diag=%d\n",
6287 			__func__, port_id, on, diag);
6288 		return;
6289 	}
6290 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
6291 }
6292 
6293 void
6294 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
6295 {
6296 	int diag;
6297 
6298 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6299 		return;
6300 
6301 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
6302 	if (diag < 0)
6303 		fprintf(stderr,
6304 			"%s(port_pi=%d, queue_id=%d, on=%d) failed diag=%d\n",
6305 			__func__, port_id, queue_id, on, diag);
6306 }
6307 
6308 void
6309 rx_vlan_filter_set(portid_t port_id, int on)
6310 {
6311 	int diag;
6312 	int vlan_offload;
6313 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
6314 
6315 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6316 		return;
6317 
6318 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
6319 
6320 	if (on) {
6321 		vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
6322 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
6323 	} else {
6324 		vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
6325 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
6326 	}
6327 
6328 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
6329 	if (diag < 0) {
6330 		fprintf(stderr,
6331 			"%s(port_pi=%d, on=%d) failed diag=%d\n",
6332 			__func__, port_id, on, diag);
6333 		return;
6334 	}
6335 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
6336 }
6337 
6338 void
6339 rx_vlan_qinq_strip_set(portid_t port_id, int on)
6340 {
6341 	int diag;
6342 	int vlan_offload;
6343 	uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads;
6344 
6345 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6346 		return;
6347 
6348 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
6349 
6350 	if (on) {
6351 		vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
6352 		port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
6353 	} else {
6354 		vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
6355 		port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
6356 	}
6357 
6358 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
6359 	if (diag < 0) {
6360 		fprintf(stderr, "%s(port_pi=%d, on=%d) failed diag=%d\n",
6361 			__func__, port_id, on, diag);
6362 		return;
6363 	}
6364 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
6365 }
6366 
6367 int
6368 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
6369 {
6370 	int diag;
6371 
6372 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6373 		return 1;
6374 	if (vlan_id_is_invalid(vlan_id))
6375 		return 1;
6376 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
6377 	if (diag == 0)
6378 		return 0;
6379 	fprintf(stderr,
6380 		"rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed diag=%d\n",
6381 		port_id, vlan_id, on, diag);
6382 	return -1;
6383 }
6384 
6385 void
6386 rx_vlan_all_filter_set(portid_t port_id, int on)
6387 {
6388 	uint16_t vlan_id;
6389 
6390 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6391 		return;
6392 	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
6393 		if (rx_vft_set(port_id, vlan_id, on))
6394 			break;
6395 	}
6396 }
6397 
6398 void
6399 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
6400 {
6401 	int diag;
6402 
6403 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6404 		return;
6405 
6406 	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
6407 	if (diag == 0)
6408 		return;
6409 
6410 	fprintf(stderr,
6411 		"tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed diag=%d\n",
6412 		port_id, vlan_type, tp_id, diag);
6413 }
6414 
6415 void
6416 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
6417 {
6418 	struct rte_eth_dev_info dev_info;
6419 	int ret;
6420 
6421 	if (vlan_id_is_invalid(vlan_id))
6422 		return;
6423 
6424 	if (ports[port_id].dev_conf.txmode.offloads &
6425 	    RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
6426 		fprintf(stderr, "Error, as QinQ has been enabled.\n");
6427 		return;
6428 	}
6429 
6430 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
6431 	if (ret != 0)
6432 		return;
6433 
6434 	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
6435 		fprintf(stderr,
6436 			"Error: vlan insert is not supported by port %d\n",
6437 			port_id);
6438 		return;
6439 	}
6440 
6441 	tx_vlan_reset(port_id);
6442 	ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
6443 	ports[port_id].tx_vlan_id = vlan_id;
6444 }
6445 
6446 void
6447 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
6448 {
6449 	struct rte_eth_dev_info dev_info;
6450 	int ret;
6451 
6452 	if (vlan_id_is_invalid(vlan_id))
6453 		return;
6454 	if (vlan_id_is_invalid(vlan_id_outer))
6455 		return;
6456 
6457 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
6458 	if (ret != 0)
6459 		return;
6460 
6461 	if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
6462 		fprintf(stderr,
6463 			"Error: qinq insert not supported by port %d\n",
6464 			port_id);
6465 		return;
6466 	}
6467 
6468 	tx_vlan_reset(port_id);
6469 	ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
6470 						    RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
6471 	ports[port_id].tx_vlan_id = vlan_id;
6472 	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
6473 }
6474 
6475 void
6476 tx_vlan_reset(portid_t port_id)
6477 {
6478 	ports[port_id].dev_conf.txmode.offloads &=
6479 				~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
6480 				  RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
6481 	ports[port_id].tx_vlan_id = 0;
6482 	ports[port_id].tx_vlan_id_outer = 0;
6483 }
6484 
6485 void
6486 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
6487 {
6488 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6489 		return;
6490 
6491 	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
6492 }
6493 
6494 void
6495 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
6496 {
6497 	int ret;
6498 
6499 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6500 		return;
6501 
6502 	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
6503 		return;
6504 
6505 	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
6506 		fprintf(stderr, "map_value not in required range 0..%d\n",
6507 			RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
6508 		return;
6509 	}
6510 
6511 	if (!is_rx) { /* tx */
6512 		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id,
6513 							     map_value);
6514 		if (ret) {
6515 			fprintf(stderr,
6516 				"failed to set tx queue stats mapping.\n");
6517 			return;
6518 		}
6519 	} else { /* rx */
6520 		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, queue_id,
6521 							     map_value);
6522 		if (ret) {
6523 			fprintf(stderr,
6524 				"failed to set rx queue stats mapping.\n");
6525 			return;
6526 		}
6527 	}
6528 }
6529 
6530 void
6531 set_xstats_hide_zero(uint8_t on_off)
6532 {
6533 	xstats_hide_zero = on_off;
6534 }
6535 
6536 void
6537 set_record_core_cycles(uint8_t on_off)
6538 {
6539 	record_core_cycles = on_off;
6540 }
6541 
6542 void
6543 set_record_burst_stats(uint8_t on_off)
6544 {
6545 	record_burst_stats = on_off;
6546 }
6547 
6548 uint16_t
6549 str_to_flowtype(const char *string)
6550 {
6551 	uint8_t i;
6552 
6553 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
6554 		if (!strcmp(flowtype_str_table[i].str, string))
6555 			return flowtype_str_table[i].ftype;
6556 	}
6557 
6558 	if (isdigit(string[0])) {
6559 		int val = atoi(string);
6560 		if (val > 0 && val < 64)
6561 			return (uint16_t)val;
6562 	}
6563 
6564 	return RTE_ETH_FLOW_UNKNOWN;
6565 }
6566 
6567 const char*
6568 flowtype_to_str(uint16_t flow_type)
6569 {
6570 	uint8_t i;
6571 
6572 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
6573 		if (flowtype_str_table[i].ftype == flow_type)
6574 			return flowtype_str_table[i].str;
6575 	}
6576 
6577 	return NULL;
6578 }
6579 
6580 #if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
6581 
6582 static inline void
6583 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
6584 {
6585 	struct rte_eth_flex_payload_cfg *cfg;
6586 	uint32_t i, j;
6587 
6588 	for (i = 0; i < flex_conf->nb_payloads; i++) {
6589 		cfg = &flex_conf->flex_set[i];
6590 		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
6591 			printf("\n    RAW:  ");
6592 		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
6593 			printf("\n    L2_PAYLOAD:  ");
6594 		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
6595 			printf("\n    L3_PAYLOAD:  ");
6596 		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
6597 			printf("\n    L4_PAYLOAD:  ");
6598 		else
6599 			printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
6600 		for (j = 0; j < num; j++)
6601 			printf("  %-5u", cfg->src_offset[j]);
6602 	}
6603 	printf("\n");
6604 }
6605 
6606 static inline void
6607 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
6608 {
6609 	struct rte_eth_fdir_flex_mask *mask;
6610 	uint32_t i, j;
6611 	const char *p;
6612 
6613 	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
6614 		mask = &flex_conf->flex_mask[i];
6615 		p = flowtype_to_str(mask->flow_type);
6616 		printf("\n    %s:\t", p ? p : "unknown");
6617 		for (j = 0; j < num; j++)
6618 			printf(" %02x", mask->mask[j]);
6619 	}
6620 	printf("\n");
6621 }
6622 
6623 static inline void
6624 print_fdir_flow_type(uint32_t flow_types_mask)
6625 {
6626 	int i;
6627 	const char *p;
6628 
6629 	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
6630 		if (!(flow_types_mask & (1 << i)))
6631 			continue;
6632 		p = flowtype_to_str(i);
6633 		if (p)
6634 			printf(" %s", p);
6635 		else
6636 			printf(" unknown");
6637 	}
6638 	printf("\n");
6639 }
6640 
6641 static int
6642 get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
6643 		    struct rte_eth_fdir_stats *fdir_stat)
6644 {
6645 	int ret = -ENOTSUP;
6646 
6647 #ifdef RTE_NET_I40E
6648 	if (ret == -ENOTSUP) {
6649 		ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
6650 		if (!ret)
6651 			ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
6652 	}
6653 #endif
6654 #ifdef RTE_NET_IXGBE
6655 	if (ret == -ENOTSUP) {
6656 		ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
6657 		if (!ret)
6658 			ret = rte_pmd_ixgbe_get_fdir_stats(port_id, fdir_stat);
6659 	}
6660 #endif
6661 	switch (ret) {
6662 	case 0:
6663 		break;
6664 	case -ENOTSUP:
6665 		fprintf(stderr, "\n FDIR is not supported on port %-2d\n",
6666 			port_id);
6667 		break;
6668 	default:
6669 		fprintf(stderr, "programming error: (%s)\n", strerror(-ret));
6670 		break;
6671 	}
6672 	return ret;
6673 }
6674 
6675 void
6676 fdir_get_infos(portid_t port_id)
6677 {
6678 	struct rte_eth_fdir_stats fdir_stat;
6679 	struct rte_eth_fdir_info fdir_info;
6680 
6681 	static const char *fdir_stats_border = "########################";
6682 
6683 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6684 		return;
6685 
6686 	memset(&fdir_info, 0, sizeof(fdir_info));
6687 	memset(&fdir_stat, 0, sizeof(fdir_stat));
6688 	if (get_fdir_info(port_id, &fdir_info, &fdir_stat))
6689 		return;
6690 
6691 	printf("\n  %s FDIR infos for port %-2d     %s\n",
6692 	       fdir_stats_border, port_id, fdir_stats_border);
6693 	printf("  MODE: ");
6694 	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
6695 		printf("  PERFECT\n");
6696 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
6697 		printf("  PERFECT-MAC-VLAN\n");
6698 	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
6699 		printf("  PERFECT-TUNNEL\n");
6700 	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
6701 		printf("  SIGNATURE\n");
6702 	else
6703 		printf("  DISABLE\n");
6704 	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
6705 		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
6706 		printf("  SUPPORTED FLOW TYPE: ");
6707 		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
6708 	}
6709 	printf("  FLEX PAYLOAD INFO:\n");
6710 	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
6711 	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
6712 	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
6713 		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
6714 		fdir_info.flex_payload_unit,
6715 		fdir_info.max_flex_payload_segment_num,
6716 		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
6717 	if (fdir_info.flex_conf.nb_payloads > 0) {
6718 		printf("  FLEX PAYLOAD SRC OFFSET:");
6719 		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
6720 	}
6721 	if (fdir_info.flex_conf.nb_flexmasks > 0) {
6722 		printf("  FLEX MASK CFG:");
6723 		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
6724 	}
6725 	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
6726 	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
6727 	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
6728 	       fdir_info.guarant_spc, fdir_info.best_spc);
6729 	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
6730 	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
6731 	       "  add:	         %-10"PRIu64"  remove:        %"PRIu64"\n"
6732 	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
6733 	       fdir_stat.collision, fdir_stat.free,
6734 	       fdir_stat.maxhash, fdir_stat.maxlen,
6735 	       fdir_stat.add, fdir_stat.remove,
6736 	       fdir_stat.f_add, fdir_stat.f_remove);
6737 	printf("  %s############################%s\n",
6738 	       fdir_stats_border, fdir_stats_border);
6739 }
6740 
6741 #endif /* RTE_NET_I40E || RTE_NET_IXGBE */
6742 
6743 void
6744 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
6745 {
6746 #ifdef RTE_NET_IXGBE
6747 	int diag;
6748 
6749 	if (is_rx)
6750 		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
6751 	else
6752 		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
6753 
6754 	if (diag == 0)
6755 		return;
6756 	fprintf(stderr,
6757 		"rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
6758 		is_rx ? "rx" : "tx", port_id, diag);
6759 	return;
6760 #endif
6761 	fprintf(stderr, "VF %s setting not supported for port %d\n",
6762 		is_rx ? "Rx" : "Tx", port_id);
6763 	RTE_SET_USED(vf);
6764 	RTE_SET_USED(on);
6765 }
6766 
6767 int
6768 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint32_t rate)
6769 {
6770 	int diag;
6771 	struct rte_eth_link link;
6772 	int ret;
6773 
6774 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6775 		return 1;
6776 	ret = eth_link_get_nowait_print_err(port_id, &link);
6777 	if (ret < 0)
6778 		return 1;
6779 	if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
6780 	    rate > link.link_speed) {
6781 		fprintf(stderr,
6782 			"Invalid rate value:%u bigger than link speed: %u\n",
6783 			rate, link.link_speed);
6784 		return 1;
6785 	}
6786 	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
6787 	if (diag == 0)
6788 		return diag;
6789 	fprintf(stderr,
6790 		"rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
6791 		port_id, diag);
6792 	return diag;
6793 }
6794 
6795 int
6796 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint32_t rate, uint64_t q_msk)
6797 {
6798 	int diag = -ENOTSUP;
6799 
6800 	RTE_SET_USED(vf);
6801 	RTE_SET_USED(rate);
6802 	RTE_SET_USED(q_msk);
6803 
6804 #ifdef RTE_NET_IXGBE
6805 	if (diag == -ENOTSUP)
6806 		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
6807 						       q_msk);
6808 #endif
6809 #ifdef RTE_NET_BNXT
6810 	if (diag == -ENOTSUP)
6811 		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
6812 #endif
6813 	if (diag == 0)
6814 		return diag;
6815 
6816 	fprintf(stderr,
6817 		"%s for port_id=%d failed diag=%d\n",
6818 		__func__, port_id, diag);
6819 	return diag;
6820 }
6821 
6822 int
6823 set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh)
6824 {
6825 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6826 		return -EINVAL;
6827 
6828 	return rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh);
6829 }
6830 
6831 /*
6832  * Functions to manage the set of filtered Multicast MAC addresses.
6833  *
6834  * A pool of filtered multicast MAC addresses is associated with each port.
6835  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
6836  * The address of the pool and the number of valid multicast MAC addresses
6837  * recorded in the pool are stored in the fields "mc_addr_pool" and
6838  * "mc_addr_nb" of the "rte_port" data structure.
6839  *
6840  * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
6841  * to be supplied a contiguous array of multicast MAC addresses.
6842  * To comply with this constraint, the set of multicast addresses recorded
6843  * into the pool are systematically compacted at the beginning of the pool.
6844  * Hence, when a multicast address is removed from the pool, all following
6845  * addresses, if any, are copied back to keep the set contiguous.
6846  */
6847 #define MCAST_POOL_INC 32
6848 
6849 static int
6850 mcast_addr_pool_extend(struct rte_port *port)
6851 {
6852 	struct rte_ether_addr *mc_pool;
6853 	size_t mc_pool_size;
6854 
6855 	/*
6856 	 * If a free entry is available at the end of the pool, just
6857 	 * increment the number of recorded multicast addresses.
6858 	 */
6859 	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
6860 		port->mc_addr_nb++;
6861 		return 0;
6862 	}
6863 
6864 	/*
6865 	 * [re]allocate a pool with MCAST_POOL_INC more entries.
6866 	 * The previous test guarantees that port->mc_addr_nb is a multiple
6867 	 * of MCAST_POOL_INC.
6868 	 */
6869 	mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb +
6870 						    MCAST_POOL_INC);
6871 	mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool,
6872 						mc_pool_size);
6873 	if (mc_pool == NULL) {
6874 		fprintf(stderr,
6875 			"allocation of pool of %u multicast addresses failed\n",
6876 			port->mc_addr_nb + MCAST_POOL_INC);
6877 		return -ENOMEM;
6878 	}
6879 
6880 	port->mc_addr_pool = mc_pool;
6881 	port->mc_addr_nb++;
6882 	return 0;
6883 
6884 }
6885 
6886 static void
6887 mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr)
6888 {
6889 	if (mcast_addr_pool_extend(port) != 0)
6890 		return;
6891 	rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]);
6892 }
6893 
6894 static void
6895 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
6896 {
6897 	port->mc_addr_nb--;
6898 	if (addr_idx == port->mc_addr_nb) {
6899 		/* No need to recompact the set of multicast addresses. */
6900 		if (port->mc_addr_nb == 0) {
6901 			/* free the pool of multicast addresses. */
6902 			free(port->mc_addr_pool);
6903 			port->mc_addr_pool = NULL;
6904 		}
6905 		return;
6906 	}
6907 	memmove(&port->mc_addr_pool[addr_idx],
6908 		&port->mc_addr_pool[addr_idx + 1],
6909 		sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx));
6910 }
6911 
6912 int
6913 mcast_addr_pool_destroy(portid_t port_id)
6914 {
6915 	struct rte_port *port;
6916 
6917 	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
6918 	    port_id == (portid_t)RTE_PORT_ALL)
6919 		return -EINVAL;
6920 	port = &ports[port_id];
6921 
6922 	if (port->mc_addr_nb != 0) {
6923 		/* free the pool of multicast addresses. */
6924 		free(port->mc_addr_pool);
6925 		port->mc_addr_pool = NULL;
6926 		port->mc_addr_nb = 0;
6927 	}
6928 	return 0;
6929 }
6930 
6931 static int
6932 eth_port_multicast_addr_list_set(portid_t port_id)
6933 {
6934 	struct rte_port *port;
6935 	int diag;
6936 
6937 	port = &ports[port_id];
6938 	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
6939 					    port->mc_addr_nb);
6940 	if (diag < 0)
6941 		fprintf(stderr,
6942 			"rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
6943 			port_id, port->mc_addr_nb, diag);
6944 
6945 	return diag;
6946 }
6947 
6948 void
6949 mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr)
6950 {
6951 	struct rte_port *port;
6952 	uint32_t i;
6953 
6954 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6955 		return;
6956 
6957 	port = &ports[port_id];
6958 
6959 	/*
6960 	 * Check that the added multicast MAC address is not already recorded
6961 	 * in the pool of multicast addresses.
6962 	 */
6963 	for (i = 0; i < port->mc_addr_nb; i++) {
6964 		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
6965 			fprintf(stderr,
6966 				"multicast address already filtered by port\n");
6967 			return;
6968 		}
6969 	}
6970 
6971 	mcast_addr_pool_append(port, mc_addr);
6972 	if (eth_port_multicast_addr_list_set(port_id) < 0)
6973 		/* Rollback on failure, remove the address from the pool */
6974 		mcast_addr_pool_remove(port, i);
6975 }
6976 
6977 void
6978 mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr)
6979 {
6980 	struct rte_port *port;
6981 	uint32_t i;
6982 
6983 	if (port_id_is_invalid(port_id, ENABLED_WARN))
6984 		return;
6985 
6986 	port = &ports[port_id];
6987 
6988 	/*
6989 	 * Search the pool of multicast MAC addresses for the removed address.
6990 	 */
6991 	for (i = 0; i < port->mc_addr_nb; i++) {
6992 		if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
6993 			break;
6994 	}
6995 	if (i == port->mc_addr_nb) {
6996 		fprintf(stderr, "multicast address not filtered by port %d\n",
6997 			port_id);
6998 		return;
6999 	}
7000 
7001 	mcast_addr_pool_remove(port, i);
7002 	if (eth_port_multicast_addr_list_set(port_id) < 0)
7003 		/* Rollback on failure, add the address back into the pool */
7004 		mcast_addr_pool_append(port, mc_addr);
7005 }
7006 
7007 void
7008 mcast_addr_flush(portid_t port_id)
7009 {
7010 	int ret;
7011 
7012 	if (port_id_is_invalid(port_id, ENABLED_WARN))
7013 		return;
7014 
7015 	ret = rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);
7016 	if (ret != 0) {
7017 		fprintf(stderr,
7018 			"Failed to flush all multicast MAC addresses on port_id %u\n",
7019 			port_id);
7020 		return;
7021 	}
7022 	mcast_addr_pool_destroy(port_id);
7023 }
7024 
7025 void
7026 port_dcb_info_display(portid_t port_id)
7027 {
7028 	struct rte_eth_dcb_info dcb_info;
7029 	uint16_t i;
7030 	int ret;
7031 	static const char *border = "================";
7032 
7033 	if (port_id_is_invalid(port_id, ENABLED_WARN))
7034 		return;
7035 
7036 	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
7037 	if (ret) {
7038 		fprintf(stderr, "\n Failed to get dcb infos on port %-2d\n",
7039 			port_id);
7040 		return;
7041 	}
7042 	printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
7043 	printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
7044 	printf("\n  TC :        ");
7045 	for (i = 0; i < dcb_info.nb_tcs; i++)
7046 		printf("\t%4d", i);
7047 	printf("\n  Priority :  ");
7048 	for (i = 0; i < dcb_info.nb_tcs; i++)
7049 		printf("\t%4d", dcb_info.prio_tc[i]);
7050 	printf("\n  BW percent :");
7051 	for (i = 0; i < dcb_info.nb_tcs; i++)
7052 		printf("\t%4d%%", dcb_info.tc_bws[i]);
7053 	printf("\n  RXQ base :  ");
7054 	for (i = 0; i < dcb_info.nb_tcs; i++)
7055 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
7056 	printf("\n  RXQ number :");
7057 	for (i = 0; i < dcb_info.nb_tcs; i++)
7058 		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
7059 	printf("\n  TXQ base :  ");
7060 	for (i = 0; i < dcb_info.nb_tcs; i++)
7061 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
7062 	printf("\n  TXQ number :");
7063 	for (i = 0; i < dcb_info.nb_tcs; i++)
7064 		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
7065 	printf("\n");
7066 }
7067 
7068 uint8_t *
7069 open_file(const char *file_path, uint32_t *size)
7070 {
7071 	int fd = open(file_path, O_RDONLY);
7072 	off_t pkg_size;
7073 	uint8_t *buf = NULL;
7074 	int ret = 0;
7075 	struct stat st_buf;
7076 
7077 	if (size)
7078 		*size = 0;
7079 
7080 	if (fd == -1) {
7081 		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
7082 		return buf;
7083 	}
7084 
7085 	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
7086 		close(fd);
7087 		fprintf(stderr, "%s: File operations failed\n", __func__);
7088 		return buf;
7089 	}
7090 
7091 	pkg_size = st_buf.st_size;
7092 	if (pkg_size < 0) {
7093 		close(fd);
7094 		fprintf(stderr, "%s: File operations failed\n", __func__);
7095 		return buf;
7096 	}
7097 
7098 	buf = (uint8_t *)malloc(pkg_size);
7099 	if (!buf) {
7100 		close(fd);
7101 		fprintf(stderr, "%s: Failed to malloc memory\n", __func__);
7102 		return buf;
7103 	}
7104 
7105 	ret = read(fd, buf, pkg_size);
7106 	if (ret < 0) {
7107 		close(fd);
7108 		fprintf(stderr, "%s: File read operation failed\n", __func__);
7109 		close_file(buf);
7110 		return NULL;
7111 	}
7112 
7113 	if (size)
7114 		*size = pkg_size;
7115 
7116 	close(fd);
7117 
7118 	return buf;
7119 }
7120 
7121 int
7122 save_file(const char *file_path, uint8_t *buf, uint32_t size)
7123 {
7124 	FILE *fh = fopen(file_path, "wb");
7125 
7126 	if (fh == NULL) {
7127 		fprintf(stderr, "%s: Failed to open %s\n", __func__, file_path);
7128 		return -1;
7129 	}
7130 
7131 	if (fwrite(buf, 1, size, fh) != size) {
7132 		fclose(fh);
7133 		fprintf(stderr, "%s: File write operation failed\n", __func__);
7134 		return -1;
7135 	}
7136 
7137 	fclose(fh);
7138 
7139 	return 0;
7140 }
7141 
7142 int
7143 close_file(uint8_t *buf)
7144 {
7145 	if (buf) {
7146 		free((void *)buf);
7147 		return 0;
7148 	}
7149 
7150 	return -1;
7151 }
7152 
7153 void
7154 show_macs(portid_t port_id)
7155 {
7156 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
7157 	struct rte_eth_dev_info dev_info;
7158 	int32_t i, rc, num_macs = 0;
7159 
7160 	if (eth_dev_info_get_print_err(port_id, &dev_info))
7161 		return;
7162 
7163 	struct rte_ether_addr addr[dev_info.max_mac_addrs];
7164 	rc = rte_eth_macaddrs_get(port_id, addr, dev_info.max_mac_addrs);
7165 	if (rc < 0)
7166 		return;
7167 
7168 	for (i = 0; i < rc; i++) {
7169 
7170 		/* skip zero address */
7171 		if (rte_is_zero_ether_addr(&addr[i]))
7172 			continue;
7173 
7174 		num_macs++;
7175 	}
7176 
7177 	printf("Number of MAC address added: %d\n", num_macs);
7178 
7179 	for (i = 0; i < rc; i++) {
7180 
7181 		/* skip zero address */
7182 		if (rte_is_zero_ether_addr(&addr[i]))
7183 			continue;
7184 
7185 		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, &addr[i]);
7186 		printf("  %s\n", buf);
7187 	}
7188 }
7189 
7190 void
7191 show_mcast_macs(portid_t port_id)
7192 {
7193 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
7194 	struct rte_ether_addr *addr;
7195 	struct rte_port *port;
7196 	uint32_t i;
7197 
7198 	port = &ports[port_id];
7199 
7200 	printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb);
7201 
7202 	for (i = 0; i < port->mc_addr_nb; i++) {
7203 		addr = &port->mc_addr_pool[i];
7204 
7205 		rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
7206 		printf("  %s\n", buf);
7207 	}
7208 }
7209