xref: /dpdk/app/test-pmd/config.c (revision a7dde4ff0dcdf382bc2a5fc86c40106a8f43e1f3)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 /*   BSD LICENSE
34  *
35  *   Copyright 2013-2014 6WIND S.A.
36  *
37  *   Redistribution and use in source and binary forms, with or without
38  *   modification, are permitted provided that the following conditions
39  *   are met:
40  *
41  *     * Redistributions of source code must retain the above copyright
42  *       notice, this list of conditions and the following disclaimer.
43  *     * Redistributions in binary form must reproduce the above copyright
44  *       notice, this list of conditions and the following disclaimer in
45  *       the documentation and/or other materials provided with the
46  *       distribution.
47  *     * Neither the name of 6WIND S.A. nor the names of its
48  *       contributors may be used to endorse or promote products derived
49  *       from this software without specific prior written permission.
50  *
51  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  */
63 
64 #include <stdarg.h>
65 #include <errno.h>
66 #include <stdio.h>
67 #include <string.h>
68 #include <stdarg.h>
69 #include <stdint.h>
70 #include <inttypes.h>
71 
72 #include <sys/queue.h>
73 
74 #include <rte_common.h>
75 #include <rte_byteorder.h>
76 #include <rte_debug.h>
77 #include <rte_log.h>
78 #include <rte_memory.h>
79 #include <rte_memcpy.h>
80 #include <rte_memzone.h>
81 #include <rte_launch.h>
82 #include <rte_tailq.h>
83 #include <rte_eal.h>
84 #include <rte_per_lcore.h>
85 #include <rte_lcore.h>
86 #include <rte_atomic.h>
87 #include <rte_branch_prediction.h>
88 #include <rte_ring.h>
89 #include <rte_mempool.h>
90 #include <rte_mbuf.h>
91 #include <rte_interrupts.h>
92 #include <rte_pci.h>
93 #include <rte_ether.h>
94 #include <rte_ethdev.h>
95 #include <rte_string_fns.h>
96 
97 #include "testpmd.h"
98 
99 static char *flowtype_to_str(uint16_t flow_type);
100 
101 static void
102 print_ethaddr(const char *name, struct ether_addr *eth_addr)
103 {
104 	char buf[ETHER_ADDR_FMT_SIZE];
105 	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
106 	printf("%s%s", name, buf);
107 }
108 
/*
 * Display the basic statistics (struct rte_eth_stats) of a port.
 *
 * Two output layouts exist: a compact one when no per-queue statistics
 * register mapping is configured on the port, and a wide one otherwise.
 * Per-queue counters, flow-director counters and XON/XOFF pause-frame
 * counters are appended when relevant.
 */
void
nic_stats_display(portid_t port_id)
{
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;
	portid_t pid;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		/* Invalid port: list the valid port identifiers and bail out. */
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		/* Compact layout: no queue-stats mapping configured. */
		printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf("  RX-badcrc:  %-10"PRIu64" RX-badlen: %-10"PRIu64" RX-errors: "
		       "%-"PRIu64"\n",
		       stats.ibadcrc, stats.ibadlen, stats.ierrors);
		printf("  RX-nombuf:  %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
		       "%-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}
	else {
		/* Wide layout used when queue-stats mapping is enabled. */
		printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
		       "    RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf("  RX-badcrc:               %10"PRIu64"    RX-badlen: %10"PRIu64
		       "  RX-errors:  %10"PRIu64"\n",
		       stats.ibadcrc, stats.ibadlen, stats.ierrors);
		printf("  RX-nombuf:               %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
		       "    TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	/* stats fdir: only shown when flow director is active. */
	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
		printf("  Fdirmiss:   %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n",
		       stats.fdirmiss,
		       stats.fdirmatch);

	if (port->rx_queue_stats_mapping_enabled) {
		/* One line per RX statistics register. */
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets: %10"PRIu64
			       "    RX-errors: %10"PRIu64
			       "    RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		/* One line per TX statistics register. */
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets: %10"PRIu64
			       "                             TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	/* Display statistics of XON/XOFF pause frames, if any. */
	if ((stats.tx_pause_xon  | stats.rx_pause_xon |
	     stats.tx_pause_xoff | stats.rx_pause_xoff) > 0) {
		printf("  RX-XOFF:    %-10"PRIu64" RX-XON:    %-10"PRIu64"\n",
		       stats.rx_pause_xoff, stats.rx_pause_xon);
		printf("  TX-XOFF:    %-10"PRIu64" TX-XON:    %-10"PRIu64"\n",
		       stats.tx_pause_xoff, stats.tx_pause_xon);
	}
	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}
192 
193 void
194 nic_stats_clear(portid_t port_id)
195 {
196 	portid_t pid;
197 
198 	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
199 		printf("Valid port range is [0");
200 		FOREACH_PORT(pid, ports)
201 			printf(", %d", pid);
202 		printf("]\n");
203 		return;
204 	}
205 	rte_eth_stats_reset(port_id);
206 	printf("\n  NIC statistics for port %d cleared\n", port_id);
207 }
208 
209 void
210 nic_xstats_display(portid_t port_id)
211 {
212 	struct rte_eth_xstats *xstats;
213 	int len, ret, i;
214 
215 	printf("###### NIC extended statistics for port %-2d\n", port_id);
216 
217 	len = rte_eth_xstats_get(port_id, NULL, 0);
218 	if (len < 0) {
219 		printf("Cannot get xstats count\n");
220 		return;
221 	}
222 	xstats = malloc(sizeof(xstats[0]) * len);
223 	if (xstats == NULL) {
224 		printf("Cannot allocate memory for xstats\n");
225 		return;
226 	}
227 	ret = rte_eth_xstats_get(port_id, xstats, len);
228 	if (ret < 0 || ret > len) {
229 		printf("Cannot get xstats\n");
230 		free(xstats);
231 		return;
232 	}
233 	for (i = 0; i < len; i++)
234 		printf("%s: %"PRIu64"\n", xstats[i].name, xstats[i].value);
235 	free(xstats);
236 }
237 
238 void
239 nic_xstats_clear(portid_t port_id)
240 {
241 	rte_eth_xstats_reset(port_id);
242 }
243 
/*
 * Display the RX/TX queue to statistics-register mappings configured on
 * a port, or an explanatory message when no mapping is set (or mapping
 * is unsupported by the device).
 */
void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;
	portid_t pid;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		/* Invalid port: list the valid port identifiers and bail out. */
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		/* Scan the global RX mapping table for entries of this port. */
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}


	if (port->tx_queue_stats_mapping_enabled) {
		/* Scan the global TX mapping table for entries of this port. */
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf("  %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}
295 
/*
 * Display general information about a port: MAC address, NUMA socket,
 * link status/speed/duplex, promiscuous and allmulticast modes, MAC
 * address limits, VLAN offload settings, RETA size and the flow types
 * the device can hash on for RSS.
 */
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool * mp;
	static const char *info_border = "*********************";
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		/* Invalid port: list the valid port identifiers and bail out. */
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	port = &ports[port_id];
	/* Non-blocking link query: does not wait for link negotiation. */
	rte_eth_link_get_nowait(port_id, &link);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		/* Report the configured socket only if its mbuf pool exists. */
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
							port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u",port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	/* A negative value means the VLAN offload query itself failed. */
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0){
		printf("VLAN offload: \n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on \n");
		else
			printf("  strip off \n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("  filter on \n");
		else
			printf("  filter off \n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("  qinq(extend) on \n");
		else
			printf("  qinq(extend) off \n");
	}

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported flow types:\n");
		/* flow_type_rss_offloads is a bit mask indexed by flow type. */
		for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX;
								i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			printf("  %s\n", (p ? p : "unknown"));
		}
	}
}
383 
384 int
385 port_id_is_invalid(portid_t port_id, enum print_warning warning)
386 {
387 	if (port_id == (portid_t)RTE_PORT_ALL)
388 		return 0;
389 
390 	if (ports[port_id].enabled)
391 		return 0;
392 
393 	if (warning == ENABLED_WARN)
394 		printf("Invalid port %d\n", port_id);
395 
396 	return 1;
397 }
398 
/*
 * Validate a VLAN identifier: it must fit in the 12-bit VLAN ID space
 * (0..4095). Returns 0 when valid, 1 (with a message) otherwise.
 */
static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id >= 4096) {
		printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
		return 1;
	}
	return 0;
}
407 
408 static int
409 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
410 {
411 	uint64_t pci_len;
412 
413 	if (reg_off & 0x3) {
414 		printf("Port register offset 0x%X not aligned on a 4-byte "
415 		       "boundary\n",
416 		       (unsigned)reg_off);
417 		return 1;
418 	}
419 	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
420 	if (reg_off >= pci_len) {
421 		printf("Port %d: register offset %u (0x%X) out of port PCI "
422 		       "resource (length=%"PRIu64")\n",
423 		       port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
424 		return 1;
425 	}
426 	return 0;
427 }
428 
/*
 * Validate a bit position within a 32-bit register (0..31).
 * Returns 0 when valid, 1 (with a message) otherwise.
 */
static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos > 31) {
		printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
		return 1;
	}
	return 0;
}
437 
/* Print the "port N PCI register at offset 0x..." prefix (no newline). */
#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

/* Print a register value in both hexadecimal and decimal form. */
static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}
447 
448 void
449 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
450 {
451 	uint32_t reg_v;
452 
453 
454 	if (port_id_is_invalid(port_id, ENABLED_WARN))
455 		return;
456 	if (port_reg_off_is_invalid(port_id, reg_off))
457 		return;
458 	if (reg_bit_pos_is_invalid(bit_x))
459 		return;
460 	reg_v = port_id_pci_reg_read(port_id, reg_off);
461 	display_port_and_reg_off(port_id, (unsigned)reg_off);
462 	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
463 }
464 
/*
 * Display the value of the register bit field delimited by bit1_pos and
 * bit2_pos (inclusive, given in either order) of a port PCI register.
 */
void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	/* Order the two positions so that l_bit <= h_bit. */
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	/* When h_bit == 31 the right shift above already discarded every
	 * bit outside the field, and building the mask would need an
	 * undefined 32-bit shift — so mask only when h_bit < 31. */
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	/* Width of the hex print scales with the field size (4 bits/digit). */
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}
494 
495 void
496 port_reg_display(portid_t port_id, uint32_t reg_off)
497 {
498 	uint32_t reg_v;
499 
500 	if (port_id_is_invalid(port_id, ENABLED_WARN))
501 		return;
502 	if (port_reg_off_is_invalid(port_id, reg_off))
503 		return;
504 	reg_v = port_id_pci_reg_read(port_id, reg_off);
505 	display_port_reg_value(port_id, reg_off, reg_v);
506 }
507 
508 void
509 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
510 		 uint8_t bit_v)
511 {
512 	uint32_t reg_v;
513 
514 	if (port_id_is_invalid(port_id, ENABLED_WARN))
515 		return;
516 	if (port_reg_off_is_invalid(port_id, reg_off))
517 		return;
518 	if (reg_bit_pos_is_invalid(bit_pos))
519 		return;
520 	if (bit_v > 1) {
521 		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
522 		return;
523 	}
524 	reg_v = port_id_pci_reg_read(port_id, reg_off);
525 	if (bit_v == 0)
526 		reg_v &= ~(1 << bit_pos);
527 	else
528 		reg_v |= (1 << bit_pos);
529 	port_id_pci_reg_write(port_id, reg_off, reg_v);
530 	display_port_reg_value(port_id, reg_off, reg_v);
531 }
532 
/*
 * Set the register bit field delimited by bit1_pos and bit2_pos
 * (inclusive, given in either order) of a port PCI register to 'value'
 * via read-modify-write, then display the new register value.
 */
void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	/* Order the two positions so that l_bit <= h_bit. */
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	/* Largest value that fits in the field; the special case avoids
	 * an undefined 32-bit shift when the field spans all 32 bits. */
	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
				(unsigned)value, (unsigned)value,
				(unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
572 
573 void
574 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
575 {
576 	if (port_id_is_invalid(port_id, ENABLED_WARN))
577 		return;
578 	if (port_reg_off_is_invalid(port_id, reg_off))
579 		return;
580 	port_id_pci_reg_write(port_id, reg_off, reg_v);
581 	display_port_reg_value(port_id, reg_off, reg_v);
582 }
583 
584 void
585 port_mtu_set(portid_t port_id, uint16_t mtu)
586 {
587 	int diag;
588 
589 	if (port_id_is_invalid(port_id, ENABLED_WARN))
590 		return;
591 	diag = rte_eth_dev_set_mtu(port_id, mtu);
592 	if (diag == 0)
593 		return;
594 	printf("Set MTU failed. diag=%d\n", diag);
595 }
596 
597 /*
598  * RX/TX ring descriptors display functions.
599  */
600 int
601 rx_queue_id_is_invalid(queueid_t rxq_id)
602 {
603 	if (rxq_id < nb_rxq)
604 		return 0;
605 	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
606 	return 1;
607 }
608 
609 int
610 tx_queue_id_is_invalid(queueid_t txq_id)
611 {
612 	if (txq_id < nb_txq)
613 		return 0;
614 	printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq);
615 	return 1;
616 }
617 
618 static int
619 rx_desc_id_is_invalid(uint16_t rxdesc_id)
620 {
621 	if (rxdesc_id < nb_rxd)
622 		return 0;
623 	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
624 	       rxdesc_id, nb_rxd);
625 	return 1;
626 }
627 
628 static int
629 tx_desc_id_is_invalid(uint16_t txdesc_id)
630 {
631 	if (txdesc_id < nb_txd)
632 		return 0;
633 	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
634 	       txdesc_id, nb_txd);
635 	return 1;
636 }
637 
638 static const struct rte_memzone *
639 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
640 {
641 	char mz_name[RTE_MEMZONE_NAMESIZE];
642 	const struct rte_memzone *mz;
643 
644 	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
645 		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
646 	mz = rte_memzone_lookup(mz_name);
647 	if (mz == NULL)
648 		printf("%s ring memory zoneof (port %d, queue %d) not"
649 		       "found (zone name = %s\n",
650 		       ring_name, port_id, q_id, mz_name);
651 	return (mz);
652 }
653 
/*
 * Overlay of a 64-bit descriptor word with its two 32-bit halves.
 * The member order differs between RTE_BIG_ENDIAN and little-endian
 * builds so that each name maps onto a fixed half of the CPU-order
 * 64-bit value. NOTE(review): 'lo' first on big-endian and 'hi' first
 * on little-endian looks inverted at first glance — confirm against
 * the expected descriptor-dump ordering before changing the layout.
 */
union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

/* 32-byte RX descriptor layout (used by i40e — see
 * ring_rx_descriptor_display). */
struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

/* Generic 16-byte RX/TX descriptor layout. */
struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};
678 
679 static void
680 ring_rxd_display_dword(union igb_ring_dword dword)
681 {
682 	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
683 					(unsigned)dword.words.hi);
684 }
685 
/*
 * Display one RX ring descriptor, word by word.
 *
 * i40e devices use 32-byte descriptors (unless built with
 * RTE_LIBRTE_I40E_16BYTE_RX_DESC); all other devices use the 16-byte
 * layout. port_id is only needed to identify the driver, hence it is
 * unused in 16-byte-only builds.
 *
 * NOTE(review): the little-endian-to-CPU conversion is written back
 * into the live descriptor ring before printing — destructive on
 * big-endian hosts; confirm this is intended.
 */
static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   uint8_t port_id,
#else
			   __rte_unused uint8_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}
730 
731 static void
732 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
733 {
734 	struct igb_ring_desc_16_bytes *ring;
735 	struct igb_ring_desc_16_bytes txd;
736 
737 	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
738 	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
739 	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
740 	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
741 			(unsigned)txd.lo_dword.words.lo,
742 			(unsigned)txd.lo_dword.words.hi,
743 			(unsigned)txd.hi_dword.words.lo,
744 			(unsigned)txd.hi_dword.words.hi);
745 }
746 
747 void
748 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
749 {
750 	const struct rte_memzone *rx_mz;
751 
752 	if (port_id_is_invalid(port_id, ENABLED_WARN))
753 		return;
754 	if (rx_queue_id_is_invalid(rxq_id))
755 		return;
756 	if (rx_desc_id_is_invalid(rxd_id))
757 		return;
758 	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
759 	if (rx_mz == NULL)
760 		return;
761 	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
762 }
763 
764 void
765 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
766 {
767 	const struct rte_memzone *tx_mz;
768 
769 	if (port_id_is_invalid(port_id, ENABLED_WARN))
770 		return;
771 	if (tx_queue_id_is_invalid(txq_id))
772 		return;
773 	if (tx_desc_id_is_invalid(txd_id))
774 		return;
775 	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
776 	if (tx_mz == NULL)
777 		return;
778 	ring_tx_descriptor_display(tx_mz, txd_id);
779 }
780 
781 void
782 fwd_lcores_config_display(void)
783 {
784 	lcoreid_t lc_id;
785 
786 	printf("List of forwarding lcores:");
787 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
788 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
789 	printf("\n");
790 }
/*
 * Display the current RX/TX configuration: forwarding mode, CRC
 * stripping, burst size, core/port counts and the queue/descriptor
 * threshold settings.
 */
void
rxtx_config_display(void)
{
	printf("  %s packet forwarding - CRC stripping %s - "
	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
	       nb_pkt_per_burst);

	if (cur_fwd_eng == &tx_only_engine)
		printf("  packet len=%u - nb packet segments=%d\n",
				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	/* Port 0's configuration is shown as representative — assumes all
	 * ports are configured alike; TODO confirm for mixed setups. */
	struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf;
	struct rte_eth_txconf *tx_conf = &ports[0].tx_conf;

	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);
	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
	       nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh,
	       rx_conf->rx_thresh.wthresh);
	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
	       nb_txq, nb_txd, tx_conf->tx_free_thresh);
	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh,
	       tx_conf->tx_thresh.wthresh);
	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
	       tx_conf->tx_rs_thresh, tx_conf->txq_flags);
}
821 
822 void
823 port_rss_reta_info(portid_t port_id,
824 		   struct rte_eth_rss_reta_entry64 *reta_conf,
825 		   uint16_t nb_entries)
826 {
827 	uint16_t i, idx, shift;
828 	int ret;
829 
830 	if (port_id_is_invalid(port_id, ENABLED_WARN))
831 		return;
832 
833 	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
834 	if (ret != 0) {
835 		printf("Failed to get RSS RETA info, return code = %d\n", ret);
836 		return;
837 	}
838 
839 	for (i = 0; i < nb_entries; i++) {
840 		idx = i / RTE_RETA_GROUP_SIZE;
841 		shift = i % RTE_RETA_GROUP_SIZE;
842 		if (!(reta_conf[idx].mask & (1ULL << shift)))
843 			continue;
844 		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
845 					i, reta_conf[idx].reta[shift]);
846 	}
847 }
848 
849 /*
850  * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
851  * key of the port.
852  */
void
port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
{
	/* Mapping of RSS offload flags to human-readable names. */
	struct rss_type_info {
		char str[32];
		uint64_t rss_type;
	};
	static const struct rss_type_info rss_type_table[] = {
		{"ipv4", ETH_RSS_IPV4},
		{"ipv4-frag", ETH_RSS_FRAG_IPV4},
		{"ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP},
		{"ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP},
		{"ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP},
		{"ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER},
		{"ipv6", ETH_RSS_IPV6},
		{"ipv6-frag", ETH_RSS_FRAG_IPV6},
		{"ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP},
		{"ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP},
		{"ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP},
		{"ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER},
		{"l2-payload", ETH_RSS_L2_PAYLOAD},
		{"ipv6-ex", ETH_RSS_IPV6_EX},
		{"ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX},
		{"ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX},
	};

	struct rte_eth_rss_conf rss_conf;
	/* 40-byte buffer — assumes the device RSS key is at most 40 bytes
	 * (e.g. igb/ixgbe); TODO confirm for devices with larger keys. */
	uint8_t rss_key[10 * 4];
	uint64_t rss_hf;
	uint8_t i;
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag != 0) {
		/* Translate the known error codes into readable messages. */
		switch (diag) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		default:
			printf("operation failed - diag=%d\n", diag);
			break;
		}
		return;
	}
	rss_hf = rss_conf.rss_hf;
	if (rss_hf == 0) {
		printf("RSS disabled\n");
		return;
	}
	/* Print the name of every hash function enabled in rss_hf. */
	printf("RSS functions:\n ");
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (rss_hf & rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	}
	printf("\n");
	if (!show_rss_key)
		return;
	printf("RSS key:\n");
	for (i = 0; i < sizeof(rss_key); i++)
		printf("%02X", rss_key[i]);
	printf("\n");
}
922 
923 void
924 port_rss_hash_key_update(portid_t port_id, uint8_t *hash_key)
925 {
926 	struct rte_eth_rss_conf rss_conf;
927 	int diag;
928 
929 	rss_conf.rss_key = NULL;
930 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
931 	if (diag == 0) {
932 		rss_conf.rss_key = hash_key;
933 		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
934 	}
935 	if (diag == 0)
936 		return;
937 
938 	switch (diag) {
939 	case -ENODEV:
940 		printf("port index %d invalid\n", port_id);
941 		break;
942 	case -ENOTSUP:
943 		printf("operation not supported by device\n");
944 		break;
945 	default:
946 		printf("operation failed - diag=%d\n", diag);
947 		break;
948 	}
949 }
950 
951 /*
952  * Setup forwarding configuration for each logical core.
953  */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t  nb_extra;
	lcoreid_t  nb_fc;
	lcoreid_t  nb_lc;
	lcoreid_t  lc_id;

	/*
	 * Distribute cfg->nb_fwd_streams streams over cfg->nb_fwd_lcores
	 * cores as evenly as possible: each core gets nb_fs / nb_fc
	 * streams, and the nb_fs % nb_fc remaining streams are spread one
	 * extra each over the last 'nb_extra' cores.
	 */
	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		/* More cores than streams: one stream per core, no extras. */
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	/* First nb_lc cores receive the base share of streams. */
	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}
993 
/*
 * Set up the "simple" forwarding configuration: one stream per
 * forwarding port, all streams on RX/TX queue 0.
 */
static void
simple_fwd_config_setup(void)
{
	portid_t i;
	portid_t j;
	portid_t inc = 2;	/* ports configured per loop iteration */

	/*
	 * Chained and loop topologies configure one port per iteration;
	 * the paired topology handles ports two at a time and therefore
	 * needs an even port count.
	 */
	if (port_topology == PORT_TOPOLOGY_CHAINED ||
	    port_topology == PORT_TOPOLOGY_LOOP) {
		inc = 1;
	} else if (nb_fwd_ports % 2) {
		printf("\nWarning! Cannot handle an odd number of ports "
		       "with the current port topology. Configuration "
		       "must be changed to have an even number of ports, "
		       "or relaunch application with "
		       "--port-topology=chained\n\n");
	}

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
		/* Peer port: next port (wrapping), or itself in loop mode. */
		if (port_topology != PORT_TOPOLOGY_LOOP)
			j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
		else
			j = i;
		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue  = 0;
		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
		fwd_streams[i]->tx_queue  = 0;
		fwd_streams[i]->peer_addr = j;

		/* In paired topology, also set up the reverse stream j -> i. */
		if (port_topology == PORT_TOPOLOGY_PAIRED) {
			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
			fwd_streams[j]->rx_queue  = 0;
			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
			fwd_streams[j]->tx_queue  = 0;
			fwd_streams[j]->peer_addr = i;
		}
	}
}
1049 
1050 /**
1051  * For the RSS forwarding test, each core is assigned on every port a transmit
1052  * queue whose index is the index of the core itself. This approach limits the
1053  * maximumm number of processing cores of the RSS test to the maximum number of
1054  * TX queues supported by the devices.
1055  *
1056  * Each core is assigned a single stream, each stream being composed of
1057  * a RX queue to poll on a RX port for input messages, associated with
1058  * a TX queue of a TX port where to send forwarded packets.
1059  * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
1060  * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
1061  * following rules:
1062  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
1063  *    - TxQl = RxQj
1064  */
1065 static void
1066 rss_fwd_config_setup(void)
1067 {
1068 	portid_t   rxp;
1069 	portid_t   txp;
1070 	queueid_t  rxq;
1071 	queueid_t  nb_q;
1072 	lcoreid_t  lc_id;
1073 
1074 	nb_q = nb_rxq;
1075 	if (nb_q > nb_txq)
1076 		nb_q = nb_txq;
1077 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1078 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1079 	cur_fwd_config.nb_fwd_streams =
1080 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
1081 	if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
1082 		cur_fwd_config.nb_fwd_streams =
1083 			(streamid_t)cur_fwd_config.nb_fwd_lcores;
1084 	else
1085 		cur_fwd_config.nb_fwd_lcores =
1086 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
1087 
1088 	/* reinitialize forwarding streams */
1089 	init_fwd_streams();
1090 
1091 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
1092 	rxp = 0; rxq = 0;
1093 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
1094 		struct fwd_stream *fs;
1095 
1096 		fs = fwd_streams[lc_id];
1097 
1098 		if ((rxp & 0x1) == 0)
1099 			txp = (portid_t) (rxp + 1);
1100 		else
1101 			txp = (portid_t) (rxp - 1);
1102 		/*
1103 		 * if we are in loopback, simply send stuff out through the
1104 		 * ingress port
1105 		 */
1106 		if (port_topology == PORT_TOPOLOGY_LOOP)
1107 			txp = rxp;
1108 
1109 		fs->rx_port = fwd_ports_ids[rxp];
1110 		fs->rx_queue = rxq;
1111 		fs->tx_port = fwd_ports_ids[txp];
1112 		fs->tx_queue = rxq;
1113 		fs->peer_addr = fs->tx_port;
1114 		rxq = (queueid_t) (rxq + 1);
1115 		if (rxq < nb_q)
1116 			continue;
1117 		/*
1118 		 * rxq == nb_q
1119 		 * Restart from RX queue 0 on next RX port
1120 		 */
1121 		rxq = 0;
1122 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
1123 			rxp = (portid_t)
1124 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
1125 		else
1126 			rxp = (portid_t) (rxp + 1);
1127 	}
1128 }
1129 
1130 /*
1131  * In DCB and VT on,the mapping of 128 receive queues to 128 transmit queues.
1132  */
1133 static void
1134 dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq)
1135 {
1136 	if(dcb_q_mapping == DCB_4_TCS_Q_MAPPING) {
1137 
1138 		if (rxq < 32)
1139 			/* tc0: 0-31 */
1140 			*txq = rxq;
1141 		else if (rxq < 64) {
1142 			/* tc1: 64-95 */
1143 			*txq =  (uint16_t)(rxq + 32);
1144 		}
1145 		else {
1146 			/* tc2: 96-111;tc3:112-127 */
1147 			*txq =  (uint16_t)(rxq/2 + 64);
1148 		}
1149 	}
1150 	else {
1151 		if (rxq < 16)
1152 			/* tc0 mapping*/
1153 			*txq = rxq;
1154 		else if (rxq < 32) {
1155 			/* tc1 mapping*/
1156 			 *txq = (uint16_t)(rxq + 16);
1157 		}
1158 		else if (rxq < 64) {
1159 			/*tc2,tc3 mapping */
1160 			*txq =  (uint16_t)(rxq + 32);
1161 		}
1162 		else {
1163 			/* tc4,tc5,tc6 and tc7 mapping */
1164 			*txq =  (uint16_t)(rxq/2 + 64);
1165 		}
1166 	}
1167 }
1168 
1169 /**
1170  * For the DCB forwarding test, each core is assigned on every port multi-transmit
1171  * queue.
1172  *
1173  * Each core is assigned a multi-stream, each stream being composed of
1174  * a RX queue to poll on a RX port for input messages, associated with
1175  * a TX queue of a TX port where to send forwarded packets.
1176  * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
1177  * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
1178  * following rules:
1179  * In VT mode,
1180  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
1181  *    - TxQl = RxQj
1182  * In non-VT mode,
1183  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
1184  *    There is a mapping of RxQj to TxQl to be required,and the mapping was implemented
1185  *    in dcb_rxq_2_txq_mapping function.
1186  */
1187 static void
1188 dcb_fwd_config_setup(void)
1189 {
1190 	portid_t   rxp;
1191 	portid_t   txp;
1192 	queueid_t  rxq;
1193 	queueid_t  nb_q;
1194 	lcoreid_t  lc_id;
1195 	uint16_t sm_id;
1196 
1197 	nb_q = nb_rxq;
1198 
1199 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1200 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1201 	cur_fwd_config.nb_fwd_streams =
1202 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
1203 
1204 	/* reinitialize forwarding streams */
1205 	init_fwd_streams();
1206 
1207 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
1208 	rxp = 0; rxq = 0;
1209 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
1210 		/* a fwd core can run multi-streams */
1211 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++)
1212 		{
1213 			struct fwd_stream *fs;
1214 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
1215 			if ((rxp & 0x1) == 0)
1216 				txp = (portid_t) (rxp + 1);
1217 			else
1218 				txp = (portid_t) (rxp - 1);
1219 			fs->rx_port = fwd_ports_ids[rxp];
1220 			fs->rx_queue = rxq;
1221 			fs->tx_port = fwd_ports_ids[txp];
1222 			if (dcb_q_mapping == DCB_VT_Q_MAPPING)
1223 				fs->tx_queue = rxq;
1224 			else
1225 				dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue);
1226 			fs->peer_addr = fs->tx_port;
1227 			rxq = (queueid_t) (rxq + 1);
1228 			if (rxq < nb_q)
1229 				continue;
1230 			rxq = 0;
1231 			if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
1232 				rxp = (portid_t)
1233 					(rxp + ((nb_ports >> 1) / nb_fwd_ports));
1234 			else
1235 				rxp = (portid_t) (rxp + 1);
1236 		}
1237 	}
1238 }
1239 
/*
 * Set up the forwarding configuration for the "icmpecho" engine:
 * one stream per RX queue of every forwarding port, each stream
 * transmitting on the same port it receives from.
 */
static void
icmp_echo_config_setup(void)
{
	portid_t  rxp;
	queueid_t rxq;
	lcoreid_t lc_id;
	uint16_t  sm_id;

	/* Do not use more lcores than there are (port, TX queue) pairs. */
	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
	else
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	/* At least one stream per forwarding lcore. */
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __FUNCTION__,
		       cur_fwd_config.nb_fwd_lcores,
		       cur_fwd_config.nb_fwd_ports,
		       cur_fwd_config.nb_fwd_streams);
	}

	/* reinitialize forwarding streams */
	init_fwd_streams();
	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf("  core=%d: \n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			/* Echo back on the ingress port. */
			fs->tx_port = fs->rx_port;
			fs->tx_queue = lc_id;
			fs->peer_addr = fs->tx_port;
			if (verbose_level > 0)
				printf("  stream=%d port=%d rxq=%d txq=%d\n",
				       sm_id, fs->rx_port, fs->rx_queue,
				       fs->tx_queue);
			/* Advance to next RX queue; wrap to the next port. */
			rxq = (queueid_t) (rxq + 1);
			if (rxq == nb_rxq) {
				rxq = 0;
				rxp = (portid_t) (rxp + 1);
			}
		}
	}
}
1294 
1295 void
1296 fwd_config_setup(void)
1297 {
1298 	cur_fwd_config.fwd_eng = cur_fwd_eng;
1299 	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
1300 		icmp_echo_config_setup();
1301 		return;
1302 	}
1303 	if ((nb_rxq > 1) && (nb_txq > 1)){
1304 		if (dcb_config)
1305 			dcb_fwd_config_setup();
1306 		else
1307 			rss_fwd_config_setup();
1308 	}
1309 	else
1310 		simple_fwd_config_setup();
1311 }
1312 
/*
 * Print the forwarding configuration: global settings first, then for
 * each forwarding lcore the list of its streams with their RX/TX
 * port/queue assignments and peer Ethernet addresses.
 */
static void
pkt_fwd_config_display(struct fwd_config *cfg)
{
	struct fwd_stream *fs;
	lcoreid_t  lc_id;
	streamid_t sm_id;

	printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
		"NUMA support %s, MP over anonymous pages %s\n",
		cfg->fwd_eng->fwd_mode_name,
		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
		numa_support == 1 ? "enabled" : "disabled",
		mp_anon != 0 ? "enabled" : "disabled");

	/* The mac_retry engine has extra TX retry parameters to show. */
	if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0)
		printf("TX retry num: %u, delay between TX retries: %uus\n",
			burst_tx_retry_num, burst_tx_delay_time);
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
		       "%d streams:",
		       fwd_lcores_cpuids[lc_id],
		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
		       fwd_lcores[lc_id]->stream_nb);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
			       "P=%d/Q=%d (socket %u) ",
			       fs->rx_port, fs->rx_queue,
			       ports[fs->rx_port].socket_id,
			       fs->tx_port, fs->tx_queue,
			       ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				      &peer_eth_addrs[fs->peer_addr]);
		}
		printf("\n");
	}
	printf("\n");
}
1351 
1352 
1353 void
1354 fwd_config_display(void)
1355 {
1356 	if((dcb_config) && (nb_fwd_lcores == 1)) {
1357 		printf("In DCB mode,the nb forwarding cores should be larger than 1\n");
1358 		return;
1359 	}
1360 	fwd_config_setup();
1361 	pkt_fwd_config_display(&cur_fwd_config);
1362 }
1363 
1364 int
1365 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
1366 {
1367 	unsigned int i;
1368 	unsigned int lcore_cpuid;
1369 	int record_now;
1370 
1371 	record_now = 0;
1372  again:
1373 	for (i = 0; i < nb_lc; i++) {
1374 		lcore_cpuid = lcorelist[i];
1375 		if (! rte_lcore_is_enabled(lcore_cpuid)) {
1376 			printf("lcore %u not enabled\n", lcore_cpuid);
1377 			return -1;
1378 		}
1379 		if (lcore_cpuid == rte_get_master_lcore()) {
1380 			printf("lcore %u cannot be masked on for running "
1381 			       "packet forwarding, which is the master lcore "
1382 			       "and reserved for command line parsing only\n",
1383 			       lcore_cpuid);
1384 			return -1;
1385 		}
1386 		if (record_now)
1387 			fwd_lcores_cpuids[i] = lcore_cpuid;
1388 	}
1389 	if (record_now == 0) {
1390 		record_now = 1;
1391 		goto again;
1392 	}
1393 	nb_cfg_lcores = (lcoreid_t) nb_lc;
1394 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
1395 		printf("previous number of forwarding cores %u - changed to "
1396 		       "number of configured cores %u\n",
1397 		       (unsigned int) nb_fwd_lcores, nb_lc);
1398 		nb_fwd_lcores = (lcoreid_t) nb_lc;
1399 	}
1400 
1401 	return 0;
1402 }
1403 
/*
 * Expand a 64-bit lcore bit mask into a list of lcore ids and apply it.
 * Returns 0 on success, -1 when the mask is empty or the resulting
 * list is rejected.
 */
int
set_fwd_lcores_mask(uint64_t lcoremask)
{
	unsigned int ids[64];
	unsigned int count = 0;
	unsigned int bit;

	if (lcoremask == 0) {
		printf("Invalid NULL mask of cores\n");
		return -1;
	}
	for (bit = 0; bit < 64; bit++) {
		if ((lcoremask & (1ULL << bit)) != 0)
			ids[count++] = bit;
	}
	return set_fwd_lcores_list(ids, count);
}
1423 
1424 void
1425 set_fwd_lcores_number(uint16_t nb_lc)
1426 {
1427 	if (nb_lc > nb_cfg_lcores) {
1428 		printf("nb fwd cores %u > %u (max. number of configured "
1429 		       "lcores) - ignored\n",
1430 		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
1431 		return;
1432 	}
1433 	nb_fwd_lcores = (lcoreid_t) nb_lc;
1434 	printf("Number of forwarding cores set to %u\n",
1435 	       (unsigned int) nb_fwd_lcores);
1436 }
1437 
1438 void
1439 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
1440 {
1441 	unsigned int i;
1442 	portid_t port_id;
1443 	int record_now;
1444 
1445 	record_now = 0;
1446  again:
1447 	for (i = 0; i < nb_pt; i++) {
1448 		port_id = (portid_t) portlist[i];
1449 		if (port_id_is_invalid(port_id, ENABLED_WARN))
1450 			return;
1451 		if (record_now)
1452 			fwd_ports_ids[i] = port_id;
1453 	}
1454 	if (record_now == 0) {
1455 		record_now = 1;
1456 		goto again;
1457 	}
1458 	nb_cfg_ports = (portid_t) nb_pt;
1459 	if (nb_fwd_ports != (portid_t) nb_pt) {
1460 		printf("previous number of forwarding ports %u - changed to "
1461 		       "number of configured ports %u\n",
1462 		       (unsigned int) nb_fwd_ports, nb_pt);
1463 		nb_fwd_ports = (portid_t) nb_pt;
1464 	}
1465 }
1466 
1467 void
1468 set_fwd_ports_mask(uint64_t portmask)
1469 {
1470 	unsigned int portlist[64];
1471 	unsigned int nb_pt;
1472 	unsigned int i;
1473 
1474 	if (portmask == 0) {
1475 		printf("Invalid NULL mask of ports\n");
1476 		return;
1477 	}
1478 	nb_pt = 0;
1479 	for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) {
1480 		if (! ((uint64_t)(1ULL << i) & portmask))
1481 			continue;
1482 		portlist[nb_pt++] = i;
1483 	}
1484 	set_fwd_ports_list(portlist, nb_pt);
1485 }
1486 
1487 void
1488 set_fwd_ports_number(uint16_t nb_pt)
1489 {
1490 	if (nb_pt > nb_cfg_ports) {
1491 		printf("nb fwd ports %u > %u (number of configured "
1492 		       "ports) - ignored\n",
1493 		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
1494 		return;
1495 	}
1496 	nb_fwd_ports = (portid_t) nb_pt;
1497 	printf("Number of forwarding ports set to %u\n",
1498 	       (unsigned int) nb_fwd_ports);
1499 }
1500 
1501 void
1502 set_nb_pkt_per_burst(uint16_t nb)
1503 {
1504 	if (nb > MAX_PKT_BURST) {
1505 		printf("nb pkt per burst: %u > %u (maximum packet per burst) "
1506 		       " ignored\n",
1507 		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
1508 		return;
1509 	}
1510 	nb_pkt_per_burst = nb;
1511 	printf("Number of packets per burst set to %u\n",
1512 	       (unsigned int) nb_pkt_per_burst);
1513 }
1514 
/*
 * Configure the number and the lengths of the segments that compose
 * the TX packets built by the packet-generating forwarding engines.
 * The new layout is committed only after all checks pass.
 */
void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	/* Each segment consumes one TX descriptor. */
	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length is lower or equal than
	 * the mbuf data size.
	 * Check also that the total packet length is greater or equal than the
	 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
			       i, seg_lengths[i], (unsigned) mbuf_data_size);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
		printf("total packet length=%u < %d - give up\n",
				(unsigned) tx_pkt_len,
				(int)(sizeof(struct ether_hdr) + 20 + 8));
		return;
	}

	/* All checks passed: commit the new segment layout. */
	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length  = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}
1555 
1556 char*
1557 list_pkt_forwarding_modes(void)
1558 {
1559 	static char fwd_modes[128] = "";
1560 	const char *separator = "|";
1561 	struct fwd_engine *fwd_eng;
1562 	unsigned i = 0;
1563 
1564 	if (strlen (fwd_modes) == 0) {
1565 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
1566 			strcat(fwd_modes, fwd_eng->fwd_mode_name);
1567 			strcat(fwd_modes, separator);
1568 		}
1569 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
1570 	}
1571 
1572 	return fwd_modes;
1573 }
1574 
1575 void
1576 set_pkt_forwarding_mode(const char *fwd_mode_name)
1577 {
1578 	struct fwd_engine *fwd_eng;
1579 	unsigned i;
1580 
1581 	i = 0;
1582 	while ((fwd_eng = fwd_engines[i]) != NULL) {
1583 		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
1584 			printf("Set %s packet forwarding mode\n",
1585 			       fwd_mode_name);
1586 			cur_fwd_eng = fwd_eng;
1587 			return;
1588 		}
1589 		i++;
1590 	}
1591 	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
1592 }
1593 
1594 void
1595 set_verbose_level(uint16_t vb_level)
1596 {
1597 	printf("Change verbose level from %u to %u\n",
1598 	       (unsigned int) verbose_level, (unsigned int) vb_level);
1599 	verbose_level = vb_level;
1600 }
1601 
1602 void
1603 vlan_extend_set(portid_t port_id, int on)
1604 {
1605 	int diag;
1606 	int vlan_offload;
1607 
1608 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1609 		return;
1610 
1611 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1612 
1613 	if (on)
1614 		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
1615 	else
1616 		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
1617 
1618 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1619 	if (diag < 0)
1620 		printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
1621 	       "diag=%d\n", port_id, on, diag);
1622 }
1623 
1624 void
1625 rx_vlan_strip_set(portid_t port_id, int on)
1626 {
1627 	int diag;
1628 	int vlan_offload;
1629 
1630 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1631 		return;
1632 
1633 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1634 
1635 	if (on)
1636 		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
1637 	else
1638 		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
1639 
1640 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1641 	if (diag < 0)
1642 		printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
1643 	       "diag=%d\n", port_id, on, diag);
1644 }
1645 
1646 void
1647 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
1648 {
1649 	int diag;
1650 
1651 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1652 		return;
1653 
1654 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
1655 	if (diag < 0)
1656 		printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
1657 	       "diag=%d\n", port_id, queue_id, on, diag);
1658 }
1659 
1660 void
1661 rx_vlan_filter_set(portid_t port_id, int on)
1662 {
1663 	int diag;
1664 	int vlan_offload;
1665 
1666 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1667 		return;
1668 
1669 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1670 
1671 	if (on)
1672 		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
1673 	else
1674 		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
1675 
1676 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1677 	if (diag < 0)
1678 		printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
1679 	       "diag=%d\n", port_id, on, diag);
1680 }
1681 
1682 void
1683 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
1684 {
1685 	int diag;
1686 
1687 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1688 		return;
1689 	if (vlan_id_is_invalid(vlan_id))
1690 		return;
1691 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1692 	if (diag == 0)
1693 		return;
1694 	printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
1695 	       "diag=%d\n",
1696 	       port_id, vlan_id, on, diag);
1697 }
1698 
1699 void
1700 rx_vlan_all_filter_set(portid_t port_id, int on)
1701 {
1702 	uint16_t vlan_id;
1703 
1704 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1705 		return;
1706 	for (vlan_id = 0; vlan_id < 4096; vlan_id++)
1707 		rx_vft_set(port_id, vlan_id, on);
1708 }
1709 
1710 void
1711 vlan_tpid_set(portid_t port_id, uint16_t tp_id)
1712 {
1713 	int diag;
1714 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1715 		return;
1716 
1717 	diag = rte_eth_dev_set_vlan_ether_type(port_id, tp_id);
1718 	if (diag == 0)
1719 		return;
1720 
1721 	printf("tx_vlan_tpid_set(port_pi=%d, tpid=%d) failed "
1722 	       "diag=%d\n",
1723 	       port_id, tp_id, diag);
1724 }
1725 
1726 void
1727 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
1728 {
1729 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1730 		return;
1731 	if (vlan_id_is_invalid(vlan_id))
1732 		return;
1733 	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
1734 	ports[port_id].tx_vlan_id = vlan_id;
1735 }
1736 
1737 void
1738 tx_vlan_reset(portid_t port_id)
1739 {
1740 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1741 		return;
1742 	ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_INSERT_VLAN;
1743 }
1744 
1745 void
1746 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
1747 {
1748 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1749 		return;
1750 
1751 	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
1752 }
1753 
/*
 * Map one RX or TX queue of a port onto one of the
 * RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue statistics counters.
 *
 * An existing mapping for the same (port, queue) pair is overwritten;
 * otherwise a new entry is appended to the relevant mapping table.
 * NOTE(review): no capacity check is done before appending - the
 * tables are presumably sized for the worst case; confirm.
 */
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	uint16_t i;
	uint8_t existing_mapping_found = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
				RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /*then tx*/
		/* Overwrite an existing TX mapping for this (port, queue). */
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
				tx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
			nb_tx_queue_stats_mappings++;
		}
	}
	else { /*rx*/
		/* Overwrite an existing RX mapping for this (port, queue). */
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
				rx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
			nb_rx_queue_stats_mappings++;
		}
	}
}
1805 
/*
 * Print the global field masks applied by the flow director:
 * VLAN TCI, IPv4/IPv6 source and destination addresses, L4 ports.
 */
static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n    vlan_tci: 0x%04x, src_ipv4: 0x%08x, dst_ipv4: 0x%08x,"
		      " src_port: 0x%04x, dst_port: 0x%04x",
		mask->vlan_tci_mask, mask->ipv4_mask.src_ip,
		mask->ipv4_mask.dst_ip,
		mask->src_port_mask, mask->dst_port_mask);

	printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x,"
		     " dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		mask->ipv6_mask.src_ip[0], mask->ipv6_mask.src_ip[1],
		mask->ipv6_mask.src_ip[2], mask->ipv6_mask.src_ip[3],
		mask->ipv6_mask.dst_ip[0], mask->ipv6_mask.dst_ip[1],
		mask->ipv6_mask.dst_ip[2], mask->ipv6_mask.dst_ip[3]);
	printf("\n");
}
1823 
1824 static inline void
1825 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
1826 {
1827 	struct rte_eth_flex_payload_cfg *cfg;
1828 	uint32_t i, j;
1829 
1830 	for (i = 0; i < flex_conf->nb_payloads; i++) {
1831 		cfg = &flex_conf->flex_set[i];
1832 		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
1833 			printf("\n    RAW:  ");
1834 		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
1835 			printf("\n    L2_PAYLOAD:  ");
1836 		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
1837 			printf("\n    L3_PAYLOAD:  ");
1838 		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
1839 			printf("\n    L4_PAYLOAD:  ");
1840 		else
1841 			printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
1842 		for (j = 0; j < num; j++)
1843 			printf("  %-5u", cfg->src_offset[j]);
1844 	}
1845 	printf("\n");
1846 }
1847 
1848 static char *
1849 flowtype_to_str(uint16_t flow_type)
1850 {
1851 	struct flow_type_info {
1852 		char str[32];
1853 		uint16_t ftype;
1854 	};
1855 
1856 	uint8_t i;
1857 	static struct flow_type_info flowtype_str_table[] = {
1858 		{"raw", RTE_ETH_FLOW_RAW},
1859 		{"ipv4", RTE_ETH_FLOW_IPV4},
1860 		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
1861 		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
1862 		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
1863 		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
1864 		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
1865 		{"ipv6", RTE_ETH_FLOW_IPV6},
1866 		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
1867 		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
1868 		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
1869 		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
1870 		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
1871 		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
1872 	};
1873 
1874 	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
1875 		if (flowtype_str_table[i].ftype == flow_type)
1876 			return flowtype_str_table[i].str;
1877 	}
1878 
1879 	return NULL;
1880 }
1881 
/*
 * Print the configured per-flow-type flex masks; num is the number of
 * mask bytes to display per entry.
 */
static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_fdir_flex_mask *mask;
	uint32_t i, j;
	char *p;

	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
		mask = &flex_conf->flex_mask[i];
		p = flowtype_to_str(mask->flow_type);
		printf("\n    %s:\t", p ? p : "unknown");
		for (j = 0; j < num; j++)
			printf(" %02x", mask->mask[j]);
	}
	printf("\n");
}
1898 
1899 static inline void
1900 print_fdir_flow_type(uint32_t flow_types_mask)
1901 {
1902 	int i;
1903 	char *p;
1904 
1905 	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
1906 		if (!(flow_types_mask & (1 << i)))
1907 			continue;
1908 		p = flowtype_to_str(i);
1909 		if (p)
1910 			printf(" %s", p);
1911 		else
1912 			printf(" unknown");
1913 	}
1914 	printf("\n");
1915 }
1916 
/*
 * Display the flow director (FDIR) mode, capabilities, masks, flex
 * configuration and statistics of a port. Prints a message and
 * returns when the port does not support the FDIR filter type.
 */
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;
	int ret;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
	if (ret < 0) {
		printf("\n FDIR is not supported on port %-2d\n",
			port_id);
		return;
	}

	/* Query FDIR configuration and statistics from the driver. */
	memset(&fdir_info, 0, sizeof(fdir_info));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
			       RTE_ETH_FILTER_INFO, &fdir_info);
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
			       RTE_ETH_FILTER_STATS, &fdir_stat);
	printf("\n  %s FDIR infos for port %-2d     %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf("  MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf("  PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf("  SIGNATURE\n");
	else
		printf("  DISABLE\n");
	printf("  SUPPORTED FLOW TYPE: ");
	print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	printf("  FLEX PAYLOAD INFO:\n");
	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
		fdir_info.flex_payload_unit,
		fdir_info.max_flex_payload_segment_num,
		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf("  MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf("  FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf("  FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
	       "  add:	         %-10"PRIu64"  remove:        %"PRIu64"\n"
	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf("  %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}
1985 
/*
 * Store a flow director flex mask in the port's FDIR configuration.
 *
 * An entry with the same flow type is overwritten; otherwise the mask
 * is appended, unless the table is already full.
 * NOTE(review): the lookup scans the whole flex_mask array rather than
 * only the nb_flexmasks entries in use, so a zero-initialized slot can
 * match flow_type 0 - confirm this is intended.
 */
void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	/* No match found: append a new entry if there is room. */
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Can not set flex"
				" mask for flow_type(%u).", cfg->flow_type);
			return;
		}
	}
	(void)rte_memcpy(&flex_conf->flex_mask[idx],
			 cfg,
			 sizeof(struct rte_eth_fdir_flex_mask));
}
2015 
2016 void
2017 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
2018 {
2019 	struct rte_port *port;
2020 	struct rte_eth_fdir_flex_conf *flex_conf;
2021 	int i, idx = 0;
2022 
2023 	port = &ports[port_id];
2024 	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
2025 	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
2026 		if (cfg->type == flex_conf->flex_set[i].type) {
2027 			idx = i;
2028 			break;
2029 		}
2030 	}
2031 	if (i >= RTE_ETH_PAYLOAD_MAX) {
2032 		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
2033 			idx = flex_conf->nb_payloads;
2034 			flex_conf->nb_payloads++;
2035 		} else {
2036 			printf("The flex payload table is full. Can not set"
2037 				" flex payload for type(%u).", cfg->type);
2038 			return;
2039 		}
2040 	}
2041 	(void)rte_memcpy(&flex_conf->flex_set[idx],
2042 			 cfg,
2043 			 sizeof(struct rte_eth_flex_payload_cfg));
2044 
2045 }
2046 
2047 void
2048 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
2049 {
2050 	int diag;
2051 
2052 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2053 		return;
2054 	if (is_rx)
2055 		diag = rte_eth_dev_set_vf_rx(port_id,vf,on);
2056 	else
2057 		diag = rte_eth_dev_set_vf_tx(port_id,vf,on);
2058 	if (diag == 0)
2059 		return;
2060 	if(is_rx)
2061 		printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
2062 	       		"diag=%d\n", port_id, diag);
2063 	else
2064 		printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
2065 	       		"diag=%d\n", port_id, diag);
2066 
2067 }
2068 
2069 void
2070 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
2071 {
2072 	int diag;
2073 
2074 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2075 		return;
2076 	if (vlan_id_is_invalid(vlan_id))
2077 		return;
2078 	diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
2079 	if (diag == 0)
2080 		return;
2081 	printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
2082 	       "diag=%d\n", port_id, diag);
2083 }
2084 
2085 int
2086 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
2087 {
2088 	int diag;
2089 	struct rte_eth_link link;
2090 
2091 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2092 		return 1;
2093 	rte_eth_link_get_nowait(port_id, &link);
2094 	if (rate > link.link_speed) {
2095 		printf("Invalid rate value:%u bigger than link speed: %u\n",
2096 			rate, link.link_speed);
2097 		return 1;
2098 	}
2099 	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
2100 	if (diag == 0)
2101 		return diag;
2102 	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
2103 		port_id, diag);
2104 	return diag;
2105 }
2106 
2107 int
2108 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
2109 {
2110 	int diag;
2111 	struct rte_eth_link link;
2112 
2113 	if (q_msk == 0)
2114 		return 0;
2115 
2116 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2117 		return 1;
2118 	rte_eth_link_get_nowait(port_id, &link);
2119 	if (rate > link.link_speed) {
2120 		printf("Invalid rate value:%u bigger than link speed: %u\n",
2121 			rate, link.link_speed);
2122 		return 1;
2123 	}
2124 	diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk);
2125 	if (diag == 0)
2126 		return diag;
2127 	printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n",
2128 		port_id, diag);
2129 	return diag;
2130 }
2131