xref: /dpdk/app/test-pmd/config.c (revision ac2ece3fb1f5511b96b595d6cd6861d25520b99b)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 /*   BSD LICENSE
34  *
35  *   Copyright(c) 2013 6WIND.
36  *
37  *   Redistribution and use in source and binary forms, with or without
38  *   modification, are permitted provided that the following conditions
39  *   are met:
40  *
41  *     * Redistributions of source code must retain the above copyright
42  *       notice, this list of conditions and the following disclaimer.
43  *     * Redistributions in binary form must reproduce the above copyright
44  *       notice, this list of conditions and the following disclaimer in
45  *       the documentation and/or other materials provided with the
46  *       distribution.
47  *     * Neither the name of 6WIND S.A. nor the names of its
48  *       contributors may be used to endorse or promote products derived
49  *       from this software without specific prior written permission.
50  *
51  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  */
63 
64 #include <stdarg.h>
65 #include <errno.h>
66 #include <stdio.h>
67 #include <string.h>
69 #include <stdint.h>
70 #include <inttypes.h>
71 
72 #include <sys/queue.h>
73 
74 #include <rte_common.h>
75 #include <rte_byteorder.h>
76 #include <rte_debug.h>
77 #include <rte_log.h>
78 #include <rte_memory.h>
79 #include <rte_memcpy.h>
80 #include <rte_memzone.h>
81 #include <rte_launch.h>
82 #include <rte_tailq.h>
83 #include <rte_eal.h>
84 #include <rte_per_lcore.h>
85 #include <rte_lcore.h>
86 #include <rte_atomic.h>
87 #include <rte_branch_prediction.h>
88 #include <rte_ring.h>
89 #include <rte_mempool.h>
90 #include <rte_mbuf.h>
91 #include <rte_interrupts.h>
92 #include <rte_pci.h>
93 #include <rte_ether.h>
94 #include <rte_ethdev.h>
95 #include <rte_string_fns.h>
96 
97 #include "testpmd.h"
98 
99 static void
100 print_ethaddr(const char *name, struct ether_addr *eth_addr)
101 {
102 	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
103 	       (unsigned int)eth_addr->addr_bytes[0],
104 	       (unsigned int)eth_addr->addr_bytes[1],
105 	       (unsigned int)eth_addr->addr_bytes[2],
106 	       (unsigned int)eth_addr->addr_bytes[3],
107 	       (unsigned int)eth_addr->addr_bytes[4],
108 	       (unsigned int)eth_addr->addr_bytes[5]);
109 }
110 
111 void
112 nic_stats_display(portid_t port_id)
113 {
114 	struct rte_eth_stats stats;
115 	struct rte_port *port = &ports[port_id];
116 	uint8_t i;
117 
118 	static const char *nic_stats_border = "########################";
119 
120 	if (port_id >= nb_ports) {
121 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
122 		return;
123 	}
124 	rte_eth_stats_get(port_id, &stats);
125 	printf("\n  %s NIC statistics for port %-2d %s\n",
126 	       nic_stats_border, port_id, nic_stats_border);
127 
128 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
129 		printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
130 		       "%-"PRIu64"\n",
131 		       stats.ipackets, stats.imissed, stats.ibytes);
132 		printf("  RX-badcrc:  %-10"PRIu64" RX-badlen: %-10"PRIu64" RX-errors: "
133 		       "%-"PRIu64"\n",
134 		       stats.ibadcrc, stats.ibadlen, stats.ierrors);
135 		printf("  RX-nombuf:  %-10"PRIu64"\n",
136 		       stats.rx_nombuf);
137 		printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
138 		       "%-"PRIu64"\n",
139 		       stats.opackets, stats.oerrors, stats.obytes);
140 	}
141 	else {
142 		printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
143 		       "    RX-bytes: %10"PRIu64"\n",
144 		       stats.ipackets, stats.ierrors, stats.ibytes);
145 		printf("  RX-badcrc:               %10"PRIu64"    RX-badlen: %10"PRIu64
146 		       "  RX-errors:  %10"PRIu64"\n",
147 		       stats.ibadcrc, stats.ibadlen, stats.ierrors);
148 		printf("  RX-nombuf:               %10"PRIu64"\n",
149 		       stats.rx_nombuf);
150 		printf("  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
151 		       "    TX-bytes: %10"PRIu64"\n",
152 		       stats.opackets, stats.oerrors, stats.obytes);
153 	}
154 
155 	/* stats fdir */
156 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
157 		printf("  Fdirmiss:   %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n",
158 		       stats.fdirmiss,
159 		       stats.fdirmatch);
160 
161 	if (port->rx_queue_stats_mapping_enabled) {
162 		printf("\n");
163 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
164 			printf("  Stats reg %2d RX-packets: %10"PRIu64
165 			       "    RX-errors: %10"PRIu64
166 			       "    RX-bytes: %10"PRIu64"\n",
167 			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
168 		}
169 	}
170 	if (port->tx_queue_stats_mapping_enabled) {
171 		printf("\n");
172 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
173 			printf("  Stats reg %2d TX-packets: %10"PRIu64
174 			       "                             TX-bytes: %10"PRIu64"\n",
175 			       i, stats.q_opackets[i], stats.q_obytes[i]);
176 		}
177 	}
178 
179 	/* Display statistics of XON/XOFF pause frames, if any. */
180 	if ((stats.tx_pause_xon  | stats.rx_pause_xon |
181 	     stats.tx_pause_xoff | stats.rx_pause_xoff) > 0) {
182 		printf("  RX-XOFF:    %-10"PRIu64" RX-XON:    %-10"PRIu64"\n",
183 		       stats.rx_pause_xoff, stats.rx_pause_xon);
184 		printf("  TX-XOFF:    %-10"PRIu64" TX-XON:    %-10"PRIu64"\n",
185 		       stats.tx_pause_xoff, stats.tx_pause_xon);
186 	}
187 	printf("  %s############################%s\n",
188 	       nic_stats_border, nic_stats_border);
189 }
190 
191 void
192 nic_stats_clear(portid_t port_id)
193 {
194 	if (port_id >= nb_ports) {
195 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
196 		return;
197 	}
198 	rte_eth_stats_reset(port_id);
199 	printf("\n  NIC statistics for port %d cleared\n", port_id);
200 }
201 
202 
203 void
204 nic_stats_mapping_display(portid_t port_id)
205 {
206 	struct rte_port *port = &ports[port_id];
207 	uint16_t i;
208 
209 	static const char *nic_stats_mapping_border = "########################";
210 
211 	if (port_id >= nb_ports) {
212 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
213 		return;
214 	}
215 
216 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
217 		printf("Port id %d - either does not support queue statistic mapping or"
218 		       " has no queue statistic mapping set\n", port_id);
219 		return;
220 	}
221 
222 	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
223 	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);
224 
225 	if (port->rx_queue_stats_mapping_enabled) {
226 		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
227 			if (rx_queue_stats_mappings[i].port_id == port_id) {
228 				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
229 				       rx_queue_stats_mappings[i].queue_id,
230 				       rx_queue_stats_mappings[i].stats_counter_id);
231 			}
232 		}
233 		printf("\n");
234 	}
235 
236 
237 	if (port->tx_queue_stats_mapping_enabled) {
238 		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
239 			if (tx_queue_stats_mappings[i].port_id == port_id) {
240 				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
241 				       tx_queue_stats_mappings[i].queue_id,
242 				       tx_queue_stats_mappings[i].stats_counter_id);
243 			}
244 		}
245 	}
246 
247 	printf("  %s####################################%s\n",
248 	       nic_stats_mapping_border, nic_stats_mapping_border);
249 }
250 
251 void
252 port_infos_display(portid_t port_id)
253 {
254 	struct rte_port *port;
255 	struct rte_eth_link link;
256 	int vlan_offload;
257 	struct rte_mempool * mp;
258 	static const char *info_border = "*********************";
259 
260 	if (port_id >= nb_ports) {
261 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
262 		return;
263 	}
264 	port = &ports[port_id];
265 	rte_eth_link_get_nowait(port_id, &link);
266 	printf("\n%s Infos for port %-2d %s\n",
267 	       info_border, port_id, info_border);
268 	print_ethaddr("MAC address: ", &port->eth_addr);
269 	printf("\nConnected to socket: %u", port->socket_id);
270 
271 	if (port_numa[port_id] != NUMA_NO_CONFIG) {
272 		mp = mbuf_pool_find(port_numa[port_id]);
273 		if (mp)
274 			printf("\nmemory allocation on the socket: %d",
275 							port_numa[port_id]);
276 	} else
277 		printf("\nmemory allocation on the socket: %u",port->socket_id);
278 
279 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
280 	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
281 	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
282 	       ("full-duplex") : ("half-duplex"));
283 	printf("Promiscuous mode: %s\n",
284 	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
285 	printf("Allmulticast mode: %s\n",
286 	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
287 	printf("Maximum number of MAC addresses: %u\n",
288 	       (unsigned int)(port->dev_info.max_mac_addrs));
289 	printf("Maximum number of MAC addresses of hash filtering: %u\n",
290 	       (unsigned int)(port->dev_info.max_hash_mac_addrs));
291 
292 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
293 	if (vlan_offload >= 0){
294 		printf("VLAN offload: \n");
295 		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
296 			printf("  strip on \n");
297 		else
298 			printf("  strip off \n");
299 
300 		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
301 			printf("  filter on \n");
302 		else
303 			printf("  filter off \n");
304 
305 		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
306 			printf("  qinq(extend) on \n");
307 		else
308 			printf("  qinq(extend) off \n");
309 	}
310 }
311 
312 static int
313 port_id_is_invalid(portid_t port_id)
314 {
315 	if (port_id < nb_ports)
316 		return 0;
317 	printf("Invalid port %d (must be < nb_ports=%d)\n", port_id, nb_ports);
318 	return 1;
319 }
320 
321 static int
322 vlan_id_is_invalid(uint16_t vlan_id)
323 {
324 	if (vlan_id < 4096)
325 		return 0;
326 	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
327 	return 1;
328 }
329 
330 static int
331 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
332 {
333 	uint64_t pci_len;
334 
335 	if (reg_off & 0x3) {
336 		printf("Port register offset 0x%X not aligned on a 4-byte "
337 		       "boundary\n",
338 		       (unsigned)reg_off);
339 		return 1;
340 	}
341 	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
342 	if (reg_off >= pci_len) {
343 		printf("Port %d: register offset %u (0x%X) out of port PCI "
344 		       "resource (length=%"PRIu64")\n",
345 		       port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
346 		return 1;
347 	}
348 	return 0;
349 }
350 
351 static int
352 reg_bit_pos_is_invalid(uint8_t bit_pos)
353 {
354 	if (bit_pos <= 31)
355 		return 0;
356 	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
357 	return 1;
358 }
359 
360 #define display_port_and_reg_off(port_id, reg_off) \
361 	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
362 
363 static inline void
364 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
365 {
366 	display_port_and_reg_off(port_id, (unsigned)reg_off);
367 	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
368 }
369 
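/*
 * Editor's illustration for the register display helpers below: for a
 * register value of 0xABCD1234, displaying bits [4, 7] shifts the value
 * right by 4 and masks it with 0xF, yielding 0x3.
 */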
370 void
371 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
372 {
373 	uint32_t reg_v;
374 
375 
376 	if (port_id_is_invalid(port_id))
377 		return;
378 	if (port_reg_off_is_invalid(port_id, reg_off))
379 		return;
380 	if (reg_bit_pos_is_invalid(bit_x))
381 		return;
382 	reg_v = port_id_pci_reg_read(port_id, reg_off);
383 	display_port_and_reg_off(port_id, (unsigned)reg_off);
384 	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
385 }
386 
387 void
388 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
389 			   uint8_t bit1_pos, uint8_t bit2_pos)
390 {
391 	uint32_t reg_v;
392 	uint8_t  l_bit;
393 	uint8_t  h_bit;
394 
395 	if (port_id_is_invalid(port_id))
396 		return;
397 	if (port_reg_off_is_invalid(port_id, reg_off))
398 		return;
399 	if (reg_bit_pos_is_invalid(bit1_pos))
400 		return;
401 	if (reg_bit_pos_is_invalid(bit2_pos))
402 		return;
403 	if (bit1_pos > bit2_pos)
404 		l_bit = bit2_pos, h_bit = bit1_pos;
405 	else
406 		l_bit = bit1_pos, h_bit = bit2_pos;
407 
408 	reg_v = port_id_pci_reg_read(port_id, reg_off);
409 	reg_v >>= l_bit;
410 	if (h_bit < 31)
411 		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
412 	display_port_and_reg_off(port_id, (unsigned)reg_off);
413 	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
414 	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
415 }
416 
417 void
418 port_reg_display(portid_t port_id, uint32_t reg_off)
419 {
420 	uint32_t reg_v;
421 
422 	if (port_id_is_invalid(port_id))
423 		return;
424 	if (port_reg_off_is_invalid(port_id, reg_off))
425 		return;
426 	reg_v = port_id_pci_reg_read(port_id, reg_off);
427 	display_port_reg_value(port_id, reg_off, reg_v);
428 }
429 
430 void
431 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
432 		 uint8_t bit_v)
433 {
434 	uint32_t reg_v;
435 
436 	if (port_id_is_invalid(port_id))
437 		return;
438 	if (port_reg_off_is_invalid(port_id, reg_off))
439 		return;
440 	if (reg_bit_pos_is_invalid(bit_pos))
441 		return;
442 	if (bit_v > 1) {
443 		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
444 		return;
445 	}
446 	reg_v = port_id_pci_reg_read(port_id, reg_off);
447 	if (bit_v == 0)
448 		reg_v &= ~(1 << bit_pos);
449 	else
450 		reg_v |= (1 << bit_pos);
451 	port_id_pci_reg_write(port_id, reg_off, reg_v);
452 	display_port_reg_value(port_id, reg_off, reg_v);
453 }
454 
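/*
 * Editor's illustration: setting bits [8, 15] of a register to 0xAB computes
 * max_v = 0xFF, clears the old field with reg_v &= ~(0xFF << 8), then ORs in
 * (0xAB << 8), leaving all other bits of the register untouched.
 */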
455 void
456 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
457 		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
458 {
459 	uint32_t max_v;
460 	uint32_t reg_v;
461 	uint8_t  l_bit;
462 	uint8_t  h_bit;
463 
464 	if (port_id_is_invalid(port_id))
465 		return;
466 	if (port_reg_off_is_invalid(port_id, reg_off))
467 		return;
468 	if (reg_bit_pos_is_invalid(bit1_pos))
469 		return;
470 	if (reg_bit_pos_is_invalid(bit2_pos))
471 		return;
472 	if (bit1_pos > bit2_pos)
473 		l_bit = bit2_pos, h_bit = bit1_pos;
474 	else
475 		l_bit = bit1_pos, h_bit = bit2_pos;
476 
477 	if ((h_bit - l_bit) < 31)
478 		max_v = (1 << (h_bit - l_bit + 1)) - 1;
479 	else
480 		max_v = 0xFFFFFFFF;
481 
482 	if (value > max_v) {
483 		printf("Invalid value %u (0x%x) must be <= %u (0x%x)\n",
484 				(unsigned)value, (unsigned)value,
485 				(unsigned)max_v, (unsigned)max_v);
486 		return;
487 	}
488 	reg_v = port_id_pci_reg_read(port_id, reg_off);
489 	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
490 	reg_v |= (value << l_bit); /* Set changed bits */
491 	port_id_pci_reg_write(port_id, reg_off, reg_v);
492 	display_port_reg_value(port_id, reg_off, reg_v);
493 }
494 
495 void
496 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
497 {
498 	if (port_id_is_invalid(port_id))
499 		return;
500 	if (port_reg_off_is_invalid(port_id, reg_off))
501 		return;
502 	port_id_pci_reg_write(port_id, reg_off, reg_v);
503 	display_port_reg_value(port_id, reg_off, reg_v);
504 }
505 
506 /*
507  * RX/TX ring descriptors display functions.
508  */
509 static int
510 rx_queue_id_is_invalid(queueid_t rxq_id)
511 {
512 	if (rxq_id < nb_rxq)
513 		return 0;
514 	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
515 	return 1;
516 }
517 
518 static int
519 tx_queue_id_is_invalid(queueid_t txq_id)
520 {
521 	if (txq_id < nb_txq)
522 		return 0;
523 	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
524 	return 1;
525 }
526 
527 static int
528 rx_desc_id_is_invalid(uint16_t rxdesc_id)
529 {
530 	if (rxdesc_id < nb_rxd)
531 		return 0;
532 	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
533 	       rxdesc_id, nb_rxd);
534 	return 1;
535 }
536 
537 static int
538 tx_desc_id_is_invalid(uint16_t txdesc_id)
539 {
540 	if (txdesc_id < nb_txd)
541 		return 0;
542 	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
543 	       txdesc_id, nb_txd);
544 	return 1;
545 }
546 
547 static const struct rte_memzone *
548 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
549 {
550 	char mz_name[RTE_MEMZONE_NAMESIZE];
551 	const struct rte_memzone *mz;
552 
553 	rte_snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
554 		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
555 	mz = rte_memzone_lookup(mz_name);
556 	if (mz == NULL)
557 		printf("%s ring memory zone of (port %d, queue %d) not "
558 		       "found (zone name = %s)\n",
559 		       ring_name, port_id, q_id, mz_name);
560 	return (mz);
561 }
562 
563 union igb_ring_dword {
564 	uint64_t dword;
565 	struct {
566 		uint32_t hi;
567 		uint32_t lo;
568 	} words;
569 };
570 
571 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
572 struct igb_ring_desc_32B {
573 	union igb_ring_dword lo_dword;
574 	union igb_ring_dword hi_dword;
575 	union igb_ring_dword resv1;
576 	union igb_ring_dword resv2;
577 };
578 #endif
579 
580 struct igb_ring_desc {
581 	union igb_ring_dword lo_dword;
582 	union igb_ring_dword hi_dword;
583 };
584 
585 static void
586 ring_rx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
587 {
588 #ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
589 	struct igb_ring_desc *ring;
590 	struct igb_ring_desc rd;
591 
592 	ring = (struct igb_ring_desc *) ring_mz->addr;
593 #else
594 	struct igb_ring_desc_32B *ring;
595 	struct igb_ring_desc_32B rd;
596 
597 	ring = (struct igb_ring_desc_32B *) ring_mz->addr;
598 #endif
599 	rd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
600 	rd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
601 	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
602 		(unsigned)rd.lo_dword.words.lo, (unsigned)rd.lo_dword.words.hi,
603 		(unsigned)rd.hi_dword.words.lo, (unsigned)rd.hi_dword.words.hi);
604 }
605 
606 static void
607 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
608 {
609 	struct igb_ring_desc *ring;
610 	struct igb_ring_desc rd;
611 
612 	ring = (struct igb_ring_desc *) ring_mz->addr;
613 	rd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
614 	rd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
615 	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
616 		(unsigned)rd.lo_dword.words.lo, (unsigned)rd.lo_dword.words.hi,
617 		(unsigned)rd.hi_dword.words.lo, (unsigned)rd.hi_dword.words.hi);
618 }
619 
620 void
621 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
622 {
623 	const struct rte_memzone *rx_mz;
624 
625 	if (port_id_is_invalid(port_id))
626 		return;
627 	if (rx_queue_id_is_invalid(rxq_id))
628 		return;
629 	if (rx_desc_id_is_invalid(rxd_id))
630 		return;
631 	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
632 	if (rx_mz == NULL)
633 		return;
634 	ring_rx_descriptor_display(rx_mz, rxd_id);
635 }
636 
637 void
638 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
639 {
640 	const struct rte_memzone *tx_mz;
641 
642 	if (port_id_is_invalid(port_id))
643 		return;
644 	if (tx_queue_id_is_invalid(txq_id))
645 		return;
646 	if (tx_desc_id_is_invalid(txd_id))
647 		return;
648 	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
649 	if (tx_mz == NULL)
650 		return;
651 	ring_tx_descriptor_display(tx_mz, txd_id);
652 }
653 
654 void
655 fwd_lcores_config_display(void)
656 {
657 	lcoreid_t lc_id;
658 
659 	printf("List of forwarding lcores:");
660 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
661 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
662 	printf("\n");
663 }
664 void
665 rxtx_config_display(void)
666 {
667 	printf("  %s packet forwarding - CRC stripping %s - "
668 	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
669 	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
670 	       nb_pkt_per_burst);
671 
672 	if (cur_fwd_eng == &tx_only_engine)
673 		printf("  packet len=%u - nb packet segments=%d\n",
674 				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
675 
676 	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
677 	       nb_fwd_lcores, nb_fwd_ports);
678 	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
679 	       nb_rxq, nb_rxd, rx_free_thresh);
680 	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
681 	       rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh);
682 	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
683 	       nb_txq, nb_txd, tx_free_thresh);
684 	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
685 	       tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);
686 	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
687 	       tx_rs_thresh, txq_flags);
688 }
689 
690 void
691 port_rss_reta_info(portid_t port_id, struct rte_eth_rss_reta *reta_conf)
692 {
693 	uint8_t i, j;
694 	int ret;
695 
696 	if (port_id_is_invalid(port_id))
697 		return;
698 
699 	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf);
700 	if (ret != 0) {
701 		printf("Failed to get RSS RETA info, return code = %d\n", ret);
702 		return;
703 	}
704 
705 	if (reta_conf->mask_lo != 0) {
706 		for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES / 2; i++) {
707 			if (reta_conf->mask_lo & (uint64_t)(1ULL << i))
708 				printf("RSS RETA configuration: hash index=%d, "
709 					"queue=%d\n", i, reta_conf->reta[i]);
710 		}
711 	}
712 
713 	if (reta_conf->mask_hi != 0) {
714 		for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES / 2; i++) {
715 			if (reta_conf->mask_hi & (uint64_t)(1ULL << i)) {
716 				j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES / 2);
717 				printf("RSS RETA configuration: hash index=%d, "
718 					"queue=%d\n", j, reta_conf->reta[j]);
719 			}
720 		}
721 	}
722 }
723 
724 /*
725  * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
726  * key of the port.
727  */
728 void
729 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
730 {
731 	struct rte_eth_rss_conf rss_conf;
732 	uint8_t rss_key[10 * 4];
733 	uint16_t rss_hf;
734 	uint8_t i;
735 	int diag;
736 
737 	if (port_id_is_invalid(port_id))
738 		return;
739 	/* Get RSS hash key if asked to display it */
740 	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
741 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
742 	if (diag != 0) {
743 		switch (diag) {
744 		case -ENODEV:
745 			printf("port index %d invalid\n", port_id);
746 			break;
747 		case -ENOTSUP:
748 			printf("operation not supported by device\n");
749 			break;
750 		default:
751 			printf("operation failed - diag=%d\n", diag);
752 			break;
753 		}
754 		return;
755 	}
756 	rss_hf = rss_conf.rss_hf;
757 	if (rss_hf == 0) {
758 		printf("RSS disabled\n");
759 		return;
760 	}
761 	printf("RSS functions:\n ");
762 	if (rss_hf & ETH_RSS_IPV4)
763 		printf("ip4");
764 	if (rss_hf & ETH_RSS_IPV4_TCP)
765 		printf(" tcp4");
766 	if (rss_hf & ETH_RSS_IPV4_UDP)
767 		printf(" udp4");
768 	if (rss_hf & ETH_RSS_IPV6)
769 		printf(" ip6");
770 	if (rss_hf & ETH_RSS_IPV6_EX)
771 		printf(" ip6-ex");
772 	if (rss_hf & ETH_RSS_IPV6_TCP)
773 		printf(" tcp6");
774 	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
775 		printf(" tcp6-ex");
776 	if (rss_hf & ETH_RSS_IPV6_UDP)
777 		printf(" udp6");
778 	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
779 		printf(" udp6-ex");
780 	printf("\n");
781 	if (!show_rss_key)
782 		return;
783 	printf("RSS key:\n");
784 	for (i = 0; i < sizeof(rss_key); i++)
785 		printf("%02X", rss_key[i]);
786 	printf("\n");
787 }
788 
789 void
790 port_rss_hash_key_update(portid_t port_id, uint8_t *hash_key)
791 {
792 	struct rte_eth_rss_conf rss_conf;
793 	int diag;
794 
795 	rss_conf.rss_key = NULL;
796 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
797 	if (diag == 0) {
798 		rss_conf.rss_key = hash_key;
799 		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
800 	}
801 	if (diag == 0)
802 		return;
803 
804 	switch (diag) {
805 	case -ENODEV:
806 		printf("port index %d invalid\n", port_id);
807 		break;
808 	case -ENOTSUP:
809 		printf("operation not supported by device\n");
810 		break;
811 	default:
812 		printf("operation failed - diag=%d\n", diag);
813 		break;
814 	}
815 }
816 
817 /*
818  * Setup forwarding configuration for each logical core.
819  */
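/*
 * Worked example (editor's illustration): with nb_fwd_streams = 10 and
 * nb_fwd_lcores = 4, nb_fs_per_lcore = 2 and nb_extra = 2, so lcores 0-1
 * each handle 2 streams (indexes 0-1 and 2-3) and lcores 2-3 each handle
 * 3 streams (indexes 4-6 and 7-9).
 */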
820 static void
821 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
822 {
823 	streamid_t nb_fs_per_lcore;
824 	streamid_t nb_fs;
825 	streamid_t sm_id;
826 	lcoreid_t  nb_extra;
827 	lcoreid_t  nb_fc;
828 	lcoreid_t  nb_lc;
829 	lcoreid_t  lc_id;
830 
831 	nb_fs = cfg->nb_fwd_streams;
832 	nb_fc = cfg->nb_fwd_lcores;
833 	if (nb_fs <= nb_fc) {
834 		nb_fs_per_lcore = 1;
835 		nb_extra = 0;
836 	} else {
837 		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
838 		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
839 	}
840 
841 	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
842 	sm_id = 0;
843 	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
844 		fwd_lcores[lc_id]->stream_idx = sm_id;
845 		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
846 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
847 	}
848 
849 	/*
850 	 * Assign extra remaining streams, if any.
851 	 */
852 	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
853 	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
854 		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
855 		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
856 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
857 	}
858 }
859 
860 static void
861 simple_fwd_config_setup(void)
862 {
863 	portid_t i;
864 	portid_t j;
865 	portid_t inc = 2;
866 
867 	if (port_topology == PORT_TOPOLOGY_CHAINED ||
868 	    port_topology == PORT_TOPOLOGY_LOOP) {
869 		inc = 1;
870 	} else if (nb_fwd_ports % 2) {
871 		printf("\nWarning! Cannot handle an odd number of ports "
872 		       "with the current port topology. Configuration "
873 		       "must be changed to have an even number of ports, "
874 		       "or relaunch application with "
875 		       "--port-topology=chained\n\n");
876 	}
877 
878 	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
879 	cur_fwd_config.nb_fwd_streams =
880 		(streamid_t) cur_fwd_config.nb_fwd_ports;
881 
882 	/* reinitialize forwarding streams */
883 	init_fwd_streams();
884 
885 	/*
886 	 * In the simple forwarding test, the number of forwarding cores
887 	 * must be lower or equal to the number of forwarding ports.
888 	 */
889 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
890 	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
891 		cur_fwd_config.nb_fwd_lcores =
892 			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
893 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
894 
895 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
896 		if (port_topology != PORT_TOPOLOGY_LOOP)
897 			j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
898 		else
899 			j = i;
900 		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
901 		fwd_streams[i]->rx_queue  = 0;
902 		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
903 		fwd_streams[i]->tx_queue  = 0;
904 		fwd_streams[i]->peer_addr = j;
905 
906 		if (port_topology == PORT_TOPOLOGY_PAIRED) {
907 			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
908 			fwd_streams[j]->rx_queue  = 0;
909 			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
910 			fwd_streams[j]->tx_queue  = 0;
911 			fwd_streams[j]->peer_addr = i;
912 		}
913 	}
914 }
915 
916 /**
917  * For the RSS forwarding test, each core is assigned on every port a transmit
918  * queue whose index is the index of the core itself. This approach limits the
919  * maximum number of processing cores of the RSS test to the maximum number of
920  * TX queues supported by the devices.
921  *
922  * Each core is assigned a single stream, each stream being composed of
923  * an RX queue to poll on an RX port for input messages, associated with
924  * a TX queue of a TX port to which forwarded packets are sent.
925  * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
926  * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
927  * following rules:
928  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
929  *    - TxQl = RxQj
930  */
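/*
 * Editor's illustration of the rules above, assuming two forwarding ports
 * and nb_q = 4: the stream polling RxP0/RxQ2 transmits on TxP1/TxQ2, and
 * the stream polling RxP1/RxQ3 transmits on TxP0/TxQ3.
 */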
931 static void
932 rss_fwd_config_setup(void)
933 {
934 	portid_t   rxp;
935 	portid_t   txp;
936 	queueid_t  rxq;
937 	queueid_t  nb_q;
938 	lcoreid_t  lc_id;
939 
940 	nb_q = nb_rxq;
941 	if (nb_q > nb_txq)
942 		nb_q = nb_txq;
943 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
944 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
945 	cur_fwd_config.nb_fwd_streams =
946 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
947 	if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
948 		cur_fwd_config.nb_fwd_streams =
949 			(streamid_t)cur_fwd_config.nb_fwd_lcores;
950 	else
951 		cur_fwd_config.nb_fwd_lcores =
952 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
953 
954 	/* reinitialize forwarding streams */
955 	init_fwd_streams();
956 
957 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
958 	rxp = 0; rxq = 0;
959 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
960 		struct fwd_stream *fs;
961 
962 		fs = fwd_streams[lc_id];
963 
964 		if ((rxp & 0x1) == 0)
965 			txp = (portid_t) (rxp + 1);
966 		else
967 			txp = (portid_t) (rxp - 1);
968 		/*
969 		 * if we are in loopback, simply send stuff out through the
970 		 * ingress port
971 		 */
972 		if (port_topology == PORT_TOPOLOGY_LOOP)
973 			txp = rxp;
974 
975 		fs->rx_port = fwd_ports_ids[rxp];
976 		fs->rx_queue = rxq;
977 		fs->tx_port = fwd_ports_ids[txp];
978 		fs->tx_queue = rxq;
979 		fs->peer_addr = fs->tx_port;
980 		rxq = (queueid_t) (rxq + 1);
981 		if (rxq < nb_q)
982 			continue;
983 		/*
984 		 * rxq == nb_q
985 		 * Restart from RX queue 0 on next RX port
986 		 */
987 		rxq = 0;
988 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
989 			rxp = (portid_t)
990 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
991 		else
992 			rxp = (portid_t) (rxp + 1);
993 	}
994 }
995 
996 /*
997  * In DCB mode, the mapping of the 128 receive queues to the 128 transmit queues.
998  */
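/*
 * Worked examples (editor's illustration) of the 4-TC mapping implemented
 * below: rxq 10 -> txq 10, rxq 40 -> txq 40 + 32 = 72, and
 * rxq 100 -> txq 100/2 + 64 = 114. With the 8-TC mapping, e.g.
 * rxq 20 -> txq 20 + 16 = 36.
 */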
999 static void
1000 dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq)
1001 {
1002 	if (dcb_q_mapping == DCB_4_TCS_Q_MAPPING) {
1003 
1004 		if (rxq < 32)
1005 			/* tc0: 0-31 */
1006 			*txq = rxq;
1007 		else if (rxq < 64) {
1008 			/* tc1: 64-95 */
1009 			*txq =  (uint16_t)(rxq + 32);
1010 		}
1011 		else {
1012 			/* tc2: 96-111;tc3:112-127 */
1013 			*txq =  (uint16_t)(rxq/2 + 64);
1014 		}
1015 	}
1016 	else {
1017 		if (rxq < 16)
1018 			/* tc0 mapping*/
1019 			*txq = rxq;
1020 		else if (rxq < 32) {
1021 			/* tc1 mapping*/
1022 			 *txq = (uint16_t)(rxq + 16);
1023 		}
1024 		else if (rxq < 64) {
1025 			/*tc2,tc3 mapping */
1026 			*txq =  (uint16_t)(rxq + 32);
1027 		}
1028 		else {
1029 			/* tc4,tc5,tc6 and tc7 mapping */
1030 			*txq =  (uint16_t)(rxq/2 + 64);
1031 		}
1032 	}
1033 }
1034 
1035 /**
1036  * For the DCB forwarding test, each core is assigned multiple transmit queues on
1037  * every port.
1038  *
1039  * Each core is assigned multiple streams, each stream being composed of
1040  * an RX queue to poll on an RX port for input messages, associated with
1041  * a TX queue of a TX port to which forwarded packets are sent.
1042  * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
1043  * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
1044  * following rules:
1045  * In VT mode,
1046  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
1047  *    - TxQl = RxQj
1048  * In non-VT mode,
1049  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
1050  *    A mapping of RxQj to TxQl is required; it is implemented in the
1051  *    dcb_rxq_2_txq_mapping() function.
1052  */
1053 static void
1054 dcb_fwd_config_setup(void)
1055 {
1056 	portid_t   rxp;
1057 	portid_t   txp;
1058 	queueid_t  rxq;
1059 	queueid_t  nb_q;
1060 	lcoreid_t  lc_id;
1061 	uint16_t sm_id;
1062 
1063 	nb_q = nb_rxq;
1064 
1065 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1066 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1067 	cur_fwd_config.nb_fwd_streams =
1068 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
1069 
1070 	/* reinitialize forwarding streams */
1071 	init_fwd_streams();
1072 
1073 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
1074 	rxp = 0; rxq = 0;
1075 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
1076 		/* a fwd core can run multi-streams */
1077 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++)
1078 		{
1079 			struct fwd_stream *fs;
1080 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
1081 			if ((rxp & 0x1) == 0)
1082 				txp = (portid_t) (rxp + 1);
1083 			else
1084 				txp = (portid_t) (rxp - 1);
1085 			fs->rx_port = fwd_ports_ids[rxp];
1086 			fs->rx_queue = rxq;
1087 			fs->tx_port = fwd_ports_ids[txp];
1088 			if (dcb_q_mapping == DCB_VT_Q_MAPPING)
1089 				fs->tx_queue = rxq;
1090 			else
1091 				dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue);
1092 			fs->peer_addr = fs->tx_port;
1093 			rxq = (queueid_t) (rxq + 1);
1094 			if (rxq < nb_q)
1095 				continue;
1096 			rxq = 0;
1097 			if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
1098 				rxp = (portid_t)
1099 					(rxp + ((nb_ports >> 1) / nb_fwd_ports));
1100 			else
1101 				rxp = (portid_t) (rxp + 1);
1102 		}
1103 	}
1104 }
1105 
1106 static void
1107 icmp_echo_config_setup(void)
1108 {
1109 	portid_t  rxp;
1110 	queueid_t rxq;
1111 	lcoreid_t lc_id;
1112 	uint16_t  sm_id;
1113 
1114 	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
1115 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
1116 			(nb_txq * nb_fwd_ports);
1117 	else
1118 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1119 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1120 	cur_fwd_config.nb_fwd_streams =
1121 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
1122 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
1123 		cur_fwd_config.nb_fwd_lcores =
1124 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
1125 	if (verbose_level > 0) {
1126 		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
1127 		       __FUNCTION__,
1128 		       cur_fwd_config.nb_fwd_lcores,
1129 		       cur_fwd_config.nb_fwd_ports,
1130 		       cur_fwd_config.nb_fwd_streams);
1131 	}
1132 
1133 	/* reinitialize forwarding streams */
1134 	init_fwd_streams();
1135 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
1136 	rxp = 0; rxq = 0;
1137 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
1138 		if (verbose_level > 0)
1139 			printf("  core=%d: \n", lc_id);
1140 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
1141 			struct fwd_stream *fs;
1142 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
1143 			fs->rx_port = fwd_ports_ids[rxp];
1144 			fs->rx_queue = rxq;
1145 			fs->tx_port = fs->rx_port;
1146 			fs->tx_queue = lc_id;
1147 			fs->peer_addr = fs->tx_port;
1148 			if (verbose_level > 0)
1149 				printf("  stream=%d port=%d rxq=%d txq=%d\n",
1150 				       sm_id, fs->rx_port, fs->rx_queue,
1151 				       fs->tx_queue);
1152 			rxq = (queueid_t) (rxq + 1);
1153 			if (rxq == nb_rxq) {
1154 				rxq = 0;
1155 				rxp = (portid_t) (rxp + 1);
1156 			}
1157 		}
1158 	}
1159 }
1160 
1161 void
1162 fwd_config_setup(void)
1163 {
1164 	cur_fwd_config.fwd_eng = cur_fwd_eng;
1165 	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
1166 		icmp_echo_config_setup();
1167 		return;
1168 	}
1169 	if ((nb_rxq > 1) && (nb_txq > 1)){
1170 		if (dcb_config)
1171 			dcb_fwd_config_setup();
1172 		else
1173 			rss_fwd_config_setup();
1174 	}
1175 	else
1176 		simple_fwd_config_setup();
1177 }
1178 
1179 static void
1180 pkt_fwd_config_display(struct fwd_config *cfg)
1181 {
1182 	struct fwd_stream *fs;
1183 	lcoreid_t  lc_id;
1184 	streamid_t sm_id;
1185 
1186 	printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
1187 		"NUMA support %s, MP over anonymous pages %s\n",
1188 		cfg->fwd_eng->fwd_mode_name,
1189 		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
1190 		numa_support == 1 ? "enabled" : "disabled",
1191 		mp_anon != 0 ? "enabled" : "disabled");
1192 
1193 	if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0)
1194 		printf("TX retry num: %u, delay between TX retries: %uus\n",
1195 			burst_tx_retry_num, burst_tx_delay_time);
1196 	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
1197 		printf("Logical Core %u (socket %u) forwards packets on "
1198 		       "%d streams:",
1199 		       fwd_lcores_cpuids[lc_id],
1200 		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
1201 		       fwd_lcores[lc_id]->stream_nb);
1202 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
1203 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
1204 			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
1205 			       "P=%d/Q=%d (socket %u) ",
1206 			       fs->rx_port, fs->rx_queue,
1207 			       ports[fs->rx_port].socket_id,
1208 			       fs->tx_port, fs->tx_queue,
1209 			       ports[fs->tx_port].socket_id);
1210 			print_ethaddr("peer=",
1211 				      &peer_eth_addrs[fs->peer_addr]);
1212 		}
1213 		printf("\n");
1214 	}
1215 	printf("\n");
1216 }
1217 
1218 
1219 void
1220 fwd_config_display(void)
1221 {
1222 	if ((dcb_config) && (nb_fwd_lcores == 1)) {
1223 		printf("In DCB mode, the number of forwarding cores should be larger than 1\n");
1224 		return;
1225 	}
1226 	fwd_config_setup();
1227 	pkt_fwd_config_display(&cur_fwd_config);
1228 }
1229 
1230 int
1231 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
1232 {
1233 	unsigned int i;
1234 	unsigned int lcore_cpuid;
1235 	int record_now;
1236 
1237 	record_now = 0;
1238  again:
1239 	for (i = 0; i < nb_lc; i++) {
1240 		lcore_cpuid = lcorelist[i];
1241 		if (! rte_lcore_is_enabled(lcore_cpuid)) {
1242 			printf("lcore %u not enabled\n", lcore_cpuid);
1243 			return -1;
1244 		}
1245 		if (lcore_cpuid == rte_get_master_lcore()) {
1246 			printf("lcore %u cannot be used for packet forwarding: "
1247 			       "it is the master lcore, reserved for command "
1248 			       "line parsing only\n",
1249 			       lcore_cpuid);
1250 			return -1;
1251 		}
1252 		if (record_now)
1253 			fwd_lcores_cpuids[i] = lcore_cpuid;
1254 	}
1255 	if (record_now == 0) {
1256 		record_now = 1;
1257 		goto again;
1258 	}
1259 	nb_cfg_lcores = (lcoreid_t) nb_lc;
1260 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
1261 		printf("previous number of forwarding cores %u - changed to "
1262 		       "number of configured cores %u\n",
1263 		       (unsigned int) nb_fwd_lcores, nb_lc);
1264 		nb_fwd_lcores = (lcoreid_t) nb_lc;
1265 	}
1266 
1267 	return 0;
1268 }
1269 
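/*
 * Example (editor's illustration): a core mask of 0x6 selects lcores 1 and 2,
 * which are passed to set_fwd_lcores_list() as the list {1, 2}.
 */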
1270 int
1271 set_fwd_lcores_mask(uint64_t lcoremask)
1272 {
1273 	unsigned int lcorelist[64];
1274 	unsigned int nb_lc;
1275 	unsigned int i;
1276 
1277 	if (lcoremask == 0) {
1278 		printf("Invalid NULL mask of cores\n");
1279 		return -1;
1280 	}
1281 	nb_lc = 0;
1282 	for (i = 0; i < 64; i++) {
1283 		if (! ((uint64_t)(1ULL << i) & lcoremask))
1284 			continue;
1285 		lcorelist[nb_lc++] = i;
1286 	}
1287 	return set_fwd_lcores_list(lcorelist, nb_lc);
1288 }
1289 
1290 void
1291 set_fwd_lcores_number(uint16_t nb_lc)
1292 {
1293 	if (nb_lc > nb_cfg_lcores) {
1294 		printf("nb fwd cores %u > %u (max. number of configured "
1295 		       "lcores) - ignored\n",
1296 		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
1297 		return;
1298 	}
1299 	nb_fwd_lcores = (lcoreid_t) nb_lc;
1300 	printf("Number of forwarding cores set to %u\n",
1301 	       (unsigned int) nb_fwd_lcores);
1302 }
1303 
1304 void
1305 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
1306 {
1307 	unsigned int i;
1308 	portid_t port_id;
1309 	int record_now;
1310 
1311 	record_now = 0;
1312  again:
1313 	for (i = 0; i < nb_pt; i++) {
1314 		port_id = (portid_t) portlist[i];
1315 		if (port_id >= nb_ports) {
1316 			printf("Invalid port id %u >= %u\n",
1317 			       (unsigned int) port_id,
1318 			       (unsigned int) nb_ports);
1319 			return;
1320 		}
1321 		if (record_now)
1322 			fwd_ports_ids[i] = port_id;
1323 	}
1324 	if (record_now == 0) {
1325 		record_now = 1;
1326 		goto again;
1327 	}
1328 	nb_cfg_ports = (portid_t) nb_pt;
1329 	if (nb_fwd_ports != (portid_t) nb_pt) {
1330 		printf("previous number of forwarding ports %u - changed to "
1331 		       "number of configured ports %u\n",
1332 		       (unsigned int) nb_fwd_ports, nb_pt);
1333 		nb_fwd_ports = (portid_t) nb_pt;
1334 	}
1335 }
1336 
1337 void
1338 set_fwd_ports_mask(uint64_t portmask)
1339 {
1340 	unsigned int portlist[64];
1341 	unsigned int nb_pt;
1342 	unsigned int i;
1343 
1344 	if (portmask == 0) {
1345 		printf("Invalid NULL mask of ports\n");
1346 		return;
1347 	}
1348 	nb_pt = 0;
1349 	for (i = 0; i < 64; i++) {
1350 		if (! ((uint64_t)(1ULL << i) & portmask))
1351 			continue;
1352 		portlist[nb_pt++] = i;
1353 	}
1354 	set_fwd_ports_list(portlist, nb_pt);
1355 }
1356 
1357 void
1358 set_fwd_ports_number(uint16_t nb_pt)
1359 {
1360 	if (nb_pt > nb_cfg_ports) {
1361 		printf("nb fwd ports %u > %u (number of configured "
1362 		       "ports) - ignored\n",
1363 		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
1364 		return;
1365 	}
1366 	nb_fwd_ports = (portid_t) nb_pt;
1367 	printf("Number of forwarding ports set to %u\n",
1368 	       (unsigned int) nb_fwd_ports);
1369 }
1370 
1371 void
1372 set_nb_pkt_per_burst(uint16_t nb)
1373 {
1374 	if (nb > MAX_PKT_BURST) {
1375 		printf("nb pkt per burst: %u > %u (maximum number of packets per burst)"
1376 		       " - ignored\n",
1377 		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
1378 		return;
1379 	}
1380 	nb_pkt_per_burst = nb;
1381 	printf("Number of packets per burst set to %u\n",
1382 	       (unsigned int) nb_pkt_per_burst);
1383 }
1384 
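/*
 * Example (editor's illustration): segment lengths {64, 64, 32} give a total
 * packet length of 160 bytes, which passes the minimum-size check of
 * sizeof(struct ether_hdr) + 20 + 8 = 42 bytes, assuming each segment also
 * fits within mbuf_data_size.
 */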
1385 void
1386 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
1387 {
1388 	uint16_t tx_pkt_len;
1389 	unsigned i;
1390 
1391 	if (nb_segs >= (unsigned) nb_txd) {
1392 		printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n",
1393 		       nb_segs, (unsigned int) nb_txd);
1394 		return;
1395 	}
1396 
1397 	/*
1398 	 * Check that each segment length is not greater than
1399 	 * the mbuf data size.
1400 	 * Check also that the total packet length is greater than or equal to the
1401 	 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
1402 	 */
1403 	tx_pkt_len = 0;
1404 	for (i = 0; i < nb_segs; i++) {
1405 		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
1406 			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
1407 			       i, seg_lengths[i], (unsigned) mbuf_data_size);
1408 			return;
1409 		}
1410 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
1411 	}
1412 	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
1413 		printf("total packet length=%u < %d - give up\n",
1414 				(unsigned) tx_pkt_len,
1415 				(int)(sizeof(struct ether_hdr) + 20 + 8));
1416 		return;
1417 	}
1418 
1419 	for (i = 0; i < nb_segs; i++)
1420 		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
1421 
1422 	tx_pkt_length  = tx_pkt_len;
1423 	tx_pkt_nb_segs = (uint8_t) nb_segs;
1424 }
1425 
1426 char*
1427 list_pkt_forwarding_modes(void)
1428 {
1429 	static char fwd_modes[128] = "";
1430 	const char *separator = "|";
1431 	struct fwd_engine *fwd_eng;
1432 	unsigned i = 0;
1433 
1434 	if (strlen (fwd_modes) == 0) {
1435 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
1436 			strcat(fwd_modes, fwd_eng->fwd_mode_name);
1437 			strcat(fwd_modes, separator);
1438 		}
1439 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
1440 	}
1441 
1442 	return fwd_modes;
1443 }
1444 
1445 void
1446 set_pkt_forwarding_mode(const char *fwd_mode_name)
1447 {
1448 	struct fwd_engine *fwd_eng;
1449 	unsigned i;
1450 
1451 	i = 0;
1452 	while ((fwd_eng = fwd_engines[i]) != NULL) {
1453 		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
1454 			printf("Set %s packet forwarding mode\n",
1455 			       fwd_mode_name);
1456 			cur_fwd_eng = fwd_eng;
1457 			return;
1458 		}
1459 		i++;
1460 	}
1461 	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
1462 }
1463 
1464 void
1465 set_verbose_level(uint16_t vb_level)
1466 {
1467 	printf("Change verbose level from %u to %u\n",
1468 	       (unsigned int) verbose_level, (unsigned int) vb_level);
1469 	verbose_level = vb_level;
1470 }
1471 
1472 void
1473 vlan_extend_set(portid_t port_id, int on)
1474 {
1475 	int diag;
1476 	int vlan_offload;
1477 
1478 	if (port_id_is_invalid(port_id))
1479 		return;
1480 
1481 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1482 
1483 	if (on)
1484 		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
1485 	else
1486 		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
1487 
1488 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1489 	if (diag < 0)
1490 		printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
1491 	       "diag=%d\n", port_id, on, diag);
1492 }
1493 
1494 void
1495 rx_vlan_strip_set(portid_t port_id, int on)
1496 {
1497 	int diag;
1498 	int vlan_offload;
1499 
1500 	if (port_id_is_invalid(port_id))
1501 		return;
1502 
1503 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1504 
1505 	if (on)
1506 		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
1507 	else
1508 		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
1509 
1510 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1511 	if (diag < 0)
1512 		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
1513 	       "diag=%d\n", port_id, on, diag);
1514 }
1515 
1516 void
1517 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
1518 {
1519 	int diag;
1520 
1521 	if (port_id_is_invalid(port_id))
1522 		return;
1523 
1524 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
1525 	if (diag < 0)
1526 		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
1527 	       "diag=%d\n", port_id, queue_id, on, diag);
1528 }
1529 
1530 void
1531 rx_vlan_filter_set(portid_t port_id, int on)
1532 {
1533 	int diag;
1534 	int vlan_offload;
1535 
1536 	if (port_id_is_invalid(port_id))
1537 		return;
1538 
1539 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1540 
1541 	if (on)
1542 		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
1543 	else
1544 		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
1545 
1546 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1547 	if (diag < 0)
1548 		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
1549 	       "diag=%d\n", port_id, on, diag);
1550 }
1551 
1552 void
1553 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
1554 {
1555 	int diag;
1556 
1557 	if (port_id_is_invalid(port_id))
1558 		return;
1559 	if (vlan_id_is_invalid(vlan_id))
1560 		return;
1561 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1562 	if (diag == 0)
1563 		return;
1564 	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
1565 	       "diag=%d\n",
1566 	       port_id, vlan_id, on, diag);
1567 }
1568 
1569 void
1570 rx_vlan_all_filter_set(portid_t port_id, int on)
1571 {
1572 	uint16_t vlan_id;
1573 
1574 	if (port_id_is_invalid(port_id))
1575 		return;
1576 	for (vlan_id = 0; vlan_id < 4096; vlan_id++)
1577 		rx_vft_set(port_id, vlan_id, on);
1578 }
1579 
1580 void
1581 vlan_tpid_set(portid_t port_id, uint16_t tp_id)
1582 {
1583 	int diag;
1584 	if (port_id_is_invalid(port_id))
1585 		return;
1586 
1587 	diag = rte_eth_dev_set_vlan_ether_type(port_id, tp_id);
1588 	if (diag == 0)
1589 		return;
1590 
1591 	printf("tx_vlan_tpid_set(port_id=%d, tpid=%d) failed "
1592 	       "diag=%d\n",
1593 	       port_id, tp_id, diag);
1594 }
1595 
1596 void
1597 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
1598 {
1599 	if (port_id_is_invalid(port_id))
1600 		return;
1601 	if (vlan_id_is_invalid(vlan_id))
1602 		return;
1603 	ports[port_id].tx_ol_flags |= PKT_TX_VLAN_PKT;
1604 	ports[port_id].tx_vlan_id = vlan_id;
1605 }
1606 
1607 void
1608 tx_vlan_reset(portid_t port_id)
1609 {
1610 	if (port_id_is_invalid(port_id))
1611 		return;
1612 	ports[port_id].tx_ol_flags &= ~PKT_TX_VLAN_PKT;
1613 }
1614 
1615 void
1616 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
1617 {
1618 	if (port_id_is_invalid(port_id))
1619 		return;
1620 
1621 	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
1622 }
1623 
1624 void
1625 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
1626 {
1627 	uint16_t i;
1628 	uint8_t existing_mapping_found = 0;
1629 
1630 	if (port_id_is_invalid(port_id))
1631 		return;
1632 
1633 	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
1634 		return;
1635 
1636 	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1637 		printf("map_value not in required range 0..%d\n",
1638 				RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1639 		return;
1640 	}
1641 
1642 	if (!is_rx) { /*then tx*/
1643 		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1644 			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1645 			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
1646 				tx_queue_stats_mappings[i].stats_counter_id = map_value;
1647 				existing_mapping_found = 1;
1648 				break;
1649 			}
1650 		}
1651 		if (!existing_mapping_found) { /* A new additional mapping... */
1652 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
1653 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
1654 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
1655 			nb_tx_queue_stats_mappings++;
1656 		}
1657 	}
1658 	else { /*rx*/
1659 		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1660 			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1661 			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
1662 				rx_queue_stats_mappings[i].stats_counter_id = map_value;
1663 				existing_mapping_found = 1;
1664 				break;
1665 			}
1666 		}
1667 		if (!existing_mapping_found) { /* A new additional mapping... */
1668 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
1669 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
1670 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
1671 			nb_rx_queue_stats_mappings++;
1672 		}
1673 	}
1674 }
1675 
1676 void
1677 tx_cksum_set(portid_t port_id, uint8_t cksum_mask)
1678 {
1679 	uint16_t tx_ol_flags;
1680 	if (port_id_is_invalid(port_id))
1681 		return;
1682 	/* Clear last 4 bits and then set L3/4 checksum mask again */
1683 	tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0);
1684 	ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | tx_ol_flags);
1685 }
1686 
1687 void
1688 fdir_add_signature_filter(portid_t port_id, uint8_t queue_id,
1689 			  struct rte_fdir_filter *fdir_filter)
1690 {
1691 	int diag;
1692 
1693 	if (port_id_is_invalid(port_id))
1694 		return;
1695 
1696 	diag = rte_eth_dev_fdir_add_signature_filter(port_id, fdir_filter,
1697 						     queue_id);
1698 	if (diag == 0)
1699 		return;
1700 
1701 	printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed "
1702 	       "diag=%d\n", port_id, diag);
1703 }
1704 
1705 void
1706 fdir_update_signature_filter(portid_t port_id, uint8_t queue_id,
1707 			     struct rte_fdir_filter *fdir_filter)
1708 {
1709 	int diag;
1710 
1711 	if (port_id_is_invalid(port_id))
1712 		return;
1713 
1714 	diag = rte_eth_dev_fdir_update_signature_filter(port_id, fdir_filter,
1715 							queue_id);
1716 	if (diag == 0)
1717 		return;
1718 
1719 	printf("rte_eth_dev_fdir_update_signature_filter for port_id=%d failed "
1720 	       "diag=%d\n", port_id, diag);
1721 }
1722 
1723 void
1724 fdir_remove_signature_filter(portid_t port_id,
1725 			     struct rte_fdir_filter *fdir_filter)
1726 {
1727 	int diag;
1728 
1729 	if (port_id_is_invalid(port_id))
1730 		return;
1731 
1732 	diag = rte_eth_dev_fdir_remove_signature_filter(port_id, fdir_filter);
1733 	if (diag == 0)
1734 		return;
1735 
1736 	printf("rte_eth_dev_fdir_remove_signature_filter for port_id=%d failed "
1737 	       "diag=%d\n", port_id, diag);
1738 
1739 }
1740 
1741 void
1742 fdir_get_infos(portid_t port_id)
1743 {
1744 	struct rte_eth_fdir fdir_infos;
1745 
1746 	static const char *fdir_stats_border = "########################";
1747 
1748 	if (port_id_is_invalid(port_id))
1749 		return;
1750 
1751 	rte_eth_dev_fdir_get_infos(port_id, &fdir_infos);
1752 
1753 	printf("\n  %s FDIR infos for port %-2d     %s\n",
1754 	       fdir_stats_border, port_id, fdir_stats_border);
1755 
1756 	printf("  collision: %-10"PRIu64"  free:     %"PRIu64"\n"
1757 	       "  maxhash:   %-10"PRIu64"  maxlen:   %"PRIu64"\n"
1758 	       "  add:       %-10"PRIu64"  remove:   %"PRIu64"\n"
1759 	       "  f_add:     %-10"PRIu64"  f_remove: %"PRIu64"\n",
1760 	       (uint64_t)(fdir_infos.collision), (uint64_t)(fdir_infos.free),
1761 	       (uint64_t)(fdir_infos.maxhash), (uint64_t)(fdir_infos.maxlen),
1762 	       fdir_infos.add, fdir_infos.remove,
1763 	       fdir_infos.f_add, fdir_infos.f_remove);
1764 	printf("  %s############################%s\n",
1765 	       fdir_stats_border, fdir_stats_border);
1766 }
1767 
1768 void
1769 fdir_add_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id,
1770 			uint8_t drop, struct rte_fdir_filter *fdir_filter)
1771 {
1772 	int diag;
1773 
1774 	if (port_id_is_invalid(port_id))
1775 		return;
1776 
1777 	diag = rte_eth_dev_fdir_add_perfect_filter(port_id, fdir_filter,
1778 						   soft_id, queue_id, drop);
1779 	if (diag == 0)
1780 		return;
1781 
1782 	printf("rte_eth_dev_fdir_add_perfect_filter for port_id=%d failed "
1783 	       "diag=%d\n", port_id, diag);
1784 }
1785 
1786 void
1787 fdir_update_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id,
1788 			   uint8_t drop, struct rte_fdir_filter *fdir_filter)
1789 {
1790 	int diag;
1791 
1792 	if (port_id_is_invalid(port_id))
1793 		return;
1794 
1795 	diag = rte_eth_dev_fdir_update_perfect_filter(port_id, fdir_filter,
1796 						      soft_id, queue_id, drop);
1797 	if (diag == 0)
1798 		return;
1799 
1800 	printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed "
1801 	       "diag=%d\n", port_id, diag);
1802 }
1803 
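/*
 * Remove the flow director perfect filter identified by soft_id from a
 * port; failures are reported on stdout.
 */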
1804 void
1805 fdir_remove_perfect_filter(portid_t port_id, uint16_t soft_id,
1806 			   struct rte_fdir_filter *fdir_filter)
1807 {
1808 	int diag;
1809 
1810 	if (port_id_is_invalid(port_id))
1811 		return;
1812 
1813 	diag = rte_eth_dev_fdir_remove_perfect_filter(port_id, fdir_filter,
1814 						      soft_id);
1815 	if (diag == 0)
1816 		return;
1817 
1818 	printf("rte_eth_dev_fdir_remove_perfect_filter for port_id=%d failed "
1819 	       "diag=%d\n", port_id, diag);
1820 }
1821 
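/* Configure the global matching masks of the flow director of a port. */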
1822 void
1823 fdir_set_masks(portid_t port_id, struct rte_fdir_masks *fdir_masks)
1824 {
1825 	int diag;
1826 
1827 	if (port_id_is_invalid(port_id))
1828 		return;
1829 
1830 	diag = rte_eth_dev_fdir_set_masks(port_id, fdir_masks);
1831 	if (diag == 0)
1832 		return;
1833 
1834 	printf("rte_eth_dev_fdir_set_masks for port_id=%d failed "
1835 	       "diag=%d\n", port_id, diag);
1836 }
1837 
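/* Enable or disable RX (is_rx != 0) or TX traffic for a VF on a port. */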
1838 void
1839 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
1840 {
1841 	int diag;
1842 
1843 	if (port_id_is_invalid(port_id))
1844 		return;
1845 	if (is_rx)
1846 		diag = rte_eth_dev_set_vf_rx(port_id, vf, on);
1847 	else
1848 		diag = rte_eth_dev_set_vf_tx(port_id, vf, on);
1849 	if (diag == 0)
1850 		return;
1851 	if (is_rx)
1852 		printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
1853 		       "diag=%d\n", port_id, diag);
1854 	else
1855 		printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
1856 		       "diag=%d\n", port_id, diag);
1858 }
1859 
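/*
 * Add (on != 0) or remove a RX VLAN filter for the set of VFs selected
 * by vf_mask on a port.
 */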
1860 void
1861 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
1862 {
1863 	int diag;
1864 
1865 	if (port_id_is_invalid(port_id))
1866 		return;
1867 	if (vlan_id_is_invalid(vlan_id))
1868 		return;
1869 	diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
1870 	if (diag == 0)
1871 		return;
1872 	printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
1873 	       "diag=%d\n", port_id, diag);
1874 }
1875 
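/*
 * Set the TX rate limit of a queue on a port. The rate is rejected when
 * it exceeds the current link speed. Returns 0 on success, non-zero
 * otherwise.
 */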
1876 int
1877 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
1878 {
1879 	int diag;
1880 	struct rte_eth_link link;
1881 
1882 	if (port_id_is_invalid(port_id))
1883 		return 1;
1884 	rte_eth_link_get_nowait(port_id, &link);
1885 	if (rate > link.link_speed) {
1886 		printf("Invalid rate value %u: it exceeds the link speed %u\n",
1887 			rate, link.link_speed);
1888 		return 1;
1889 	}
1890 	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
1891 	if (diag == 0)
1892 		return diag;
1893 	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
1894 		port_id, diag);
1895 	return diag;
1896 }
1897 
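/*
 * Set the TX rate limit of the queues selected by q_msk for a VF on a
 * port; an empty queue mask is a no-op. Returns 0 on success, non-zero
 * otherwise.
 */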
1898 int
1899 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
1900 {
1901 	int diag;
1902 	struct rte_eth_link link;
1903 
1904 	if (q_msk == 0)
1905 		return 0;
1906 
1907 	if (port_id_is_invalid(port_id))
1908 		return 1;
1909 	rte_eth_link_get_nowait(port_id, &link);
1910 		printf("Invalid rate value %u: it exceeds the link speed %u\n",
1911 		printf("Invalid rate value:%u bigger than link speed: %u\n",
1912 			rate, link.link_speed);
1913 		return 1;
1914 	}
1915 	diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk);
1916 	if (diag == 0)
1917 		return diag;
1918 	printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n",
1919 		port_id, diag);
1920 	return diag;
1921 }
1922 
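/* Display the ethertype filter at the given index of a port, if enabled. */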
1923 void
1924 get_ethertype_filter(uint8_t port_id, uint16_t index)
1925 {
1926 	struct rte_ethertype_filter filter;
1927 	int ret = 0;
1928 	uint16_t rx_queue;
1929 
1930 	memset(&filter, 0, sizeof(filter));
1931 	ret = rte_eth_dev_get_ethertype_filter(port_id, index,
1932 				&filter, &rx_queue);
1933 	if (ret < 0) {
1934 		if (ret == (-ENOENT))
1935 			printf("filter[%d] is not enabled\n", index);
1936 		else
1937 			printf("get ethertype filter failed (%s)\n", strerror(-ret));
1938 		return;
1939 	} else {
1940 		printf("filter[%d]:\n", index);
1941 		printf("    ethertype:  0x%04x\n",
1942 			rte_le_to_cpu_16(filter.ethertype));
1943 		printf("    priority: %s, %d\n",
1944 			filter.priority_en ? "enable" : "disable",
1945 			filter.priority);
1946 		printf("    queue: %d\n", rx_queue);
1947 	}
1948 }
1949 
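/* Display the TCP SYN filter of a port, if one is configured. */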
1950 void
1951 get_syn_filter(uint8_t port_id)
1952 {
1953 	struct rte_syn_filter filter;
1954 	int ret = 0;
1955 	uint16_t rx_queue;
1956 
1957 	memset(&filter, 0, sizeof(filter));
1958 	ret = rte_eth_dev_get_syn_filter(port_id, &filter, &rx_queue);
1959 
1960 	if (ret < 0) {
1961 		if (ret == (-ENOENT))
1962 			printf("syn filter is not enabled\n");
1963 		else
1964 			printf("get syn filter failed (%s)\n", strerror(-ret));
1965 		return;
1966 	}
1967 	printf("syn filter: priority: %s, queue: %d\n",
1968 		filter.hig_pri ? "high" : "low",
1969 		rx_queue);
1970 }
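
/*
 * Display the 2-tuple filter (destination port and protocol) at the
 * given index of a port, if enabled.
 */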
1971 void
1972 get_2tuple_filter(uint8_t port_id, uint16_t index)
1973 {
1974 	struct rte_2tuple_filter filter;
1975 	int ret = 0;
1976 	uint16_t rx_queue;
1977 
1978 	memset(&filter, 0, sizeof(filter));
1979 	ret = rte_eth_dev_get_2tuple_filter(port_id, index,
1980 				&filter, &rx_queue);
1981 	if (ret < 0) {
1982 		if (ret == (-ENOENT))
1983 			printf("filter[%d] is not enabled\n", index);
1984 		else
1985 			printf("get 2tuple filter failed (%s)\n", strerror(-ret));
1986 		return;
1987 	} else {
1988 		printf("filter[%d]:\n", index);
1989 		printf("    Destination Port:     0x%04x    mask: %d\n",
1990 			rte_be_to_cpu_16(filter.dst_port),
1991 			filter.dst_port_mask ? 0 : 1);
1992 		printf("    protocol:  0x%02x     mask: %d     tcp_flags: 0x%02x\n",
1993 			filter.protocol, filter.protocol_mask ? 0 : 1,
1994 			filter.tcp_flags);
1995 		printf("    priority: %d    queue: %d\n",
1996 			filter.priority, rx_queue);
1997 	}
1998 }
1999 
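/*
 * Display the 5-tuple filter (IP addresses, ports and protocol) at the
 * given index of a port, if enabled.
 */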
2000 void
2001 get_5tuple_filter(uint8_t port_id, uint16_t index)
2002 {
2003 	struct rte_5tuple_filter filter;
2004 	int ret = 0;
2005 	uint16_t rx_queue;
2006 
2007 	memset(&filter, 0, sizeof(filter));
2008 	ret = rte_eth_dev_get_5tuple_filter(port_id, index,
2009 				&filter, &rx_queue);
2010 	if (ret < 0) {
2011 		if (ret == (-ENOENT))
2012 			printf("filter[%d] is not enabled\n", index);
2013 		else
2014 			printf("get 5tuple filter failed (%s)\n", strerror(-ret));
2015 		return;
2016 	} else {
2017 		printf("filter[%d]:\n", index);
2018 		printf("    Destination IP:  0x%08x    mask: %d\n",
2019 			(unsigned)rte_be_to_cpu_32(filter.dst_ip),
2020 			filter.dst_ip_mask ? 0 : 1);
2021 		printf("    Source IP:       0x%08x    mask: %d\n",
2022 			(unsigned)rte_be_to_cpu_32(filter.src_ip),
2023 			filter.src_ip_mask ? 0 : 1);
2024 		printf("    Destination Port:       0x%04x    mask: %d\n",
2025 			rte_be_to_cpu_16(filter.dst_port),
2026 			filter.dst_port_mask ? 0 : 1);
2027 		printf("    Source Port:       0x%04x    mask: %d\n",
2028 			rte_be_to_cpu_16(filter.src_port),
2029 			filter.src_port_mask ? 0 : 1);
2030 		printf("    protocol:           0x%02x    mask: %d\n",
2031 			filter.protocol,
2032 			filter.protocol_mask ? 0 : 1);
2033 		printf("    priority: %d    flags: 0x%02x    queue: %d\n",
2034 			filter.priority, filter.tcp_flags, rx_queue);
2035 	}
2036 }
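
/*
 * Display the flexible filter (pattern dwords and mask) at the given
 * index of a port, if enabled.
 */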
2037 void
2038 get_flex_filter(uint8_t port_id, uint16_t index)
2040 {
2041 	struct rte_flex_filter filter;
2042 	int ret = 0;
2043 	uint16_t rx_queue;
2044 	int i, j;
2045 
2046 	memset(&filter, 0, sizeof(filter));
2047 	ret = rte_eth_dev_get_flex_filter(port_id, index,
2048 				&filter, &rx_queue);
2049 	if (ret < 0) {
2050 		if (ret == (-ENOENT))
2051 			printf("filter[%d] is not enabled\n", index);
2052 		else
2053 			printf("get flex filter failed (%s)\n", strerror(-ret));
2054 		return;
2055 	} else {
2056 		printf("filter[%d]: ", index);
2057 		printf("\n    length: %d", filter.len);
2058 		printf("\n    dword[]: 0x");
2059 		for (i = 0; i < 32; i++)
2060 			printf("%08x ", (unsigned)rte_be_to_cpu_32(filter.dwords[i]));
2061 		printf("\n    mask[]: 0b");
2062 		for (i = 0; i < 16; i++) {
2063 			for (j = 0; j < 8; j++)
2064 				printf("%c", (filter.mask[i] & (1 << j)) ? '1' : '0');
2065 		}
2066 		printf("\n    priority: %d    queue: %d\n",
2067 			filter.priority, rx_queue);
2068 	}
2069 }
2070