xref: /dpdk/app/test-pmd/config.c (revision 6a18e1af70a451f85f67b7b8c6971cf3b144520e)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 /*   BSD LICENSE
34  *
35  *   Copyright(c) 2013 6WIND.
36  *
37  *   Redistribution and use in source and binary forms, with or without
38  *   modification, are permitted provided that the following conditions
39  *   are met:
40  *
41  *     * Redistributions of source code must retain the above copyright
42  *       notice, this list of conditions and the following disclaimer.
43  *     * Redistributions in binary form must reproduce the above copyright
44  *       notice, this list of conditions and the following disclaimer in
45  *       the documentation and/or other materials provided with the
46  *       distribution.
47  *     * Neither the name of 6WIND S.A. nor the names of its
48  *       contributors may be used to endorse or promote products derived
49  *       from this software without specific prior written permission.
50  *
51  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  */
63 
64 #include <stdarg.h>
65 #include <errno.h>
66 #include <stdio.h>
67 #include <string.h>
69 #include <stdint.h>
70 #include <inttypes.h>
71 
72 #include <sys/queue.h>
73 
74 #include <rte_common.h>
75 #include <rte_byteorder.h>
76 #include <rte_debug.h>
77 #include <rte_log.h>
78 #include <rte_memory.h>
79 #include <rte_memcpy.h>
80 #include <rte_memzone.h>
81 #include <rte_launch.h>
82 #include <rte_tailq.h>
83 #include <rte_eal.h>
84 #include <rte_per_lcore.h>
85 #include <rte_lcore.h>
86 #include <rte_atomic.h>
87 #include <rte_branch_prediction.h>
88 #include <rte_ring.h>
89 #include <rte_mempool.h>
90 #include <rte_mbuf.h>
91 #include <rte_interrupts.h>
92 #include <rte_pci.h>
93 #include <rte_ether.h>
94 #include <rte_ethdev.h>
95 #include <rte_string_fns.h>
96 
97 #include "testpmd.h"
98 
99 static void
100 print_ethaddr(const char *name, struct ether_addr *eth_addr)
101 {
102 	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
103 	       (unsigned int)eth_addr->addr_bytes[0],
104 	       (unsigned int)eth_addr->addr_bytes[1],
105 	       (unsigned int)eth_addr->addr_bytes[2],
106 	       (unsigned int)eth_addr->addr_bytes[3],
107 	       (unsigned int)eth_addr->addr_bytes[4],
108 	       (unsigned int)eth_addr->addr_bytes[5]);
109 }
110 
111 void
112 nic_stats_display(portid_t port_id)
113 {
114 	struct rte_eth_stats stats;
115 	struct rte_port *port = &ports[port_id];
116 	uint8_t i;
117 
118 	static const char *nic_stats_border = "########################";
119 
120 	if (port_id >= nb_ports) {
121 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
122 		return;
123 	}
124 	rte_eth_stats_get(port_id, &stats);
125 	printf("\n  %s NIC statistics for port %-2d %s\n",
126 	       nic_stats_border, port_id, nic_stats_border);
127 
128 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
129 		printf("  RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64" RX-bytes: "
130 		       "%-"PRIu64"\n"
131 		       "  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
132 		       "%-"PRIu64"\n",
133 		       stats.ipackets, stats.ierrors, stats.ibytes,
134 		       stats.opackets, stats.oerrors, stats.obytes);
135 	}
136 	else {
137 		printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
138 		       "    RX-bytes: %10"PRIu64"\n"
139 		       "  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
140 		       "    TX-bytes: %10"PRIu64"\n",
141 		       stats.ipackets, stats.ierrors, stats.ibytes,
142 		       stats.opackets, stats.oerrors, stats.obytes);
143 	}
144 
145 	/* stats fdir */
146 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
147 		printf("  Fdirmiss:   %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n",
148 		       stats.fdirmiss,
149 		       stats.fdirmatch);
150 
151 	if (port->rx_queue_stats_mapping_enabled) {
152 		printf("\n");
153 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
154 			printf("  Stats reg %2d RX-packets: %10"PRIu64
155 			       "    RX-errors: %10"PRIu64
156 			       "    RX-bytes: %10"PRIu64"\n",
157 			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
158 		}
159 	}
160 	if (port->tx_queue_stats_mapping_enabled) {
161 		printf("\n");
162 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
163 			printf("  Stats reg %2d TX-packets: %10"PRIu64
164 			       "                             TX-bytes: %10"PRIu64"\n",
165 			       i, stats.q_opackets[i], stats.q_obytes[i]);
166 		}
167 	}
168 
169 	/* Display statistics of XON/XOFF pause frames, if any. */
170 	if ((stats.tx_pause_xon  | stats.rx_pause_xon |
171 	     stats.tx_pause_xoff | stats.rx_pause_xoff) > 0) {
172 		printf("  RX-XOFF:    %-10"PRIu64" RX-XON:    %-10"PRIu64"\n",
173 		       stats.rx_pause_xoff, stats.rx_pause_xon);
174 		printf("  TX-XOFF:    %-10"PRIu64" TX-XON:    %-10"PRIu64"\n",
175 		       stats.tx_pause_xoff, stats.tx_pause_xon);
176 	}
177 	printf("  %s############################%s\n",
178 	       nic_stats_border, nic_stats_border);
179 }
180 
181 void
182 nic_stats_clear(portid_t port_id)
183 {
184 	if (port_id >= nb_ports) {
185 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
186 		return;
187 	}
188 	rte_eth_stats_reset(port_id);
189 	printf("\n  NIC statistics for port %d cleared\n", port_id);
190 }
191 
192 
193 void
194 nic_stats_mapping_display(portid_t port_id)
195 {
196 	struct rte_port *port = &ports[port_id];
197 	uint16_t i;
198 
199 	static const char *nic_stats_mapping_border = "########################";
200 
201 	if (port_id >= nb_ports) {
202 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
203 		return;
204 	}
205 
206 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
207 		printf("Port id %d - either does not support queue statistic mapping or"
208 		       " no queue statistic mapping has been set\n", port_id);
209 		return;
210 	}
211 
212 	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
213 	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);
214 
215 	if (port->rx_queue_stats_mapping_enabled) {
216 		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
217 			if (rx_queue_stats_mappings[i].port_id == port_id) {
218 				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
219 				       rx_queue_stats_mappings[i].queue_id,
220 				       rx_queue_stats_mappings[i].stats_counter_id);
221 			}
222 		}
223 		printf("\n");
224 	}
225 
226 
227 	if (port->tx_queue_stats_mapping_enabled) {
228 		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
229 			if (tx_queue_stats_mappings[i].port_id == port_id) {
230 				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
231 				       tx_queue_stats_mappings[i].queue_id,
232 				       tx_queue_stats_mappings[i].stats_counter_id);
233 			}
234 		}
235 	}
236 
237 	printf("  %s####################################%s\n",
238 	       nic_stats_mapping_border, nic_stats_mapping_border);
239 }
240 
241 void
242 port_infos_display(portid_t port_id)
243 {
244 	struct rte_port *port;
245 	struct rte_eth_link link;
246 	int vlan_offload;
247 	struct rte_mempool * mp;
248 	static const char *info_border = "*********************";
249 
250 	if (port_id >= nb_ports) {
251 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
252 		return;
253 	}
254 	port = &ports[port_id];
255 	rte_eth_link_get_nowait(port_id, &link);
256 	printf("\n%s Infos for port %-2d %s\n",
257 	       info_border, port_id, info_border);
258 	print_ethaddr("MAC address: ", &port->eth_addr);
259 	printf("\nConnected to socket: %u", port->socket_id);
260 
261 	if (port_numa[port_id] != NUMA_NO_CONFIG) {
262 		mp = mbuf_pool_find(port_numa[port_id]);
263 		if (mp)
264 			printf("\nmemory allocation on the socket: %d",
265 							port_numa[port_id]);
266 	} else
267 		printf("\nmemory allocation on the socket: %u", port->socket_id);
268 
269 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
270 	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
271 	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
272 	       ("full-duplex") : ("half-duplex"));
273 	printf("Promiscuous mode: %s\n",
274 	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
275 	printf("Allmulticast mode: %s\n",
276 	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
277 	printf("Maximum number of MAC addresses: %u\n",
278 	       (unsigned int)(port->dev_info.max_mac_addrs));
279 	printf("Maximum number of MAC addresses of hash filtering: %u\n",
280 	       (unsigned int)(port->dev_info.max_hash_mac_addrs));
281 
282 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
283 	if (vlan_offload >= 0) {
284 		printf("VLAN offload: \n");
285 		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
286 			printf("  strip on \n");
287 		else
288 			printf("  strip off \n");
289 
290 		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
291 			printf("  filter on \n");
292 		else
293 			printf("  filter off \n");
294 
295 		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
296 			printf("  qinq(extend) on \n");
297 		else
298 			printf("  qinq(extend) off \n");
299 	}
300 }
301 
302 static int
303 port_id_is_invalid(portid_t port_id)
304 {
305 	if (port_id < nb_ports)
306 		return 0;
307 	printf("Invalid port %d (must be < nb_ports=%d)\n", port_id, nb_ports);
308 	return 1;
309 }
310 
311 static int
312 vlan_id_is_invalid(uint16_t vlan_id)
313 {
314 	if (vlan_id < 4096)
315 		return 0;
316 	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
317 	return 1;
318 }
319 
320 static int
321 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
322 {
323 	uint64_t pci_len;
324 
325 	if (reg_off & 0x3) {
326 		printf("Port register offset 0x%X not aligned on a 4-byte "
327 		       "boundary\n",
328 		       (unsigned)reg_off);
329 		return 1;
330 	}
331 	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
332 	if (reg_off >= pci_len) {
333 		printf("Port %d: register offset %u (0x%X) out of port PCI "
334 		       "resource (length=%"PRIu64")\n",
335 		       port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
336 		return 1;
337 	}
338 	return 0;
339 }
340 
341 static int
342 reg_bit_pos_is_invalid(uint8_t bit_pos)
343 {
344 	if (bit_pos <= 31)
345 		return 0;
346 	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
347 	return 1;
348 }
349 
350 #define display_port_and_reg_off(port_id, reg_off) \
351 	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
352 
353 static inline void
354 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
355 {
356 	display_port_and_reg_off(port_id, (unsigned)reg_off);
357 	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
358 }
359 
360 void
361 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
362 {
363 	uint32_t reg_v;
364 
365 
366 	if (port_id_is_invalid(port_id))
367 		return;
368 	if (port_reg_off_is_invalid(port_id, reg_off))
369 		return;
370 	if (reg_bit_pos_is_invalid(bit_x))
371 		return;
372 	reg_v = port_id_pci_reg_read(port_id, reg_off);
373 	display_port_and_reg_off(port_id, (unsigned)reg_off);
374 	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
375 }
376 
377 void
378 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
379 			   uint8_t bit1_pos, uint8_t bit2_pos)
380 {
381 	uint32_t reg_v;
382 	uint8_t  l_bit;
383 	uint8_t  h_bit;
384 
385 	if (port_id_is_invalid(port_id))
386 		return;
387 	if (port_reg_off_is_invalid(port_id, reg_off))
388 		return;
389 	if (reg_bit_pos_is_invalid(bit1_pos))
390 		return;
391 	if (reg_bit_pos_is_invalid(bit2_pos))
392 		return;
393 	if (bit1_pos > bit2_pos)
394 		l_bit = bit2_pos, h_bit = bit1_pos;
395 	else
396 		l_bit = bit1_pos, h_bit = bit2_pos;
397 
398 	reg_v = port_id_pci_reg_read(port_id, reg_off);
399 	reg_v >>= l_bit;
400 	if (h_bit < 31)
401 		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
402 	display_port_and_reg_off(port_id, (unsigned)reg_off);
403 	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
404 	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
405 }
406 
407 void
408 port_reg_display(portid_t port_id, uint32_t reg_off)
409 {
410 	uint32_t reg_v;
411 
412 	if (port_id_is_invalid(port_id))
413 		return;
414 	if (port_reg_off_is_invalid(port_id, reg_off))
415 		return;
416 	reg_v = port_id_pci_reg_read(port_id, reg_off);
417 	display_port_reg_value(port_id, reg_off, reg_v);
418 }
419 
420 void
421 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
422 		 uint8_t bit_v)
423 {
424 	uint32_t reg_v;
425 
426 	if (port_id_is_invalid(port_id))
427 		return;
428 	if (port_reg_off_is_invalid(port_id, reg_off))
429 		return;
430 	if (reg_bit_pos_is_invalid(bit_pos))
431 		return;
432 	if (bit_v > 1) {
433 		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
434 		return;
435 	}
436 	reg_v = port_id_pci_reg_read(port_id, reg_off);
437 	if (bit_v == 0)
438 		reg_v &= ~(1 << bit_pos);
439 	else
440 		reg_v |= (1 << bit_pos);
441 	port_id_pci_reg_write(port_id, reg_off, reg_v);
442 	display_port_reg_value(port_id, reg_off, reg_v);
443 }
444 
445 void
446 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
447 		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
448 {
449 	uint32_t max_v;
450 	uint32_t reg_v;
451 	uint8_t  l_bit;
452 	uint8_t  h_bit;
453 
454 	if (port_id_is_invalid(port_id))
455 		return;
456 	if (port_reg_off_is_invalid(port_id, reg_off))
457 		return;
458 	if (reg_bit_pos_is_invalid(bit1_pos))
459 		return;
460 	if (reg_bit_pos_is_invalid(bit2_pos))
461 		return;
462 	if (bit1_pos > bit2_pos)
463 		l_bit = bit2_pos, h_bit = bit1_pos;
464 	else
465 		l_bit = bit1_pos, h_bit = bit2_pos;
466 
467 	if ((h_bit - l_bit) < 31)
468 		max_v = (1 << (h_bit - l_bit + 1)) - 1;
469 	else
470 		max_v = 0xFFFFFFFF;
471 
472 	if (value > max_v) {
473 		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
474 				(unsigned)value, (unsigned)value,
475 				(unsigned)max_v, (unsigned)max_v);
476 		return;
477 	}
478 	reg_v = port_id_pci_reg_read(port_id, reg_off);
479 	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
480 	reg_v |= (value << l_bit); /* Set changed bits */
481 	port_id_pci_reg_write(port_id, reg_off, reg_v);
482 	display_port_reg_value(port_id, reg_off, reg_v);
483 }
484 
485 void
486 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
487 {
488 	if (port_id_is_invalid(port_id))
489 		return;
490 	if (port_reg_off_is_invalid(port_id, reg_off))
491 		return;
492 	port_id_pci_reg_write(port_id, reg_off, reg_v);
493 	display_port_reg_value(port_id, reg_off, reg_v);
494 }
495 
496 /*
497  * RX/TX ring descriptors display functions.
498  */
499 static int
500 rx_queue_id_is_invalid(queueid_t rxq_id)
501 {
502 	if (rxq_id < nb_rxq)
503 		return 0;
504 	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
505 	return 1;
506 }
507 
508 static int
509 tx_queue_id_is_invalid(queueid_t txq_id)
510 {
511 	if (txq_id < nb_txq)
512 		return 0;
513 	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
514 	return 1;
515 }
516 
517 static int
518 rx_desc_id_is_invalid(uint16_t rxdesc_id)
519 {
520 	if (rxdesc_id < nb_rxd)
521 		return 0;
522 	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
523 	       rxdesc_id, nb_rxd);
524 	return 1;
525 }
526 
527 static int
528 tx_desc_id_is_invalid(uint16_t txdesc_id)
529 {
530 	if (txdesc_id < nb_txd)
531 		return 0;
532 	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
533 	       txdesc_id, nb_txd);
534 	return 1;
535 }
536 
537 static const struct rte_memzone *
538 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
539 {
540 	char mz_name[RTE_MEMZONE_NAMESIZE];
541 	const struct rte_memzone *mz;
542 
543 	rte_snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
544 		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
545 	mz = rte_memzone_lookup(mz_name);
546 	if (mz == NULL)
547 		printf("%s ring memory zone of (port %d, queue %d) not "
548 		       "found (zone name = %s)\n",
549 		       ring_name, port_id, q_id, mz_name);
550 	return (mz);
551 }
552 
553 union igb_ring_dword {
554 	uint64_t dword;
555 	struct {
556 		uint32_t hi;
557 		uint32_t lo;
558 	} words;
559 };
560 
561 struct igb_ring_desc {
562 	union igb_ring_dword lo_dword;
563 	union igb_ring_dword hi_dword;
564 };
565 
566 static void
567 ring_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
568 {
569 	struct igb_ring_desc *ring;
570 	struct igb_ring_desc rd;
571 
572 	ring = (struct igb_ring_desc *) ring_mz->addr;
573 	rd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
574 	rd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
575 	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
576 		(unsigned)rd.lo_dword.words.lo, (unsigned)rd.lo_dword.words.hi,
577 		(unsigned)rd.hi_dword.words.lo, (unsigned)rd.hi_dword.words.hi);
578 }
579 
580 void
581 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
582 {
583 	const struct rte_memzone *rx_mz;
584 
585 	if (port_id_is_invalid(port_id))
586 		return;
587 	if (rx_queue_id_is_invalid(rxq_id))
588 		return;
589 	if (rx_desc_id_is_invalid(rxd_id))
590 		return;
591 	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
592 	if (rx_mz == NULL)
593 		return;
594 	ring_descriptor_display(rx_mz, rxd_id);
595 }
596 
597 void
598 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
599 {
600 	const struct rte_memzone *tx_mz;
601 
602 	if (port_id_is_invalid(port_id))
603 		return;
604 	if (tx_queue_id_is_invalid(txq_id))
605 		return;
606 	if (tx_desc_id_is_invalid(txd_id))
607 		return;
608 	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
609 	if (tx_mz == NULL)
610 		return;
611 	ring_descriptor_display(tx_mz, txd_id);
612 }
613 
614 void
615 fwd_lcores_config_display(void)
616 {
617 	lcoreid_t lc_id;
618 
619 	printf("List of forwarding lcores:");
620 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
621 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
622 	printf("\n");
623 }

624 void
625 rxtx_config_display(void)
626 {
627 	printf("  %s packet forwarding - CRC stripping %s - "
628 	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
629 	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
630 	       nb_pkt_per_burst);
631 
632 	if (cur_fwd_eng == &tx_only_engine)
633 		printf("  packet len=%u - nb packet segments=%d\n",
634 				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
635 
636 	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
637 	       nb_fwd_lcores, nb_fwd_ports);
638 	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
639 	       nb_rxq, nb_rxd, rx_free_thresh);
640 	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
641 	       rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh);
642 	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
643 	       nb_txq, nb_txd, tx_free_thresh);
644 	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
645 	       tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);
646 	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
647 	       tx_rs_thresh, txq_flags);
648 }
649 
650 void
651 port_rss_reta_info(portid_t port_id, struct rte_eth_rss_reta *reta_conf)
652 {
653 	uint8_t i, j;
654 	int ret;
655 
656 	if (port_id_is_invalid(port_id))
657 		return;
658 
659 	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf);
660 	if (ret != 0) {
661 		printf("Failed to get RSS RETA info, return code = %d\n", ret);
662 		return;
663 	}
664 
665 	if (reta_conf->mask_lo != 0) {
666 		for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
667 			if (reta_conf->mask_lo & (uint64_t)(1ULL << i))
668 				printf("RSS RETA configuration: hash index=%d, "
669 					"queue=%d\n", i, reta_conf->reta[i]);
670 		}
671 	}
672 
673 	if (reta_conf->mask_hi != 0) {
674 		for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
675 			if (reta_conf->mask_hi & (uint64_t)(1ULL << i)) {
676 				j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
677 				printf("RSS RETA configuration: hash index=%d, "
678 					"queue=%d\n", j, reta_conf->reta[j]);
679 			}
680 		}
681 	}
682 }
683 
684 /*
685  * Displays the RSS hash functions of a port, and, optionally, the RSS hash
686  * key of the port.
687  */
688 void
689 port_rss_hash_conf_show(portid_t port_id, int show_rss_key)
690 {
691 	struct rte_eth_rss_conf rss_conf;
692 	uint8_t rss_key[10 * 4];
693 	uint16_t rss_hf;
694 	uint8_t i;
695 	int diag;
696 
697 	if (port_id_is_invalid(port_id))
698 		return;
699 	/* Get RSS hash key if asked to display it */
700 	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
701 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
702 	if (diag != 0) {
703 		switch (diag) {
704 		case -ENODEV:
705 			printf("port index %d invalid\n", port_id);
706 			break;
707 		case -ENOTSUP:
708 			printf("operation not supported by device\n");
709 			break;
710 		default:
711 			printf("operation failed - diag=%d\n", diag);
712 			break;
713 		}
714 		return;
715 	}
716 	rss_hf = rss_conf.rss_hf;
717 	if (rss_hf == 0) {
718 		printf("RSS disabled\n");
719 		return;
720 	}
721 	printf("RSS functions:\n ");
722 	if (rss_hf & ETH_RSS_IPV4)
723 		printf("ip4");
724 	if (rss_hf & ETH_RSS_IPV4_TCP)
725 		printf(" tcp4");
726 	if (rss_hf & ETH_RSS_IPV4_UDP)
727 		printf(" udp4");
728 	if (rss_hf & ETH_RSS_IPV6)
729 		printf(" ip6");
730 	if (rss_hf & ETH_RSS_IPV6_EX)
731 		printf(" ip6-ex");
732 	if (rss_hf & ETH_RSS_IPV6_TCP)
733 		printf(" tcp6");
734 	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
735 		printf(" tcp6-ex");
736 	if (rss_hf & ETH_RSS_IPV6_UDP)
737 		printf(" udp6");
738 	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
739 		printf(" udp6-ex");
740 	printf("\n");
741 	if (!show_rss_key)
742 		return;
743 	printf("RSS key:\n");
744 	for (i = 0; i < sizeof(rss_key); i++)
745 		printf("%02X", rss_key[i]);
746 	printf("\n");
747 }
748 
749 void
750 port_rss_hash_key_update(portid_t port_id, uint8_t *hash_key)
751 {
752 	struct rte_eth_rss_conf rss_conf;
753 	int diag;
754 
755 	rss_conf.rss_key = NULL;
756 	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
757 	if (diag == 0) {
758 		rss_conf.rss_key = hash_key;
759 		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
760 	}
761 	if (diag == 0)
762 		return;
763 
764 	switch (diag) {
765 	case -ENODEV:
766 		printf("port index %d invalid\n", port_id);
767 		break;
768 	case -ENOTSUP:
769 		printf("operation not supported by device\n");
770 		break;
771 	default:
772 		printf("operation failed - diag=%d\n", diag);
773 		break;
774 	}
775 }
776 
777 /*
778  * Setup forwarding configuration for each logical core.
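 * Streams are spread as evenly as possible over the lcores: for example,
 * 10 streams over 4 lcores gives 2 lcores with 2 streams each and 2 lcores
 * with 3 streams each.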
779  */
780 static void
781 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
782 {
783 	streamid_t nb_fs_per_lcore;
784 	streamid_t nb_fs;
785 	streamid_t sm_id;
786 	lcoreid_t  nb_extra;
787 	lcoreid_t  nb_fc;
788 	lcoreid_t  nb_lc;
789 	lcoreid_t  lc_id;
790 
791 	nb_fs = cfg->nb_fwd_streams;
792 	nb_fc = cfg->nb_fwd_lcores;
793 	if (nb_fs <= nb_fc) {
794 		nb_fs_per_lcore = 1;
795 		nb_extra = 0;
796 	} else {
797 		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
798 		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
799 	}
800 
801 	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
802 	sm_id = 0;
803 	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
804 		fwd_lcores[lc_id]->stream_idx = sm_id;
805 		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
806 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
807 	}
808 
809 	/*
810 	 * Assign extra remaining streams, if any.
811 	 */
812 	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
813 	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
814 		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
815 		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
816 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
817 	}
818 }
819 
820 static void
821 simple_fwd_config_setup(void)
822 {
823 	portid_t i;
824 	portid_t j;
825 	portid_t inc = 2;
826 
827 	if (port_topology == PORT_TOPOLOGY_CHAINED ||
828 	    port_topology == PORT_TOPOLOGY_LOOP) {
829 		inc = 1;
830 	} else if (nb_fwd_ports % 2) {
831 		printf("\nWarning! Cannot handle an odd number of ports "
832 		       "with the current port topology. Configuration "
833 		       "must be changed to have an even number of ports, "
834 		       "or relaunch application with "
835 		       "--port-topology=chained\n\n");
836 	}
837 
838 	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
839 	cur_fwd_config.nb_fwd_streams =
840 		(streamid_t) cur_fwd_config.nb_fwd_ports;
841 
842 	/* reinitialize forwarding streams */
843 	init_fwd_streams();
844 
845 	/*
846 	 * In the simple forwarding test, the number of forwarding cores
847 	 * must be lower or equal to the number of forwarding ports.
848 	 */
849 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
850 	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
851 		cur_fwd_config.nb_fwd_lcores =
852 			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
853 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
854 
855 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
856 		if (port_topology != PORT_TOPOLOGY_LOOP)
857 			j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
858 		else
859 			j = i;
860 		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
861 		fwd_streams[i]->rx_queue  = 0;
862 		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
863 		fwd_streams[i]->tx_queue  = 0;
864 		fwd_streams[i]->peer_addr = j;
865 
866 		if (port_topology == PORT_TOPOLOGY_PAIRED) {
867 			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
868 			fwd_streams[j]->rx_queue  = 0;
869 			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
870 			fwd_streams[j]->tx_queue  = 0;
871 			fwd_streams[j]->peer_addr = i;
872 		}
873 	}
874 }
875 
876 /**
877  * For the RSS forwarding test, each core is assigned one transmit queue per
878  * port, whose index is the index of the core itself. This approach limits the
879  * maximum number of processing cores of the RSS test to the maximum number of
880  * TX queues supported by the devices.
881  *
882  * Each core is assigned a single stream, each stream being composed of
883  * a RX queue to poll on a RX port for input packets, associated with
884  * a TX queue of a TX port on which to send forwarded packets.
885  * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
886  * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
887  * following rules:
888  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
889  *    - TxQl = RxQj
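 *
 * For example, with nb_rxq = nb_txq = 2, packets received on RX queue 1 of
 * port 2 are sent on TX queue 1 of port 3, and packets received on RX
 * queue 1 of port 3 are sent on TX queue 1 of port 2.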
890  */
891 static void
892 rss_fwd_config_setup(void)
893 {
894 	portid_t   rxp;
895 	portid_t   txp;
896 	queueid_t  rxq;
897 	queueid_t  nb_q;
898 	lcoreid_t  lc_id;
899 
900 	nb_q = nb_rxq;
901 	if (nb_q > nb_txq)
902 		nb_q = nb_txq;
903 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
904 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
905 	cur_fwd_config.nb_fwd_streams =
906 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
907 	if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
908 		cur_fwd_config.nb_fwd_streams =
909 			(streamid_t)cur_fwd_config.nb_fwd_lcores;
910 	else
911 		cur_fwd_config.nb_fwd_lcores =
912 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
913 
914 	/* reinitialize forwarding streams */
915 	init_fwd_streams();
916 
917 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
918 	rxp = 0; rxq = 0;
919 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
920 		struct fwd_stream *fs;
921 
922 		fs = fwd_streams[lc_id];
923 
924 		if ((rxp & 0x1) == 0)
925 			txp = (portid_t) (rxp + 1);
926 		else
927 			txp = (portid_t) (rxp - 1);
928 		/*
929 		 * if we are in loopback, simply send stuff out through the
930 		 * ingress port
931 		 */
932 		if (port_topology == PORT_TOPOLOGY_LOOP)
933 			txp = rxp;
934 
935 		fs->rx_port = fwd_ports_ids[rxp];
936 		fs->rx_queue = rxq;
937 		fs->tx_port = fwd_ports_ids[txp];
938 		fs->tx_queue = rxq;
939 		fs->peer_addr = fs->tx_port;
940 		rxq = (queueid_t) (rxq + 1);
941 		if (rxq < nb_q)
942 			continue;
943 		/*
944 		 * rxq == nb_q
945 		 * Restart from RX queue 0 on next RX port
946 		 */
947 		rxq = 0;
948 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
949 			rxp = (portid_t)
950 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
951 		else
952 			rxp = (portid_t) (rxp + 1);
953 	}
954 }
955 
956 /*
957  * In DCB non-VT mode, map the 128 receive queues to the 128 transmit queues.
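 * For example, with the 4-TC mapping, RX queue 40 maps to TX queue 72 and
 * RX queue 100 maps to TX queue 114.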
958  */
959 static void
960 dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq)
961 {
962 	if (dcb_q_mapping == DCB_4_TCS_Q_MAPPING) {
963 
964 		if (rxq < 32)
965 			/* tc0: 0-31 */
966 			*txq = rxq;
967 		else if (rxq < 64) {
968 			/* tc1: 64-95 */
969 			*txq =  (uint16_t)(rxq + 32);
970 		}
971 		else {
972 			/* tc2: 96-111;tc3:112-127 */
973 			*txq =  (uint16_t)(rxq/2 + 64);
974 		}
975 	}
976 	else {
977 		if (rxq < 16)
978 			/* tc0 mapping*/
979 			*txq = rxq;
980 		else if (rxq < 32) {
981 			/* tc1 mapping*/
982 			 *txq = (uint16_t)(rxq + 16);
983 		}
984 		else if (rxq < 64) {
985 			/*tc2,tc3 mapping */
986 			*txq =  (uint16_t)(rxq + 32);
987 		}
988 		else {
989 			/* tc4,tc5,tc6 and tc7 mapping */
990 			*txq =  (uint16_t)(rxq/2 + 64);
991 		}
992 	}
993 }
994 
995 /**
996  * For the DCB forwarding test, each core is assigned multiple transmit
997  * queues on every port.
998  *
999  * Each core is assigned multiple streams, each stream being composed of
1000  * a RX queue to poll on a RX port for input packets, associated with
1001  * a TX queue of a TX port on which to send forwarded packets.
1002  * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
1003  * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
1004  * following rules:
1005  * In VT mode,
1006  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
1007  *    - TxQl = RxQj
1008  * In non-VT mode,
1009  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
1010  *    A mapping of RxQj to TxQl is required; it is implemented in the
1011  *    dcb_rxq_2_txq_mapping() function.
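 *
 * For example, in non-VT mode with the 4-TC mapping, packets received on
 * RX queue 40 of port 0 are sent on TX queue 72 of port 1, while in VT
 * mode the TX queue index simply equals the RX queue index.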
1012  */
1013 static void
1014 dcb_fwd_config_setup(void)
1015 {
1016 	portid_t   rxp;
1017 	portid_t   txp;
1018 	queueid_t  rxq;
1019 	queueid_t  nb_q;
1020 	lcoreid_t  lc_id;
1021 	uint16_t sm_id;
1022 
1023 	nb_q = nb_rxq;
1024 
1025 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1026 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1027 	cur_fwd_config.nb_fwd_streams =
1028 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
1029 
1030 	/* reinitialize forwarding streams */
1031 	init_fwd_streams();
1032 
1033 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
1034 	rxp = 0; rxq = 0;
1035 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
1036 		/* a fwd core can run multi-streams */
1037 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++)
1038 		{
1039 			struct fwd_stream *fs;
1040 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
1041 			if ((rxp & 0x1) == 0)
1042 				txp = (portid_t) (rxp + 1);
1043 			else
1044 				txp = (portid_t) (rxp - 1);
1045 			fs->rx_port = fwd_ports_ids[rxp];
1046 			fs->rx_queue = rxq;
1047 			fs->tx_port = fwd_ports_ids[txp];
1048 			if (dcb_q_mapping == DCB_VT_Q_MAPPING)
1049 				fs->tx_queue = rxq;
1050 			else
1051 				dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue);
1052 			fs->peer_addr = fs->tx_port;
1053 			rxq = (queueid_t) (rxq + 1);
1054 			if (rxq < nb_q)
1055 				continue;
1056 			rxq = 0;
1057 			if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
1058 				rxp = (portid_t)
1059 					(rxp + ((nb_ports >> 1) / nb_fwd_ports));
1060 			else
1061 				rxp = (portid_t) (rxp + 1);
1062 		}
1063 	}
1064 }
1065 
1066 static void
1067 icmp_echo_config_setup(void)
1068 {
1069 	portid_t  rxp;
1070 	queueid_t rxq;
1071 	lcoreid_t lc_id;
1072 	uint16_t  sm_id;
1073 
1074 	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
1075 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
1076 			(nb_txq * nb_fwd_ports);
1077 	else
1078 		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1079 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1080 	cur_fwd_config.nb_fwd_streams =
1081 		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
1082 	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
1083 		cur_fwd_config.nb_fwd_lcores =
1084 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
1085 	if (verbose_level > 0) {
1086 		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
1087 		       __FUNCTION__,
1088 		       cur_fwd_config.nb_fwd_lcores,
1089 		       cur_fwd_config.nb_fwd_ports,
1090 		       cur_fwd_config.nb_fwd_streams);
1091 	}
1092 
1093 	/* reinitialize forwarding streams */
1094 	init_fwd_streams();
1095 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
1096 	rxp = 0; rxq = 0;
1097 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
1098 		if (verbose_level > 0)
1099 			printf("  core=%d: \n", lc_id);
1100 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
1101 			struct fwd_stream *fs;
1102 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
1103 			fs->rx_port = fwd_ports_ids[rxp];
1104 			fs->rx_queue = rxq;
1105 			fs->tx_port = fs->rx_port;
1106 			fs->tx_queue = lc_id;
1107 			fs->peer_addr = fs->tx_port;
1108 			if (verbose_level > 0)
1109 				printf("  stream=%d port=%d rxq=%d txq=%d\n",
1110 				       sm_id, fs->rx_port, fs->rx_queue,
1111 				       fs->tx_queue);
1112 			rxq = (queueid_t) (rxq + 1);
1113 			if (rxq == nb_rxq) {
1114 				rxq = 0;
1115 				rxp = (portid_t) (rxp + 1);
1116 			}
1117 		}
1118 	}
1119 }
1120 
1121 void
1122 fwd_config_setup(void)
1123 {
1124 	cur_fwd_config.fwd_eng = cur_fwd_eng;
1125 	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
1126 		icmp_echo_config_setup();
1127 		return;
1128 	}
1129 	if ((nb_rxq > 1) && (nb_txq > 1)) {
1130 		if (dcb_config)
1131 			dcb_fwd_config_setup();
1132 		else
1133 			rss_fwd_config_setup();
1134 	}
1135 	else
1136 		simple_fwd_config_setup();
1137 }
1138 
1139 static void
1140 pkt_fwd_config_display(struct fwd_config *cfg)
1141 {
1142 	struct fwd_stream *fs;
1143 	lcoreid_t  lc_id;
1144 	streamid_t sm_id;
1145 
1146 	printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
1147 		"NUMA support %s, MP over anonymous pages %s\n",
1148 		cfg->fwd_eng->fwd_mode_name,
1149 		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
1150 		numa_support == 1 ? "enabled" : "disabled",
1151 		mp_anon != 0 ? "enabled" : "disabled");
1152 
1153 	if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0)
1154 		printf("TX retry num: %u, delay between TX retries: %uus\n",
1155 			burst_tx_retry_num, burst_tx_delay_time);
1156 	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
1157 		printf("Logical Core %u (socket %u) forwards packets on "
1158 		       "%d streams:",
1159 		       fwd_lcores_cpuids[lc_id],
1160 		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
1161 		       fwd_lcores[lc_id]->stream_nb);
1162 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
1163 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
1164 			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
1165 			       "P=%d/Q=%d (socket %u) ",
1166 			       fs->rx_port, fs->rx_queue,
1167 			       ports[fs->rx_port].socket_id,
1168 			       fs->tx_port, fs->tx_queue,
1169 			       ports[fs->tx_port].socket_id);
1170 			print_ethaddr("peer=",
1171 				      &peer_eth_addrs[fs->peer_addr]);
1172 		}
1173 		printf("\n");
1174 	}
1175 	printf("\n");
1176 }
1177 
1178 
1179 void
1180 fwd_config_display(void)
1181 {
1182 	if ((dcb_config) && (nb_fwd_lcores == 1)) {
1183 		printf("In DCB mode, the number of forwarding cores should be larger than 1\n");
1184 		return;
1185 	}
1186 	fwd_config_setup();
1187 	pkt_fwd_config_display(&cur_fwd_config);
1188 }
1189 
1190 int
1191 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
1192 {
1193 	unsigned int i;
1194 	unsigned int lcore_cpuid;
1195 	int record_now;
1196 
1197 	record_now = 0;
1198  again:
1199 	for (i = 0; i < nb_lc; i++) {
1200 		lcore_cpuid = lcorelist[i];
1201 		if (! rte_lcore_is_enabled(lcore_cpuid)) {
1202 			printf("lcore %u not enabled\n", lcore_cpuid);
1203 			return -1;
1204 		}
1205 		if (lcore_cpuid == rte_get_master_lcore()) {
1206 			printf("lcore %u cannot be used for packet forwarding: "
1207 			       "it is the master lcore, which is reserved for "
1208 			       "command line parsing only\n",
1209 			       lcore_cpuid);
1210 			return -1;
1211 		}
1212 		if (record_now)
1213 			fwd_lcores_cpuids[i] = lcore_cpuid;
1214 	}
1215 	if (record_now == 0) {
1216 		record_now = 1;
1217 		goto again;
1218 	}
1219 	nb_cfg_lcores = (lcoreid_t) nb_lc;
1220 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
1221 		printf("previous number of forwarding cores %u - changed to "
1222 		       "number of configured cores %u\n",
1223 		       (unsigned int) nb_fwd_lcores, nb_lc);
1224 		nb_fwd_lcores = (lcoreid_t) nb_lc;
1225 	}
1226 
1227 	return 0;
1228 }
1229 
1230 int
1231 set_fwd_lcores_mask(uint64_t lcoremask)
1232 {
1233 	unsigned int lcorelist[64];
1234 	unsigned int nb_lc;
1235 	unsigned int i;
1236 
1237 	if (lcoremask == 0) {
1238 		printf("Invalid NULL mask of cores\n");
1239 		return -1;
1240 	}
1241 	nb_lc = 0;
1242 	for (i = 0; i < 64; i++) {
1243 		if (! ((uint64_t)(1ULL << i) & lcoremask))
1244 			continue;
1245 		lcorelist[nb_lc++] = i;
1246 	}
1247 	return set_fwd_lcores_list(lcorelist, nb_lc);
1248 }
1249 
1250 void
1251 set_fwd_lcores_number(uint16_t nb_lc)
1252 {
1253 	if (nb_lc > nb_cfg_lcores) {
1254 		printf("nb fwd cores %u > %u (max. number of configured "
1255 		       "lcores) - ignored\n",
1256 		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
1257 		return;
1258 	}
1259 	nb_fwd_lcores = (lcoreid_t) nb_lc;
1260 	printf("Number of forwarding cores set to %u\n",
1261 	       (unsigned int) nb_fwd_lcores);
1262 }
1263 
1264 void
1265 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
1266 {
1267 	unsigned int i;
1268 	portid_t port_id;
1269 	int record_now;
1270 
1271 	record_now = 0;
1272  again:
1273 	for (i = 0; i < nb_pt; i++) {
1274 		port_id = (portid_t) portlist[i];
1275 		if (port_id >= nb_ports) {
1276 			printf("Invalid port id %u >= %u\n",
1277 			       (unsigned int) port_id,
1278 			       (unsigned int) nb_ports);
1279 			return;
1280 		}
1281 		if (record_now)
1282 			fwd_ports_ids[i] = port_id;
1283 	}
1284 	if (record_now == 0) {
1285 		record_now = 1;
1286 		goto again;
1287 	}
1288 	nb_cfg_ports = (portid_t) nb_pt;
1289 	if (nb_fwd_ports != (portid_t) nb_pt) {
1290 		printf("previous number of forwarding ports %u - changed to "
1291 		       "number of configured ports %u\n",
1292 		       (unsigned int) nb_fwd_ports, nb_pt);
1293 		nb_fwd_ports = (portid_t) nb_pt;
1294 	}
1295 }
1296 
1297 void
1298 set_fwd_ports_mask(uint64_t portmask)
1299 {
1300 	unsigned int portlist[64];
1301 	unsigned int nb_pt;
1302 	unsigned int i;
1303 
1304 	if (portmask == 0) {
1305 		printf("Invalid NULL mask of ports\n");
1306 		return;
1307 	}
1308 	nb_pt = 0;
1309 	for (i = 0; i < 64; i++) {
1310 		if (! ((uint64_t)(1ULL << i) & portmask))
1311 			continue;
1312 		portlist[nb_pt++] = i;
1313 	}
1314 	set_fwd_ports_list(portlist, nb_pt);
1315 }
1316 
1317 void
1318 set_fwd_ports_number(uint16_t nb_pt)
1319 {
1320 	if (nb_pt > nb_cfg_ports) {
1321 		printf("nb fwd ports %u > %u (number of configured "
1322 		       "ports) - ignored\n",
1323 		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
1324 		return;
1325 	}
1326 	nb_fwd_ports = (portid_t) nb_pt;
1327 	printf("Number of forwarding ports set to %u\n",
1328 	       (unsigned int) nb_fwd_ports);
1329 }
1330 
1331 void
1332 set_nb_pkt_per_burst(uint16_t nb)
1333 {
1334 	if (nb > MAX_PKT_BURST) {
1335 		printf("nb pkt per burst: %u > %u (maximum packets per burst) "
1336 		       "- ignored\n",
1337 		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
1338 		return;
1339 	}
1340 	nb_pkt_per_burst = nb;
1341 	printf("Number of packets per burst set to %u\n",
1342 	       (unsigned int) nb_pkt_per_burst);
1343 }
1344 
1345 void
1346 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
1347 {
1348 	uint16_t tx_pkt_len;
1349 	unsigned i;
1350 
1351 	if (nb_segs >= (unsigned) nb_txd) {
1352 		printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n",
1353 		       nb_segs, (unsigned int) nb_txd);
1354 		return;
1355 	}
1356 
1357 	/*
1358 	 * Check that each segment length is less than or equal to
1359 	 * the mbuf data size.
1360 	 * Check also that the total packet length is greater than or equal to the
1361 	 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
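	 * With standard Ethernet, IPv4 and UDP headers this minimum is
	 * 14 + 20 + 8 = 42 bytes.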
1362 	 */
1363 	tx_pkt_len = 0;
1364 	for (i = 0; i < nb_segs; i++) {
1365 		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
1366 			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
1367 			       i, seg_lengths[i], (unsigned) mbuf_data_size);
1368 			return;
1369 		}
1370 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
1371 	}
1372 	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
1373 		printf("total packet length=%u < %d - give up\n",
1374 				(unsigned) tx_pkt_len,
1375 				(int)(sizeof(struct ether_hdr) + 20 + 8));
1376 		return;
1377 	}
1378 
1379 	for (i = 0; i < nb_segs; i++)
1380 		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
1381 
1382 	tx_pkt_length  = tx_pkt_len;
1383 	tx_pkt_nb_segs = (uint8_t) nb_segs;
1384 }
1385 
1386 char*
1387 list_pkt_forwarding_modes(void)
1388 {
1389 	static char fwd_modes[128] = "";
1390 	const char *separator = "|";
1391 	struct fwd_engine *fwd_eng;
1392 	unsigned i = 0;
1393 
1394 	if (strlen (fwd_modes) == 0) {
1395 		while ((fwd_eng = fwd_engines[i++]) != NULL) {
1396 			strcat(fwd_modes, fwd_eng->fwd_mode_name);
1397 			strcat(fwd_modes, separator);
1398 		}
1399 		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
1400 	}
1401 
1402 	return fwd_modes;
1403 }
1404 
1405 void
1406 set_pkt_forwarding_mode(const char *fwd_mode_name)
1407 {
1408 	struct fwd_engine *fwd_eng;
1409 	unsigned i;
1410 
1411 	i = 0;
1412 	while ((fwd_eng = fwd_engines[i]) != NULL) {
1413 		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
1414 			printf("Set %s packet forwarding mode\n",
1415 			       fwd_mode_name);
1416 			cur_fwd_eng = fwd_eng;
1417 			return;
1418 		}
1419 		i++;
1420 	}
1421 	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
1422 }
1423 
1424 void
1425 set_verbose_level(uint16_t vb_level)
1426 {
1427 	printf("Change verbose level from %u to %u\n",
1428 	       (unsigned int) verbose_level, (unsigned int) vb_level);
1429 	verbose_level = vb_level;
1430 }
1431 
1432 void
1433 vlan_extend_set(portid_t port_id, int on)
1434 {
1435 	int diag;
1436 	int vlan_offload;
1437 
1438 	if (port_id_is_invalid(port_id))
1439 		return;
1440 
1441 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1442 
1443 	if (on)
1444 		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
1445 	else
1446 		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
1447 
1448 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1449 	if (diag < 0)
1450 		printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
1451 		       "diag=%d\n", port_id, on, diag);
1452 }
1453 
1454 void
1455 rx_vlan_strip_set(portid_t port_id, int on)
1456 {
1457 	int diag;
1458 	int vlan_offload;
1459 
1460 	if (port_id_is_invalid(port_id))
1461 		return;
1462 
1463 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1464 
1465 	if (on)
1466 		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
1467 	else
1468 		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
1469 
1470 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1471 	if (diag < 0)
1472 		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
1473 		       "diag=%d\n", port_id, on, diag);
1474 }
1475 
1476 void
1477 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
1478 {
1479 	int diag;
1480 
1481 	if (port_id_is_invalid(port_id))
1482 		return;
1483 
1484 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
1485 	if (diag < 0)
1486 		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
1487 		       "diag=%d\n", port_id, queue_id, on, diag);
1488 }
1489 
1490 void
1491 rx_vlan_filter_set(portid_t port_id, int on)
1492 {
1493 	int diag;
1494 	int vlan_offload;
1495 
1496 	if (port_id_is_invalid(port_id))
1497 		return;
1498 
1499 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1500 
1501 	if (on)
1502 		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
1503 	else
1504 		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
1505 
1506 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1507 	if (diag < 0)
1508 		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
1509 		       "diag=%d\n", port_id, on, diag);
1510 }
1511 
1512 void
1513 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
1514 {
1515 	int diag;
1516 
1517 	if (port_id_is_invalid(port_id))
1518 		return;
1519 	if (vlan_id_is_invalid(vlan_id))
1520 		return;
1521 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1522 	if (diag == 0)
1523 		return;
1524 	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
1525 	       "diag=%d\n",
1526 	       port_id, vlan_id, on, diag);
1527 }
1528 
1529 void
1530 rx_vlan_all_filter_set(portid_t port_id, int on)
1531 {
1532 	uint16_t vlan_id;
1533 
1534 	if (port_id_is_invalid(port_id))
1535 		return;
1536 	for (vlan_id = 0; vlan_id < 4096; vlan_id++)
1537 		rx_vft_set(port_id, vlan_id, on);
1538 }
1539 
1540 void
1541 vlan_tpid_set(portid_t port_id, uint16_t tp_id)
1542 {
1543 	int diag;
1544 	if (port_id_is_invalid(port_id))
1545 		return;
1546 
1547 	diag = rte_eth_dev_set_vlan_ether_type(port_id, tp_id);
1548 	if (diag == 0)
1549 		return;
1550 
1551 	printf("tx_vlan_tpid_set(port_id=%d, tpid=%d) failed "
1552 	       "diag=%d\n",
1553 	       port_id, tp_id, diag);
1554 }
1555 
1556 void
1557 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
1558 {
1559 	if (port_id_is_invalid(port_id))
1560 		return;
1561 	if (vlan_id_is_invalid(vlan_id))
1562 		return;
1563 	ports[port_id].tx_ol_flags |= PKT_TX_VLAN_PKT;
1564 	ports[port_id].tx_vlan_id = vlan_id;
1565 }
1566 
1567 void
1568 tx_vlan_reset(portid_t port_id)
1569 {
1570 	if (port_id_is_invalid(port_id))
1571 		return;
1572 	ports[port_id].tx_ol_flags &= ~PKT_TX_VLAN_PKT;
1573 }
1574 
1575 void
1576 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
1577 {
1578 	uint16_t i;
1579 	uint8_t existing_mapping_found = 0;
1580 
1581 	if (port_id_is_invalid(port_id))
1582 		return;
1583 
1584 	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
1585 		return;
1586 
1587 	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1588 		printf("map_value not in required range 0..%d\n",
1589 				RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1590 		return;
1591 	}
1592 
1593 	if (!is_rx) { /*then tx*/
1594 		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1595 			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1596 			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
1597 				tx_queue_stats_mappings[i].stats_counter_id = map_value;
1598 				existing_mapping_found = 1;
1599 				break;
1600 			}
1601 		}
1602 		if (!existing_mapping_found) { /* A new additional mapping... */
1603 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
1604 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
1605 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
1606 			nb_tx_queue_stats_mappings++;
1607 		}
1608 	}
1609 	else { /*rx*/
1610 		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1611 			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1612 			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
1613 				rx_queue_stats_mappings[i].stats_counter_id = map_value;
1614 				existing_mapping_found = 1;
1615 				break;
1616 			}
1617 		}
1618 		if (!existing_mapping_found) { /* A new additional mapping... */
1619 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
1620 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
1621 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
1622 			nb_rx_queue_stats_mappings++;
1623 		}
1624 	}
1625 }
1626 
1627 void
1628 tx_cksum_set(portid_t port_id, uint8_t cksum_mask)
1629 {
1630 	uint16_t tx_ol_flags;
1631 	if (port_id_is_invalid(port_id))
1632 		return;
1633 	/* Clear last 4 bits and then set L3/4 checksum mask again */
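	/* e.g. tx_ol_flags 0x001A combined with cksum_mask 0x5 gives 0x0015 */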
1634 	tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0);
1635 	ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | tx_ol_flags);
1636 }
1637 
1638 void
1639 fdir_add_signature_filter(portid_t port_id, uint8_t queue_id,
1640 			  struct rte_fdir_filter *fdir_filter)
1641 {
1642 	int diag;
1643 
1644 	if (port_id_is_invalid(port_id))
1645 		return;
1646 
1647 	diag = rte_eth_dev_fdir_add_signature_filter(port_id, fdir_filter,
1648 						     queue_id);
1649 	if (diag == 0)
1650 		return;
1651 
1652 	printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed "
1653 	       "diag=%d\n", port_id, diag);
1654 }
1655 
1656 void
1657 fdir_update_signature_filter(portid_t port_id, uint8_t queue_id,
1658 			     struct rte_fdir_filter *fdir_filter)
1659 {
1660 	int diag;
1661 
1662 	if (port_id_is_invalid(port_id))
1663 		return;
1664 
1665 	diag = rte_eth_dev_fdir_update_signature_filter(port_id, fdir_filter,
1666 							queue_id);
1667 	if (diag == 0)
1668 		return;
1669 
1670 	printf("rte_eth_dev_fdir_update_signature_filter for port_id=%d failed "
1671 	       "diag=%d\n", port_id, diag);
1672 }
1673 
1674 void
1675 fdir_remove_signature_filter(portid_t port_id,
1676 			     struct rte_fdir_filter *fdir_filter)
1677 {
1678 	int diag;
1679 
1680 	if (port_id_is_invalid(port_id))
1681 		return;
1682 
1683 	diag = rte_eth_dev_fdir_remove_signature_filter(port_id, fdir_filter);
1684 	if (diag == 0)
1685 		return;
1686 
1687 	printf("rte_eth_dev_fdir_remove_signature_filter for port_id=%d failed "
1688 	       "diag=%d\n", port_id, diag);
1689 
1690 }
1691 
1692 void
1693 fdir_get_infos(portid_t port_id)
1694 {
1695 	struct rte_eth_fdir fdir_infos;
1696 
1697 	static const char *fdir_stats_border = "########################";
1698 
1699 	if (port_id_is_invalid(port_id))
1700 		return;
1701 
1702 	rte_eth_dev_fdir_get_infos(port_id, &fdir_infos);
1703 
1704 	printf("\n  %s FDIR infos for port %-2d     %s\n",
1705 	       fdir_stats_border, port_id, fdir_stats_border);
1706 
1707 	printf("  collision: %-10"PRIu64"  free:     %"PRIu64"\n"
1708 	       "  maxhash:   %-10"PRIu64"  maxlen:   %"PRIu64"\n"
1709 	       "  add:       %-10"PRIu64"  remove:   %"PRIu64"\n"
1710 	       "  f_add:     %-10"PRIu64"  f_remove: %"PRIu64"\n",
1711 	       (uint64_t)(fdir_infos.collision), (uint64_t)(fdir_infos.free),
1712 	       (uint64_t)(fdir_infos.maxhash), (uint64_t)(fdir_infos.maxlen),
1713 	       fdir_infos.add, fdir_infos.remove,
1714 	       fdir_infos.f_add, fdir_infos.f_remove);
1715 	printf("  %s############################%s\n",
1716 	       fdir_stats_border, fdir_stats_border);
1717 }
1718 
1719 void
1720 fdir_add_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id,
1721 			uint8_t drop, struct rte_fdir_filter *fdir_filter)
1722 {
1723 	int diag;
1724 
1725 	if (port_id_is_invalid(port_id))
1726 		return;
1727 
1728 	diag = rte_eth_dev_fdir_add_perfect_filter(port_id, fdir_filter,
1729 						   soft_id, queue_id, drop);
1730 	if (diag == 0)
1731 		return;
1732 
1733 	printf("rte_eth_dev_fdir_add_perfect_filter for port_id=%d failed "
1734 	       "diag=%d\n", port_id, diag);
1735 }
1736 
1737 void
1738 fdir_update_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id,
1739 			   uint8_t drop, struct rte_fdir_filter *fdir_filter)
1740 {
1741 	int diag;
1742 
1743 	if (port_id_is_invalid(port_id))
1744 		return;
1745 
1746 	diag = rte_eth_dev_fdir_update_perfect_filter(port_id, fdir_filter,
1747 						      soft_id, queue_id, drop);
1748 	if (diag == 0)
1749 		return;
1750 
1751 	printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed "
1752 	       "diag=%d\n", port_id, diag);
1753 }
1754 
1755 void
1756 fdir_remove_perfect_filter(portid_t port_id, uint16_t soft_id,
1757 			   struct rte_fdir_filter *fdir_filter)
1758 {
1759 	int diag;
1760 
1761 	if (port_id_is_invalid(port_id))
1762 		return;
1763 
1764 	diag = rte_eth_dev_fdir_remove_perfect_filter(port_id, fdir_filter,
1765 						      soft_id);
1766 	if (diag == 0)
1767 		return;
1768 
1769 	printf("rte_eth_dev_fdir_remove_perfect_filter for port_id=%d failed "
1770 	       "diag=%d\n", port_id, diag);
1771 }
1772 
1773 void
1774 fdir_set_masks(portid_t port_id, struct rte_fdir_masks *fdir_masks)
1775 {
1776 	int diag;
1777 
1778 	if (port_id_is_invalid(port_id))
1779 		return;
1780 
1781 	diag = rte_eth_dev_fdir_set_masks(port_id, fdir_masks);
1782 	if (diag == 0)
1783 		return;
1784 
1785 	printf("rte_eth_dev_fdir_set_masks for port_id=%d failed "
1786 	       "diag=%d\n", port_id, diag);
1787 }
1788 
1789 void
1790 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
1791 {
1792 	int diag;
1793 
1794 	if (port_id_is_invalid(port_id))
1795 		return;
1796 	if (is_rx)
1797 		diag = rte_eth_dev_set_vf_rx(port_id, vf, on);
1798 	else
1799 		diag = rte_eth_dev_set_vf_tx(port_id, vf, on);
1800 	if (diag == 0)
1801 		return;
1802 	if (is_rx)
1803 		printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
1804 		       "diag=%d\n", port_id, diag);
1805 	else
1806 		printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
1807 		       "diag=%d\n", port_id, diag);
1808 
1809 }
1810 
1811 void
1812 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
1813 {
1814 	int diag;
1815 
1816 	if (port_id_is_invalid(port_id))
1817 		return;
1818 	if (vlan_id_is_invalid(vlan_id))
1819 		return;
1820 	diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
1821 	if (diag == 0)
1822 		return;
1823 	printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
1824 	       "diag=%d\n", port_id, diag);
1825 }
1826 
1827 int
1828 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
1829 {
1830 	int diag;
1831 	struct rte_eth_link link;
1832 
1833 	if (port_id_is_invalid(port_id))
1834 		return 1;
1835 	rte_eth_link_get_nowait(port_id, &link);
1836 	if (rate > link.link_speed) {
1837 		printf("Invalid rate value: %u, bigger than link speed: %u\n",
1838 			rate, link.link_speed);
1839 		return 1;
1840 	}
1841 	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
1842 	if (diag == 0)
1843 		return diag;
1844 	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
1845 		port_id, diag);
1846 	return diag;
1847 }
1848 
1849 int
1850 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
1851 {
1852 	int diag;
1853 	struct rte_eth_link link;
1854 
1855 	if (q_msk == 0)
1856 		return 0;
1857 
1858 	if (port_id_is_invalid(port_id))
1859 		return 1;
1860 	rte_eth_link_get_nowait(port_id, &link);
1861 	if (rate > link.link_speed) {
1862 		printf("Invalid rate value: %u, bigger than link speed: %u\n",
1863 			rate, link.link_speed);
1864 		return 1;
1865 	}
1866 	diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk);
1867 	if (diag == 0)
1868 		return diag;
1869 	printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n",
1870 		port_id, diag);
1871 	return diag;
1872 }
1873 
1874