xref: /dpdk/app/test-pmd/config.c (revision 5706de65334e11bae4847e9ace5e58b71d743f42)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 /*   BSD LICENSE
34  *
35  *   Copyright(c) 2013 6WIND.
36  *
37  *   Redistribution and use in source and binary forms, with or without
38  *   modification, are permitted provided that the following conditions
39  *   are met:
40  *
41  *     * Redistributions of source code must retain the above copyright
42  *       notice, this list of conditions and the following disclaimer.
43  *     * Redistributions in binary form must reproduce the above copyright
44  *       notice, this list of conditions and the following disclaimer in
45  *       the documentation and/or other materials provided with the
46  *       distribution.
47  *     * Neither the name of 6WIND S.A. nor the names of its
48  *       contributors may be used to endorse or promote products derived
49  *       from this software without specific prior written permission.
50  *
51  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  */
63 
64 #include <stdarg.h>
65 #include <errno.h>
66 #include <stdio.h>
67 #include <string.h>
68 #include <stdarg.h>
69 #include <stdint.h>
70 #include <inttypes.h>
71 
72 #include <sys/queue.h>
73 
74 #include <rte_common.h>
75 #include <rte_byteorder.h>
76 #include <rte_debug.h>
77 #include <rte_log.h>
78 #include <rte_memory.h>
79 #include <rte_memcpy.h>
80 #include <rte_memzone.h>
81 #include <rte_launch.h>
82 #include <rte_tailq.h>
83 #include <rte_eal.h>
84 #include <rte_per_lcore.h>
85 #include <rte_lcore.h>
86 #include <rte_atomic.h>
87 #include <rte_branch_prediction.h>
88 #include <rte_ring.h>
89 #include <rte_mempool.h>
90 #include <rte_mbuf.h>
91 #include <rte_interrupts.h>
92 #include <rte_pci.h>
93 #include <rte_ether.h>
94 #include <rte_ethdev.h>
95 #include <rte_string_fns.h>
96 
97 #include "testpmd.h"
98 
99 static void
100 print_ethaddr(const char *name, struct ether_addr *eth_addr)
101 {
102 	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
103 	       (unsigned int)eth_addr->addr_bytes[0],
104 	       (unsigned int)eth_addr->addr_bytes[1],
105 	       (unsigned int)eth_addr->addr_bytes[2],
106 	       (unsigned int)eth_addr->addr_bytes[3],
107 	       (unsigned int)eth_addr->addr_bytes[4],
108 	       (unsigned int)eth_addr->addr_bytes[5]);
109 }
110 
111 void
112 nic_stats_display(portid_t port_id)
113 {
114 	struct rte_eth_stats stats;
115 	struct rte_port *port = &ports[port_id];
116 	uint8_t i;
117 
118 	static const char *nic_stats_border = "########################";
119 
120 	if (port_id >= nb_ports) {
121 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
122 		return;
123 	}
124 	rte_eth_stats_get(port_id, &stats);
125 	printf("\n  %s NIC statistics for port %-2d %s\n",
126 	       nic_stats_border, port_id, nic_stats_border);
127 
128 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
129 		printf("  RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64"RX-bytes: "
130 		       "%-"PRIu64"\n"
131 		       "  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64"TX-bytes: "
132 		       "%-"PRIu64"\n",
133 		       stats.ipackets, stats.ierrors, stats.ibytes,
134 		       stats.opackets, stats.oerrors, stats.obytes);
135 	}
136 	else {
137 		printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
138 		       "    RX-bytes: %10"PRIu64"\n"
139 		       "  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
140 		       "    TX-bytes: %10"PRIu64"\n",
141 		       stats.ipackets, stats.ierrors, stats.ibytes,
142 		       stats.opackets, stats.oerrors, stats.obytes);
143 	}
144 
145 	/* stats fdir */
146 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
147 		printf("  Fdirmiss:   %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n",
148 		       stats.fdirmiss,
149 		       stats.fdirmatch);
150 
151 	if (port->rx_queue_stats_mapping_enabled) {
152 		printf("\n");
153 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
154 			printf("  Stats reg %2d RX-packets: %10"PRIu64
155 			       "    RX-errors: %10"PRIu64
156 			       "    RX-bytes: %10"PRIu64"\n",
157 			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
158 		}
159 	}
160 	if (port->tx_queue_stats_mapping_enabled) {
161 		printf("\n");
162 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
163 			printf("  Stats reg %2d TX-packets: %10"PRIu64
164 			       "                             TX-bytes: %10"PRIu64"\n",
165 			       i, stats.q_opackets[i], stats.q_obytes[i]);
166 		}
167 	}
168 
169 	/* Display statistics of XON/XOFF pause frames, if any. */
170 	if ((stats.tx_pause_xon  | stats.rx_pause_xon |
171 	     stats.tx_pause_xoff | stats.rx_pause_xoff) > 0) {
172 		printf("  RX-XOFF:    %-10"PRIu64" RX-XON:    %-10"PRIu64"\n",
173 		       stats.rx_pause_xoff, stats.rx_pause_xon);
174 		printf("  TX-XOFF:    %-10"PRIu64" TX-XON:    %-10"PRIu64"\n",
175 		       stats.tx_pause_xoff, stats.tx_pause_xon);
176 	}
177 	printf("  %s############################%s\n",
178 	       nic_stats_border, nic_stats_border);
179 }
180 
181 void
182 nic_stats_clear(portid_t port_id)
183 {
184 	if (port_id >= nb_ports) {
185 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
186 		return;
187 	}
188 	rte_eth_stats_reset(port_id);
189 	printf("\n  NIC statistics for port %d cleared\n", port_id);
190 }
191 
192 
193 void
194 nic_stats_mapping_display(portid_t port_id)
195 {
196 	struct rte_port *port = &ports[port_id];
197 	uint16_t i;
198 
199 	static const char *nic_stats_mapping_border = "########################";
200 
201 	if (port_id >= nb_ports) {
202 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
203 		return;
204 	}
205 
206 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
207 		printf("Port id %d - either does not support queue statistic mapping or"
208 		       " no queue statistic mapping set\n", port_id);
209 		return;
210 	}
211 
212 	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
213 	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);
214 
215 	if (port->rx_queue_stats_mapping_enabled) {
216 		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
217 			if (rx_queue_stats_mappings[i].port_id == port_id) {
218 				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
219 				       rx_queue_stats_mappings[i].queue_id,
220 				       rx_queue_stats_mappings[i].stats_counter_id);
221 			}
222 		}
223 		printf("\n");
224 	}
225 
226 
227 	if (port->tx_queue_stats_mapping_enabled) {
228 		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
229 			if (tx_queue_stats_mappings[i].port_id == port_id) {
230 				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
231 				       tx_queue_stats_mappings[i].queue_id,
232 				       tx_queue_stats_mappings[i].stats_counter_id);
233 			}
234 		}
235 	}
236 
237 	printf("  %s####################################%s\n",
238 	       nic_stats_mapping_border, nic_stats_mapping_border);
239 }
240 
/*
 * Display general information about port "port_id": MAC address, NUMA
 * socket, mbuf pool socket, link status/speed/duplex, promiscuous and
 * allmulticast modes, MAC address limits and VLAN offload settings.
 */
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct rte_eth_link link;
	int vlan_offload;
	struct rte_mempool * mp;
	static const char *info_border = "*********************";

	if (port_id >= nb_ports) {
		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
		return;
	}
	port = &ports[port_id];
	/* Non-blocking link query: returns the last known link state. */
	rte_eth_link_get_nowait(port_id, &link);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	print_ethaddr("MAC address: ", &port->eth_addr);
	printf("\nConnect to socket: %u", port->socket_id);

	/* When a per-port NUMA socket was configured, report the socket of
	 * the mbuf pool actually found for it; otherwise fall back to the
	 * port's own socket. */
	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
							port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u",port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	/* A negative value means the device does not report VLAN offload
	 * status; skip the section entirely in that case. */
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0){
		printf("VLAN offload: \n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on \n");
		else
			printf("  strip off \n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("  filter on \n");
		else
			printf("  filter off \n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("  qinq(extend) on \n");
		else
			printf("  qinq(extend) off \n");
	}
}
301 
302 static int
303 port_id_is_invalid(portid_t port_id)
304 {
305 	if (port_id < nb_ports)
306 		return 0;
307 	printf("Invalid port %d (must be < nb_ports=%d)\n", port_id, nb_ports);
308 	return 1;
309 }
310 
/*
 * Check that a VLAN identifier fits in the 12-bit VLAN ID space.
 * Return 0 when valid, otherwise print a message and return 1.
 */
static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id >= 4096) {
		printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
		return 1;
	}
	return 0;
}
319 
/*
 * Check that "reg_off" is a valid PCI register offset for port "port_id":
 * it must be 4-byte aligned and fall inside the first PCI memory resource
 * of the port's device. Return 0 when valid, 1 (with a message) otherwise.
 */
static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}
	/* Registers are accessed through the first BAR (mem_resource[0]). */
	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
		return 1;
	}
	return 0;
}
340 
/*
 * Check that "bit_pos" is a valid bit index inside a 32-bit register.
 * Return 0 when valid, otherwise print a message and return 1.
 */
static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos > 31) {
		printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
		return 1;
	}
	return 0;
}
349 
/* Print "port <id> PCI register at offset 0x<off>: " without a trailing
 * newline; used as a prefix by the register display helpers below. */
#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
352 
353 static inline void
354 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
355 {
356 	display_port_and_reg_off(port_id, (unsigned)reg_off);
357 	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
358 }
359 
360 void
361 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
362 {
363 	uint32_t reg_v;
364 
365 
366 	if (port_id_is_invalid(port_id))
367 		return;
368 	if (port_reg_off_is_invalid(port_id, reg_off))
369 		return;
370 	if (reg_bit_pos_is_invalid(bit_x))
371 		return;
372 	reg_v = port_id_pci_reg_read(port_id, reg_off);
373 	display_port_and_reg_off(port_id, (unsigned)reg_off);
374 	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
375 }
376 
377 void
378 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
379 			   uint8_t bit1_pos, uint8_t bit2_pos)
380 {
381 	uint32_t reg_v;
382 	uint8_t  l_bit;
383 	uint8_t  h_bit;
384 
385 	if (port_id_is_invalid(port_id))
386 		return;
387 	if (port_reg_off_is_invalid(port_id, reg_off))
388 		return;
389 	if (reg_bit_pos_is_invalid(bit1_pos))
390 		return;
391 	if (reg_bit_pos_is_invalid(bit2_pos))
392 		return;
393 	if (bit1_pos > bit2_pos)
394 		l_bit = bit2_pos, h_bit = bit1_pos;
395 	else
396 		l_bit = bit1_pos, h_bit = bit2_pos;
397 
398 	reg_v = port_id_pci_reg_read(port_id, reg_off);
399 	reg_v >>= l_bit;
400 	if (h_bit < 31)
401 		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
402 	display_port_and_reg_off(port_id, (unsigned)reg_off);
403 	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
404 	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
405 }
406 
407 void
408 port_reg_display(portid_t port_id, uint32_t reg_off)
409 {
410 	uint32_t reg_v;
411 
412 	if (port_id_is_invalid(port_id))
413 		return;
414 	if (port_reg_off_is_invalid(port_id, reg_off))
415 		return;
416 	reg_v = port_id_pci_reg_read(port_id, reg_off);
417 	display_port_reg_value(port_id, reg_off, reg_v);
418 }
419 
420 void
421 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
422 		 uint8_t bit_v)
423 {
424 	uint32_t reg_v;
425 
426 	if (port_id_is_invalid(port_id))
427 		return;
428 	if (port_reg_off_is_invalid(port_id, reg_off))
429 		return;
430 	if (reg_bit_pos_is_invalid(bit_pos))
431 		return;
432 	if (bit_v > 1) {
433 		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
434 		return;
435 	}
436 	reg_v = port_id_pci_reg_read(port_id, reg_off);
437 	if (bit_v == 0)
438 		reg_v &= ~(1 << bit_pos);
439 	else
440 		reg_v |= (1 << bit_pos);
441 	port_id_pci_reg_write(port_id, reg_off, reg_v);
442 	display_port_reg_value(port_id, reg_off, reg_v);
443 }
444 
445 void
446 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
447 		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
448 {
449 	uint32_t max_v;
450 	uint32_t reg_v;
451 	uint8_t  l_bit;
452 	uint8_t  h_bit;
453 
454 	if (port_id_is_invalid(port_id))
455 		return;
456 	if (port_reg_off_is_invalid(port_id, reg_off))
457 		return;
458 	if (reg_bit_pos_is_invalid(bit1_pos))
459 		return;
460 	if (reg_bit_pos_is_invalid(bit2_pos))
461 		return;
462 	if (bit1_pos > bit2_pos)
463 		l_bit = bit2_pos, h_bit = bit1_pos;
464 	else
465 		l_bit = bit1_pos, h_bit = bit2_pos;
466 
467 	if ((h_bit - l_bit) < 31)
468 		max_v = (1 << (h_bit - l_bit + 1)) - 1;
469 	else
470 		max_v = 0xFFFFFFFF;
471 
472 	if (value > max_v) {
473 		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
474 				(unsigned)value, (unsigned)value,
475 				(unsigned)max_v, (unsigned)max_v);
476 		return;
477 	}
478 	reg_v = port_id_pci_reg_read(port_id, reg_off);
479 	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
480 	reg_v |= (value << l_bit); /* Set changed bits */
481 	port_id_pci_reg_write(port_id, reg_off, reg_v);
482 	display_port_reg_value(port_id, reg_off, reg_v);
483 }
484 
485 void
486 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
487 {
488 	if (port_id_is_invalid(port_id))
489 		return;
490 	if (port_reg_off_is_invalid(port_id, reg_off))
491 		return;
492 	port_id_pci_reg_write(port_id, reg_off, reg_v);
493 	display_port_reg_value(port_id, reg_off, reg_v);
494 }
495 
496 /*
497  * RX/TX ring descriptors display functions.
498  */
499 static int
500 rx_queue_id_is_invalid(queueid_t rxq_id)
501 {
502 	if (rxq_id < nb_rxq)
503 		return 0;
504 	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
505 	return 1;
506 }
507 
508 static int
509 tx_queue_id_is_invalid(queueid_t txq_id)
510 {
511 	if (txq_id < nb_txq)
512 		return 0;
513 	printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq);
514 	return 1;
515 }
516 
517 static int
518 rx_desc_id_is_invalid(uint16_t rxdesc_id)
519 {
520 	if (rxdesc_id < nb_rxd)
521 		return 0;
522 	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
523 	       rxdesc_id, nb_rxd);
524 	return 1;
525 }
526 
527 static int
528 tx_desc_id_is_invalid(uint16_t txdesc_id)
529 {
530 	if (txdesc_id < nb_txd)
531 		return 0;
532 	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
533 	       txdesc_id, nb_txd);
534 	return 1;
535 }
536 
537 static const struct rte_memzone *
538 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
539 {
540 	char mz_name[RTE_MEMZONE_NAMESIZE];
541 	const struct rte_memzone *mz;
542 
543 	rte_snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
544 		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
545 	mz = rte_memzone_lookup(mz_name);
546 	if (mz == NULL)
547 		printf("%s ring memory zoneof (port %d, queue %d) not"
548 		       "found (zone name = %s\n",
549 		       ring_name, port_id, q_id, mz_name);
550 	return (mz);
551 }
552 
/*
 * Overlay used to display one 64-bit word of a ring descriptor as two
 * 32-bit halves.
 * NOTE(review): the member named "hi" is declared before "lo", so "hi"
 * maps to the lower-addressed half — confirm this ordering is intended.
 */
union igb_ring_dword {
	uint64_t dword;
	struct {
		uint32_t hi;
		uint32_t lo;
	} words;
};
560 
/* Generic 16-byte ring descriptor viewed as two 64-bit words. */
struct igb_ring_desc {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};
565 
/*
 * Display, in hexadecimal, the four 32-bit words of the ring descriptor
 * of index "desc_id" stored in the memory zone "ring_mz". Descriptor
 * words are little-endian in the ring and are converted to CPU byte
 * order before being split into halves.
 */
static void
ring_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc *ring;
	struct igb_ring_desc rd;

	ring = (struct igb_ring_desc *) ring_mz->addr;
	rd.lo_dword = rte_le_to_cpu_64(ring[desc_id].lo_dword);
	rd.hi_dword = rte_le_to_cpu_64(ring[desc_id].hi_dword);
	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
		(unsigned)rd.lo_dword.words.lo, (unsigned)rd.lo_dword.words.hi,
		(unsigned)rd.hi_dword.words.lo, (unsigned)rd.hi_dword.words.hi);
}
579 
580 void
581 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
582 {
583 	const struct rte_memzone *rx_mz;
584 
585 	if (port_id_is_invalid(port_id))
586 		return;
587 	if (rx_queue_id_is_invalid(rxq_id))
588 		return;
589 	if (rx_desc_id_is_invalid(rxd_id))
590 		return;
591 	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
592 	if (rx_mz == NULL)
593 		return;
594 	ring_descriptor_display(rx_mz, rxd_id);
595 }
596 
597 void
598 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
599 {
600 	const struct rte_memzone *tx_mz;
601 
602 	if (port_id_is_invalid(port_id))
603 		return;
604 	if (tx_queue_id_is_invalid(txq_id))
605 		return;
606 	if (tx_desc_id_is_invalid(txd_id))
607 		return;
608 	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
609 	if (tx_mz == NULL)
610 		return;
611 	ring_descriptor_display(tx_mz, txd_id);
612 }
613 
614 void
615 fwd_lcores_config_display(void)
616 {
617 	lcoreid_t lc_id;
618 
619 	printf("List of forwarding lcores:");
620 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
621 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
622 	printf("\n");
623 }
/*
 * Display the current RX/TX configuration of the packet forwarding
 * engine: forwarding mode, CRC stripping, burst size, number of
 * cores/ports, queue and descriptor counts, threshold registers and
 * TXQ flags.
 */
void
rxtx_config_display(void)
{
	printf("  %s packet forwarding - CRC stripping %s - "
	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
	       nb_pkt_per_burst);

	/* Packet length/segment layout is only meaningful for tx-only. */
	if (cur_fwd_eng == &tx_only_engine)
		printf("  packet len=%u - nb packet segments=%d\n",
				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);
	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
	       nb_rxq, nb_rxd, rx_free_thresh);
	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh);
	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
	       nb_txq, nb_txd, tx_free_thresh);
	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);
	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
	       tx_rs_thresh, txq_flags);
}
649 
/*
 * Query and display the RSS redirection table (RETA) of port "port_id".
 * Only the entries selected by reta_conf->mask_lo (entries 0..63) and
 * reta_conf->mask_hi (entries 64..127) are displayed.
 */
void
port_rss_reta_info(portid_t port_id,struct rte_eth_rss_reta *reta_conf)
{
	uint8_t i,j;
	int ret;

	if (port_id_is_invalid(port_id))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf);
	if (ret != 0) {
		printf("Failed to get RSS RETA info, return code = %d\n", ret);
		return;
	}

	/* Lower half of the table: bit i of mask_lo selects entry i. */
	if (reta_conf->mask_lo != 0) {
		for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
			if (reta_conf->mask_lo & (uint64_t)(1ULL << i))
				printf("RSS RETA configuration: hash index=%d,"
					"queue=%d\n",i,reta_conf->reta[i]);
		}
	}

	/* Upper half: bit i of mask_hi selects entry i + NUM_ENTRIES/2. */
	if (reta_conf->mask_hi != 0) {
		for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
			if(reta_conf->mask_hi & (uint64_t)(1ULL << i)) {
				j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
				printf("RSS RETA configuration: hash index=%d,"
					"queue=%d\n",j,reta_conf->reta[j]);
			}
		}
	}
}
683 
684 /*
685  * Setup forwarding configuration for each logical core.
686  */
/*
 * Distribute cfg->nb_fwd_streams forwarding streams over the
 * cfg->nb_fwd_lcores forwarding cores: each core receives nb_fs / nb_fc
 * streams, and the nb_fs % nb_fc remaining streams are spread, one extra
 * stream each, over the last cores. Stream indices are assigned
 * contiguously into fwd_lcores[].
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t  nb_extra;
	lcoreid_t  nb_fc;
	lcoreid_t  nb_lc;
	lcoreid_t  lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	/* First nb_lc cores get exactly nb_fs_per_lcore streams each. */
	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any: the last nb_extra cores
	 * each get nb_fs_per_lcore + 1 streams.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}
726 
/*
 * Set up one forwarding stream per forwarding port. With the "paired"
 * topology, ports are handled two by two (each even-indexed port
 * forwards to the next port and conversely); with the "chained"
 * topology, each port forwards to the next one, the last looping back
 * to the first.
 */
static void
simple_fwd_config_setup(void)
{
	portid_t i;
	portid_t j;
	portid_t inc = 2;

	/* Chained topology visits every port; paired topology advances two
	 * ports at a time and needs an even port count. */
	if (port_topology == PORT_TOPOLOGY_CHAINED) {
		inc = 1;
	} else if (nb_fwd_ports % 2) {
		printf("\nWarning! Cannot handle an odd number of ports "
		       "with the current port topology. Configuration "
		       "must be changed to have an even number of ports, "
		       "or relaunch application with "
		       "--port-topology=chained\n\n");
	}

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
		/* j is the peer port; with paired topology the reverse
		 * stream j -> i is set up in the same iteration. */
		j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue  = 0;
		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
		fwd_streams[i]->tx_queue  = 0;
		fwd_streams[i]->peer_addr = j;

		if (port_topology == PORT_TOPOLOGY_PAIRED) {
			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
			fwd_streams[j]->rx_queue  = 0;
			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
			fwd_streams[j]->tx_queue  = 0;
			fwd_streams[j]->peer_addr = i;
		}
	}
}
778 
779 /**
780  * For the RSS forwarding test, each core is assigned on every port a transmit
781  * queue whose index is the index of the core itself. This approach limits the
 * maximum number of processing cores of the RSS test to the maximum number of
783  * TX queues supported by the devices.
784  *
785  * Each core is assigned a single stream, each stream being composed of
786  * a RX queue to poll on a RX port for input messages, associated with
787  * a TX queue of a TX port where to send forwarded packets.
788  * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
789  * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
790  * following rules:
791  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
792  *    - TxQl = RxQj
793  */
static void
rss_fwd_config_setup(void)
{
	portid_t   rxp;
	portid_t   txp;
	queueid_t  rxq;
	queueid_t  nb_q;
	lcoreid_t  lc_id;

	/* One stream per (port, queue) pair, bounded by the smaller of the
	 * RX and TX queue counts. */
	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
	/* Clamp so that each lcore handles exactly one stream. */
	if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_streams =
			(streamid_t)cur_fwd_config.nb_fwd_lcores;
	else
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[lc_id];
		/* Even port forwards to the next port, odd port to the
		 * previous one (pair-wise forwarding). */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		rxq = (queueid_t) (rxq + 1);
		if (rxq < nb_q)
			continue;
		/*
		 * rxq == nb_q
		 * Restart from RX queue 0 on next RX port
		 */
		rxq = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp = (portid_t) (rxp + 1);
	}
}
850 
/*
 * With DCB and VT on, the mapping of the 128 receive queues to the 128
 * transmit queues.
 */
854 static void
855 dcb_rxq_2_txq_mapping(queueid_t rxq, queueid_t *txq)
856 {
857 	if(dcb_q_mapping == DCB_4_TCS_Q_MAPPING) {
858 
859 		if (rxq < 32)
860 			/* tc0: 0-31 */
861 			*txq = rxq;
862 		else if (rxq < 64) {
863 			/* tc1: 64-95 */
864 			*txq =  (uint16_t)(rxq + 32);
865 		}
866 		else {
867 			/* tc2: 96-111;tc3:112-127 */
868 			*txq =  (uint16_t)(rxq/2 + 64);
869 		}
870 	}
871 	else {
872 		if (rxq < 16)
873 			/* tc0 mapping*/
874 			*txq = rxq;
875 		else if (rxq < 32) {
876 			/* tc1 mapping*/
877 			 *txq = (uint16_t)(rxq + 16);
878 		}
879 		else if (rxq < 64) {
880 			/*tc2,tc3 mapping */
881 			*txq =  (uint16_t)(rxq + 32);
882 		}
883 		else {
884 			/* tc4,tc5,tc6 and tc7 mapping */
885 			*txq =  (uint16_t)(rxq/2 + 64);
886 		}
887 	}
888 }
889 
890 /**
891  * For the DCB forwarding test, each core is assigned on every port multi-transmit
892  * queue.
893  *
894  * Each core is assigned a multi-stream, each stream being composed of
895  * a RX queue to poll on a RX port for input messages, associated with
896  * a TX queue of a TX port where to send forwarded packets.
897  * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
898  * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
899  * following rules:
900  * In VT mode,
901  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
902  *    - TxQl = RxQj
903  * In non-VT mode,
904  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    A mapping of RxQj to TxQl is required; this mapping is implemented
 *    in the dcb_rxq_2_txq_mapping function.
907  */
static void
dcb_fwd_config_setup(void)
{
	portid_t   rxp;
	portid_t   txp;
	queueid_t  rxq;
	queueid_t  nb_q;
	lcoreid_t  lc_id;
	uint16_t sm_id;

	/* One stream per (port, RX queue) pair. */
	nb_q = nb_rxq;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		/* a fwd core can run multi-streams */
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++)
		{
			struct fwd_stream *fs;
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			/* Even port forwards to the next port, odd port to
			 * the previous one (pair-wise forwarding). */
			if ((rxp & 0x1) == 0)
				txp = (portid_t) (rxp + 1);
			else
				txp = (portid_t) (rxp - 1);
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fwd_ports_ids[txp];
			/* In VT mode the TX queue equals the RX queue;
			 * otherwise the traffic-class mapping applies. */
			if (dcb_q_mapping == DCB_VT_Q_MAPPING)
				fs->tx_queue = rxq;
			else
				dcb_rxq_2_txq_mapping(rxq, &fs->tx_queue);
			fs->peer_addr = fs->tx_port;
			rxq = (queueid_t) (rxq + 1);
			if (rxq < nb_q)
				continue;
			/* All queues of rxp done: restart from queue 0 on
			 * the next RX port. */
			rxq = 0;
			if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
				rxp = (portid_t)
					(rxp + ((nb_ports >> 1) / nb_fwd_ports));
			else
				rxp = (portid_t) (rxp + 1);
		}
	}
}
960 
961 void
962 fwd_config_setup(void)
963 {
964 	cur_fwd_config.fwd_eng = cur_fwd_eng;
965 	if ((nb_rxq > 1) && (nb_txq > 1)){
966 		if (dcb_config)
967 			dcb_fwd_config_setup();
968 		else
969 			rss_fwd_config_setup();
970 	}
971 	else
972 		simple_fwd_config_setup();
973 }
974 
/*
 * Display the forwarding configuration "cfg": global settings first,
 * then, for each forwarding lcore, the list of streams it polls
 * (RX port/queue -> TX port/queue and peer Ethernet address).
 */
static void
pkt_fwd_config_display(struct fwd_config *cfg)
{
	struct fwd_stream *fs;
	lcoreid_t  lc_id;
	streamid_t sm_id;

	printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
		"NUMA support %s, MP over anonymous pages %s\n",
		cfg->fwd_eng->fwd_mode_name,
		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
		numa_support == 1 ? "enabled" : "disabled",
		mp_anon != 0 ? "enabled" : "disabled");

	/* TX retry settings only apply to the mac_retry engine. */
	if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0)
		printf("TX retry num: %u, delay between TX retries: %uus\n",
			burst_tx_retry_num, burst_tx_delay_time);
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
		       "%d streams:",
		       fwd_lcores_cpuids[lc_id],
		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
		       fwd_lcores[lc_id]->stream_nb);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
			       "P=%d/Q=%d (socket %u) ",
			       fs->rx_port, fs->rx_queue,
			       ports[fs->rx_port].socket_id,
			       fs->tx_port, fs->tx_queue,
			       ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				      &peer_eth_addrs[fs->peer_addr]);
		}
		printf("\n");
	}
	printf("\n");
}
1013 
1014 
1015 void
1016 fwd_config_display(void)
1017 {
1018 	if((dcb_config) && (nb_fwd_lcores == 1)) {
1019 		printf("In DCB mode,the nb forwarding cores should be larger than 1\n");
1020 		return;
1021 	}
1022 	fwd_config_setup();
1023 	pkt_fwd_config_display(&cur_fwd_config);
1024 }
1025 
1026 int
1027 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
1028 {
1029 	unsigned int i;
1030 	unsigned int lcore_cpuid;
1031 	int record_now;
1032 
1033 	record_now = 0;
1034  again:
1035 	for (i = 0; i < nb_lc; i++) {
1036 		lcore_cpuid = lcorelist[i];
1037 		if (! rte_lcore_is_enabled(lcore_cpuid)) {
1038 			printf("lcore %u not enabled\n", lcore_cpuid);
1039 			return -1;
1040 		}
1041 		if (lcore_cpuid == rte_get_master_lcore()) {
1042 			printf("lcore %u cannot be masked on for running "
1043 			       "packet forwarding, which is the master lcore "
1044 			       "and reserved for command line parsing only\n",
1045 			       lcore_cpuid);
1046 			return -1;
1047 		}
1048 		if (record_now)
1049 			fwd_lcores_cpuids[i] = lcore_cpuid;
1050 	}
1051 	if (record_now == 0) {
1052 		record_now = 1;
1053 		goto again;
1054 	}
1055 	nb_cfg_lcores = (lcoreid_t) nb_lc;
1056 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
1057 		printf("previous number of forwarding cores %u - changed to "
1058 		       "number of configured cores %u\n",
1059 		       (unsigned int) nb_fwd_lcores, nb_lc);
1060 		nb_fwd_lcores = (lcoreid_t) nb_lc;
1061 	}
1062 
1063 	return 0;
1064 }
1065 
/*
 * Set the forwarding lcores from a 64-bit mask: expand the mask into an
 * explicit lcore id list and delegate to set_fwd_lcores_list().
 */
int
set_fwd_lcores_mask(uint64_t lcoremask)
{
	unsigned int cores[64];
	unsigned int count;
	unsigned int bit;

	if (lcoremask == 0) {
		printf("Invalid NULL mask of cores\n");
		return -1;
	}

	count = 0;
	for (bit = 0; bit < 64; bit++) {
		if (lcoremask & (1ULL << bit))
			cores[count++] = bit;
	}
	return set_fwd_lcores_list(cores, count);
}
1085 
1086 void
1087 set_fwd_lcores_number(uint16_t nb_lc)
1088 {
1089 	if (nb_lc > nb_cfg_lcores) {
1090 		printf("nb fwd cores %u > %u (max. number of configured "
1091 		       "lcores) - ignored\n",
1092 		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
1093 		return;
1094 	}
1095 	nb_fwd_lcores = (lcoreid_t) nb_lc;
1096 	printf("Number of forwarding cores set to %u\n",
1097 	       (unsigned int) nb_fwd_lcores);
1098 }
1099 
1100 void
1101 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
1102 {
1103 	unsigned int i;
1104 	portid_t port_id;
1105 	int record_now;
1106 
1107 	record_now = 0;
1108  again:
1109 	for (i = 0; i < nb_pt; i++) {
1110 		port_id = (portid_t) portlist[i];
1111 		if (port_id >= nb_ports) {
1112 			printf("Invalid port id %u >= %u\n",
1113 			       (unsigned int) port_id,
1114 			       (unsigned int) nb_ports);
1115 			return;
1116 		}
1117 		if (record_now)
1118 			fwd_ports_ids[i] = port_id;
1119 	}
1120 	if (record_now == 0) {
1121 		record_now = 1;
1122 		goto again;
1123 	}
1124 	nb_cfg_ports = (portid_t) nb_pt;
1125 	if (nb_fwd_ports != (portid_t) nb_pt) {
1126 		printf("previous number of forwarding ports %u - changed to "
1127 		       "number of configured ports %u\n",
1128 		       (unsigned int) nb_fwd_ports, nb_pt);
1129 		nb_fwd_ports = (portid_t) nb_pt;
1130 	}
1131 }
1132 
/*
 * Set the forwarding ports from a 64-bit mask: expand the mask into an
 * explicit port id list and delegate to set_fwd_ports_list().
 */
void
set_fwd_ports_mask(uint64_t portmask)
{
	unsigned int list[64];
	unsigned int count;
	unsigned int bit;

	if (portmask == 0) {
		printf("Invalid NULL mask of ports\n");
		return;
	}

	count = 0;
	for (bit = 0; bit < 64; bit++) {
		if (portmask & (1ULL << bit))
			list[count++] = bit;
	}
	set_fwd_ports_list(list, count);
}
1152 
1153 void
1154 set_fwd_ports_number(uint16_t nb_pt)
1155 {
1156 	if (nb_pt > nb_cfg_ports) {
1157 		printf("nb fwd ports %u > %u (number of configured "
1158 		       "ports) - ignored\n",
1159 		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
1160 		return;
1161 	}
1162 	nb_fwd_ports = (portid_t) nb_pt;
1163 	printf("Number of forwarding ports set to %u\n",
1164 	       (unsigned int) nb_fwd_ports);
1165 }
1166 
1167 void
1168 set_nb_pkt_per_burst(uint16_t nb)
1169 {
1170 	if (nb > MAX_PKT_BURST) {
1171 		printf("nb pkt per burst: %u > %u (maximum packet per burst) "
1172 		       " ignored\n",
1173 		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
1174 		return;
1175 	}
1176 	nb_pkt_per_burst = nb;
1177 	printf("Number of packets per burst set to %u\n",
1178 	       (unsigned int) nb_pkt_per_burst);
1179 }
1180 
/*
 * Configure the segment lengths used to build TX packets.
 * All checks are performed before any state is modified, so an invalid
 * request leaves the previous configuration untouched.
 */
void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	/* Each segment needs its own TX descriptor. */
	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length does not exceed the mbuf data size.
	 * Check also that the total packet length is greater or equal than the
	 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
			       i, seg_lengths[i], (unsigned) mbuf_data_size);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
		printf("total packet length=%u < %d - give up\n",
				(unsigned) tx_pkt_len,
				(int)(sizeof(struct ether_hdr) + 20 + 8));
		return;
	}

	/* All checks passed: commit segment lengths, total length and count. */
	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length  = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}
1221 
1222 void
1223 set_pkt_forwarding_mode(const char *fwd_mode_name)
1224 {
1225 	struct fwd_engine *fwd_eng;
1226 	unsigned i;
1227 
1228 	i = 0;
1229 	while ((fwd_eng = fwd_engines[i]) != NULL) {
1230 		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
1231 			printf("Set %s packet forwarding mode\n",
1232 			       fwd_mode_name);
1233 			cur_fwd_eng = fwd_eng;
1234 			return;
1235 		}
1236 		i++;
1237 	}
1238 	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
1239 }
1240 
1241 void
1242 set_verbose_level(uint16_t vb_level)
1243 {
1244 	printf("Change verbose level from %u to %u\n",
1245 	       (unsigned int) verbose_level, (unsigned int) vb_level);
1246 	verbose_level = vb_level;
1247 }
1248 
1249 void
1250 vlan_extend_set(portid_t port_id, int on)
1251 {
1252 	int diag;
1253 	int vlan_offload;
1254 
1255 	if (port_id_is_invalid(port_id))
1256 		return;
1257 
1258 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1259 
1260 	if (on)
1261 		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
1262 	else
1263 		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
1264 
1265 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1266 	if (diag < 0)
1267 		printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
1268 	       "diag=%d\n", port_id, on, diag);
1269 }
1270 
1271 void
1272 rx_vlan_strip_set(portid_t port_id, int on)
1273 {
1274 	int diag;
1275 	int vlan_offload;
1276 
1277 	if (port_id_is_invalid(port_id))
1278 		return;
1279 
1280 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1281 
1282 	if (on)
1283 		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
1284 	else
1285 		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
1286 
1287 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1288 	if (diag < 0)
1289 		printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
1290 	       "diag=%d\n", port_id, on, diag);
1291 }
1292 
1293 void
1294 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
1295 {
1296 	int diag;
1297 
1298 	if (port_id_is_invalid(port_id))
1299 		return;
1300 
1301 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
1302 	if (diag < 0)
1303 		printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
1304 	       "diag=%d\n", port_id, queue_id, on, diag);
1305 }
1306 
1307 void
1308 rx_vlan_filter_set(portid_t port_id, int on)
1309 {
1310 	int diag;
1311 	int vlan_offload;
1312 
1313 	if (port_id_is_invalid(port_id))
1314 		return;
1315 
1316 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1317 
1318 	if (on)
1319 		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
1320 	else
1321 		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
1322 
1323 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1324 	if (diag < 0)
1325 		printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
1326 	       "diag=%d\n", port_id, on, diag);
1327 }
1328 
1329 void
1330 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
1331 {
1332 	int diag;
1333 
1334 	if (port_id_is_invalid(port_id))
1335 		return;
1336 	if (vlan_id_is_invalid(vlan_id))
1337 		return;
1338 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1339 	if (diag == 0)
1340 		return;
1341 	printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
1342 	       "diag=%d\n",
1343 	       port_id, vlan_id, on, diag);
1344 }
1345 
1346 void
1347 rx_vlan_all_filter_set(portid_t port_id, int on)
1348 {
1349 	uint16_t vlan_id;
1350 
1351 	if (port_id_is_invalid(port_id))
1352 		return;
1353 	for (vlan_id = 0; vlan_id < 4096; vlan_id++)
1354 		rx_vft_set(port_id, vlan_id, on);
1355 }
1356 
1357 void
1358 vlan_tpid_set(portid_t port_id, uint16_t tp_id)
1359 {
1360 	int diag;
1361 	if (port_id_is_invalid(port_id))
1362 		return;
1363 
1364 	diag = rte_eth_dev_set_vlan_ether_type(port_id, tp_id);
1365 	if (diag == 0)
1366 		return;
1367 
1368 	printf("tx_vlan_tpid_set(port_pi=%d, tpid=%d) failed "
1369 	       "diag=%d\n",
1370 	       port_id, tp_id, diag);
1371 }
1372 
1373 void
1374 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
1375 {
1376 	if (port_id_is_invalid(port_id))
1377 		return;
1378 	if (vlan_id_is_invalid(vlan_id))
1379 		return;
1380 	ports[port_id].tx_ol_flags |= PKT_TX_VLAN_PKT;
1381 	ports[port_id].tx_vlan_id = vlan_id;
1382 }
1383 
1384 void
1385 tx_vlan_reset(portid_t port_id)
1386 {
1387 	if (port_id_is_invalid(port_id))
1388 		return;
1389 	ports[port_id].tx_ol_flags &= ~PKT_TX_VLAN_PKT;
1390 }
1391 
/*
 * Map an RX or TX queue of a port to one of the per-queue statistics
 * counters (0..RTE_ETHDEV_QUEUE_STAT_CNTRS-1). An existing mapping for
 * the same (port, queue) pair is updated in place; otherwise a new
 * entry is appended to the relevant mapping table.
 */
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	uint16_t i;
	uint8_t existing_mapping_found = 0;

	if (port_id_is_invalid(port_id))
		return;

	/* Validate the queue id against the direction it belongs to. */
	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
				RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /*then tx*/
		/* Update an existing TX mapping for this (port, queue) pair. */
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
				tx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		/* NOTE(review): no bound check on the mapping table before
		 * appending — verify nb_tx_queue_stats_mappings cannot reach
		 * the table capacity. */
		if (!existing_mapping_found) { /* A new additional mapping... */
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
			nb_tx_queue_stats_mappings++;
		}
	}
	else { /*rx*/
		/* Update an existing RX mapping for this (port, queue) pair. */
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
				rx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		/* NOTE(review): same missing bound check as the TX branch. */
		if (!existing_mapping_found) { /* A new additional mapping... */
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
			nb_rx_queue_stats_mappings++;
		}
	}
}
1443 
1444 void
1445 tx_cksum_set(portid_t port_id, uint8_t cksum_mask)
1446 {
1447 	uint16_t tx_ol_flags;
1448 	if (port_id_is_invalid(port_id))
1449 		return;
1450 	/* Clear last 4 bits and then set L3/4 checksum mask again */
1451 	tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0);
1452 	ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | tx_ol_flags);
1453 }
1454 
1455 void
1456 fdir_add_signature_filter(portid_t port_id, uint8_t queue_id,
1457 			  struct rte_fdir_filter *fdir_filter)
1458 {
1459 	int diag;
1460 
1461 	if (port_id_is_invalid(port_id))
1462 		return;
1463 
1464 	diag = rte_eth_dev_fdir_add_signature_filter(port_id, fdir_filter,
1465 						     queue_id);
1466 	if (diag == 0)
1467 		return;
1468 
1469 	printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed "
1470 	       "diag=%d\n", port_id, diag);
1471 }
1472 
1473 void
1474 fdir_update_signature_filter(portid_t port_id, uint8_t queue_id,
1475 			     struct rte_fdir_filter *fdir_filter)
1476 {
1477 	int diag;
1478 
1479 	if (port_id_is_invalid(port_id))
1480 		return;
1481 
1482 	diag = rte_eth_dev_fdir_update_signature_filter(port_id, fdir_filter,
1483 							queue_id);
1484 	if (diag == 0)
1485 		return;
1486 
1487 	printf("rte_eth_dev_fdir_update_signature_filter for port_id=%d failed "
1488 	       "diag=%d\n", port_id, diag);
1489 }
1490 
1491 void
1492 fdir_remove_signature_filter(portid_t port_id,
1493 			     struct rte_fdir_filter *fdir_filter)
1494 {
1495 	int diag;
1496 
1497 	if (port_id_is_invalid(port_id))
1498 		return;
1499 
1500 	diag = rte_eth_dev_fdir_remove_signature_filter(port_id, fdir_filter);
1501 	if (diag == 0)
1502 		return;
1503 
1504 	printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed "
1505 	       "diag=%d\n", port_id, diag);
1506 
1507 }
1508 
1509 void
1510 fdir_get_infos(portid_t port_id)
1511 {
1512 	struct rte_eth_fdir fdir_infos;
1513 
1514 	static const char *fdir_stats_border = "########################";
1515 
1516 	if (port_id_is_invalid(port_id))
1517 		return;
1518 
1519 	rte_eth_dev_fdir_get_infos(port_id, &fdir_infos);
1520 
1521 	printf("\n  %s FDIR infos for port %-2d     %s\n",
1522 	       fdir_stats_border, port_id, fdir_stats_border);
1523 
1524 	printf("  collision: %-10"PRIu64"  free:     %"PRIu64"\n"
1525 	       "  maxhash:   %-10"PRIu64"  maxlen:   %"PRIu64"\n"
1526 	       "  add:       %-10"PRIu64"  remove:   %"PRIu64"\n"
1527 	       "  f_add:     %-10"PRIu64"  f_remove: %"PRIu64"\n",
1528 	       (uint64_t)(fdir_infos.collision), (uint64_t)(fdir_infos.free),
1529 	       (uint64_t)(fdir_infos.maxhash), (uint64_t)(fdir_infos.maxlen),
1530 	       fdir_infos.add, fdir_infos.remove,
1531 	       fdir_infos.f_add, fdir_infos.f_remove);
1532 	printf("  %s############################%s\n",
1533 	       fdir_stats_border, fdir_stats_border);
1534 }
1535 
1536 void
1537 fdir_add_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id,
1538 			uint8_t drop, struct rte_fdir_filter *fdir_filter)
1539 {
1540 	int diag;
1541 
1542 	if (port_id_is_invalid(port_id))
1543 		return;
1544 
1545 	diag = rte_eth_dev_fdir_add_perfect_filter(port_id, fdir_filter,
1546 						   soft_id, queue_id, drop);
1547 	if (diag == 0)
1548 		return;
1549 
1550 	printf("rte_eth_dev_fdir_add_perfect_filter for port_id=%d failed "
1551 	       "diag=%d\n", port_id, diag);
1552 }
1553 
1554 void
1555 fdir_update_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id,
1556 			   uint8_t drop, struct rte_fdir_filter *fdir_filter)
1557 {
1558 	int diag;
1559 
1560 	if (port_id_is_invalid(port_id))
1561 		return;
1562 
1563 	diag = rte_eth_dev_fdir_update_perfect_filter(port_id, fdir_filter,
1564 						      soft_id, queue_id, drop);
1565 	if (diag == 0)
1566 		return;
1567 
1568 	printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed "
1569 	       "diag=%d\n", port_id, diag);
1570 }
1571 
1572 void
1573 fdir_remove_perfect_filter(portid_t port_id, uint16_t soft_id,
1574 			   struct rte_fdir_filter *fdir_filter)
1575 {
1576 	int diag;
1577 
1578 	if (port_id_is_invalid(port_id))
1579 		return;
1580 
1581 	diag = rte_eth_dev_fdir_remove_perfect_filter(port_id, fdir_filter,
1582 						      soft_id);
1583 	if (diag == 0)
1584 		return;
1585 
1586 	printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed "
1587 	       "diag=%d\n", port_id, diag);
1588 }
1589 
1590 void
1591 fdir_set_masks(portid_t port_id, struct rte_fdir_masks *fdir_masks)
1592 {
1593 	int diag;
1594 
1595 	if (port_id_is_invalid(port_id))
1596 		return;
1597 
1598 	diag = rte_eth_dev_fdir_set_masks(port_id, fdir_masks);
1599 	if (diag == 0)
1600 		return;
1601 
1602 	printf("rte_eth_dev_set_masks_filter for port_id=%d failed "
1603 	       "diag=%d\n", port_id, diag);
1604 }
1605 
1606 void
1607 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
1608 {
1609 	int diag;
1610 
1611 	if (port_id_is_invalid(port_id))
1612 		return;
1613 	if (is_rx)
1614 		diag = rte_eth_dev_set_vf_rx(port_id,vf,on);
1615 	else
1616 		diag = rte_eth_dev_set_vf_tx(port_id,vf,on);
1617 	if (diag == 0)
1618 		return;
1619 	if(is_rx)
1620 		printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
1621 	       		"diag=%d\n", port_id, diag);
1622 	else
1623 		printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
1624 	       		"diag=%d\n", port_id, diag);
1625 
1626 }
1627 
1628 void
1629 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
1630 {
1631 	int diag;
1632 
1633 	if (port_id_is_invalid(port_id))
1634 		return;
1635 	if (vlan_id_is_invalid(vlan_id))
1636 		return;
1637 	diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
1638 	if (diag == 0)
1639 		return;
1640 	printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
1641 	       "diag=%d\n", port_id, diag);
1642 }
1643 
1644