xref: /dpdk/app/test-pmd/config.c (revision ce8d561418d45cca86755be0f3cabc7a1a9dba4b)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34 
35 #include <stdarg.h>
36 #include <errno.h>
37 #include <stdio.h>
38 #include <string.h>
40 #include <stdint.h>
41 #include <inttypes.h>
42 
43 #include <sys/queue.h>
44 
45 #include <rte_common.h>
46 #include <rte_byteorder.h>
47 #include <rte_debug.h>
48 #include <rte_log.h>
49 #include <rte_memory.h>
50 #include <rte_memcpy.h>
51 #include <rte_memzone.h>
52 #include <rte_launch.h>
53 #include <rte_tailq.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_ring.h>
60 #include <rte_mempool.h>
61 #include <rte_mbuf.h>
62 #include <rte_interrupts.h>
63 #include <rte_pci.h>
64 #include <rte_ether.h>
65 #include <rte_ethdev.h>
66 #include <rte_string_fns.h>
67 
68 #include "testpmd.h"
69 
70 static void
71 print_ethaddr(const char *name, struct ether_addr *eth_addr)
72 {
73 	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
74 	       eth_addr->addr_bytes[0],
75 	       eth_addr->addr_bytes[1],
76 	       eth_addr->addr_bytes[2],
77 	       eth_addr->addr_bytes[3],
78 	       eth_addr->addr_bytes[4],
79 	       eth_addr->addr_bytes[5]);
80 }
81 
82 void
83 nic_stats_display(portid_t port_id)
84 {
85 	struct rte_eth_stats stats;
86 	struct rte_port *port = &ports[port_id];
87 	uint8_t i;
88 
89 	static const char *nic_stats_border = "########################";
90 
91 	if (port_id >= nb_ports) {
92 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
93 		return;
94 	}
95 	rte_eth_stats_get(port_id, &stats);
96 	printf("\n  %s NIC statistics for port %-2d %s\n",
97 	       nic_stats_border, port_id, nic_stats_border);
98 
99 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
100 		printf("  RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64" RX-bytes: "
101 		       "%-"PRIu64"\n"
102 		       "  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
103 		       "%-"PRIu64"\n",
104 		       stats.ipackets, stats.ierrors, stats.ibytes,
105 		       stats.opackets, stats.oerrors, stats.obytes);
106 	}
107 	else {
108 		printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
109 		       "    RX-bytes: %10"PRIu64"\n"
110 		       "  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
111 		       "    TX-bytes: %10"PRIu64"\n",
112 		       stats.ipackets, stats.ierrors, stats.ibytes,
113 		       stats.opackets, stats.oerrors, stats.obytes);
114 	}
115 
116 	/* stats fdir */
117 	if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
118 		printf("  Fdirmiss:   %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n",
119 		       stats.fdirmiss,
120 		       stats.fdirmatch);
121 
122 	if (port->rx_queue_stats_mapping_enabled) {
123 		printf("\n");
124 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
125 			printf("  Stats reg %2d RX-packets: %10"PRIu64
126 			       "    RX-errors: %10"PRIu64
127 			       "    RX-bytes: %10"PRIu64"\n",
128 			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
129 		}
130 	}
131 	if (port->tx_queue_stats_mapping_enabled) {
132 		printf("\n");
133 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
134 			printf("  Stats reg %2d TX-packets: %10"PRIu64
135 			       "                             TX-bytes: %10"PRIu64"\n",
136 			       i, stats.q_opackets[i], stats.q_obytes[i]);
137 		}
138 	}
139 
140 	printf("  %s############################%s\n",
141 	       nic_stats_border, nic_stats_border);
142 }
143 
144 void
145 nic_stats_clear(portid_t port_id)
146 {
147 	if (port_id >= nb_ports) {
148 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
149 		return;
150 	}
151 	rte_eth_stats_reset(port_id);
152 	printf("\n  NIC statistics for port %d cleared\n", port_id);
153 }
154 
155 
156 void
157 nic_stats_mapping_display(portid_t port_id)
158 {
159 	struct rte_port *port = &ports[port_id];
160 	uint16_t i;
161 
162 	static const char *nic_stats_mapping_border = "########################";
163 
164 	if (port_id >= nb_ports) {
165 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
166 		return;
167 	}
168 
169 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
170 		printf("Port id %d - either does not support queue statistic mapping or"
171 		       " no queue statistic mapping set\n", port_id);
172 		return;
173 	}
174 
175 	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
176 	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);
177 
178 	if (port->rx_queue_stats_mapping_enabled) {
179 		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
180 			if (rx_queue_stats_mappings[i].port_id == port_id) {
181 				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
182 				       rx_queue_stats_mappings[i].queue_id,
183 				       rx_queue_stats_mappings[i].stats_counter_id);
184 			}
185 		}
186 		printf("\n");
187 	}
188 
189 
190 	if (port->tx_queue_stats_mapping_enabled) {
191 		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
192 			if (tx_queue_stats_mappings[i].port_id == port_id) {
193 				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
194 				       tx_queue_stats_mappings[i].queue_id,
195 				       tx_queue_stats_mappings[i].stats_counter_id);
196 			}
197 		}
198 	}
199 
200 	printf("  %s####################################%s\n",
201 	       nic_stats_mapping_border, nic_stats_mapping_border);
202 }
203 
204 void
205 port_infos_display(portid_t port_id)
206 {
207 	struct rte_port *port;
208 	struct rte_eth_link link;
209 	int vlan_offload;
210 	static const char *info_border = "*********************";
211 
212 	if (port_id >= nb_ports) {
213 		printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
214 		return;
215 	}
216 	port = &ports[port_id];
217 	rte_eth_link_get(port_id, &link);
218 	printf("\n%s Infos for port %-2d %s\n",
219 	       info_border, port_id, info_border);
220 	print_ethaddr("MAC address: ", &port->eth_addr);
221 	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
222 	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
223 	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
224 	       ("full-duplex") : ("half-duplex"));
225 	printf("Promiscuous mode: %s\n",
226 	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
227 	printf("Allmulticast mode: %s\n",
228 	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
229 	printf("Maximum number of MAC addresses: %u\n",
230 	       (unsigned int)(port->dev_info.max_mac_addrs));
231 
232 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
233 	if (vlan_offload >= 0) {
234 		printf("VLAN offload:\n");
235 		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
236 			printf("  strip on\n");
237 		else
238 			printf("  strip off\n");
239 
240 		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
241 			printf("  filter on\n");
242 		else
243 			printf("  filter off\n");
244 
245 		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
246 			printf("  qinq(extend) on\n");
247 		else
248 			printf("  qinq(extend) off\n");
249 	}
250 }
251 
252 static int
253 port_id_is_invalid(portid_t port_id)
254 {
255 	if (port_id < nb_ports)
256 		return 0;
257 	printf("Invalid port %d (must be < nb_ports=%d)\n", port_id, nb_ports);
258 	return 1;
259 }
260 
261 static int
262 vlan_id_is_invalid(uint16_t vlan_id)
263 {
264 	if (vlan_id < 4096)
265 		return 0;
266 	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
267 	return 1;
268 }
269 
270 static int
271 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
272 {
273 	uint64_t pci_len;
274 
275 	if (reg_off & 0x3) {
276 		printf("Port register offset 0x%X not aligned on a 4-byte "
277 		       "boundary\n",
278 		       (unsigned)reg_off);
279 		return 1;
280 	}
281 	pci_len = ports[port_id].dev_info.pci_dev->mem_resource.len;
282 	if (reg_off >= pci_len) {
283 		printf("Port %d: register offset %u (0x%X) out of port PCI "
284 		       "resource (length=%"PRIu64")\n",
285 		       port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
286 		return 1;
287 	}
288 	return 0;
289 }
290 
291 static int
292 reg_bit_pos_is_invalid(uint8_t bit_pos)
293 {
294 	if (bit_pos <= 31)
295 		return 0;
296 	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
297 	return 1;
298 }
299 
300 #define display_port_and_reg_off(port_id, reg_off) \
301 	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
302 
303 static inline void
304 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
305 {
306 	display_port_and_reg_off(port_id, (unsigned)reg_off);
307 	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
308 }
309 
310 void
311 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
312 {
313 	uint32_t reg_v;
314 
315 
316 	if (port_id_is_invalid(port_id))
317 		return;
318 	if (port_reg_off_is_invalid(port_id, reg_off))
319 		return;
320 	if (reg_bit_pos_is_invalid(bit_x))
321 		return;
322 	reg_v = port_id_pci_reg_read(port_id, reg_off);
323 	display_port_and_reg_off(port_id, (unsigned)reg_off);
324 	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
325 }
326 
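/*
 * Display the value of the register bit field delimited by the two given
 * bit positions (in either order). For example, reading bits [8, 15] of a
 * register holding 0x0000AB00 prints "bits[8, 15]=0xAB (171)".
 */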
327 void
328 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
329 			   uint8_t bit1_pos, uint8_t bit2_pos)
330 {
331 	uint32_t reg_v;
332 	uint8_t  l_bit;
333 	uint8_t  h_bit;
334 
335 	if (port_id_is_invalid(port_id))
336 		return;
337 	if (port_reg_off_is_invalid(port_id, reg_off))
338 		return;
339 	if (reg_bit_pos_is_invalid(bit1_pos))
340 		return;
341 	if (reg_bit_pos_is_invalid(bit2_pos))
342 		return;
343 	if (bit1_pos > bit2_pos)
344 		l_bit = bit2_pos, h_bit = bit1_pos;
345 	else
346 		l_bit = bit1_pos, h_bit = bit2_pos;
347 
348 	reg_v = port_id_pci_reg_read(port_id, reg_off);
349 	reg_v >>= l_bit;
350 	if (h_bit < 31)
351 		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
352 	display_port_and_reg_off(port_id, (unsigned)reg_off);
353 	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
354 	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
355 }
356 
357 void
358 port_reg_display(portid_t port_id, uint32_t reg_off)
359 {
360 	uint32_t reg_v;
361 
362 	if (port_id_is_invalid(port_id))
363 		return;
364 	if (port_reg_off_is_invalid(port_id, reg_off))
365 		return;
366 	reg_v = port_id_pci_reg_read(port_id, reg_off);
367 	display_port_reg_value(port_id, reg_off, reg_v);
368 }
369 
370 void
371 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
372 		 uint8_t bit_v)
373 {
374 	uint32_t reg_v;
375 
376 	if (port_id_is_invalid(port_id))
377 		return;
378 	if (port_reg_off_is_invalid(port_id, reg_off))
379 		return;
380 	if (reg_bit_pos_is_invalid(bit_pos))
381 		return;
382 	if (bit_v > 1) {
383 		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
384 		return;
385 	}
386 	reg_v = port_id_pci_reg_read(port_id, reg_off);
387 	if (bit_v == 0)
388 		reg_v &= ~(1 << bit_pos);
389 	else
390 		reg_v |= (1 << bit_pos);
391 	port_id_pci_reg_write(port_id, reg_off, reg_v);
392 	display_port_reg_value(port_id, reg_off, reg_v);
393 }
394 
395 void
396 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
397 		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
398 {
399 	uint32_t max_v;
400 	uint32_t reg_v;
401 	uint8_t  l_bit;
402 	uint8_t  h_bit;
403 
404 	if (port_id_is_invalid(port_id))
405 		return;
406 	if (port_reg_off_is_invalid(port_id, reg_off))
407 		return;
408 	if (reg_bit_pos_is_invalid(bit1_pos))
409 		return;
410 	if (reg_bit_pos_is_invalid(bit2_pos))
411 		return;
412 	if (bit1_pos > bit2_pos)
413 		l_bit = bit2_pos, h_bit = bit1_pos;
414 	else
415 		l_bit = bit1_pos, h_bit = bit2_pos;
416 
417 	if ((h_bit - l_bit) < 31)
418 		max_v = (1 << (h_bit - l_bit + 1)) - 1;
419 	else
420 		max_v = 0xFFFFFFFF;
421 
422 	if (value > max_v) {
423 		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
424 				(unsigned)value, (unsigned)value,
425 				(unsigned)max_v, (unsigned)max_v);
426 		return;
427 	}
428 	reg_v = port_id_pci_reg_read(port_id, reg_off);
429 	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
430 	reg_v |= (value << l_bit); /* Set changed bits */
431 	port_id_pci_reg_write(port_id, reg_off, reg_v);
432 	display_port_reg_value(port_id, reg_off, reg_v);
433 }
434 
435 void
436 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
437 {
438 	if (port_id_is_invalid(port_id))
439 		return;
440 	if (port_reg_off_is_invalid(port_id, reg_off))
441 		return;
442 	port_id_pci_reg_write(port_id, reg_off, reg_v);
443 	display_port_reg_value(port_id, reg_off, reg_v);
444 }
445 
446 /*
447  * RX/TX ring descriptors display functions.
448  */
449 static int
450 rx_queue_id_is_invalid(queueid_t rxq_id)
451 {
452 	if (rxq_id < nb_rxq)
453 		return 0;
454 	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
455 	return 1;
456 }
457 
458 static int
459 tx_queue_id_is_invalid(queueid_t txq_id)
460 {
461 	if (txq_id < nb_txq)
462 		return 0;
463 	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
464 	return 1;
465 }
466 
467 static int
468 rx_desc_id_is_invalid(uint16_t rxdesc_id)
469 {
470 	if (rxdesc_id < nb_rxd)
471 		return 0;
472 	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
473 	       rxdesc_id, nb_rxd);
474 	return 1;
475 }
476 
477 static int
478 tx_desc_id_is_invalid(uint16_t txdesc_id)
479 {
480 	if (txdesc_id < nb_txd)
481 		return 0;
482 	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
483 	       txdesc_id, nb_txd);
484 	return 1;
485 }
486 
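/*
 * Look up the memzone that holds the hardware descriptor ring of the given
 * port/queue. The zone name is built as "<driver name>_<ring name>_<port>_<queue>",
 * which is expected to match the name under which the PMD allocated the ring
 * (e.g. something like "<driver_name>_rx_ring_1_0" for RX queue 0 of port 1).
 */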
487 static const struct rte_memzone *
488 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
489 {
490 	char mz_name[RTE_MEMZONE_NAMESIZE];
491 	const struct rte_memzone *mz;
492 
493 	rte_snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
494 		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
495 	mz = rte_memzone_lookup(mz_name);
496 	if (mz == NULL)
497 		printf("%s ring memory zone (port %d, queue %d) not "
498 		       "found (zone name = %s)\n",
499 		       ring_name, port_id, q_id, mz_name);
500 	return (mz);
501 }
502 
503 union igb_ring_dword {
504 	uint64_t dword;
505 	struct {
506 		uint32_t hi;
507 		uint32_t lo;
508 	} words;
509 };
510 
511 struct igb_ring_desc {
512 	union igb_ring_dword lo_dword;
513 	union igb_ring_dword hi_dword;
514 };
515 
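/*
 * Dump one ring descriptor as four 32-bit words: the low and high halves of
 * its first (lo_dword) and second (hi_dword) 64-bit words, each converted
 * from the ring's little-endian layout to host byte order.
 */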
516 static void
517 ring_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
518 {
519 	struct igb_ring_desc *ring;
520 	struct igb_ring_desc rd;
521 
522 	ring = (struct igb_ring_desc *) ring_mz->addr;
523 	rd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
524 	rd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
525 	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
526 		(unsigned)rd.lo_dword.words.lo, (unsigned)rd.lo_dword.words.hi,
527 		(unsigned)rd.hi_dword.words.lo, (unsigned)rd.hi_dword.words.hi);
528 }
529 
530 void
531 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
532 {
533 	const struct rte_memzone *rx_mz;
534 
535 	if (port_id_is_invalid(port_id))
536 		return;
537 	if (rx_queue_id_is_invalid(rxq_id))
538 		return;
539 	if (rx_desc_id_is_invalid(rxd_id))
540 		return;
541 	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
542 	if (rx_mz == NULL)
543 		return;
544 	ring_descriptor_display(rx_mz, rxd_id);
545 }
546 
547 void
548 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
549 {
550 	const struct rte_memzone *tx_mz;
551 
552 	if (port_id_is_invalid(port_id))
553 		return;
554 	if (tx_queue_id_is_invalid(txq_id))
555 		return;
556 	if (tx_desc_id_is_invalid(txd_id))
557 		return;
558 	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
559 	if (tx_mz == NULL)
560 		return;
561 	ring_descriptor_display(tx_mz, txd_id);
562 }
563 
564 void
565 fwd_lcores_config_display(void)
566 {
567 	lcoreid_t lc_id;
568 
569 	printf("List of forwarding lcores:");
570 	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
571 		printf(" %2u", fwd_lcores_cpuids[lc_id]);
572 	printf("\n");
573 }
574 void
575 rxtx_config_display(void)
576 {
577 	printf("  %s packet forwarding - CRC stripping %s - "
578 	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
579 	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
580 	       nb_pkt_per_burst);
581 
582 	if (cur_fwd_eng == &tx_only_engine)
583 		printf("  packet len=%u - nb packet segments=%d\n",
584 				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
585 
586 	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
587 	       nb_fwd_lcores, nb_fwd_ports);
588 	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
589 	       nb_rxq, nb_rxd, rx_free_thresh);
590 	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
591 	       rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh);
592 	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
593 	       nb_txq, nb_txd, tx_free_thresh);
594 	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
595 	       tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);
596 	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
597 	       tx_rs_thresh, txq_flags);
598 }
599 
600 /*
601  * Setup forwarding configuration for each logical core.
602  */
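/*
 * The cfg->nb_fwd_streams streams are spread over the cfg->nb_fwd_lcores
 * cores: the first (nb_fc - nb_extra) cores each get nb_fs / nb_fc streams
 * and the remaining nb_extra = nb_fs % nb_fc cores each get one more.
 * For example, 10 streams on 4 cores yields per-core stream counts of
 * 2, 2, 3, 3 with stream indices 0, 2, 4 and 7.
 */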
603 static void
604 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
605 {
606 	streamid_t nb_fs_per_lcore;
607 	streamid_t nb_fs;
608 	streamid_t sm_id;
609 	lcoreid_t  nb_extra;
610 	lcoreid_t  nb_fc;
611 	lcoreid_t  nb_lc;
612 	lcoreid_t  lc_id;
613 
614 	nb_fs = cfg->nb_fwd_streams;
615 	nb_fc = cfg->nb_fwd_lcores;
616 	if (nb_fs <= nb_fc) {
617 		nb_fs_per_lcore = 1;
618 		nb_extra = 0;
619 	} else {
620 		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
621 		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
622 	}
624 
625 	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
626 	sm_id = 0;
627 	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
628 		fwd_lcores[lc_id]->stream_idx = sm_id;
629 		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
630 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
631 	}
632 
633 	/*
634 	 * Assign extra remaining streams, if any.
635 	 */
636 	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
637 	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
638 		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
639 		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
640 		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
641 	}
642 }
643 
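/*
 * Simple forwarding: one stream per forwarding port, using RX/TX queue 0
 * only. With the paired topology (the default), ports are coupled two by
 * two (0<->1, 2<->3, ...); with --port-topology=chained, each port forwards
 * to the next one in the list and the last port wraps back to the first.
 */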
644 static void
645 simple_fwd_config_setup(void)
646 {
647 	portid_t i;
648 	portid_t j;
649 	portid_t inc = 2;
650 
651 	if (nb_fwd_ports % 2) {
652 		if (port_topology == PORT_TOPOLOGY_CHAINED) {
653 			inc = 1;
654 		}
655 		else {
656 			printf("\nWarning! Cannot handle an odd number of ports "
657 			       "with the current port topology. Configuration "
658 			       "must be changed to have an even number of ports, "
659 			       "or relaunch application with "
660 			       "--port-topology=chained\n\n");
661 		}
662 	}
663 
664 	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
665 	cur_fwd_config.nb_fwd_streams =
666 		(streamid_t) cur_fwd_config.nb_fwd_ports;
667 
668 	/*
669 	 * In the simple forwarding test, the number of forwarding cores
670 	 * must be less than or equal to the number of forwarding ports.
671 	 */
672 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
673 	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
674 		cur_fwd_config.nb_fwd_lcores =
675 			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
676 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
677 
678 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
679 		j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
680 		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
681 		fwd_streams[i]->rx_queue  = 0;
682 		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
683 		fwd_streams[i]->tx_queue  = 0;
684 		fwd_streams[i]->peer_addr = j;
685 
686 		if (port_topology == PORT_TOPOLOGY_PAIRED) {
687 			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
688 			fwd_streams[j]->rx_queue  = 0;
689 			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
690 			fwd_streams[j]->tx_queue  = 0;
691 			fwd_streams[j]->peer_addr = i;
692 		}
693 	}
694 }
695 
696 /**
697  * For the RSS forwarding test, each core is assigned on every port a transmit
698  * queue whose index is the index of the core itself. This approach limits the
699  * maximum number of processing cores of the RSS test to the maximum number of
700  * TX queues supported by the devices.
701  *
702  * Each core is assigned a single stream, each stream being composed of
703  * an RX queue to poll on an RX port for input packets, associated with
704  * a TX queue of a TX port to which forwarded packets are sent.
705  * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
706  * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
707  * following rules:
708  *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
709  *    - TxQl = RxQj
710  */
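/*
 * For instance, with 2 forwarding ports, 2 RX/TX queues and 4 forwarding
 * cores, the four streams are P0/Q0 -> P1/Q0, P0/Q1 -> P1/Q1,
 * P1/Q0 -> P0/Q0 and P1/Q1 -> P0/Q1, one stream per core.
 */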
711 static void
712 rss_fwd_config_setup(void)
713 {
714 	portid_t   rxp;
715 	portid_t   txp;
716 	queueid_t  rxq;
717 	queueid_t  nb_q;
718 	lcoreid_t  lc_id;
719 
720 	nb_q = nb_rxq;
721 	if (nb_q > nb_txq)
722 		nb_q = nb_txq;
723 	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
724 	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
725 	cur_fwd_config.nb_fwd_streams =
726 		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
727 	if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
728 		cur_fwd_config.nb_fwd_streams =
729 			(streamid_t)cur_fwd_config.nb_fwd_lcores;
730 	else
731 		cur_fwd_config.nb_fwd_lcores =
732 			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
733 	setup_fwd_config_of_each_lcore(&cur_fwd_config);
734 	rxp = 0; rxq = 0;
735 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
736 		struct fwd_stream *fs;
737 
738 		fs = fwd_streams[lc_id];
739 		if ((rxp & 0x1) == 0)
740 			txp = (portid_t) (rxp + 1);
741 		else
742 			txp = (portid_t) (rxp - 1);
743 		fs->rx_port = fwd_ports_ids[rxp];
744 		fs->rx_queue = rxq;
745 		fs->tx_port = fwd_ports_ids[txp];
746 		fs->tx_queue = rxq;
747 		fs->peer_addr = fs->tx_port;
748 		rxq = (queueid_t) (rxq + 1);
749 		if (rxq < nb_q)
750 			continue;
751 		/*
752 		 * rxq == nb_q
753 		 * Restart from RX queue 0 on next RX port
754 		 */
755 		rxq = 0;
756 		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
757 			rxp = (portid_t)
758 				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
759 		else
760 			rxp = (portid_t) (rxp + 1);
761 	}
762 }
763 
764 void
765 fwd_config_setup(void)
766 {
767 	cur_fwd_config.fwd_eng = cur_fwd_eng;
768 	if ((nb_rxq > 1) && (nb_txq > 1))
769 		rss_fwd_config_setup();
770 	else
771 		simple_fwd_config_setup();
772 }
773 
774 static void
775 pkt_fwd_config_display(struct fwd_config *cfg)
776 {
777 	struct fwd_stream *fs;
778 	lcoreid_t  lc_id;
779 	streamid_t sm_id;
780 
781 	printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
782 	       "NUMA support %s\n",
783 	       cfg->fwd_eng->fwd_mode_name,
784 	       cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
785 	       numa_support == 1 ? "enabled" : "disabled");
786 	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
787 		printf("Logical Core %u (socket %u) forwards packets on "
788 		       "%d streams:",
789 		       fwd_lcores_cpuids[lc_id],
790 		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
791 		       fwd_lcores[lc_id]->stream_nb);
792 		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
793 			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
794 			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
795 			       "P=%d/Q=%d (socket %u) ",
796 			       fs->rx_port, fs->rx_queue,
797 			       ports[fs->rx_port].socket_id,
798 			       fs->tx_port, fs->tx_queue,
799 			       ports[fs->tx_port].socket_id);
800 			print_ethaddr("peer=",
801 				      &peer_eth_addrs[fs->peer_addr]);
802 		}
803 		printf("\n");
804 	}
805 	printf("\n");
806 }
807 
808 
809 void
810 fwd_config_display(void)
811 {
812 	fwd_config_setup();
813 	pkt_fwd_config_display(&cur_fwd_config);
814 }
815 
816 void
817 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
818 {
819 	unsigned int i;
820 	unsigned int lcore_cpuid;
821 	int record_now;
822 
823 	record_now = 0;
824  again:
825 	for (i = 0; i < nb_lc; i++) {
826 		lcore_cpuid = lcorelist[i];
827 		if (! rte_lcore_is_enabled(lcore_cpuid)) {
828 			printf("Logical core %u not enabled\n", lcore_cpuid);
829 			return;
830 		}
831 		if (lcore_cpuid == rte_get_master_lcore()) {
832 			printf("Master core %u cannot forward packets\n",
833 			       lcore_cpuid);
834 			return;
835 		}
836 		if (record_now)
837 			fwd_lcores_cpuids[i] = lcore_cpuid;
838 	}
839 	if (record_now == 0) {
840 		record_now = 1;
841 		goto again;
842 	}
843 	nb_cfg_lcores = (lcoreid_t) nb_lc;
844 	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
845 		printf("previous number of forwarding cores %u - changed to "
846 		       "number of configured cores %u\n",
847 		       (unsigned int) nb_fwd_lcores, nb_lc);
848 		nb_fwd_lcores = (lcoreid_t) nb_lc;
849 	}
850 }
851 
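/*
 * Select the forwarding lcores from a 64-bit core mask: bit i of the mask
 * selects lcore i. For example, a mask of 0x6 selects lcores 1 and 2.
 */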
852 void
853 set_fwd_lcores_mask(uint64_t lcoremask)
854 {
855 	unsigned int lcorelist[64];
856 	unsigned int nb_lc;
857 	unsigned int i;
858 
859 	if (lcoremask == 0) {
860 		printf("Invalid NULL mask of cores\n");
861 		return;
862 	}
863 	nb_lc = 0;
864 	for (i = 0; i < 64; i++) {
865 		if (! ((uint64_t)(1ULL << i) & lcoremask))
866 			continue;
867 		lcorelist[nb_lc++] = i;
868 	}
869 	set_fwd_lcores_list(lcorelist, nb_lc);
870 }
871 
872 void
873 set_fwd_lcores_number(uint16_t nb_lc)
874 {
875 	if (nb_lc > nb_cfg_lcores) {
876 		printf("nb fwd cores %u > %u (max. number of configured "
877 		       "lcores) - ignored\n",
878 		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
879 		return;
880 	}
881 	nb_fwd_lcores = (lcoreid_t) nb_lc;
882 	printf("Number of forwarding cores set to %u\n",
883 	       (unsigned int) nb_fwd_lcores);
884 }
885 
886 void
887 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
888 {
889 	unsigned int i;
890 	portid_t port_id;
891 	int record_now;
892 
893 	record_now = 0;
894  again:
895 	for (i = 0; i < nb_pt; i++) {
896 		port_id = (portid_t) portlist[i];
897 		if (port_id >= nb_ports) {
898 			printf("Invalid port id %u (must be < nb_ports=%u)\n",
899 			       (unsigned int) port_id,
900 			       (unsigned int) nb_ports);
901 			return;
902 		}
903 		if (record_now)
904 			fwd_ports_ids[i] = port_id;
905 	}
906 	if (record_now == 0) {
907 		record_now = 1;
908 		goto again;
909 	}
910 	nb_cfg_ports = (portid_t) nb_pt;
911 	if (nb_fwd_ports != (portid_t) nb_pt) {
912 		printf("previous number of forwarding ports %u - changed to "
913 		       "number of configured ports %u\n",
914 		       (unsigned int) nb_fwd_ports, nb_pt);
915 		nb_fwd_ports = (portid_t) nb_pt;
916 	}
917 }
918 
919 void
920 set_fwd_ports_mask(uint64_t portmask)
921 {
922 	unsigned int portlist[64];
923 	unsigned int nb_pt;
924 	unsigned int i;
925 
926 	if (portmask == 0) {
927 		printf("Invalid NULL mask of ports\n");
928 		return;
929 	}
930 	nb_pt = 0;
931 	for (i = 0; i < 64; i++) {
932 		if (! ((uint64_t)(1ULL << i) & portmask))
933 			continue;
934 		portlist[nb_pt++] = i;
935 	}
936 	set_fwd_ports_list(portlist, nb_pt);
937 }
938 
939 void
940 set_fwd_ports_number(uint16_t nb_pt)
941 {
942 	if (nb_pt > nb_cfg_ports) {
943 		printf("nb fwd ports %u > %u (number of configured "
944 		       "ports) - ignored\n",
945 		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
946 		return;
947 	}
948 	nb_fwd_ports = (portid_t) nb_pt;
949 	printf("Number of forwarding ports set to %u\n",
950 	       (unsigned int) nb_fwd_ports);
951 }
952 
953 void
954 set_nb_pkt_per_burst(uint16_t nb)
955 {
956 	if (nb > MAX_PKT_BURST) {
957 		printf("nb pkt per burst: %u > %u (maximum number of packets per "
958 		       "burst) - ignored\n",
959 		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
960 		return;
961 	}
962 	nb_pkt_per_burst = nb;
963 	printf("Number of packets per burst set to %u\n",
964 	       (unsigned int) nb_pkt_per_burst);
965 }
966 
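/*
 * Configure the segment layout of the packets transmitted in tx-only mode.
 * Each segment must fit in an mbuf (mbuf_data_size) and the total length
 * must cover an empty UDP/IP packet, i.e. at least 14 + 20 + 8 = 42 bytes.
 * For example, segment lengths of 64 and 128 give a 192-byte packet split
 * over two mbufs per packet.
 */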
967 void
968 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
969 {
970 	uint16_t tx_pkt_len;
971 	unsigned i;
972 
973 	if (nb_segs >= (unsigned) nb_txd) {
974 		printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n",
975 		       nb_segs, (unsigned int) nb_txd);
976 		return;
977 	}
978 
979 	/*
980 	 * Check that each segment length does not exceed the mbuf data
981 	 * size (mbuf_data_size).
982 	 * Check also that the total packet length is greater than or equal to the
983 	 * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
984 	 */
985 	tx_pkt_len = 0;
986 	for (i = 0; i < nb_segs; i++) {
987 		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
988 			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
989 			       i, seg_lengths[i], (unsigned) mbuf_data_size);
990 			return;
991 		}
992 		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
993 	}
994 	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
995 		printf("total packet length=%u < %d - give up\n",
996 				(unsigned) tx_pkt_len,
997 				(int)(sizeof(struct ether_hdr) + 20 + 8));
998 		return;
999 	}
1000 
1001 	for (i = 0; i < nb_segs; i++)
1002 		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
1003 
1004 	tx_pkt_length  = tx_pkt_len;
1005 	tx_pkt_nb_segs = (uint8_t) nb_segs;
1006 }
1007 
1008 void
1009 set_pkt_forwarding_mode(const char *fwd_mode_name)
1010 {
1011 	struct fwd_engine *fwd_eng;
1012 	unsigned i;
1013 
1014 	i = 0;
1015 	while ((fwd_eng = fwd_engines[i]) != NULL) {
1016 		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
1017 			printf("Set %s packet forwarding mode\n",
1018 			       fwd_mode_name);
1019 			cur_fwd_eng = fwd_eng;
1020 			return;
1021 		}
1022 		i++;
1023 	}
1024 	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
1025 }
1026 
1027 void
1028 set_verbose_level(uint16_t vb_level)
1029 {
1030 	printf("Change verbose level from %u to %u\n",
1031 	       (unsigned int) verbose_level, (unsigned int) vb_level);
1032 	verbose_level = vb_level;
1033 }
1034 
1035 void
1036 vlan_extend_set(portid_t port_id, int on)
1037 {
1038 	int diag;
1039 	int vlan_offload;
1040 
1041 	if (port_id_is_invalid(port_id))
1042 		return;
1043 
1044 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1045 
1046 	if (on)
1047 		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
1048 	else
1049 		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
1050 
1051 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1052 	if (diag < 0)
1053 		printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
1054 		       "diag=%d\n", port_id, on, diag);
1055 }
1056 
1057 void
1058 rx_vlan_strip_set(portid_t port_id, int on)
1059 {
1060 	int diag;
1061 	int vlan_offload;
1062 
1063 	if (port_id_is_invalid(port_id))
1064 		return;
1065 
1066 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1067 
1068 	if (on)
1069 		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
1070 	else
1071 		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
1072 
1073 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1074 	if (diag < 0)
1075 		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
1076 		       "diag=%d\n", port_id, on, diag);
1077 }
1078 
1079 void
1080 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
1081 {
1082 	int diag;
1083 
1084 	if (port_id_is_invalid(port_id))
1085 		return;
1086 
1087 	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
1088 	if (diag < 0)
1089 		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
1090 		       "diag=%d\n", port_id, queue_id, on, diag);
1091 }
1092 
1093 void
1094 rx_vlan_filter_set(portid_t port_id, int on)
1095 {
1096 	int diag;
1097 	int vlan_offload;
1098 
1099 	if (port_id_is_invalid(port_id))
1100 		return;
1101 
1102 	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1103 
1104 	if (on)
1105 		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
1106 	else
1107 		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
1108 
1109 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1110 	if (diag < 0)
1111 		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
1112 		       "diag=%d\n", port_id, on, diag);
1113 }
1114 
1115 void
1116 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
1117 {
1118 	int diag;
1119 
1120 	if (port_id_is_invalid(port_id))
1121 		return;
1122 	if (vlan_id_is_invalid(vlan_id))
1123 		return;
1124 	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1125 	if (diag == 0)
1126 		return;
1127 	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
1128 	       "diag=%d\n",
1129 	       port_id, vlan_id, on, diag);
1130 }
1131 
1132 void
1133 rx_vlan_all_filter_set(portid_t port_id, int on)
1134 {
1135 	uint16_t vlan_id;
1136 
1137 	if (port_id_is_invalid(port_id))
1138 		return;
1139 	for (vlan_id = 0; vlan_id < 4096; vlan_id++)
1140 		rx_vft_set(port_id, vlan_id, on);
1141 }
1142 
1143 void
1144 vlan_tpid_set(portid_t port_id, uint16_t tp_id)
1145 {
1146 	int diag;
1147 	if (port_id_is_invalid(port_id))
1148 		return;
1149 
1150 	diag = rte_eth_dev_set_vlan_ether_type(port_id, tp_id);
1151 	if (diag == 0)
1152 		return;
1153 
1154 	printf("tx_vlan_tpid_set(port_id=%d, tpid=%d) failed "
1155 	       "diag=%d\n",
1156 	       port_id, tp_id, diag);
1157 }
1158 
1159 void
1160 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
1161 {
1162 	if (port_id_is_invalid(port_id))
1163 		return;
1164 	if (vlan_id_is_invalid(vlan_id))
1165 		return;
1166 	ports[port_id].tx_ol_flags |= PKT_TX_VLAN_PKT;
1167 	ports[port_id].tx_vlan_id = vlan_id;
1168 }
1169 
1170 void
1171 tx_vlan_reset(portid_t port_id)
1172 {
1173 	if (port_id_is_invalid(port_id))
1174 		return;
1175 	ports[port_id].tx_ol_flags &= ~PKT_TX_VLAN_PKT;
1176 }
1177 
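/*
 * Record (or update) the mapping of an RX or TX queue of a port to one of
 * the RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue statistics registers. The
 * mapping is what nic_stats_mapping_display() reports and selects which
 * q_ipackets[]/q_opackets[] entry accumulates the queue's counters in
 * nic_stats_display().
 */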
1178 void
1179 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
1180 {
1181 	uint16_t i;
1182 	uint8_t existing_mapping_found = 0;
1183 
1184 	if (port_id_is_invalid(port_id))
1185 		return;
1186 
1187 	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
1188 		return;
1189 
1190 	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1191 		printf("map_value not in required range 0..%d\n",
1192 				RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1193 		return;
1194 	}
1195 
1196 	if (!is_rx) { /*then tx*/
1197 		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1198 			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1199 			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
1200 				tx_queue_stats_mappings[i].stats_counter_id = map_value;
1201 				existing_mapping_found = 1;
1202 				break;
1203 			}
1204 		}
1205 		if (!existing_mapping_found) { /* A new additional mapping... */
1206 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
1207 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
1208 			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
1209 			nb_tx_queue_stats_mappings++;
1210 		}
1211 	}
1212 	else { /*rx*/
1213 		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1214 			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1215 			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
1216 				rx_queue_stats_mappings[i].stats_counter_id = map_value;
1217 				existing_mapping_found = 1;
1218 				break;
1219 			}
1220 		}
1221 		if (!existing_mapping_found) { /* A new additional mapping... */
1222 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
1223 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
1224 			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
1225 			nb_rx_queue_stats_mappings++;
1226 		}
1227 	}
1228 }
1229 
1230 void
1231 tx_cksum_set(portid_t port_id, uint8_t cksum_mask)
1232 {
1233 	uint16_t tx_ol_flags;
1234 	if (port_id_is_invalid(port_id))
1235 		return;
1236 	/* Clear last 4 bits and then set L3/4 checksum mask again */
1237 	tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0);
1238 	ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | tx_ol_flags);
1239 }
1240 
1241 void
1242 fdir_add_signature_filter(portid_t port_id, uint8_t queue_id,
1243 			  struct rte_fdir_filter *fdir_filter)
1244 {
1245 	int diag;
1246 
1247 	if (port_id_is_invalid(port_id))
1248 		return;
1249 
1250 	diag = rte_eth_dev_fdir_add_signature_filter(port_id, fdir_filter,
1251 						     queue_id);
1252 	if (diag == 0)
1253 		return;
1254 
1255 	printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed "
1256 	       "diag=%d\n", port_id, diag);
1257 }
1258 
1259 void
1260 fdir_update_signature_filter(portid_t port_id, uint8_t queue_id,
1261 			     struct rte_fdir_filter *fdir_filter)
1262 {
1263 	int diag;
1264 
1265 	if (port_id_is_invalid(port_id))
1266 		return;
1267 
1268 	diag = rte_eth_dev_fdir_update_signature_filter(port_id, fdir_filter,
1269 							queue_id);
1270 	if (diag == 0)
1271 		return;
1272 
1273 	printf("rte_eth_dev_fdir_update_signature_filter for port_id=%d failed "
1274 	       "diag=%d\n", port_id, diag);
1275 }
1276 
1277 void
1278 fdir_remove_signature_filter(portid_t port_id,
1279 			     struct rte_fdir_filter *fdir_filter)
1280 {
1281 	int diag;
1282 
1283 	if (port_id_is_invalid(port_id))
1284 		return;
1285 
1286 	diag = rte_eth_dev_fdir_remove_signature_filter(port_id, fdir_filter);
1287 	if (diag == 0)
1288 		return;
1289 
1290 	printf("rte_eth_dev_fdir_remove_signature_filter for port_id=%d failed "
1291 	       "diag=%d\n", port_id, diag);
1293 }
1294 
1295 void
1296 fdir_get_infos(portid_t port_id)
1297 {
1298 	struct rte_eth_fdir fdir_infos;
1299 
1300 	static const char *fdir_stats_border = "########################";
1301 
1302 	if (port_id_is_invalid(port_id))
1303 		return;
1304 
1305 	rte_eth_dev_fdir_get_infos(port_id, &fdir_infos);
1306 
1307 	printf("\n  %s FDIR infos for port %-2d %s\n",
1308 	       fdir_stats_border, port_id, fdir_stats_border);
1309 
1310 	printf("  collision: %-10"PRIu64" free: %-10"PRIu64"\n"
1311 	       "  maxhash: %-10"PRIu64" maxlen: %-10"PRIu64"\n"
1312 	       "  add : %-10"PRIu64"   remove : %-10"PRIu64"\n"
1313 	       "  f_add: %-10"PRIu64" f_remove: %-10"PRIu64"\n",
1314 	       (uint64_t)(fdir_infos.collision), (uint64_t)(fdir_infos.free),
1315 	       (uint64_t)(fdir_infos.maxhash), (uint64_t)(fdir_infos.maxlen),
1316 	       fdir_infos.add, fdir_infos.remove,
1317 	       fdir_infos.f_add, fdir_infos.f_remove);
1318 	printf("  %s############################%s\n",
1319 	       fdir_stats_border, fdir_stats_border);
1320 }
1321 
1322 void
1323 fdir_add_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id,
1324 			uint8_t drop, struct rte_fdir_filter *fdir_filter)
1325 {
1326 	int diag;
1327 
1328 	if (port_id_is_invalid(port_id))
1329 		return;
1330 
1331 	diag = rte_eth_dev_fdir_add_perfect_filter(port_id, fdir_filter,
1332 						   soft_id, queue_id, drop);
1333 	if (diag == 0)
1334 		return;
1335 
1336 	printf("rte_eth_dev_fdir_add_perfect_filter for port_id=%d failed "
1337 	       "diag=%d\n", port_id, diag);
1338 }
1339 
1340 void
1341 fdir_update_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id,
1342 			   uint8_t drop, struct rte_fdir_filter *fdir_filter)
1343 {
1344 	int diag;
1345 
1346 	if (port_id_is_invalid(port_id))
1347 		return;
1348 
1349 	diag = rte_eth_dev_fdir_update_perfect_filter(port_id, fdir_filter,
1350 						      soft_id, queue_id, drop);
1351 	if (diag == 0)
1352 		return;
1353 
1354 	printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed "
1355 	       "diag=%d\n", port_id, diag);
1356 }
1357 
1358 void
1359 fdir_remove_perfect_filter(portid_t port_id, uint16_t soft_id,
1360 			   struct rte_fdir_filter *fdir_filter)
1361 {
1362 	int diag;
1363 
1364 	if (port_id_is_invalid(port_id))
1365 		return;
1366 
1367 	diag = rte_eth_dev_fdir_remove_perfect_filter(port_id, fdir_filter,
1368 						      soft_id);
1369 	if (diag == 0)
1370 		return;
1371 
1372 	printf("rte_eth_dev_fdir_remove_perfect_filter for port_id=%d failed "
1373 	       "diag=%d\n", port_id, diag);
1374 }
1375 
1376 void
1377 fdir_set_masks(portid_t port_id, struct rte_fdir_masks *fdir_masks)
1378 {
1379 	int diag;
1380 
1381 	if (port_id_is_invalid(port_id))
1382 		return;
1383 
1384 	diag = rte_eth_dev_fdir_set_masks(port_id, fdir_masks);
1385 	if (diag == 0)
1386 		return;
1387 
1388 	printf("rte_eth_dev_fdir_set_masks for port_id=%d failed "
1389 	       "diag=%d\n", port_id, diag);
1390 }
1391