xref: /dpdk/app/test-pmd/testpmd.c (revision 03ab51eafda992874a48c392ca66ffb577fe2b71)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #ifndef RTE_EXEC_ENV_WINDOWS
13 #include <sys/mman.h>
14 #endif
15 #include <sys/types.h>
16 #include <errno.h>
17 #include <stdbool.h>
18 
19 #include <sys/queue.h>
20 #include <sys/stat.h>
21 
22 #include <stdint.h>
23 #include <unistd.h>
24 #include <inttypes.h>
25 
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_byteorder.h>
29 #include <rte_log.h>
30 #include <rte_debug.h>
31 #include <rte_cycles.h>
32 #include <rte_memory.h>
33 #include <rte_memcpy.h>
34 #include <rte_launch.h>
35 #include <rte_eal.h>
36 #include <rte_alarm.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_atomic.h>
40 #include <rte_branch_prediction.h>
41 #include <rte_mempool.h>
42 #include <rte_malloc.h>
43 #include <rte_mbuf.h>
44 #include <rte_mbuf_pool_ops.h>
45 #include <rte_interrupts.h>
46 #include <rte_pci.h>
47 #include <rte_ether.h>
48 #include <rte_ethdev.h>
49 #include <rte_dev.h>
50 #include <rte_string_fns.h>
51 #ifdef RTE_NET_IXGBE
52 #include <rte_pmd_ixgbe.h>
53 #endif
54 #ifdef RTE_LIB_PDUMP
55 #include <rte_pdump.h>
56 #endif
57 #include <rte_flow.h>
58 #include <rte_metrics.h>
59 #ifdef RTE_LIB_BITRATESTATS
60 #include <rte_bitrate.h>
61 #endif
62 #ifdef RTE_LIB_LATENCYSTATS
63 #include <rte_latencystats.h>
64 #endif
65 #ifdef RTE_EXEC_ENV_WINDOWS
66 #include <process.h>
67 #endif
68 
69 #include "testpmd.h"
70 
71 #ifndef MAP_HUGETLB
72 /* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
73 #define HUGE_FLAG (0x40000)
74 #else
75 #define HUGE_FLAG MAP_HUGETLB
76 #endif
77 
78 #ifndef MAP_HUGE_SHIFT
79 /* older kernels (or FreeBSD) will not have this define */
80 #define HUGE_SHIFT (26)
81 #else
82 #define HUGE_SHIFT MAP_HUGE_SHIFT
83 #endif
84 
85 #define EXTMEM_HEAP_NAME "extmem"
86 #define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
87 
88 uint16_t verbose_level = 0; /**< Silent by default. */
89 int testpmd_logtype; /**< Log type for testpmd logs */
90 
91 /* Use the main core for the command line? */
92 uint8_t interactive = 0;
93 uint8_t auto_start = 0;
94 uint8_t tx_first;
95 char cmdline_filename[PATH_MAX] = {0};
96 
97 /*
98  * NUMA support configuration.
99  * When set, NUMA support dispatches the allocation of the RX and TX
100  * memory rings, and of the DMA memory buffers (mbufs) of the probed
101  * ports, among CPU sockets 0 and 1.
102  * Otherwise, all memory is allocated from CPU socket 0.
103  */
104 uint8_t numa_support = 1; /**< numa enabled by default */
105 
106 /*
107  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
108  * not configured.
109  */
110 uint8_t socket_num = UMA_NO_CONFIG;
111 
112 /*
113  * Select mempool allocation type:
114  * - native: use regular DPDK memory
115  * - anon: use regular DPDK memory to create mempool, but populate using
116  *         anonymous memory (may not be IOVA-contiguous)
117  * - xmem: use externally allocated hugepage memory
118  */
119 uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
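/*
 * The allocation type is normally selected with the --mp-alloc
 * command-line option (e.g. --mp-alloc=anon); mbuf_pool_create()
 * below dispatches on this value.
 */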
120 
121 /*
122  * Store the specified sockets on which the memory pools used by
123  * the ports are allocated.
124  */
125 uint8_t port_numa[RTE_MAX_ETHPORTS];
126 
127 /*
128  * Store the specified sockets on which the RX rings used by
129  * the ports are allocated.
130  */
131 uint8_t rxring_numa[RTE_MAX_ETHPORTS];
132 
133 /*
134  * Store the specified sockets on which the TX rings used by
135  * the ports are allocated.
136  */
137 uint8_t txring_numa[RTE_MAX_ETHPORTS];
138 
139 /*
140  * Record the Ethernet address of peer target ports to which packets are
141  * forwarded.
142  * Must be instantiated with the Ethernet addresses of the peer
143  * traffic generator ports.
144  */
145 struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
146 portid_t nb_peer_eth_addrs = 0;
147 
148 /*
149  * Probed Target Environment.
150  */
151 struct rte_port *ports;	       /**< For all probed ethernet ports. */
152 portid_t nb_ports;             /**< Number of probed ethernet ports. */
153 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
154 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
155 
156 portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
157 
158 /*
159  * Test Forwarding Configuration.
160  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
161  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
162  */
163 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
164 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
165 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
166 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
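/*
 * For example, with 8 enabled lcores and 4 probed ports, configuring
 * 2 forwarding cores and 2 forwarding ports at runtime gives
 * nb_fwd_lcores = nb_cfg_lcores = 2 <= nb_lcores = 8 and
 * nb_fwd_ports = nb_cfg_ports = 2 <= nb_ports = 4, preserving the
 * invariants stated above.
 */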
167 
168 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
169 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
170 
171 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
172 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
173 
174 /*
175  * Forwarding engines.
176  */
177 struct fwd_engine * fwd_engines[] = {
178 	&io_fwd_engine,
179 	&mac_fwd_engine,
180 	&mac_swap_engine,
181 	&flow_gen_engine,
182 	&rx_only_engine,
183 	&tx_only_engine,
184 	&csum_fwd_engine,
185 	&icmp_echo_engine,
186 	&noisy_vnf_engine,
187 	&five_tuple_swap_fwd_engine,
188 #ifdef RTE_LIBRTE_IEEE1588
189 	&ieee1588_fwd_engine,
190 #endif
191 	NULL,
192 };
193 
194 struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
195 uint16_t mempool_flags;
196 
197 struct fwd_config cur_fwd_config;
198 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
199 uint32_t retry_enabled;
200 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
201 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
202 
203 uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
204 uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
205 	DEFAULT_MBUF_DATA_SIZE
206 }; /**< Mbuf data space size. */
207 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
208                                       * specified on command-line. */
209 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
210 
211 /*
212  * In a container, the process running with the 'stats-period' option
213  * cannot be terminated. Set this flag to exit the stats-period loop on
214  * SIGINT/SIGTERM.
214  */
215 uint8_t f_quit;
216 
217 /*
218  * Configuration of packet segments used to scatter received packets
219  * if some of split features is configured.
220  */
221 uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
222 uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
223 uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
224 uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
225 
226 /*
227  * Configuration of packet segments used by the "txonly" processing engine.
228  */
229 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
230 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
231 	TXONLY_DEF_PACKET_LEN,
232 };
233 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
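/*
 * For example, a TXONLY split into two segments of 64 and 128 bytes
 * corresponds to tx_pkt_seg_lengths = {64, 128}, tx_pkt_nb_segs = 2
 * and tx_pkt_length = 192.
 */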
234 
235 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
236 /**< Split policy for packets to TX. */
237 
238 uint8_t txonly_multi_flow;
239 /**< Whether multiple flows are generated in TXONLY mode. */
240 
241 uint32_t tx_pkt_times_inter;
242 /**< Timings for send scheduling in TXONLY mode, time between bursts. */
243 
244 uint32_t tx_pkt_times_intra;
245 /**< Timings for send scheduling in TXONLY mode, time between packets. */
246 
247 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
248 uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
249 int nb_flows_flowgen = 1024; /**< Number of flows in flowgen mode. */
250 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
251 
252 /* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
253 uint8_t dcb_config = 0;
254 
255 /*
256  * Configurable number of RX/TX queues.
257  */
258 queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
259 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
260 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
261 
262 /*
263  * Configurable number of RX/TX ring descriptors.
264  * Defaults are supplied by drivers via ethdev.
265  */
266 #define RTE_TEST_RX_DESC_DEFAULT 0
267 #define RTE_TEST_TX_DESC_DEFAULT 0
268 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
269 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
270 
271 #define RTE_PMD_PARAM_UNSET -1
272 /*
273  * Configurable values of RX and TX ring threshold registers.
274  */
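/*
 * pthresh, hthresh and wthresh below are the prefetch, host and
 * write-back threshold register values of the RX/TX rings;
 * RTE_PMD_PARAM_UNSET leaves the driver defaults in place.
 */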
275 
276 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
277 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
278 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
279 
280 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
281 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
282 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
283 
284 /*
285  * Configurable value of RX free threshold.
286  */
287 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
288 
289 /*
290  * Configurable value of RX drop enable.
291  */
292 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
293 
294 /*
295  * Configurable value of TX free threshold.
296  */
297 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
298 
299 /*
300  * Configurable value of TX RS bit threshold.
301  */
302 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
303 
304 /*
305  * Configurable value of buffered packets before sending.
306  */
307 uint16_t noisy_tx_sw_bufsz;
308 
309 /*
310  * Configurable value of packet buffer timeout.
311  */
312 uint16_t noisy_tx_sw_buf_flush_time;
313 
314 /*
315  * Configurable value for size of VNF internal memory area
316  * used for simulating noisy neighbour behaviour
317  */
318 uint64_t noisy_lkup_mem_sz;
319 
320 /*
321  * Configurable value of number of random writes done in
322  * VNF simulation memory area.
323  */
324 uint64_t noisy_lkup_num_writes;
325 
326 /*
327  * Configurable value of number of random reads done in
328  * VNF simulation memory area.
329  */
330 uint64_t noisy_lkup_num_reads;
331 
332 /*
333  * Configurable value of number of random reads/writes done in
334  * VNF simulation memory area.
335  */
336 uint64_t noisy_lkup_num_reads_writes;
337 
338 /*
339  * Receive Side Scaling (RSS) configuration.
340  */
341 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
342 
343 /*
344  * Port topology configuration
345  */
346 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
347 
348 /*
349  * Avoid flushing all the RX streams before forwarding starts.
350  */
351 uint8_t no_flush_rx = 0; /* flush by default */
352 
353 /*
354  * Flow API isolated mode.
355  */
356 uint8_t flow_isolate_all;
357 
358 /*
359  * Avoid checking the link status when starting/stopping a port.
360  */
361 uint8_t no_link_check = 0; /* check by default */
362 
363 /*
364  * Don't automatically start all ports in interactive mode.
365  */
366 uint8_t no_device_start = 0;
367 
368 /*
369  * Enable link status change notification
370  */
371 uint8_t lsc_interrupt = 1; /* enabled by default */
372 
373 /*
374  * Enable device removal notification.
375  */
376 uint8_t rmv_interrupt = 1; /* enabled by default */
377 
378 uint8_t hot_plug = 0; /**< hotplug disabled by default. */
379 
380 /* After attach, port setup is called on event or by iterator */
381 bool setup_on_probe_event = true;
382 
383 /* Clear ptypes on port initialization. */
384 uint8_t clear_ptypes = true;
385 
386 /* Hairpin ports configuration mode. */
387 uint16_t hairpin_mode;
388 
389 /* Pretty printing of ethdev events */
390 static const char * const eth_event_desc[] = {
391 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
392 	[RTE_ETH_EVENT_INTR_LSC] = "link state change",
393 	[RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
394 	[RTE_ETH_EVENT_INTR_RESET] = "reset",
395 	[RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
396 	[RTE_ETH_EVENT_IPSEC] = "IPsec",
397 	[RTE_ETH_EVENT_MACSEC] = "MACsec",
398 	[RTE_ETH_EVENT_INTR_RMV] = "device removal",
399 	[RTE_ETH_EVENT_NEW] = "device probed",
400 	[RTE_ETH_EVENT_DESTROY] = "device released",
401 	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
402 	[RTE_ETH_EVENT_MAX] = NULL,
403 };
404 
405 /*
406  * Display or mask ether events
407  * Default to all events except VF_MBOX
408  */
409 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
410 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
411 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
412 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
413 			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
414 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
415 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
416 			    (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
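/*
 * An event of a given type is displayed only when its bit is set,
 * e.g.: if (event_print_mask & (UINT32_C(1) << type)) { ...print... }
 */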
417 /*
418  * Decide whether all memory is locked for performance.
419  */
420 int do_mlockall = 0;
421 
422 /*
423  * NIC bypass mode configuration options.
424  */
425 
426 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
427 /* The NIC bypass watchdog timeout. */
428 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
429 #endif
430 
431 
432 #ifdef RTE_LIB_LATENCYSTATS
433 
434 /*
435  * Set when latency stats are enabled on the command line.
436  */
437 uint8_t latencystats_enabled;
438 
439 /*
440  * Lcore ID to service latency statistics.
441  */
442 lcoreid_t latencystats_lcore_id = -1;
443 
444 #endif
445 
446 /*
447  * Ethernet device configuration.
448  */
449 struct rte_eth_rxmode rx_mode = {
450 	/* Default maximum frame length.
451 	 * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
452 	 * in init_config().
453 	 */
454 	.max_rx_pkt_len = 0,
455 };
456 
457 struct rte_eth_txmode tx_mode = {
458 	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
459 };
460 
461 struct rte_fdir_conf fdir_conf = {
462 	.mode = RTE_FDIR_MODE_NONE,
463 	.pballoc = RTE_FDIR_PBALLOC_64K,
464 	.status = RTE_FDIR_REPORT_STATUS,
465 	.mask = {
466 		.vlan_tci_mask = 0xFFEF,
467 		.ipv4_mask     = {
468 			.src_ip = 0xFFFFFFFF,
469 			.dst_ip = 0xFFFFFFFF,
470 		},
471 		.ipv6_mask     = {
472 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
473 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
474 		},
475 		.src_port_mask = 0xFFFF,
476 		.dst_port_mask = 0xFFFF,
477 		.mac_addr_byte_mask = 0xFF,
478 		.tunnel_type_mask = 1,
479 		.tunnel_id_mask = 0xFFFFFFFF,
480 	},
481 	.drop_queue = 127,
482 };
483 
484 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
485 
486 /*
487  * Whether to hide zero values for xstats; they are displayed by default.
488  */
489 uint8_t xstats_hide_zero;
490 
491 /*
492  * Measurement of CPU cycles is disabled by default.
493  */
494 uint8_t record_core_cycles;
495 
496 /*
497  * Display of RX and TX bursts disabled by default
498  */
499 uint8_t record_burst_stats;
500 
501 unsigned int num_sockets = 0;
502 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
503 
504 #ifdef RTE_LIB_BITRATESTATS
505 /* Bitrate statistics */
506 struct rte_stats_bitrates *bitrate_data;
507 lcoreid_t bitrate_lcore_id;
508 uint8_t bitrate_enabled;
509 #endif
510 
511 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
512 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
513 
514 /*
515  * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
516  */
517 enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
518 
519 /*
520  * Used to set forced link speed
521  */
522 uint32_t eth_link_speed;
523 
524 /*
525  * ID of the current process in multi-process, used to
526  * configure the queues to be polled.
527  */
528 int proc_id;
529 
530 /*
531  * Number of processes in multi-process, used to
532  * configure the queues to be polled.
533  */
534 unsigned int num_procs = 1;
535 
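/*
 * In multi-process mode only the primary process may configure, start
 * or stop devices and free mempools, so the wrappers below become
 * no-ops in secondary processes.
 */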
536 static int
537 eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
538 		      const struct rte_eth_conf *dev_conf)
539 {
540 	if (is_proc_primary())
541 		return rte_eth_dev_configure(port_id, nb_rx_q, nb_tx_q,
542 					dev_conf);
543 	return 0;
544 }
545 
546 static int
547 eth_dev_start_mp(uint16_t port_id)
548 {
549 	if (is_proc_primary())
550 		return rte_eth_dev_start(port_id);
551 
552 	return 0;
553 }
554 
555 static int
556 eth_dev_stop_mp(uint16_t port_id)
557 {
558 	if (is_proc_primary())
559 		return rte_eth_dev_stop(port_id);
560 
561 	return 0;
562 }
563 
564 static void
565 mempool_free_mp(struct rte_mempool *mp)
566 {
567 	if (is_proc_primary())
568 		rte_mempool_free(mp);
569 }
570 
571 static int
572 eth_dev_set_mtu_mp(uint16_t port_id, uint16_t mtu)
573 {
574 	if (is_proc_primary())
575 		return rte_eth_dev_set_mtu(port_id, mtu);
576 
577 	return 0;
578 }
579 
580 /* Forward function declarations */
581 static void setup_attached_port(portid_t pi);
582 static void check_all_ports_link_status(uint32_t port_mask);
583 static int eth_event_callback(portid_t port_id,
584 			      enum rte_eth_event_type type,
585 			      void *param, void *ret_param);
586 static void dev_event_callback(const char *device_name,
587 				enum rte_dev_event_type type,
588 				void *param);
589 
590 /*
591  * Check if all the ports are started.
592  * If yes, return positive value. If not, return zero.
593  */
594 static int all_ports_started(void);
595 
596 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
597 uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
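/* i.e. 1518 - 4 = 1514 bytes: a maximum-length Ethernet frame without CRC */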
598 
599 /* Holds the registered mbuf dynamic flags names. */
600 char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
601 
602 /*
603  * Helper function to check whether a socket ID is new (not yet discovered).
604  * If it is new, return a positive value; otherwise return zero.
605  */
606 int
607 new_socket_id(unsigned int socket_id)
608 {
609 	unsigned int i;
610 
611 	for (i = 0; i < num_sockets; i++) {
612 		if (socket_ids[i] == socket_id)
613 			return 0;
614 	}
615 	return 1;
616 }
617 
618 /*
619  * Setup default configuration.
620  */
621 static void
622 set_default_fwd_lcores_config(void)
623 {
624 	unsigned int i;
625 	unsigned int nb_lc;
626 	unsigned int sock_num;
627 
628 	nb_lc = 0;
629 	for (i = 0; i < RTE_MAX_LCORE; i++) {
630 		if (!rte_lcore_is_enabled(i))
631 			continue;
632 		sock_num = rte_lcore_to_socket_id(i);
633 		if (new_socket_id(sock_num)) {
634 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
635 				rte_exit(EXIT_FAILURE,
636 					 "Total sockets greater than %u\n",
637 					 RTE_MAX_NUMA_NODES);
638 			}
639 			socket_ids[num_sockets++] = sock_num;
640 		}
641 		if (i == rte_get_main_lcore())
642 			continue;
643 		fwd_lcores_cpuids[nb_lc++] = i;
644 	}
645 	nb_lcores = (lcoreid_t) nb_lc;
646 	nb_cfg_lcores = nb_lcores;
647 	nb_fwd_lcores = 1;
648 }
649 
650 static void
651 set_def_peer_eth_addrs(void)
652 {
653 	portid_t i;
654 
655 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
656 		peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
657 		peer_eth_addrs[i].addr_bytes[5] = i;
658 	}
659 }
660 
661 static void
662 set_default_fwd_ports_config(void)
663 {
664 	portid_t pt_id;
665 	int i = 0;
666 
667 	RTE_ETH_FOREACH_DEV(pt_id) {
668 		fwd_ports_ids[i++] = pt_id;
669 
670 		/* Update sockets info according to the attached device */
671 		int socket_id = rte_eth_dev_socket_id(pt_id);
672 		if (socket_id >= 0 && new_socket_id(socket_id)) {
673 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
674 				rte_exit(EXIT_FAILURE,
675 					 "Total sockets greater than %u\n",
676 					 RTE_MAX_NUMA_NODES);
677 			}
678 			socket_ids[num_sockets++] = socket_id;
679 		}
680 	}
681 
682 	nb_cfg_ports = nb_ports;
683 	nb_fwd_ports = nb_ports;
684 }
685 
686 void
687 set_def_fwd_config(void)
688 {
689 	set_default_fwd_lcores_config();
690 	set_def_peer_eth_addrs();
691 	set_default_fwd_ports_config();
692 }
693 
694 #ifndef RTE_EXEC_ENV_WINDOWS
695 /* extremely pessimistic estimation of memory required to create a mempool */
696 static int
697 calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
698 {
699 	unsigned int n_pages, mbuf_per_pg, leftover;
700 	uint64_t total_mem, mbuf_mem, obj_sz;
701 
702 	/* there is no good way to predict how much space the mempool will
703 	 * occupy because it will allocate chunks on the fly, and some of those
704 	 * will come from default DPDK memory while some will come from our
705 	 * external memory, so just assume 128MB will be enough for everyone.
706 	 */
707 	uint64_t hdr_mem = 128 << 20;
708 
709 	/* account for possible non-contiguousness */
710 	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
711 	if (obj_sz > pgsz) {
712 		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
713 		return -1;
714 	}
715 
716 	mbuf_per_pg = pgsz / obj_sz;
717 	leftover = (nb_mbufs % mbuf_per_pg) > 0;
718 	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
719 
720 	mbuf_mem = n_pages * pgsz;
721 
722 	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
723 
724 	if (total_mem > SIZE_MAX) {
725 		TESTPMD_LOG(ERR, "Memory size too big\n");
726 		return -1;
727 	}
728 	*out = (size_t)total_mem;
729 
730 	return 0;
731 }
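/*
 * Worked example for calc_mem_size(), with assumed round values:
 * pgsz = 2MB and obj_sz = 2KB give mbuf_per_pg = 1024; nb_mbufs = 100000
 * then needs n_pages = 98, so mbuf_mem = 196MB and
 * total_mem = 128MB + 196MB = 324MB (already 2MB-aligned).
 */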
732 
733 static int
734 pagesz_flags(uint64_t page_sz)
735 {
736 	/* as per mmap() manpage, all page sizes are log2 of page size
737 	 * shifted by MAP_HUGE_SHIFT
738 	 */
739 	int log2 = rte_log2_u64(page_sz);
740 
741 	return (log2 << HUGE_SHIFT);
742 }
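/*
 * e.g. pagesz_flags(RTE_PGSIZE_2M) == (21 << HUGE_SHIFT) and
 * pagesz_flags(RTE_PGSIZE_1G) == (30 << HUGE_SHIFT).
 */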
743 
744 static void *
745 alloc_mem(size_t memsz, size_t pgsz, bool huge)
746 {
747 	void *addr;
748 	int flags;
749 
750 	/* allocate anonymous hugepages */
751 	flags = MAP_ANONYMOUS | MAP_PRIVATE;
752 	if (huge)
753 		flags |= HUGE_FLAG | pagesz_flags(pgsz);
754 
755 	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
756 	if (addr == MAP_FAILED)
757 		return NULL;
758 
759 	return addr;
760 }
761 
762 struct extmem_param {
763 	void *addr;
764 	size_t len;
765 	size_t pgsz;
766 	rte_iova_t *iova_table;
767 	unsigned int iova_table_len;
768 };
769 
770 static int
771 create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
772 		bool huge)
773 {
774 	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
775 			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
776 	unsigned int cur_page, n_pages, pgsz_idx;
777 	size_t mem_sz, cur_pgsz;
778 	rte_iova_t *iovas = NULL;
779 	void *addr;
780 	int ret;
781 
782 	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
783 		/* skip anything that is too big */
784 		if (pgsizes[pgsz_idx] > SIZE_MAX)
785 			continue;
786 
787 		cur_pgsz = pgsizes[pgsz_idx];
788 
789 		/* if we were told not to allocate hugepages, override */
790 		if (!huge)
791 			cur_pgsz = sysconf(_SC_PAGESIZE);
792 
793 		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
794 		if (ret < 0) {
795 			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
796 			return -1;
797 		}
798 
799 		/* allocate our memory */
800 		addr = alloc_mem(mem_sz, cur_pgsz, huge);
801 
802 		/* if we couldn't allocate memory with a specified page size,
803 		 * that doesn't mean we can't do it with other page sizes, so
804 		 * try another one.
805 		 */
806 		if (addr == NULL)
807 			continue;
808 
809 		/* store IOVA addresses for every page in this memory area */
810 		n_pages = mem_sz / cur_pgsz;
811 
812 		iovas = malloc(sizeof(*iovas) * n_pages);
813 
814 		if (iovas == NULL) {
815 			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
816 			goto fail;
817 		}
818 		/* lock memory if it's not huge pages */
819 		if (!huge)
820 			mlock(addr, mem_sz);
821 
822 		/* populate IOVA addresses */
823 		for (cur_page = 0; cur_page < n_pages; cur_page++) {
824 			rte_iova_t iova;
825 			size_t offset;
826 			void *cur;
827 
828 			offset = cur_pgsz * cur_page;
829 			cur = RTE_PTR_ADD(addr, offset);
830 
831 			/* touch the page before getting its IOVA */
832 			*(volatile char *)cur = 0;
833 
834 			iova = rte_mem_virt2iova(cur);
835 
836 			iovas[cur_page] = iova;
837 		}
838 
839 		break;
840 	}
841 	/* if we couldn't allocate anything */
842 	if (iovas == NULL)
843 		return -1;
844 
845 	param->addr = addr;
846 	param->len = mem_sz;
847 	param->pgsz = cur_pgsz;
848 	param->iova_table = iovas;
849 	param->iova_table_len = n_pages;
850 
851 	return 0;
852 fail:
853 	if (iovas)
854 		free(iovas);
855 	if (addr)
856 		munmap(addr, mem_sz);
857 
858 	return -1;
859 }
860 
861 static int
862 setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
863 {
864 	struct extmem_param param;
865 	int socket_id, ret;
866 
867 	memset(&param, 0, sizeof(param));
868 
869 	/* check if our heap exists */
870 	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
871 	if (socket_id < 0) {
872 		/* create our heap */
873 		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
874 		if (ret < 0) {
875 			TESTPMD_LOG(ERR, "Cannot create heap\n");
876 			return -1;
877 		}
878 	}
879 
880 	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
881 	if (ret < 0) {
882 		TESTPMD_LOG(ERR, "Cannot create memory area\n");
883 		return -1;
884 	}
885 
886 	/* we now have a valid memory area, so add it to heap */
887 	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
888 			param.addr, param.len, param.iova_table,
889 			param.iova_table_len, param.pgsz);
890 
891 	/* when using VFIO, memory is automatically mapped for DMA by EAL */
892 
893 	/* not needed any more */
894 	free(param.iova_table);
895 
896 	if (ret < 0) {
897 		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
898 		munmap(param.addr, param.len);
899 		return -1;
900 	}
901 
902 	/* success */
903 
904 	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
905 			param.len >> 20);
906 
907 	return 0;
908 }
909 static void
910 dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
911 	     struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
912 {
913 	uint16_t pid = 0;
914 	int ret;
915 
916 	RTE_ETH_FOREACH_DEV(pid) {
917 		struct rte_eth_dev_info dev_info;
918 
919 		ret = eth_dev_info_get_print_err(pid, &dev_info);
920 		if (ret != 0) {
921 			TESTPMD_LOG(DEBUG,
922 				    "unable to get device info for port %d on addr 0x%p,"
923 				    " mempool unmapping will not be performed\n",
924 				    pid, memhdr->addr);
925 			continue;
926 		}
927 
928 		ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
929 		if (ret) {
930 			TESTPMD_LOG(DEBUG,
931 				    "unable to DMA unmap addr 0x%p "
932 				    "for device %s\n",
933 				    memhdr->addr, dev_info.device->name);
934 		}
935 	}
936 	ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
937 	if (ret) {
938 		TESTPMD_LOG(DEBUG,
939 			    "unable to un-register addr 0x%p\n", memhdr->addr);
940 	}
941 }
942 
943 static void
944 dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
945 	   struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
946 {
947 	uint16_t pid = 0;
948 	size_t page_size = sysconf(_SC_PAGESIZE);
949 	int ret;
950 
951 	ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
952 				  page_size);
953 	if (ret) {
954 		TESTPMD_LOG(DEBUG,
955 			    "unable to register addr 0x%p\n", memhdr->addr);
956 		return;
957 	}
958 	RTE_ETH_FOREACH_DEV(pid) {
959 		struct rte_eth_dev_info dev_info;
960 
961 		ret = eth_dev_info_get_print_err(pid, &dev_info);
962 		if (ret != 0) {
963 			TESTPMD_LOG(DEBUG,
964 				    "unable to get device info for port %d on addr 0x%p,"
965 				    " mempool mapping will not be performed\n",
966 				    pid, memhdr->addr);
967 			continue;
968 		}
969 		ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
970 		if (ret) {
971 			TESTPMD_LOG(DEBUG,
972 				    "unable to DMA map addr 0x%p "
973 				    "for device %s\n",
974 				    memhdr->addr, dev_info.device->name);
975 		}
976 	}
977 }
978 #endif
979 
980 static unsigned int
981 setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
982 	    char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
983 {
984 	struct rte_pktmbuf_extmem *xmem;
985 	unsigned int ext_num, zone_num, elt_num;
986 	uint16_t elt_size;
987 
988 	elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
989 	elt_num = EXTBUF_ZONE_SIZE / elt_size;
990 	zone_num = (nb_mbufs + elt_num - 1) / elt_num;
991 
992 	xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
993 	if (xmem == NULL) {
994 		TESTPMD_LOG(ERR, "Cannot allocate memory for "
995 				 "external buffer descriptors\n");
996 		*ext_mem = NULL;
997 		return 0;
998 	}
999 	for (ext_num = 0; ext_num < zone_num; ext_num++) {
1000 		struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
1001 		const struct rte_memzone *mz;
1002 		char mz_name[RTE_MEMZONE_NAMESIZE];
1003 		int ret;
1004 
1005 		ret = snprintf(mz_name, sizeof(mz_name),
1006 			RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
1007 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
1008 			errno = ENAMETOOLONG;
1009 			ext_num = 0;
1010 			break;
1011 		}
1012 		mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
1013 						 socket_id,
1014 						 RTE_MEMZONE_IOVA_CONTIG |
1015 						 RTE_MEMZONE_1GB |
1016 						 RTE_MEMZONE_SIZE_HINT_ONLY,
1017 						 EXTBUF_ZONE_SIZE);
1018 		if (mz == NULL) {
1019 			/*
1020 			 * The caller exits on external buffer creation
1021 			 * error, so there is no need to free memzones.
1022 			 */
1023 			errno = ENOMEM;
1024 			ext_num = 0;
1025 			break;
1026 		}
1027 		xseg->buf_ptr = mz->addr;
1028 		xseg->buf_iova = mz->iova;
1029 		xseg->buf_len = EXTBUF_ZONE_SIZE;
1030 		xseg->elt_size = elt_size;
1031 	}
1032 	if (ext_num == 0 && xmem != NULL) {
1033 		free(xmem);
1034 		xmem = NULL;
1035 	}
1036 	*ext_mem = xmem;
1037 	return ext_num;
1038 }
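/*
 * Worked example for setup_extbuf(), with assumed values: mbuf_sz = 2176
 * is already cache-line aligned, so elt_num = 2MB / 2176 = 963 buffers
 * per memzone, and nb_mbufs = 100000 requires zone_num = 104 memzones.
 */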
1039 
1040 /*
1041  * Mbuf pool creation; configuration initialisation done once at init time.
1042  */
1043 static struct rte_mempool *
1044 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
1045 		 unsigned int socket_id, uint16_t size_idx)
1046 {
1047 	char pool_name[RTE_MEMPOOL_NAMESIZE];
1048 	struct rte_mempool *rte_mp = NULL;
1049 #ifndef RTE_EXEC_ENV_WINDOWS
1050 	uint32_t mb_size;
1051 
1052 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
1053 #endif
1054 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
1055 	if (!is_proc_primary()) {
1056 		rte_mp = rte_mempool_lookup(pool_name);
1057 		if (rte_mp == NULL)
1058 			rte_exit(EXIT_FAILURE,
1059 				"Get mbuf pool for socket %u failed: %s\n",
1060 				socket_id, rte_strerror(rte_errno));
1061 		return rte_mp;
1062 	}
1063 
1064 	TESTPMD_LOG(INFO,
1065 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
1066 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
1067 
1068 	switch (mp_alloc_type) {
1069 	case MP_ALLOC_NATIVE:
1070 		{
1071 			/* wrapper to rte_mempool_create() */
1072 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1073 					rte_mbuf_best_mempool_ops());
1074 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1075 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
1076 			break;
1077 		}
1078 #ifndef RTE_EXEC_ENV_WINDOWS
1079 	case MP_ALLOC_ANON:
1080 		{
1081 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
1082 				mb_size, (unsigned int) mb_mempool_cache,
1083 				sizeof(struct rte_pktmbuf_pool_private),
1084 				socket_id, mempool_flags);
1085 			if (rte_mp == NULL)
1086 				goto err;
1087 
1088 			if (rte_mempool_populate_anon(rte_mp) == 0) {
1089 				rte_mempool_free(rte_mp);
1090 				rte_mp = NULL;
1091 				goto err;
1092 			}
1093 			rte_pktmbuf_pool_init(rte_mp, NULL);
1094 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
1095 			rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
1096 			break;
1097 		}
1098 	case MP_ALLOC_XMEM:
1099 	case MP_ALLOC_XMEM_HUGE:
1100 		{
1101 			int heap_socket;
1102 			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
1103 
1104 			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
1105 				rte_exit(EXIT_FAILURE, "Could not create external memory\n");
1106 
1107 			heap_socket =
1108 				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
1109 			if (heap_socket < 0)
1110 				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
1111 
1112 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1113 					rte_mbuf_best_mempool_ops());
1114 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
1115 					mb_mempool_cache, 0, mbuf_seg_size,
1116 					heap_socket);
1117 			break;
1118 		}
1119 #endif
1120 	case MP_ALLOC_XBUF:
1121 		{
1122 			struct rte_pktmbuf_extmem *ext_mem;
1123 			unsigned int ext_num;
1124 
1125 			ext_num = setup_extbuf(nb_mbuf,	mbuf_seg_size,
1126 					       socket_id, pool_name, &ext_mem);
1127 			if (ext_num == 0)
1128 				rte_exit(EXIT_FAILURE,
1129 					 "Can't create pinned data buffers\n");
1130 
1131 			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
1132 					rte_mbuf_best_mempool_ops());
1133 			rte_mp = rte_pktmbuf_pool_create_extbuf
1134 					(pool_name, nb_mbuf, mb_mempool_cache,
1135 					 0, mbuf_seg_size, socket_id,
1136 					 ext_mem, ext_num);
1137 			free(ext_mem);
1138 			break;
1139 		}
1140 	default:
1141 		{
1142 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
1143 		}
1144 	}
1145 
1146 #ifndef RTE_EXEC_ENV_WINDOWS
1147 err:
1148 #endif
1149 	if (rte_mp == NULL) {
1150 		rte_exit(EXIT_FAILURE,
1151 			"Creation of mbuf pool for socket %u failed: %s\n",
1152 			socket_id, rte_strerror(rte_errno));
1153 	} else if (verbose_level > 0) {
1154 		rte_mempool_dump(stdout, rte_mp);
1155 	}
1156 	return rte_mp;
1157 }
1158 
1159 /*
1160  * Check whether the given socket ID is valid in NUMA mode;
1161  * return 0 if valid, -1 otherwise.
1162  */
1163 static int
1164 check_socket_id(const unsigned int socket_id)
1165 {
1166 	static int warning_once = 0;
1167 
1168 	if (new_socket_id(socket_id)) {
1169 		if (!warning_once && numa_support)
1170 			fprintf(stderr,
1171 				"Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
1172 		warning_once = 1;
1173 		return -1;
1174 	}
1175 	return 0;
1176 }
1177 
1178 /*
1179  * Get the allowed maximum number of RX queues.
1180  * *pid returns the port ID that has the minimal value of
1181  * max_rx_queues among all ports.
1182  */
1183 queueid_t
1184 get_allowed_max_nb_rxq(portid_t *pid)
1185 {
1186 	queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
1187 	bool max_rxq_valid = false;
1188 	portid_t pi;
1189 	struct rte_eth_dev_info dev_info;
1190 
1191 	RTE_ETH_FOREACH_DEV(pi) {
1192 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1193 			continue;
1194 
1195 		max_rxq_valid = true;
1196 		if (dev_info.max_rx_queues < allowed_max_rxq) {
1197 			allowed_max_rxq = dev_info.max_rx_queues;
1198 			*pid = pi;
1199 		}
1200 	}
1201 	return max_rxq_valid ? allowed_max_rxq : 0;
1202 }
1203 
1204 /*
1205  * Check whether the input rxq is valid.
1206  * It is valid if it is not greater than the maximum number
1207  * of RX queues of any port.
1208  * Return 0 if valid, -1 otherwise.
1209  */
1210 int
1211 check_nb_rxq(queueid_t rxq)
1212 {
1213 	queueid_t allowed_max_rxq;
1214 	portid_t pid = 0;
1215 
1216 	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
1217 	if (rxq > allowed_max_rxq) {
1218 		fprintf(stderr,
1219 			"Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
1220 			rxq, allowed_max_rxq, pid);
1221 		return -1;
1222 	}
1223 	return 0;
1224 }
1225 
1226 /*
1227  * Get the allowed maximum number of TX queues.
1228  * *pid returns the port ID that has the minimal value of
1229  * max_tx_queues among all ports.
1230  */
1231 queueid_t
1232 get_allowed_max_nb_txq(portid_t *pid)
1233 {
1234 	queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
1235 	bool max_txq_valid = false;
1236 	portid_t pi;
1237 	struct rte_eth_dev_info dev_info;
1238 
1239 	RTE_ETH_FOREACH_DEV(pi) {
1240 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1241 			continue;
1242 
1243 		max_txq_valid = true;
1244 		if (dev_info.max_tx_queues < allowed_max_txq) {
1245 			allowed_max_txq = dev_info.max_tx_queues;
1246 			*pid = pi;
1247 		}
1248 	}
1249 	return max_txq_valid ? allowed_max_txq : 0;
1250 }
1251 
1252 /*
1253  * Check whether the input txq is valid.
1254  * It is valid if it is not greater than the maximum number
1255  * of TX queues of any port.
1256  * Return 0 if valid, -1 otherwise.
1257  */
1258 int
1259 check_nb_txq(queueid_t txq)
1260 {
1261 	queueid_t allowed_max_txq;
1262 	portid_t pid = 0;
1263 
1264 	allowed_max_txq = get_allowed_max_nb_txq(&pid);
1265 	if (txq > allowed_max_txq) {
1266 		fprintf(stderr,
1267 			"Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
1268 			txq, allowed_max_txq, pid);
1269 		return -1;
1270 	}
1271 	return 0;
1272 }
1273 
1274 /*
1275  * Get the allowed maximum number of RXDs of every rx queue.
1276  * *pid returns the port ID that has the minimal value of
1277  * max_rxd over all queues of all ports.
1278  */
1279 static uint16_t
1280 get_allowed_max_nb_rxd(portid_t *pid)
1281 {
1282 	uint16_t allowed_max_rxd = UINT16_MAX;
1283 	portid_t pi;
1284 	struct rte_eth_dev_info dev_info;
1285 
1286 	RTE_ETH_FOREACH_DEV(pi) {
1287 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1288 			continue;
1289 
1290 		if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
1291 			allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
1292 			*pid = pi;
1293 		}
1294 	}
1295 	return allowed_max_rxd;
1296 }
1297 
1298 /*
1299  * Get the allowed minimal number of RXDs of every rx queue.
1300  * *pid returns the port ID that has the largest value of
1301  * min_rxd over all queues of all ports.
1302  */
1303 static uint16_t
1304 get_allowed_min_nb_rxd(portid_t *pid)
1305 {
1306 	uint16_t allowed_min_rxd = 0;
1307 	portid_t pi;
1308 	struct rte_eth_dev_info dev_info;
1309 
1310 	RTE_ETH_FOREACH_DEV(pi) {
1311 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1312 			continue;
1313 
1314 		if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
1315 			allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
1316 			*pid = pi;
1317 		}
1318 	}
1319 
1320 	return allowed_min_rxd;
1321 }
1322 
1323 /*
1324  * Check whether the input rxd is valid.
1325  * It is valid if it is not greater than the maximum number of RXDs
1326  * of every Rx queue and not less than the minimal number of RXDs
1327  * of every Rx queue.
1328  * Return 0 if valid, -1 otherwise.
1329  */
1330 int
1331 check_nb_rxd(queueid_t rxd)
1332 {
1333 	uint16_t allowed_max_rxd;
1334 	uint16_t allowed_min_rxd;
1335 	portid_t pid = 0;
1336 
1337 	allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
1338 	if (rxd > allowed_max_rxd) {
1339 		fprintf(stderr,
1340 			"Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
1341 			rxd, allowed_max_rxd, pid);
1342 		return -1;
1343 	}
1344 
1345 	allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
1346 	if (rxd < allowed_min_rxd) {
1347 		fprintf(stderr,
1348 			"Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
1349 			rxd, allowed_min_rxd, pid);
1350 		return -1;
1351 	}
1352 
1353 	return 0;
1354 }
1355 
1356 /*
1357  * Get the allowed maximum number of TXDs of every tx queue.
1358  * *pid returns the port ID that has the minimal value of
1359  * max_txd over all queues of all ports.
1360  */
1361 static uint16_t
1362 get_allowed_max_nb_txd(portid_t *pid)
1363 {
1364 	uint16_t allowed_max_txd = UINT16_MAX;
1365 	portid_t pi;
1366 	struct rte_eth_dev_info dev_info;
1367 
1368 	RTE_ETH_FOREACH_DEV(pi) {
1369 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1370 			continue;
1371 
1372 		if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
1373 			allowed_max_txd = dev_info.tx_desc_lim.nb_max;
1374 			*pid = pi;
1375 		}
1376 	}
1377 	return allowed_max_txd;
1378 }
1379 
1380 /*
1381  * Get the allowed minimal number of TXDs of every tx queue.
1382  * *pid returns the port ID that has the largest value of
1383  * min_txd over all queues of all ports.
1384  */
1385 static uint16_t
1386 get_allowed_min_nb_txd(portid_t *pid)
1387 {
1388 	uint16_t allowed_min_txd = 0;
1389 	portid_t pi;
1390 	struct rte_eth_dev_info dev_info;
1391 
1392 	RTE_ETH_FOREACH_DEV(pi) {
1393 		if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
1394 			continue;
1395 
1396 		if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
1397 			allowed_min_txd = dev_info.tx_desc_lim.nb_min;
1398 			*pid = pi;
1399 		}
1400 	}
1401 
1402 	return allowed_min_txd;
1403 }
1404 
1405 /*
1406  * Check whether the input txd is valid.
1407  * It is valid if it is not greater than the maximum and not less
1408  * than the minimal number of TXDs of every Tx queue.
1409  * Return 0 if valid, -1 otherwise.
1410  */
1411 int
1412 check_nb_txd(queueid_t txd)
1413 {
1414 	uint16_t allowed_max_txd;
1415 	uint16_t allowed_min_txd;
1416 	portid_t pid = 0;
1417 
1418 	allowed_max_txd = get_allowed_max_nb_txd(&pid);
1419 	if (txd > allowed_max_txd) {
1420 		fprintf(stderr,
1421 			"Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
1422 			txd, allowed_max_txd, pid);
1423 		return -1;
1424 	}
1425 
1426 	allowed_min_txd = get_allowed_min_nb_txd(&pid);
1427 	if (txd < allowed_min_txd) {
1428 		fprintf(stderr,
1429 			"Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
1430 			txd, allowed_min_txd, pid);
1431 		return -1;
1432 	}
1433 	return 0;
1434 }
1435 
1436 
1437 /*
1438  * Get the allowed maximum number of hairpin queues.
1439  * *pid returns the port ID that has the minimal value of
1440  * max_hairpin_queues among all ports.
1441  */
1442 queueid_t
1443 get_allowed_max_nb_hairpinq(portid_t *pid)
1444 {
1445 	queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
1446 	portid_t pi;
1447 	struct rte_eth_hairpin_cap cap;
1448 
1449 	RTE_ETH_FOREACH_DEV(pi) {
1450 		if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
1451 			*pid = pi;
1452 			return 0;
1453 		}
1454 		if (cap.max_nb_queues < allowed_max_hairpinq) {
1455 			allowed_max_hairpinq = cap.max_nb_queues;
1456 			*pid = pi;
1457 		}
1458 	}
1459 	return allowed_max_hairpinq;
1460 }
1461 
1462 /*
1463  * Check whether the input hairpinq is valid.
1464  * It is valid if it is not greater than the maximum number
1465  * of hairpin queues of any port.
1466  * Return 0 if valid, -1 otherwise.
1467  */
1468 int
1469 check_nb_hairpinq(queueid_t hairpinq)
1470 {
1471 	queueid_t allowed_max_hairpinq;
1472 	portid_t pid = 0;
1473 
1474 	allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
1475 	if (hairpinq > allowed_max_hairpinq) {
1476 		fprintf(stderr,
1477 			"Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
1478 			hairpinq, allowed_max_hairpinq, pid);
1479 		return -1;
1480 	}
1481 	return 0;
1482 }
1483 
1484 static void
1485 init_config_port_offloads(portid_t pid, uint32_t socket_id)
1486 {
1487 	struct rte_port *port = &ports[pid];
1488 	uint16_t data_size;
1489 	int ret;
1490 	int i;
1491 
1492 	port->dev_conf.txmode = tx_mode;
1493 	port->dev_conf.rxmode = rx_mode;
1494 
1495 	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
1496 	if (ret != 0)
1497 		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
1498 
1499 	ret = update_jumbo_frame_offload(pid);
1500 	if (ret != 0)
1501 		fprintf(stderr,
1502 			"Updating jumbo frame offload failed for port %u\n",
1503 			pid);
1504 
1505 	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
1506 		port->dev_conf.txmode.offloads &=
1507 			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1508 
1509 	/* Apply Rx offloads configuration */
1510 	for (i = 0; i < port->dev_info.max_rx_queues; i++)
1511 		port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
1512 	/* Apply Tx offloads configuration */
1513 	for (i = 0; i < port->dev_info.max_tx_queues; i++)
1514 		port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;
1515 
1516 	if (eth_link_speed)
1517 		port->dev_conf.link_speeds = eth_link_speed;
1518 
1519 	/* set flag to initialize port/queue */
1520 	port->need_reconfig = 1;
1521 	port->need_reconfig_queues = 1;
1522 	port->socket_id = socket_id;
1523 	port->tx_metadata = 0;
1524 
1525 	/*
1526 	 * Check for maximum number of segments per MTU.
1527 	 * Accordingly update the mbuf data size.
1528 	 */
1529 	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
1530 	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
1531 		data_size = rx_mode.max_rx_pkt_len /
1532 			port->dev_info.rx_desc_lim.nb_mtu_seg_max;
1533 
1534 		if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) {
1535 			mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM;
1536 			TESTPMD_LOG(WARNING,
1537 				    "Configured mbuf size of the first segment %hu\n",
1538 				    mbuf_data_size[0]);
1539 		}
1540 	}
1541 }
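/*
 * Example of the mbuf-size adjustment above, assuming the default
 * 128-byte RTE_PKTMBUF_HEADROOM and a 2176-byte default mbuf data size:
 * max_rx_pkt_len = 9600 with nb_mtu_seg_max = 4 gives data_size = 2400,
 * and 2400 + 128 = 2528 > 2176, so mbuf_data_size[0] is raised to 2528.
 */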
1542 
1543 static void
1544 init_config(void)
1545 {
1546 	portid_t pid;
1547 	struct rte_mempool *mbp;
1548 	unsigned int nb_mbuf_per_pool;
1549 	lcoreid_t  lc_id;
1550 	struct rte_gro_param gro_param;
1551 	uint32_t gso_types;
1552 
1553 	/* Configuration of logical cores. */
1554 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
1555 				sizeof(struct fwd_lcore *) * nb_lcores,
1556 				RTE_CACHE_LINE_SIZE);
1557 	if (fwd_lcores == NULL) {
1558 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
1559 							"failed\n", nb_lcores);
1560 	}
1561 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1562 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
1563 					       sizeof(struct fwd_lcore),
1564 					       RTE_CACHE_LINE_SIZE);
1565 		if (fwd_lcores[lc_id] == NULL) {
1566 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
1567 								"failed\n");
1568 		}
1569 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
1570 	}
1571 
1572 	RTE_ETH_FOREACH_DEV(pid) {
1573 		uint32_t socket_id;
1574 
1575 		if (numa_support) {
1576 			socket_id = port_numa[pid];
1577 			if (port_numa[pid] == NUMA_NO_CONFIG) {
1578 				socket_id = rte_eth_dev_socket_id(pid);
1579 
1580 				/*
1581 				 * if socket_id is invalid,
1582 				 * set to the first available socket.
1583 				 */
1584 				if (check_socket_id(socket_id) < 0)
1585 					socket_id = socket_ids[0];
1586 			}
1587 		} else {
1588 			socket_id = (socket_num == UMA_NO_CONFIG) ?
1589 				    0 : socket_num;
1590 		}
1591 		/* Apply default TxRx configuration for all ports */
1592 		init_config_port_offloads(pid, socket_id);
1593 	}
1594 	/*
1595 	 * Create mbuf pools.
1596 	 * If NUMA support is disabled, create a single mbuf pool in
1597 	 * socket 0 memory by default.
1598 	 * Otherwise, create an mbuf pool in the memory of each discovered socket.
1599 	 *
1600 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
1601 	 * nb_txd can be configured at run time.
1602 	 */
1603 	if (param_total_num_mbufs)
1604 		nb_mbuf_per_pool = param_total_num_mbufs;
1605 	else {
1606 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
1607 			(nb_lcores * mb_mempool_cache) +
1608 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
1609 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
1610 	}
1611 
1612 	if (numa_support) {
1613 		uint8_t i, j;
1614 
1615 		for (i = 0; i < num_sockets; i++)
1616 			for (j = 0; j < mbuf_data_size_n; j++)
1617 				mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
1618 					mbuf_pool_create(mbuf_data_size[j],
1619 							  nb_mbuf_per_pool,
1620 							  socket_ids[i], j);
1621 	} else {
1622 		uint8_t i;
1623 
1624 		for (i = 0; i < mbuf_data_size_n; i++)
1625 			mempools[i] = mbuf_pool_create
1626 					(mbuf_data_size[i],
1627 					 nb_mbuf_per_pool,
1628 					 socket_num == UMA_NO_CONFIG ?
1629 					 0 : socket_num, i);
1630 	}
1631 
1632 	init_port_config();
1633 
1634 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1635 		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
1636 	/*
1637 	 * Record which mbuf pool each logical core should use, if needed.
1638 	 */
1639 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1640 		mbp = mbuf_pool_find(
1641 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
1642 
1643 		if (mbp == NULL)
1644 			mbp = mbuf_pool_find(0, 0);
1645 		fwd_lcores[lc_id]->mbp = mbp;
1646 		/* initialize GSO context */
1647 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
1648 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
1649 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
1650 		fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
1651 			RTE_ETHER_CRC_LEN;
1652 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
1653 	}
1654 
1655 	fwd_config_setup();
1656 
1657 	/* create a gro context for each lcore */
1658 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
1659 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
1660 	gro_param.max_item_per_flow = MAX_PKT_BURST;
1661 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
1662 		gro_param.socket_id = rte_lcore_to_socket_id(
1663 				fwd_lcores_cpuids[lc_id]);
1664 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
1665 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
1666 			rte_exit(EXIT_FAILURE,
1667 					"rte_gro_ctx_create() failed\n");
1668 		}
1669 	}
1670 }
1671 
1672 
1673 void
1674 reconfig(portid_t new_port_id, unsigned socket_id)
1675 {
1676 	/* Reconfiguration of Ethernet ports. */
1677 	init_config_port_offloads(new_port_id, socket_id);
1678 	init_port_config();
1679 }
1680 
1681 
1682 int
1683 init_fwd_streams(void)
1684 {
1685 	portid_t pid;
1686 	struct rte_port *port;
1687 	streamid_t sm_id, nb_fwd_streams_new;
1688 	queueid_t q;
1689 
1690 	/* Set the socket ID according to whether NUMA is enabled */
1691 	RTE_ETH_FOREACH_DEV(pid) {
1692 		port = &ports[pid];
1693 		if (nb_rxq > port->dev_info.max_rx_queues) {
1694 			fprintf(stderr,
1695 				"Fail: nb_rxq(%d) is greater than max_rx_queues(%d)\n",
1696 				nb_rxq, port->dev_info.max_rx_queues);
1697 			return -1;
1698 		}
1699 		if (nb_txq > port->dev_info.max_tx_queues) {
1700 			fprintf(stderr,
1701 				"Fail: nb_txq(%d) is greater than max_tx_queues(%d)\n",
1702 				nb_txq, port->dev_info.max_tx_queues);
1703 			return -1;
1704 		}
1705 		if (numa_support) {
1706 			if (port_numa[pid] != NUMA_NO_CONFIG)
1707 				port->socket_id = port_numa[pid];
1708 			else {
1709 				port->socket_id = rte_eth_dev_socket_id(pid);
1710 
1711 				/*
1712 				 * if socket_id is invalid,
1713 				 * set to the first available socket.
1714 				 */
1715 				if (check_socket_id(port->socket_id) < 0)
1716 					port->socket_id = socket_ids[0];
1717 			}
1718 		}
1719 		else {
1720 			if (socket_num == UMA_NO_CONFIG)
1721 				port->socket_id = 0;
1722 			else
1723 				port->socket_id = socket_num;
1724 		}
1725 	}
1726 
1727 	q = RTE_MAX(nb_rxq, nb_txq);
1728 	if (q == 0) {
1729 		fprintf(stderr,
1730 			"Fail: Cannot allocate fwd streams as number of queues is 0\n");
1731 		return -1;
1732 	}
1733 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
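	/* e.g. 2 ports with nb_rxq = 4 and nb_txq = 2 yield 2 * 4 = 8 streams */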
1734 	if (nb_fwd_streams_new == nb_fwd_streams)
1735 		return 0;
1736 	/* clear the old */
1737 	if (fwd_streams != NULL) {
1738 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1739 			if (fwd_streams[sm_id] == NULL)
1740 				continue;
1741 			rte_free(fwd_streams[sm_id]);
1742 			fwd_streams[sm_id] = NULL;
1743 		}
1744 		rte_free(fwd_streams);
1745 		fwd_streams = NULL;
1746 	}
1747 
1748 	/* init new */
1749 	nb_fwd_streams = nb_fwd_streams_new;
1750 	if (nb_fwd_streams) {
1751 		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
1752 			sizeof(struct fwd_stream *) * nb_fwd_streams,
1753 			RTE_CACHE_LINE_SIZE);
1754 		if (fwd_streams == NULL)
1755 			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
1756 				 " (struct fwd_stream *)) failed\n",
1757 				 nb_fwd_streams);
1758 
1759 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
1760 			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
1761 				" struct fwd_stream", sizeof(struct fwd_stream),
1762 				RTE_CACHE_LINE_SIZE);
1763 			if (fwd_streams[sm_id] == NULL)
1764 				rte_exit(EXIT_FAILURE, "rte_zmalloc"
1765 					 "(struct fwd_stream) failed\n");
1766 		}
1767 	}
1768 
1769 	return 0;
1770 }
1771 
1772 static void
1773 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
1774 {
1775 	uint64_t total_burst, sburst;
1776 	uint64_t nb_burst;
1777 	uint64_t burst_stats[4];
1778 	uint16_t pktnb_stats[4];
1779 	uint16_t nb_pkt;
1780 	int burst_percent[4], sburstp;
1781 	int i;
1782 
1783 	/*
1784 	 * First compute the total number of packet bursts and the
1785 	 * two highest numbers of bursts of the same number of packets.
1786 	 */
1787 	memset(&burst_stats, 0x0, sizeof(burst_stats));
1788 	memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
1789 
1790 	/* Show stats for 0 burst size always */
1791 	total_burst = pbs->pkt_burst_spread[0];
1792 	burst_stats[0] = pbs->pkt_burst_spread[0];
1793 	pktnb_stats[0] = 0;
1794 
1795 	/* Find the next 2 burst sizes with highest occurrences. */
1796 	for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
1797 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
1798 
1799 		if (nb_burst == 0)
1800 			continue;
1801 
1802 		total_burst += nb_burst;
1803 
1804 		if (nb_burst > burst_stats[1]) {
1805 			burst_stats[2] = burst_stats[1];
1806 			pktnb_stats[2] = pktnb_stats[1];
1807 			burst_stats[1] = nb_burst;
1808 			pktnb_stats[1] = nb_pkt;
1809 		} else if (nb_burst > burst_stats[2]) {
1810 			burst_stats[2] = nb_burst;
1811 			pktnb_stats[2] = nb_pkt;
1812 		}
1813 	}
1814 	if (total_burst == 0)
1815 		return;
1816 
1817 	printf("  %s-bursts : %"PRIu64" [", rx_tx, total_burst);
1818 	for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
1819 		if (i == 3) {
1820 			printf("%d%% of other]\n", 100 - sburstp);
1821 			return;
1822 		}
1823 
1824 		sburst += burst_stats[i];
1825 		if (sburst == total_burst) {
1826 			printf("%d%% of %d pkts]\n",
1827 				100 - sburstp, (int) pktnb_stats[i]);
1828 			return;
1829 		}
1830 
1831 		burst_percent[i] =
1832 			(double)burst_stats[i] / total_burst * 100;
1833 		printf("%d%% of %d pkts + ",
1834 			burst_percent[i], (int) pktnb_stats[i]);
1835 		sburstp += burst_percent[i];
1836 	}
1837 }
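/*
 * Example output of pkt_burst_stats_display() (illustrative values):
 *   RX-bursts : 1024 [10% of 0 pkts + 70% of 32 pkts + 20% of 16 pkts]
 */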
1838 
1839 static void
1840 fwd_stream_stats_display(streamid_t stream_id)
1841 {
1842 	struct fwd_stream *fs;
1843 	static const char *fwd_top_stats_border = "-------";
1844 
1845 	fs = fwd_streams[stream_id];
1846 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1847 	    (fs->fwd_dropped == 0))
1848 		return;
1849 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1850 	       "TX Port=%2d/Queue=%2d %s\n",
1851 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1852 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1853 	printf("  RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
1854 	       " TX-dropped: %-14"PRIu64,
1855 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1856 
1857 	/* if checksum mode */
1858 	if (cur_fwd_eng == &csum_fwd_engine) {
1859 		printf("  RX- bad IP checksum: %-14"PRIu64
1860 		       "  Rx- bad L4 checksum: %-14"PRIu64
1861 		       " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
1862 			fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
1863 			fs->rx_bad_outer_l4_csum);
1864 		printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
1865 			fs->rx_bad_outer_ip_csum);
1866 	} else {
1867 		printf("\n");
1868 	}
1869 
1870 	if (record_burst_stats) {
1871 		pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1872 		pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1873 	}
1874 }
1875 
1876 void
1877 fwd_stats_display(void)
1878 {
1879 	static const char *fwd_stats_border = "----------------------";
1880 	static const char *acc_stats_border = "+++++++++++++++";
1881 	struct {
1882 		struct fwd_stream *rx_stream;
1883 		struct fwd_stream *tx_stream;
1884 		uint64_t tx_dropped;
1885 		uint64_t rx_bad_ip_csum;
1886 		uint64_t rx_bad_l4_csum;
1887 		uint64_t rx_bad_outer_l4_csum;
1888 		uint64_t rx_bad_outer_ip_csum;
1889 	} ports_stats[RTE_MAX_ETHPORTS];
1890 	uint64_t total_rx_dropped = 0;
1891 	uint64_t total_tx_dropped = 0;
1892 	uint64_t total_rx_nombuf = 0;
1893 	struct rte_eth_stats stats;
1894 	uint64_t fwd_cycles = 0;
1895 	uint64_t total_recv = 0;
1896 	uint64_t total_xmit = 0;
1897 	struct rte_port *port;
1898 	streamid_t sm_id;
1899 	portid_t pt_id;
1900 	int i;
1901 
1902 	memset(ports_stats, 0, sizeof(ports_stats));
1903 
1904 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1905 		struct fwd_stream *fs = fwd_streams[sm_id];
1906 
1907 		if (cur_fwd_config.nb_fwd_streams >
1908 		    cur_fwd_config.nb_fwd_ports) {
1909 			fwd_stream_stats_display(sm_id);
1910 		} else {
1911 			ports_stats[fs->tx_port].tx_stream = fs;
1912 			ports_stats[fs->rx_port].rx_stream = fs;
1913 		}
1914 
1915 		ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
1916 
1917 		ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
1918 		ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
1919 		ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
1920 				fs->rx_bad_outer_l4_csum;
1921 		ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
1922 				fs->rx_bad_outer_ip_csum;
1923 
1924 		if (record_core_cycles)
1925 			fwd_cycles += fs->core_cycles;
1926 	}
1927 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1928 		pt_id = fwd_ports_ids[i];
1929 		port = &ports[pt_id];
1930 
1931 		rte_eth_stats_get(pt_id, &stats);
1932 		stats.ipackets -= port->stats.ipackets;
1933 		stats.opackets -= port->stats.opackets;
1934 		stats.ibytes -= port->stats.ibytes;
1935 		stats.obytes -= port->stats.obytes;
1936 		stats.imissed -= port->stats.imissed;
1937 		stats.oerrors -= port->stats.oerrors;
1938 		stats.rx_nombuf -= port->stats.rx_nombuf;
1939 
1940 		total_recv += stats.ipackets;
1941 		total_xmit += stats.opackets;
1942 		total_rx_dropped += stats.imissed;
1943 		total_tx_dropped += ports_stats[pt_id].tx_dropped;
1944 		total_tx_dropped += stats.oerrors;
1945 		total_rx_nombuf  += stats.rx_nombuf;
1946 
1947 		printf("\n  %s Forward statistics for port %-2d %s\n",
1948 		       fwd_stats_border, pt_id, fwd_stats_border);
1949 
1950 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
1951 		       "RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
1952 		       stats.ipackets + stats.imissed);
1953 
1954 		if (cur_fwd_eng == &csum_fwd_engine) {
1955 			printf("  Bad-ipcsum: %-14"PRIu64
1956 			       " Bad-l4csum: %-14"PRIu64
1957 			       "Bad-outer-l4csum: %-14"PRIu64"\n",
1958 			       ports_stats[pt_id].rx_bad_ip_csum,
1959 			       ports_stats[pt_id].rx_bad_l4_csum,
1960 			       ports_stats[pt_id].rx_bad_outer_l4_csum);
1961 			printf("  Bad-outer-ipcsum: %-14"PRIu64"\n",
1962 			       ports_stats[pt_id].rx_bad_outer_ip_csum);
1963 		}
1964 		if (stats.ierrors + stats.rx_nombuf > 0) {
1965 			printf("  RX-error: %-"PRIu64"\n", stats.ierrors);
1966 			printf("  RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
1967 		}
1968 
1969 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
1970 		       "TX-total: %-"PRIu64"\n",
1971 		       stats.opackets, ports_stats[pt_id].tx_dropped,
1972 		       stats.opackets + ports_stats[pt_id].tx_dropped);
1973 
1974 		if (record_burst_stats) {
1975 			if (ports_stats[pt_id].rx_stream)
1976 				pkt_burst_stats_display("RX",
1977 					&ports_stats[pt_id].rx_stream->rx_burst_stats);
1978 			if (ports_stats[pt_id].tx_stream)
1979 				pkt_burst_stats_display("TX",
1980 				&ports_stats[pt_id].tx_stream->tx_burst_stats);
1981 		}
1982 
1983 		printf("  %s--------------------------------%s\n",
1984 		       fwd_stats_border, fwd_stats_border);
1985 	}
1986 
1987 	printf("\n  %s Accumulated forward statistics for all ports"
1988 	       "%s\n",
1989 	       acc_stats_border, acc_stats_border);
1990 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1991 	       "%-"PRIu64"\n"
1992 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1993 	       "%-"PRIu64"\n",
1994 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1995 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1996 	if (total_rx_nombuf > 0)
1997 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1998 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1999 	       "%s\n",
2000 	       acc_stats_border, acc_stats_border);
2001 	if (record_core_cycles) {
2002 #define CYC_PER_MHZ 1E6
2003 		if (total_recv > 0 || total_xmit > 0) {
2004 			uint64_t total_pkts = 0;
2005 			if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
2006 			    strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
2007 				total_pkts = total_xmit;
2008 			else
2009 				total_pkts = total_recv;
2010 
2011 			printf("\n  CPU cycles/packet=%.2F (total cycles="
2012 			       "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
2013 			       " MHz Clock\n",
2014 			       (double) fwd_cycles / total_pkts,
2015 			       fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
2016 			       (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
2017 		}
2018 	}
2019 }
2020 
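/*
 * Note on the per-port arithmetic above: rte_eth_stats_get() returns
 * counters that are cumulative since the port was started, so
 * fwd_stats_display() subtracts the snapshot that fwd_stats_reset()
 * stored in port->stats to report only the current forwarding session.
 * A minimal sketch of the same pattern ("snapshot" is a hypothetical
 * earlier copy of the counters):
 *
 *	struct rte_eth_stats now;
 *
 *	rte_eth_stats_get(port_id, &now);
 *	printf("session rx: %"PRIu64"\n", now.ipackets - snapshot.ipackets);
 */
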
2021 void
2022 fwd_stats_reset(void)
2023 {
2024 	streamid_t sm_id;
2025 	portid_t pt_id;
2026 	int i;
2027 
2028 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2029 		pt_id = fwd_ports_ids[i];
2030 		rte_eth_stats_get(pt_id, &ports[pt_id].stats);
2031 	}
2032 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
2033 		struct fwd_stream *fs = fwd_streams[sm_id];
2034 
2035 		fs->rx_packets = 0;
2036 		fs->tx_packets = 0;
2037 		fs->fwd_dropped = 0;
2038 		fs->rx_bad_ip_csum = 0;
2039 		fs->rx_bad_l4_csum = 0;
2040 		fs->rx_bad_outer_l4_csum = 0;
2041 		fs->rx_bad_outer_ip_csum = 0;
2042 
2043 		memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
2044 		memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
2045 		fs->core_cycles = 0;
2046 	}
2047 }
2048 
2049 static void
2050 flush_fwd_rx_queues(void)
2051 {
2052 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
2053 	portid_t  rxp;
2054 	portid_t port_id;
2055 	queueid_t rxq;
2056 	uint16_t  nb_rx;
2057 	uint16_t  i;
2058 	uint8_t   j;
2059 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
2060 	uint64_t timer_period;
2061 
2062 	if (num_procs > 1) {
2063 		printf("multi-process is not supported for flushing fwd Rx queues, skipping\n");
2064 		return;
2065 	}
2066 
2067 	/* convert to number of cycles */
2068 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
2069 
2070 	for (j = 0; j < 2; j++) {
2071 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
2072 			for (rxq = 0; rxq < nb_rxq; rxq++) {
2073 				port_id = fwd_ports_ids[rxp];
2074 				/*
2075 				 * testpmd can get stuck in the do/while loop
2076 				 * below if rte_eth_rx_burst() keeps returning
2077 				 * packets, so a timer is used to exit the
2078 				 * loop once a 1 second timeout expires.
2079 				 */
2080 				prev_tsc = rte_rdtsc();
2081 				do {
2082 					nb_rx = rte_eth_rx_burst(port_id, rxq,
2083 						pkts_burst, MAX_PKT_BURST);
2084 					for (i = 0; i < nb_rx; i++)
2085 						rte_pktmbuf_free(pkts_burst[i]);
2086 
2087 					cur_tsc = rte_rdtsc();
2088 					diff_tsc = cur_tsc - prev_tsc;
2089 					timer_tsc += diff_tsc;
2090 				} while ((nb_rx > 0) &&
2091 					(timer_tsc < timer_period));
2092 				timer_tsc = 0;
2093 			}
2094 		}
2095 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
2096 	}
2097 }
2098 
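/*
 * The flush above makes two passes (j < 2) separated by a 10 ms delay,
 * presumably so that packets still in flight when the first pass ends are
 * caught by the second one. The core drain idiom, in isolation (timed_out
 * stands for the 1 second TSC check):
 *
 *	do {
 *		nb_rx = rte_eth_rx_burst(port_id, rxq, pkts, MAX_PKT_BURST);
 *		for (i = 0; i < nb_rx; i++)
 *			rte_pktmbuf_free(pkts[i]);
 *	} while (nb_rx > 0 && !timed_out);
 */
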
2099 static void
2100 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
2101 {
2102 	struct fwd_stream **fsm;
2103 	streamid_t nb_fs;
2104 	streamid_t sm_id;
2105 #ifdef RTE_LIB_BITRATESTATS
2106 	uint64_t tics_per_1sec;
2107 	uint64_t tics_datum;
2108 	uint64_t tics_current;
2109 	uint16_t i, cnt_ports;
2110 
2111 	cnt_ports = nb_ports;
2112 	tics_datum = rte_rdtsc();
2113 	tics_per_1sec = rte_get_timer_hz();
2114 #endif
2115 	fsm = &fwd_streams[fc->stream_idx];
2116 	nb_fs = fc->stream_nb;
2117 	do {
2118 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
2119 			(*pkt_fwd)(fsm[sm_id]);
2120 #ifdef RTE_LIB_BITRATESTATS
2121 		if (bitrate_enabled != 0 &&
2122 				bitrate_lcore_id == rte_lcore_id()) {
2123 			tics_current = rte_rdtsc();
2124 			if (tics_current - tics_datum >= tics_per_1sec) {
2125 				/* Periodic bitrate calculation */
2126 				for (i = 0; i < cnt_ports; i++)
2127 					rte_stats_bitrate_calc(bitrate_data,
2128 						ports_ids[i]);
2129 				tics_datum = tics_current;
2130 			}
2131 		}
2132 #endif
2133 #ifdef RTE_LIB_LATENCYSTATS
2134 		if (latencystats_enabled != 0 &&
2135 				latencystats_lcore_id == rte_lcore_id())
2136 			rte_latencystats_update();
2137 #endif
2138 
2139 	} while (! fc->stopped);
2140 }
2141 
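/*
 * run_pkt_fwd_on_lcore() above is the per-lcore hot path: each iteration
 * walks the lcore's streams once through the engine's packet_fwd callback,
 * then optionally samples the bitrate once per second and updates latency
 * statistics on the designated lcores, until fc->stopped is set by
 * stop_packet_forwarding().
 */
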
2142 static int
2143 start_pkt_forward_on_core(void *fwd_arg)
2144 {
2145 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
2146 			     cur_fwd_config.fwd_eng->packet_fwd);
2147 	return 0;
2148 }
2149 
2150 /*
2151  * Run the TXONLY packet forwarding engine to send a single burst of packets.
2152  * Used to start communication flows in network loopback test configurations.
2153  */
2154 static int
2155 run_one_txonly_burst_on_core(void *fwd_arg)
2156 {
2157 	struct fwd_lcore *fwd_lc;
2158 	struct fwd_lcore tmp_lcore;
2159 
2160 	fwd_lc = (struct fwd_lcore *) fwd_arg;
2161 	tmp_lcore = *fwd_lc;
2162 	tmp_lcore.stopped = 1;
2163 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
2164 	return 0;
2165 }
2166 
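/*
 * Note the trick above: the lcore context is copied and the copy's
 * "stopped" flag is pre-set to 1, so the do/while loop in
 * run_pkt_fwd_on_lcore() runs its body exactly once. This emits a single
 * TXONLY burst per stream without touching the shared lcore state.
 */
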
2167 /*
2168  * Launch packet forwarding:
2169  *     - Setup per-port forwarding context.
2170  *     - launch logical cores with their forwarding configuration.
2171  */
2172 static void
2173 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
2174 {
2175 	port_fwd_begin_t port_fwd_begin;
2176 	unsigned int i;
2177 	unsigned int lc_id;
2178 	int diag;
2179 
2180 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
2181 	if (port_fwd_begin != NULL) {
2182 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2183 			(*port_fwd_begin)(fwd_ports_ids[i]);
2184 	}
2185 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
2186 		lc_id = fwd_lcores_cpuids[i];
2187 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
2188 			fwd_lcores[i]->stopped = 0;
2189 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
2190 						     fwd_lcores[i], lc_id);
2191 			if (diag != 0)
2192 				fprintf(stderr,
2193 					"launch lcore %u failed - diag=%d\n",
2194 					lc_id, diag);
2195 		}
2196 	}
2197 }
2198 
2199 /*
2200  * Launch packet forwarding configuration.
2201  */
2202 void
2203 start_packet_forwarding(int with_tx_first)
2204 {
2205 	port_fwd_begin_t port_fwd_begin;
2206 	port_fwd_end_t  port_fwd_end;
2207 	unsigned int i;
2208 
2209 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
2210 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
2211 
2212 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
2213 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
2214 
2215 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
2216 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
2217 		(!nb_rxq || !nb_txq))
2218 		rte_exit(EXIT_FAILURE,
2219 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
2220 			cur_fwd_eng->fwd_mode_name);
2221 
2222 	if (all_ports_started() == 0) {
2223 		fprintf(stderr, "Not all ports were started\n");
2224 		return;
2225 	}
2226 	if (test_done == 0) {
2227 		fprintf(stderr, "Packet forwarding already started\n");
2228 		return;
2229 	}
2230 	test_done = 0;
2231 
2232 	fwd_config_setup();
2233 
2234 	if (!no_flush_rx)
2235 		flush_fwd_rx_queues();
2236 
2237 	pkt_fwd_config_display(&cur_fwd_config);
2238 	rxtx_config_display();
2239 
2240 	fwd_stats_reset();
2241 	if (with_tx_first) {
2242 		port_fwd_begin = tx_only_engine.port_fwd_begin;
2243 		if (port_fwd_begin != NULL) {
2244 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2245 				(*port_fwd_begin)(fwd_ports_ids[i]);
2246 		}
2247 		while (with_tx_first--) {
2248 			launch_packet_forwarding(
2249 					run_one_txonly_burst_on_core);
2250 			rte_eal_mp_wait_lcore();
2251 		}
2252 		port_fwd_end = tx_only_engine.port_fwd_end;
2253 		if (port_fwd_end != NULL) {
2254 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2255 				(*port_fwd_end)(fwd_ports_ids[i]);
2256 		}
2257 	}
2258 	launch_packet_forwarding(start_pkt_forward_on_core);
2259 }
2260 
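/*
 * In interactive mode this maps to the "start" command; a non-zero
 * with_tx_first (the "start tx_first [n]" command, if memory of the CLI
 * syntax serves) first runs the TXONLY engine n times to prime loopback
 * topologies, then launches the configured forwarding engine on all
 * forwarding lcores.
 */
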
2261 void
2262 stop_packet_forwarding(void)
2263 {
2264 	port_fwd_end_t port_fwd_end;
2265 	lcoreid_t lc_id;
2266 	portid_t pt_id;
2267 	int i;
2268 
2269 	if (test_done) {
2270 		fprintf(stderr, "Packet forwarding not started\n");
2271 		return;
2272 	}
2273 	printf("Telling cores to stop...");
2274 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
2275 		fwd_lcores[lc_id]->stopped = 1;
2276 	printf("\nWaiting for lcores to finish...\n");
2277 	rte_eal_mp_wait_lcore();
2278 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
2279 	if (port_fwd_end != NULL) {
2280 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
2281 			pt_id = fwd_ports_ids[i];
2282 			(*port_fwd_end)(pt_id);
2283 		}
2284 	}
2285 
2286 	fwd_stats_display();
2287 
2288 	printf("\nDone.\n");
2289 	test_done = 1;
2290 }
2291 
2292 void
2293 dev_set_link_up(portid_t pid)
2294 {
2295 	if (rte_eth_dev_set_link_up(pid) < 0)
2296 		fprintf(stderr, "\nSet link up fail.\n");
2297 }
2298 
2299 void
2300 dev_set_link_down(portid_t pid)
2301 {
2302 	if (rte_eth_dev_set_link_down(pid) < 0)
2303 		fprintf(stderr, "\nSet link down fail.\n");
2304 }
2305 
2306 static int
2307 all_ports_started(void)
2308 {
2309 	portid_t pi;
2310 	struct rte_port *port;
2311 
2312 	RTE_ETH_FOREACH_DEV(pi) {
2313 		port = &ports[pi];
2314 		/* Check if there is a port which is not started */
2315 		if ((port->port_status != RTE_PORT_STARTED) &&
2316 			(port->slave_flag == 0))
2317 			return 0;
2318 	}
2319 
2320 	/* All ports are started */
2321 	return 1;
2322 }
2323 
2324 int
2325 port_is_stopped(portid_t port_id)
2326 {
2327 	struct rte_port *port = &ports[port_id];
2328 
2329 	if ((port->port_status != RTE_PORT_STOPPED) &&
2330 	    (port->slave_flag == 0))
2331 		return 0;
2332 	return 1;
2333 }
2334 
2335 int
2336 all_ports_stopped(void)
2337 {
2338 	portid_t pi;
2339 
2340 	RTE_ETH_FOREACH_DEV(pi) {
2341 		if (!port_is_stopped(pi))
2342 			return 0;
2343 	}
2344 
2345 	return 1;
2346 }
2347 
2348 int
2349 port_is_started(portid_t port_id)
2350 {
2351 	if (port_id_is_invalid(port_id, ENABLED_WARN))
2352 		return 0;
2353 
2354 	if (ports[port_id].port_status != RTE_PORT_STARTED)
2355 		return 0;
2356 
2357 	return 1;
2358 }
2359 
2360 /* Configure the Rx and Tx hairpin queues for the selected port. */
2361 static int
2362 setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
2363 {
2364 	queueid_t qi;
2365 	struct rte_eth_hairpin_conf hairpin_conf = {
2366 		.peer_count = 1,
2367 	};
2368 	int i;
2369 	int diag;
2370 	struct rte_port *port = &ports[pi];
2371 	uint16_t peer_rx_port = pi;
2372 	uint16_t peer_tx_port = pi;
2373 	uint32_t manual = 1;
2374 	uint32_t tx_exp = hairpin_mode & 0x10;
2375 
2376 	if (!(hairpin_mode & 0xf)) {
2377 		peer_rx_port = pi;
2378 		peer_tx_port = pi;
2379 		manual = 0;
2380 	} else if (hairpin_mode & 0x1) {
2381 		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
2382 						       RTE_ETH_DEV_NO_OWNER);
2383 		if (peer_tx_port >= RTE_MAX_ETHPORTS)
2384 			peer_tx_port = rte_eth_find_next_owned_by(0,
2385 						RTE_ETH_DEV_NO_OWNER);
2386 		if (p_pi != RTE_MAX_ETHPORTS) {
2387 			peer_rx_port = p_pi;
2388 		} else {
2389 			uint16_t next_pi;
2390 
2391 			/* Last port will be the peer RX port of the first. */
2392 			RTE_ETH_FOREACH_DEV(next_pi)
2393 				peer_rx_port = next_pi;
2394 		}
2395 		manual = 1;
2396 	} else if (hairpin_mode & 0x2) {
2397 		if (cnt_pi & 0x1) {
2398 			peer_rx_port = p_pi;
2399 		} else {
2400 			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
2401 						RTE_ETH_DEV_NO_OWNER);
2402 			if (peer_rx_port >= RTE_MAX_ETHPORTS)
2403 				peer_rx_port = pi;
2404 		}
2405 		peer_tx_port = peer_rx_port;
2406 		manual = 1;
2407 	}
2408 
2409 	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
2410 		hairpin_conf.peers[0].port = peer_rx_port;
2411 		hairpin_conf.peers[0].queue = i + nb_rxq;
2412 		hairpin_conf.manual_bind = !!manual;
2413 		hairpin_conf.tx_explicit = !!tx_exp;
2414 		diag = rte_eth_tx_hairpin_queue_setup
2415 			(pi, qi, nb_txd, &hairpin_conf);
2416 		i++;
2417 		if (diag == 0)
2418 			continue;
2419 
2420 		/* Failed to set up Tx hairpin queue, return */
2421 		if (rte_atomic16_cmpset(&(port->port_status),
2422 					RTE_PORT_HANDLING,
2423 					RTE_PORT_STOPPED) == 0)
2424 			fprintf(stderr,
2425 				"Port %d cannot be set back to stopped\n", pi);
2426 		fprintf(stderr, "Failed to configure port %d hairpin queues\n",
2427 			pi);
2428 		/* try to reconfigure queues next time */
2429 		port->need_reconfig_queues = 1;
2430 		return -1;
2431 	}
2432 	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
2433 		hairpin_conf.peers[0].port = peer_tx_port;
2434 		hairpin_conf.peers[0].queue = i + nb_txq;
2435 		hairpin_conf.manual_bind = !!manual;
2436 		hairpin_conf.tx_explicit = !!tx_exp;
2437 		diag = rte_eth_rx_hairpin_queue_setup
2438 			(pi, qi, nb_rxd, &hairpin_conf);
2439 		i++;
2440 		if (diag == 0)
2441 			continue;
2442 
2443 		/* Failed to set up Rx hairpin queue, return */
2444 		if (rte_atomic16_cmpset(&(port->port_status),
2445 					RTE_PORT_HANDLING,
2446 					RTE_PORT_STOPPED) == 0)
2447 			fprintf(stderr,
2448 				"Port %d cannot be set back to stopped\n", pi);
2449 		fprintf(stderr, "Failed to configure port %d hairpin queues\n",
2450 			pi);
2451 		/* try to reconfigure queues next time */
2452 		port->need_reconfig_queues = 1;
2453 		return -1;
2454 	}
2455 	return 0;
2456 }
2457 
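/*
 * How setup_hairpin_queues() above interprets hairpin_mode, as read from
 * the code (the bit names are informal):
 *   - low nibble == 0: each port is hairpinned to itself, implicit bind;
 *   - bit 0 (0x1):     ports are chained into a ring (Tx peer is the next
 *                      port, Rx peer the previous one), manual bind;
 *   - bit 1 (0x2):     consecutive ports are bound in pairs, manual bind;
 *   - bit 4 (0x10):    Tx flow rules are explicit (tx_explicit).
 */
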
2458 /* Configure the Rx with optional split. */
2459 int
2460 rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2461 	       uint16_t nb_rx_desc, unsigned int socket_id,
2462 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
2463 {
2464 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
2465 	unsigned int i, mp_n;
2466 	int ret;
2467 
2468 	if (rx_pkt_nb_segs <= 1 ||
2469 	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
2470 		rx_conf->rx_seg = NULL;
2471 		rx_conf->rx_nseg = 0;
2472 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
2473 					     nb_rx_desc, socket_id,
2474 					     rx_conf, mp);
2475 		return ret;
2476 	}
2477 	for (i = 0; i < rx_pkt_nb_segs; i++) {
2478 		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
2479 		struct rte_mempool *mpx;
2480 		/*
2481 		 * Use the last valid pool for segments whose index
2482 		 * exceeds the number of configured pools.
2483 		 */
2484 		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
2485 		mpx = mbuf_pool_find(socket_id, mp_n);
2486 		/* A zero segment length means: use the mbuf data size. */
2487 		rx_seg->length = rx_pkt_seg_lengths[i] ?
2488 				   rx_pkt_seg_lengths[i] :
2489 				   mbuf_data_size[mp_n];
2490 		rx_seg->offset = i < rx_pkt_nb_offs ?
2491 				   rx_pkt_seg_offsets[i] : 0;
2492 		rx_seg->mp = mpx ? mpx : mp;
2493 	}
2494 	rx_conf->rx_nseg = rx_pkt_nb_segs;
2495 	rx_conf->rx_seg = rx_useg;
2496 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2497 				    socket_id, rx_conf, NULL);
2498 	rx_conf->rx_seg = NULL;
2499 	rx_conf->rx_nseg = 0;
2500 	return ret;
2501 }
2502 
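/*
 * Minimal sketch of what the split path above hands to the PMD, assuming
 * two segments were configured (the pools, lengths and offsets below are
 * hypothetical):
 *
 *	rx_useg[0].split = (struct rte_eth_rxseg_split)
 *		{ .mp = hdr_pool, .length = 128, .offset = 0 };
 *	rx_useg[1].split = (struct rte_eth_rxseg_split)
 *		{ .mp = data_pool, .length = 2048, .offset = 0 };
 *	rx_conf->rx_seg = rx_useg;
 *	rx_conf->rx_nseg = 2;
 *	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket, rx_conf, NULL);
 *
 * The mempool argument of rte_eth_rx_queue_setup() is NULL in this mode;
 * each segment carries its own pool.
 */
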
2503 int
2504 start_port(portid_t pid)
2505 {
2506 	int diag, need_check_link_status = -1;
2507 	portid_t pi;
2508 	portid_t p_pi = RTE_MAX_ETHPORTS;
2509 	portid_t pl[RTE_MAX_ETHPORTS];
2510 	portid_t peer_pl[RTE_MAX_ETHPORTS];
2511 	uint16_t cnt_pi = 0;
2512 	uint16_t cfg_pi = 0;
2513 	int peer_pi;
2514 	queueid_t qi;
2515 	struct rte_port *port;
2516 	struct rte_eth_hairpin_cap cap;
2517 
2518 	if (port_id_is_invalid(pid, ENABLED_WARN))
2519 		return 0;
2520 
2521 	RTE_ETH_FOREACH_DEV(pi) {
2522 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2523 			continue;
2524 
2525 		need_check_link_status = 0;
2526 		port = &ports[pi];
2527 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
2528 						 RTE_PORT_HANDLING) == 0) {
2529 			fprintf(stderr, "Port %d is not stopped, skipping it\n", pi);
2530 			continue;
2531 		}
2532 
2533 		if (port->need_reconfig > 0) {
2534 			port->need_reconfig = 0;
2535 
2536 			if (flow_isolate_all) {
2537 				int ret = port_flow_isolate(pi, 1);
2538 				if (ret) {
2539 					fprintf(stderr,
2540 						"Failed to apply isolated mode on port %d\n",
2541 						pi);
2542 					return -1;
2543 				}
2544 			}
2545 			configure_rxtx_dump_callbacks(0);
2546 			printf("Configuring Port %d (socket %u)\n", pi,
2547 					port->socket_id);
2548 			if (nb_hairpinq > 0 &&
2549 			    rte_eth_dev_hairpin_capability_get(pi, &cap)) {
2550 				fprintf(stderr,
2551 					"Port %d doesn't support hairpin queues\n",
2552 					pi);
2553 				return -1;
2554 			}
2555 			/* configure port */
2556 			diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
2557 						     nb_txq + nb_hairpinq,
2558 						     &(port->dev_conf));
2559 			if (diag != 0) {
2560 				if (rte_atomic16_cmpset(&(port->port_status),
2561 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2562 					fprintf(stderr,
2563 						"Port %d cannot be set back to stopped\n",
2564 						pi);
2565 				fprintf(stderr, "Failed to configure port %d\n",
2566 					pi);
2567 				/* try to reconfigure port next time */
2568 				port->need_reconfig = 1;
2569 				return -1;
2570 			}
2571 		}
2572 		if (port->need_reconfig_queues > 0 && is_proc_primary()) {
2573 			port->need_reconfig_queues = 0;
2574 			/* setup tx queues */
2575 			for (qi = 0; qi < nb_txq; qi++) {
2576 				if ((numa_support) &&
2577 					(txring_numa[pi] != NUMA_NO_CONFIG))
2578 					diag = rte_eth_tx_queue_setup(pi, qi,
2579 						port->nb_tx_desc[qi],
2580 						txring_numa[pi],
2581 						&(port->tx_conf[qi]));
2582 				else
2583 					diag = rte_eth_tx_queue_setup(pi, qi,
2584 						port->nb_tx_desc[qi],
2585 						port->socket_id,
2586 						&(port->tx_conf[qi]));
2587 
2588 				if (diag == 0)
2589 					continue;
2590 
2591 				/* Failed to set up Tx queue, return */
2592 				if (rte_atomic16_cmpset(&(port->port_status),
2593 							RTE_PORT_HANDLING,
2594 							RTE_PORT_STOPPED) == 0)
2595 					fprintf(stderr,
2596 						"Port %d cannot be set back to stopped\n",
2597 						pi);
2598 				fprintf(stderr,
2599 					"Failed to configure port %d tx queues\n",
2600 					pi);
2601 				/* try to reconfigure queues next time */
2602 				port->need_reconfig_queues = 1;
2603 				return -1;
2604 			}
2605 			/* setup rx queues */
2606 			for (qi = 0; qi < nb_rxq; qi++) {
2607 				if ((numa_support) &&
2608 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2609 					struct rte_mempool *mp =
2610 						mbuf_pool_find
2611 							(rxring_numa[pi], 0);
2612 					if (mp == NULL) {
2613 						fprintf(stderr,
2614 							"Failed to set up Rx queue: no mempool allocated on socket %d\n",
2615 							rxring_numa[pi]);
2616 						return -1;
2617 					}
2618 
2619 					diag = rx_queue_setup(pi, qi,
2620 					     port->nb_rx_desc[qi],
2621 					     rxring_numa[pi],
2622 					     &(port->rx_conf[qi]),
2623 					     mp);
2624 				} else {
2625 					struct rte_mempool *mp =
2626 						mbuf_pool_find
2627 							(port->socket_id, 0);
2628 					if (mp == NULL) {
2629 						fprintf(stderr,
2630 							"Failed to set up Rx queue: no mempool allocated on socket %d\n",
2631 							port->socket_id);
2632 						return -1;
2633 					}
2634 					diag = rx_queue_setup(pi, qi,
2635 					     port->nb_rx_desc[qi],
2636 					     port->socket_id,
2637 					     &(port->rx_conf[qi]),
2638 					     mp);
2639 				}
2640 				if (diag == 0)
2641 					continue;
2642 
2643 				/* Failed to set up Rx queue, return */
2644 				if (rte_atomic16_cmpset(&(port->port_status),
2645 							RTE_PORT_HANDLING,
2646 							RTE_PORT_STOPPED) == 0)
2647 					fprintf(stderr,
2648 						"Port %d cannot be set back to stopped\n",
2649 						pi);
2650 				fprintf(stderr,
2651 					"Failed to configure port %d rx queues\n",
2652 					pi);
2653 				/* try to reconfigure queues next time */
2654 				port->need_reconfig_queues = 1;
2655 				return -1;
2656 			}
2657 			/* setup hairpin queues */
2658 			if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
2659 				return -1;
2660 		}
2661 		configure_rxtx_dump_callbacks(verbose_level);
2662 		if (clear_ptypes) {
2663 			diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
2664 					NULL, 0);
2665 			if (diag < 0)
2666 				fprintf(stderr,
2667 					"Port %d: Failed to disable Ptype parsing\n",
2668 					pi);
2669 		}
2670 
2671 		p_pi = pi;
2672 		cnt_pi++;
2673 
2674 		/* start port */
2675 		diag = eth_dev_start_mp(pi);
2676 		if (diag < 0) {
2677 			fprintf(stderr, "Failed to start port %d: %s\n",
2678 				pi, rte_strerror(-diag));
2679 
2680 			/* Failed to start the port, revert status to stopped */
2681 			if (rte_atomic16_cmpset(&(port->port_status),
2682 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2683 				fprintf(stderr,
2684 					"Port %d cannot be set back to stopped\n",
2685 					pi);
2686 			continue;
2687 		}
2688 
2689 		if (rte_atomic16_cmpset(&(port->port_status),
2690 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2691 			fprintf(stderr, "Port %d cannot be set to started\n",
2692 				pi);
2693 
2694 		if (eth_macaddr_get_print_err(pi, &port->eth_addr) == 0)
2695 			printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi,
2696 					RTE_ETHER_ADDR_BYTES(&port->eth_addr));
2697 
2698 		/* at least one port started, need to check link status */
2699 		need_check_link_status = 1;
2700 
2701 		pl[cfg_pi++] = pi;
2702 	}
2703 
2704 	if (need_check_link_status == 1 && !no_link_check)
2705 		check_all_ports_link_status(RTE_PORT_ALL);
2706 	else if (need_check_link_status == 0)
2707 		fprintf(stderr, "Please stop the ports first\n");
2708 
2709 	if (hairpin_mode & 0xf) {
2710 		uint16_t i;
2711 		int j;
2712 
2713 		/* bind all started hairpin ports */
2714 		for (i = 0; i < cfg_pi; i++) {
2715 			pi = pl[i];
2716 			/* bind current Tx to all peer Rx */
2717 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2718 							RTE_MAX_ETHPORTS, 1);
2719 			if (peer_pi < 0)
2720 				return peer_pi;
2721 			for (j = 0; j < peer_pi; j++) {
2722 				if (!port_is_started(peer_pl[j]))
2723 					continue;
2724 				diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
2725 				if (diag < 0) {
2726 					fprintf(stderr,
2727 						"Error during binding hairpin Tx port %u to %u: %s\n",
2728 						pi, peer_pl[j],
2729 						rte_strerror(-diag));
2730 					return -1;
2731 				}
2732 			}
2733 			/* bind all peer Tx to current Rx */
2734 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2735 							RTE_MAX_ETHPORTS, 0);
2736 			if (peer_pi < 0)
2737 				return peer_pi;
2738 			for (j = 0; j < peer_pi; j++) {
2739 				if (!port_is_started(peer_pl[j]))
2740 					continue;
2741 				diag = rte_eth_hairpin_bind(peer_pl[j], pi);
2742 				if (diag < 0) {
2743 					fprintf(stderr,
2744 						"Error during binding hairpin Tx port %u to %u: %s\n",
2745 						peer_pl[j], pi,
2746 						rte_strerror(-diag));
2747 					return -1;
2748 				}
2749 			}
2750 		}
2751 	}
2752 
2753 	printf("Done\n");
2754 	return 0;
2755 }
2756 
2757 void
2758 stop_port(portid_t pid)
2759 {
2760 	portid_t pi;
2761 	struct rte_port *port;
2762 	int need_check_link_status = 0;
2763 	portid_t peer_pl[RTE_MAX_ETHPORTS];
2764 	int peer_pi;
2765 
2766 	if (port_id_is_invalid(pid, ENABLED_WARN))
2767 		return;
2768 
2769 	printf("Stopping ports...\n");
2770 
2771 	RTE_ETH_FOREACH_DEV(pi) {
2772 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2773 			continue;
2774 
2775 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2776 			fprintf(stderr,
2777 				"Please remove port %d from forwarding configuration.\n",
2778 				pi);
2779 			continue;
2780 		}
2781 
2782 		if (port_is_bonding_slave(pi)) {
2783 			fprintf(stderr,
2784 				"Please remove port %d from bonded device.\n",
2785 				pi);
2786 			continue;
2787 		}
2788 
2789 		port = &ports[pi];
2790 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2791 						RTE_PORT_HANDLING) == 0)
2792 			continue;
2793 
2794 		if (hairpin_mode & 0xf) {
2795 			int j;
2796 
2797 			rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
2798 			/* unbind all peer Tx from current Rx */
2799 			peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
2800 							RTE_MAX_ETHPORTS, 0);
2801 			if (peer_pi < 0)
2802 				continue;
2803 			for (j = 0; j < peer_pi; j++) {
2804 				if (!port_is_started(peer_pl[j]))
2805 					continue;
2806 				rte_eth_hairpin_unbind(peer_pl[j], pi);
2807 			}
2808 		}
2809 
2810 		if (port->flow_list)
2811 			port_flow_flush(pi);
2812 
2813 		if (eth_dev_stop_mp(pi) != 0)
2814 			RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
2815 				pi);
2816 
2817 		if (rte_atomic16_cmpset(&(port->port_status),
2818 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2819 			fprintf(stderr, "Port %d cannot be set to stopped\n",
2820 				pi);
2821 		need_check_link_status = 1;
2822 	}
2823 	if (need_check_link_status && !no_link_check)
2824 		check_all_ports_link_status(RTE_PORT_ALL);
2825 
2826 	printf("Done\n");
2827 }
2828 
2829 static void
2830 remove_invalid_ports_in(portid_t *array, portid_t *total)
2831 {
2832 	portid_t i;
2833 	portid_t new_total = 0;
2834 
2835 	for (i = 0; i < *total; i++)
2836 		if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
2837 			array[new_total] = array[i];
2838 			new_total++;
2839 		}
2840 	*total = new_total;
2841 }
2842 
2843 static void
2844 remove_invalid_ports(void)
2845 {
2846 	remove_invalid_ports_in(ports_ids, &nb_ports);
2847 	remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
2848 	nb_cfg_ports = nb_fwd_ports;
2849 }
2850 
2851 void
2852 close_port(portid_t pid)
2853 {
2854 	portid_t pi;
2855 	struct rte_port *port;
2856 
2857 	if (port_id_is_invalid(pid, ENABLED_WARN))
2858 		return;
2859 
2860 	printf("Closing ports...\n");
2861 
2862 	RTE_ETH_FOREACH_DEV(pi) {
2863 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2864 			continue;
2865 
2866 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2867 			fprintf(stderr,
2868 				"Please remove port %d from forwarding configuration.\n",
2869 				pi);
2870 			continue;
2871 		}
2872 
2873 		if (port_is_bonding_slave(pi)) {
2874 			fprintf(stderr,
2875 				"Please remove port %d from bonded device.\n",
2876 				pi);
2877 			continue;
2878 		}
2879 
2880 		port = &ports[pi];
2881 		if (rte_atomic16_cmpset(&(port->port_status),
2882 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2883 			fprintf(stderr, "Port %d is already closed\n", pi);
2884 			continue;
2885 		}
2886 
2887 		if (is_proc_primary()) {
2888 			port_flow_flush(pi);
2889 			rte_eth_dev_close(pi);
2890 		}
2891 	}
2892 
2893 	remove_invalid_ports();
2894 	printf("Done\n");
2895 }
2896 
2897 void
2898 reset_port(portid_t pid)
2899 {
2900 	int diag;
2901 	portid_t pi;
2902 	struct rte_port *port;
2903 
2904 	if (port_id_is_invalid(pid, ENABLED_WARN))
2905 		return;
2906 
2907 	if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
2908 		(pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
2909 		fprintf(stderr,
2910 			"Cannot reset port(s); please stop the port(s) first.\n");
2911 		return;
2912 	}
2913 
2914 	printf("Resetting ports...\n");
2915 
2916 	RTE_ETH_FOREACH_DEV(pi) {
2917 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2918 			continue;
2919 
2920 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2921 			fprintf(stderr,
2922 				"Please remove port %d from forwarding configuration.\n",
2923 				pi);
2924 			continue;
2925 		}
2926 
2927 		if (port_is_bonding_slave(pi)) {
2928 			fprintf(stderr,
2929 				"Please remove port %d from bonded device.\n",
2930 				pi);
2931 			continue;
2932 		}
2933 
2934 		diag = rte_eth_dev_reset(pi);
2935 		if (diag == 0) {
2936 			port = &ports[pi];
2937 			port->need_reconfig = 1;
2938 			port->need_reconfig_queues = 1;
2939 		} else {
2940 			fprintf(stderr, "Failed to reset port %d. diag=%d\n",
2941 				pi, diag);
2942 		}
2943 	}
2944 
2945 	printf("Done\n");
2946 }
2947 
2948 void
2949 attach_port(char *identifier)
2950 {
2951 	portid_t pi;
2952 	struct rte_dev_iterator iterator;
2953 
2954 	printf("Attaching a new port...\n");
2955 
2956 	if (identifier == NULL) {
2957 		fprintf(stderr, "Invalid parameters are specified\n");
2958 		return;
2959 	}
2960 
2961 	if (rte_dev_probe(identifier) < 0) {
2962 		TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
2963 		return;
2964 	}
2965 
2966 	/* first attach mode: event */
2967 	if (setup_on_probe_event) {
2968 		/* new ports are detected on RTE_ETH_EVENT_NEW event */
2969 		for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
2970 			if (ports[pi].port_status == RTE_PORT_HANDLING &&
2971 					ports[pi].need_setup != 0)
2972 				setup_attached_port(pi);
2973 		return;
2974 	}
2975 
2976 	/* second attach mode: iterator */
2977 	RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
2978 		/* setup ports matching the devargs used for probing */
2979 		if (port_is_forwarding(pi))
2980 			continue; /* port was already attached before */
2981 		setup_attached_port(pi);
2982 	}
2983 }
2984 
2985 static void
2986 setup_attached_port(portid_t pi)
2987 {
2988 	unsigned int socket_id;
2989 	int ret;
2990 
2991 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2992 	/* if socket_id is invalid, set to the first available socket. */
2993 	if (check_socket_id(socket_id) < 0)
2994 		socket_id = socket_ids[0];
2995 	reconfig(pi, socket_id);
2996 	ret = rte_eth_promiscuous_enable(pi);
2997 	if (ret != 0)
2998 		fprintf(stderr,
2999 			"Error during enabling promiscuous mode for port %u: %s - ignore\n",
3000 			pi, rte_strerror(-ret));
3001 
3002 	ports_ids[nb_ports++] = pi;
3003 	fwd_ports_ids[nb_fwd_ports++] = pi;
3004 	nb_cfg_ports = nb_fwd_ports;
3005 	ports[pi].need_setup = 0;
3006 	ports[pi].port_status = RTE_PORT_STOPPED;
3007 
3008 	printf("Port %d is attached. The total number of ports is now %d\n", pi, nb_ports);
3009 	printf("Done\n");
3010 }
3011 
3012 static void
3013 detach_device(struct rte_device *dev)
3014 {
3015 	portid_t sibling;
3016 
3017 	if (dev == NULL) {
3018 		fprintf(stderr, "Device already removed\n");
3019 		return;
3020 	}
3021 
3022 	printf("Removing a device...\n");
3023 
3024 	RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
3025 		if (ports[sibling].port_status != RTE_PORT_CLOSED) {
3026 			if (ports[sibling].port_status != RTE_PORT_STOPPED) {
3027 				fprintf(stderr, "Port %u not stopped\n",
3028 					sibling);
3029 				return;
3030 			}
3031 			port_flow_flush(sibling);
3032 		}
3033 	}
3034 
3035 	if (rte_dev_remove(dev) < 0) {
3036 		TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
3037 		return;
3038 	}
3039 	remove_invalid_ports();
3040 
3041 	printf("Device is detached\n");
3042 	printf("The total number of ports is now %d\n", nb_ports);
3043 	printf("Done\n");
3044 	return;
3045 }
3046 
3047 void
3048 detach_port_device(portid_t port_id)
3049 {
3050 	int ret;
3051 	struct rte_eth_dev_info dev_info;
3052 
3053 	if (port_id_is_invalid(port_id, ENABLED_WARN))
3054 		return;
3055 
3056 	if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3057 		if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3058 			fprintf(stderr, "Port not stopped\n");
3059 			return;
3060 		}
3061 		fprintf(stderr, "Port was not closed\n");
3062 	}
3063 
3064 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
3065 	if (ret != 0) {
3066 		TESTPMD_LOG(ERR,
3067 			"Failed to get device info for port %d, not detaching\n",
3068 			port_id);
3069 		return;
3070 	}
3071 	detach_device(dev_info.device);
3072 }
3073 
3074 void
3075 detach_devargs(char *identifier)
3076 {
3077 	struct rte_dev_iterator iterator;
3078 	struct rte_devargs da;
3079 	portid_t port_id;
3080 
3081 	printf("Removing a device...\n");
3082 
3083 	memset(&da, 0, sizeof(da));
3084 	if (rte_devargs_parsef(&da, "%s", identifier)) {
3085 		fprintf(stderr, "cannot parse identifier\n");
3086 		return;
3087 	}
3088 
3089 	RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
3090 		if (ports[port_id].port_status != RTE_PORT_CLOSED) {
3091 			if (ports[port_id].port_status != RTE_PORT_STOPPED) {
3092 				fprintf(stderr, "Port %u not stopped\n",
3093 					port_id);
3094 				rte_eth_iterator_cleanup(&iterator);
3095 				rte_devargs_reset(&da);
3096 				return;
3097 			}
3098 			port_flow_flush(port_id);
3099 		}
3100 	}
3101 
3102 	if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
3103 		TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
3104 			    da.name, da.bus->name);
3105 		rte_devargs_reset(&da);
3106 		return;
3107 	}
3108 
3109 	remove_invalid_ports();
3110 
3111 	printf("Device %s is detached\n", identifier);
3112 	printf("The total number of ports is now %d\n", nb_ports);
3113 	printf("Done\n");
3114 	rte_devargs_reset(&da);
3115 }
3116 
3117 void
3118 pmd_test_exit(void)
3119 {
3120 	portid_t pt_id;
3121 	unsigned int i;
3122 	int ret;
3123 
3124 	if (test_done == 0)
3125 		stop_packet_forwarding();
3126 
3127 #ifndef RTE_EXEC_ENV_WINDOWS
3128 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3129 		if (mempools[i]) {
3130 			if (mp_alloc_type == MP_ALLOC_ANON)
3131 				rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
3132 						     NULL);
3133 		}
3134 	}
3135 #endif
3136 	if (ports != NULL) {
3137 		no_link_check = 1;
3138 		RTE_ETH_FOREACH_DEV(pt_id) {
3139 			printf("\nStopping port %d...\n", pt_id);
3140 			fflush(stdout);
3141 			stop_port(pt_id);
3142 		}
3143 		RTE_ETH_FOREACH_DEV(pt_id) {
3144 			printf("\nShutting down port %d...\n", pt_id);
3145 			fflush(stdout);
3146 			close_port(pt_id);
3147 		}
3148 	}
3149 
3150 	if (hot_plug) {
3151 		ret = rte_dev_event_monitor_stop();
3152 		if (ret) {
3153 			RTE_LOG(ERR, EAL,
3154 				"fail to stop device event monitor.\n");
3155 			return;
3156 		}
3157 
3158 		ret = rte_dev_event_callback_unregister(NULL,
3159 			dev_event_callback, NULL);
3160 		if (ret < 0) {
3161 			RTE_LOG(ERR, EAL,
3162 				"fail to unregister device event callback.\n");
3163 			return;
3164 		}
3165 
3166 		ret = rte_dev_hotplug_handle_disable();
3167 		if (ret) {
3168 			RTE_LOG(ERR, EAL,
3169 				"fail to disable hotplug handling.\n");
3170 			return;
3171 		}
3172 	}
3173 	for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
3174 		if (mempools[i])
3175 			mempool_free_mp(mempools[i]);
3176 	}
3177 
3178 	printf("\nBye...\n");
3179 }
3180 
3181 typedef void (*cmd_func_t)(void);
3182 struct pmd_test_command {
3183 	const char *cmd_name;
3184 	cmd_func_t cmd_func;
3185 };
3186 
3187 /* Check the link status of all ports in up to 9 s, and finally print it */
3188 static void
3189 check_all_ports_link_status(uint32_t port_mask)
3190 {
3191 #define CHECK_INTERVAL 100 /* 100ms */
3192 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
3193 	portid_t portid;
3194 	uint8_t count, all_ports_up, print_flag = 0;
3195 	struct rte_eth_link link;
3196 	int ret;
3197 	char link_status[RTE_ETH_LINK_MAX_STR_LEN];
3198 
3199 	printf("Checking link statuses...\n");
3200 	fflush(stdout);
3201 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
3202 		all_ports_up = 1;
3203 		RTE_ETH_FOREACH_DEV(portid) {
3204 			if ((port_mask & (1 << portid)) == 0)
3205 				continue;
3206 			memset(&link, 0, sizeof(link));
3207 			ret = rte_eth_link_get_nowait(portid, &link);
3208 			if (ret < 0) {
3209 				all_ports_up = 0;
3210 				if (print_flag == 1)
3211 					fprintf(stderr,
3212 						"Port %u link get failed: %s\n",
3213 						portid, rte_strerror(-ret));
3214 				continue;
3215 			}
3216 			/* print link status if flag set */
3217 			if (print_flag == 1) {
3218 				rte_eth_link_to_str(link_status,
3219 					sizeof(link_status), &link);
3220 				printf("Port %d %s\n", portid, link_status);
3221 				continue;
3222 			}
3223 			/* clear all_ports_up flag if any link down */
3224 			if (link.link_status == ETH_LINK_DOWN) {
3225 				all_ports_up = 0;
3226 				break;
3227 			}
3228 		}
3229 		/* after finally printing all link status, get out */
3230 		if (print_flag == 1)
3231 			break;
3232 
3233 		if (all_ports_up == 0) {
3234 			fflush(stdout);
3235 			rte_delay_ms(CHECK_INTERVAL);
3236 		}
3237 
3238 		/* set the print_flag if all ports up or timeout */
3239 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
3240 			print_flag = 1;
3241 		}
3242 
3243 		if (lsc_interrupt)
3244 			break;
3245 	}
3246 }
3247 
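/*
 * The polling scheme above, condensed: poll every CHECK_INTERVAL (100 ms)
 * for at most MAX_CHECK_TIME iterations, staying quiet until either all
 * masked ports report link up or the timeout is about to expire, then do
 * one final pass with print_flag set to emit the per-port status. When
 * lsc_interrupt is enabled only a single pass is made and link state
 * change events take over from there.
 */
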
3248 static void
3249 rmv_port_callback(void *arg)
3250 {
3251 	int need_to_start = 0;
3252 	int org_no_link_check = no_link_check;
3253 	portid_t port_id = (intptr_t)arg;
3254 	struct rte_eth_dev_info dev_info;
3255 	int ret;
3256 
3257 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
3258 
3259 	if (!test_done && port_is_forwarding(port_id)) {
3260 		need_to_start = 1;
3261 		stop_packet_forwarding();
3262 	}
3263 	no_link_check = 1;
3264 	stop_port(port_id);
3265 	no_link_check = org_no_link_check;
3266 
3267 	ret = eth_dev_info_get_print_err(port_id, &dev_info);
3268 	if (ret != 0)
3269 		TESTPMD_LOG(ERR,
3270 			"Failed to get device info for port %d, not detaching\n",
3271 			port_id);
3272 	else {
3273 		struct rte_device *device = dev_info.device;
3274 		close_port(port_id);
3275 		detach_device(device); /* might be already removed or have more ports */
3276 	}
3277 	if (need_to_start)
3278 		start_packet_forwarding(0);
3279 }
3280 
3281 /* This function is used by the interrupt thread */
3282 static int
3283 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
3284 		  void *ret_param)
3285 {
3286 	RTE_SET_USED(param);
3287 	RTE_SET_USED(ret_param);
3288 
3289 	if (type >= RTE_ETH_EVENT_MAX) {
3290 		fprintf(stderr,
3291 			"\nPort %" PRIu16 ": %s called upon invalid event %d\n",
3292 			port_id, __func__, type);
3293 		fflush(stderr);
3294 	} else if (event_print_mask & (UINT32_C(1) << type)) {
3295 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
3296 			eth_event_desc[type]);
3297 		fflush(stdout);
3298 	}
3299 
3300 	switch (type) {
3301 	case RTE_ETH_EVENT_NEW:
3302 		ports[port_id].need_setup = 1;
3303 		ports[port_id].port_status = RTE_PORT_HANDLING;
3304 		break;
3305 	case RTE_ETH_EVENT_INTR_RMV:
3306 		if (port_id_is_invalid(port_id, DISABLED_WARN))
3307 			break;
3308 		if (rte_eal_alarm_set(100000,
3309 				rmv_port_callback, (void *)(intptr_t)port_id))
3310 			fprintf(stderr,
3311 				"Could not set up deferred device removal\n");
3312 		break;
3313 	case RTE_ETH_EVENT_DESTROY:
3314 		ports[port_id].port_status = RTE_PORT_CLOSED;
3315 		printf("Port %u is closed\n", port_id);
3316 		break;
3317 	default:
3318 		break;
3319 	}
3320 	return 0;
3321 }
3322 
3323 static int
3324 register_eth_event_callback(void)
3325 {
3326 	int ret;
3327 	enum rte_eth_event_type event;
3328 
3329 	for (event = RTE_ETH_EVENT_UNKNOWN;
3330 			event < RTE_ETH_EVENT_MAX; event++) {
3331 		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
3332 				event,
3333 				eth_event_callback,
3334 				NULL);
3335 		if (ret != 0) {
3336 			TESTPMD_LOG(ERR, "Failed to register callback for "
3337 					"%s event\n", eth_event_desc[event]);
3338 			return -1;
3339 		}
3340 	}
3341 
3342 	return 0;
3343 }
3344 
3345 /* This function is used by the interrupt thread */
3346 static void
3347 dev_event_callback(const char *device_name, enum rte_dev_event_type type,
3348 			     __rte_unused void *arg)
3349 {
3350 	uint16_t port_id;
3351 	int ret;
3352 
3353 	if (type >= RTE_DEV_EVENT_MAX) {
3354 		fprintf(stderr, "%s called upon invalid event %d\n",
3355 			__func__, type);
3356 		fflush(stderr);
3357 	}
3358 
3359 	switch (type) {
3360 	case RTE_DEV_EVENT_REMOVE:
3361 		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
3362 			device_name);
3363 		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
3364 		if (ret) {
3365 			RTE_LOG(ERR, EAL, "cannot get port by device %s\n",
3366 				device_name);
3367 			return;
3368 		}
3369 		/*
3370 		 * Because the user's callback is invoked from the EAL
3371 		 * interrupt callback, that callback must finish before it
3372 		 * can be unregistered when detaching the device. Therefore
3373 		 * return from the callback quickly and detach the device
3374 		 * via a deferred removal. This is a workaround; once device
3375 		 * detaching is moved into the EAL, the deferred removal can
3376 		 * be deleted.
3377 		 */
3378 		if (rte_eal_alarm_set(100000,
3379 				rmv_port_callback, (void *)(intptr_t)port_id))
3380 			RTE_LOG(ERR, EAL,
3381 				"Could not set up deferred device removal\n");
3382 		break;
3383 	case RTE_DEV_EVENT_ADD:
3384 		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
3385 			device_name);
3386 		/* TODO: After finish kernel driver binding,
3387 		 * begin to attach port.
3388 		 */
3389 		break;
3390 	default:
3391 		break;
3392 	}
3393 }
3394 
3395 static void
3396 rxtx_port_config(struct rte_port *port)
3397 {
3398 	uint16_t qid;
3399 	uint64_t offloads;
3400 
3401 	for (qid = 0; qid < nb_rxq; qid++) {
3402 		offloads = port->rx_conf[qid].offloads;
3403 		port->rx_conf[qid] = port->dev_info.default_rxconf;
3404 		if (offloads != 0)
3405 			port->rx_conf[qid].offloads = offloads;
3406 
3407 		/* Check if any Rx parameters have been passed */
3408 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
3409 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
3410 
3411 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
3412 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
3413 
3414 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
3415 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
3416 
3417 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
3418 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
3419 
3420 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
3421 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
3422 
3423 		port->nb_rx_desc[qid] = nb_rxd;
3424 	}
3425 
3426 	for (qid = 0; qid < nb_txq; qid++) {
3427 		offloads = port->tx_conf[qid].offloads;
3428 		port->tx_conf[qid] = port->dev_info.default_txconf;
3429 		if (offloads != 0)
3430 			port->tx_conf[qid].offloads = offloads;
3431 
3432 		/* Check if any Tx parameters have been passed */
3433 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
3434 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
3435 
3436 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
3437 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
3438 
3439 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
3440 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
3441 
3442 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
3443 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
3444 
3445 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
3446 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
3447 
3448 		port->nb_tx_desc[qid] = nb_txd;
3449 	}
3450 }
3451 
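/*
 * The override pattern in rxtx_port_config() above: start from the PMD
 * defaults (dev_info.default_rxconf/default_txconf) and overwrite only the
 * fields the user supplied on the command line, with RTE_PMD_PARAM_UNSET
 * marking "not given". E.g. with --txfreet=32 (assuming that option name)
 * only tx_free_thresh deviates from the PMD default:
 *
 *	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
 *		port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
 */
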
3452 /*
3453  * Helper function to keep max_rx_pkt_len and the JUMBO_FRAME offload
3454  * consistent; the MTU is also adjusted if JUMBO_FRAME is not set.
3455  *
3456  * port->dev_info should be set before calling this function.
3457  *
3458  * return 0 on success, negative on error
3459  */
3460 int
3461 update_jumbo_frame_offload(portid_t portid)
3462 {
3463 	struct rte_port *port = &ports[portid];
3464 	uint32_t eth_overhead;
3465 	uint64_t rx_offloads;
3466 	int ret;
3467 	bool on;
3468 
3469 	/* Derive the per-port Ethernet overhead (frame length minus MTU) */
3470 	if (port->dev_info.max_mtu != UINT16_MAX &&
3471 	    port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
3472 		eth_overhead = port->dev_info.max_rx_pktlen -
3473 				port->dev_info.max_mtu;
3474 	else
3475 		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3476 
3477 	rx_offloads = port->dev_conf.rxmode.offloads;
3478 
3479 	/* Default config value is 0 to use PMD specific overhead */
3480 	if (port->dev_conf.rxmode.max_rx_pkt_len == 0)
3481 		port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;
3482 
3483 	if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
3484 		rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3485 		on = false;
3486 	} else {
3487 		if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
3488 			fprintf(stderr,
3489 				"Frame size (%u) is not supported by port %u\n",
3490 				port->dev_conf.rxmode.max_rx_pkt_len,
3491 				portid);
3492 			return -1;
3493 		}
3494 		rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
3495 		on = true;
3496 	}
3497 
3498 	if (rx_offloads != port->dev_conf.rxmode.offloads) {
3499 		uint16_t qid;
3500 
3501 		port->dev_conf.rxmode.offloads = rx_offloads;
3502 
3503 		/* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
3504 		for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
3505 			if (on)
3506 				port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
3507 			else
3508 				port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
3509 		}
3510 	}
3511 
3512 	/* If JUMBO_FRAME is set, the MTU conversion is done by the ethdev
3513 	 * layer; if it is unset, do it here.
3514 	 */
3515 	if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
3516 		ret = eth_dev_set_mtu_mp(portid,
3517 				port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
3518 		if (ret)
3519 			fprintf(stderr,
3520 				"Failed to set MTU to %u for port %u\n",
3521 				port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead,
3522 				portid);
3523 	}
3524 
3525 	return 0;
3526 }
3527 
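/*
 * Worked example for the overhead computation above (figures illustrative,
 * not from a specific NIC): a PMD reporting max_rx_pktlen = 9600 and
 * max_mtu = 9578 yields eth_overhead = 22 (e.g. 14 B Ethernet header +
 * 4 B CRC + 4 B VLAN); otherwise the generic RTE_ETHER_HDR_LEN +
 * RTE_ETHER_CRC_LEN = 18 is used, so a max_rx_pkt_len of RTE_ETHER_MTU
 * (1500) + 18 = 1518 keeps the JUMBO_FRAME offload disabled.
 */
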
3528 void
3529 init_port_config(void)
3530 {
3531 	portid_t pid;
3532 	struct rte_port *port;
3533 	int ret;
3534 
3535 	RTE_ETH_FOREACH_DEV(pid) {
3536 		port = &ports[pid];
3537 		port->dev_conf.fdir_conf = fdir_conf;
3538 
3539 		ret = eth_dev_info_get_print_err(pid, &port->dev_info);
3540 		if (ret != 0)
3541 			return;
3542 
3543 		if (nb_rxq > 1) {
3544 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3545 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
3546 				rss_hf & port->dev_info.flow_type_rss_offloads;
3547 		} else {
3548 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
3549 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
3550 		}
3551 
3552 		if (port->dcb_flag == 0) {
3553 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
3554 				port->dev_conf.rxmode.mq_mode =
3555 					(enum rte_eth_rx_mq_mode)
3556 						(rx_mq_mode & ETH_MQ_RX_RSS);
3557 			else
3558 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
3559 		}
3560 
3561 		rxtx_port_config(port);
3562 
3563 		ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
3564 		if (ret != 0)
3565 			return;
3566 
3567 #if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
3568 		rte_pmd_ixgbe_bypass_init(pid);
3569 #endif
3570 
3571 		if (lsc_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_LSC))
3572 			port->dev_conf.intr_conf.lsc = 1;
3573 		if (rmv_interrupt && (*port->dev_info.dev_flags & RTE_ETH_DEV_INTR_RMV))
3574 			port->dev_conf.intr_conf.rmv = 1;
3575 	}
3576 }
3577 
3578 void set_port_slave_flag(portid_t slave_pid)
3579 {
3580 	struct rte_port *port;
3581 
3582 	port = &ports[slave_pid];
3583 	port->slave_flag = 1;
3584 }
3585 
3586 void clear_port_slave_flag(portid_t slave_pid)
3587 {
3588 	struct rte_port *port;
3589 
3590 	port = &ports[slave_pid];
3591 	port->slave_flag = 0;
3592 }
3593 
3594 uint8_t port_is_bonding_slave(portid_t slave_pid)
3595 {
3596 	struct rte_port *port;
3597 	struct rte_eth_dev_info dev_info;
3598 	int ret;
3599 
3600 	port = &ports[slave_pid];
3601 	ret = eth_dev_info_get_print_err(slave_pid, &dev_info);
3602 	if (ret != 0) {
3603 		TESTPMD_LOG(ERR,
3604 			"Failed to get device info for port id %d, "
3605 			"cannot determine if the port is a bonded slave\n",
3606 			slave_pid);
3607 		return 0;
3608 	}
3609 	if ((*dev_info.dev_flags & RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
3610 		return 1;
3611 	return 0;
3612 }
3613 
3614 const uint16_t vlan_tags[] = {
3615 		0,  1,  2,  3,  4,  5,  6,  7,
3616 		8,  9, 10, 11,  12, 13, 14, 15,
3617 		16, 17, 18, 19, 20, 21, 22, 23,
3618 		24, 25, 26, 27, 28, 29, 30, 31
3619 };
3620 
3621 static int
3622 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
3623 		 enum dcb_mode_enable dcb_mode,
3624 		 enum rte_eth_nb_tcs num_tcs,
3625 		 uint8_t pfc_en)
3626 {
3627 	uint8_t i;
3628 	int32_t rc;
3629 	struct rte_eth_rss_conf rss_conf;
3630 
3631 	/*
3632 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
3633 	 * given above, and the number of traffic classes available for use.
3634 	 */
3635 	if (dcb_mode == DCB_VT_ENABLED) {
3636 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3637 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
3638 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3639 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
3640 
3641 		/* VMDQ+DCB RX and TX configurations */
3642 		vmdq_rx_conf->enable_default_pool = 0;
3643 		vmdq_rx_conf->default_pool = 0;
3644 		vmdq_rx_conf->nb_queue_pools =
3645 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3646 		vmdq_tx_conf->nb_queue_pools =
3647 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
3648 
3649 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
3650 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
3651 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
3652 			vmdq_rx_conf->pool_map[i].pools =
3653 				1 << (i % vmdq_rx_conf->nb_queue_pools);
3654 		}
3655 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3656 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
3657 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
3658 		}
3659 
3660 		/* set DCB mode of RX and TX of multiple queues */
3661 		eth_conf->rxmode.mq_mode =
3662 				(enum rte_eth_rx_mq_mode)
3663 					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
3664 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
3665 	} else {
3666 		struct rte_eth_dcb_rx_conf *rx_conf =
3667 				&eth_conf->rx_adv_conf.dcb_rx_conf;
3668 		struct rte_eth_dcb_tx_conf *tx_conf =
3669 				&eth_conf->tx_adv_conf.dcb_tx_conf;
3670 
3671 		memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
3672 
3673 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
3674 		if (rc != 0)
3675 			return rc;
3676 
3677 		rx_conf->nb_tcs = num_tcs;
3678 		tx_conf->nb_tcs = num_tcs;
3679 
3680 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3681 			rx_conf->dcb_tc[i] = i % num_tcs;
3682 			tx_conf->dcb_tc[i] = i % num_tcs;
3683 		}
3684 
3685 		eth_conf->rxmode.mq_mode =
3686 				(enum rte_eth_rx_mq_mode)
3687 					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
3688 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
3689 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
3690 	}
3691 
3692 	if (pfc_en)
3693 		eth_conf->dcb_capability_en =
3694 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
3695 	else
3696 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
3697 
3698 	return 0;
3699 }
3700 
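/*
 * Example of the priority-to-TC mapping produced above with num_tcs = 4:
 * dcb_tc[] = {0, 1, 2, 3, 0, 1, 2, 3} for the eight user priorities, i.e.
 * priority i is served by traffic class i % num_tcs. In the VT case the
 * 32 vlan_tags entries are likewise spread round-robin over the VMDq
 * queue pools.
 */
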
3701 int
3702 init_port_dcb_config(portid_t pid,
3703 		     enum dcb_mode_enable dcb_mode,
3704 		     enum rte_eth_nb_tcs num_tcs,
3705 		     uint8_t pfc_en)
3706 {
3707 	struct rte_eth_conf port_conf;
3708 	struct rte_port *rte_port;
3709 	int retval;
3710 	uint16_t i;
3711 
3712 	if (num_procs > 1) {
3713 		printf("The multi-process feature doesn't support DCB.\n");
3714 		return -ENOTSUP;
3715 	}
3716 	rte_port = &ports[pid];
3717 
3718 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
3719 
3720 	port_conf.rxmode = rte_port->dev_conf.rxmode;
3721 	port_conf.txmode = rte_port->dev_conf.txmode;
3722 
3723 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
3724 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
3725 	if (retval < 0)
3726 		return retval;
3727 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3728 
3729 	/* re-configure the device. */
3730 	retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
3731 	if (retval < 0)
3732 		return retval;
3733 
3734 	retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
3735 	if (retval != 0)
3736 		return retval;
3737 
3738 	/* If dev_info.vmdq_pool_base is greater than 0,
3739 	 * the queue ids of the VMDq pools start after the PF queues.
3740 	 */
3741 	if (dcb_mode == DCB_VT_ENABLED &&
3742 	    rte_port->dev_info.vmdq_pool_base > 0) {
3743 		fprintf(stderr,
3744 			"VMDQ_DCB multi-queue mode is nonsensical for port %d.\n",
3745 			pid);
3746 		return -1;
3747 	}
3748 
3749 	/* Assume the ports in testpmd have the same DCB capability
3750 	 * and the same number of rxq and txq in DCB mode
3751 	 */
3752 	if (dcb_mode == DCB_VT_ENABLED) {
3753 		if (rte_port->dev_info.max_vfs > 0) {
3754 			nb_rxq = rte_port->dev_info.nb_rx_queues;
3755 			nb_txq = rte_port->dev_info.nb_tx_queues;
3756 		} else {
3757 			nb_rxq = rte_port->dev_info.max_rx_queues;
3758 			nb_txq = rte_port->dev_info.max_tx_queues;
3759 		}
3760 	} else {
3761 		/* if VT is disabled, use all PF queues */
3762 		if (rte_port->dev_info.vmdq_pool_base == 0) {
3763 			nb_rxq = rte_port->dev_info.max_rx_queues;
3764 			nb_txq = rte_port->dev_info.max_tx_queues;
3765 		} else {
3766 			nb_rxq = (queueid_t)num_tcs;
3767 			nb_txq = (queueid_t)num_tcs;
3768 
3769 		}
3770 	}
3771 	rx_free_thresh = 64;
3772 
3773 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
3774 
3775 	rxtx_port_config(rte_port);
3776 	/* VLAN filter */
3777 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3778 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
3779 		rx_vft_set(pid, vlan_tags[i], 1);
3780 
3781 	retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
3782 	if (retval != 0)
3783 		return retval;
3784 
3785 	rte_port->dcb_flag = 1;
3786 
3787 	/* Enter DCB configuration status */
3788 	dcb_config = 1;
3789 
3790 	return 0;
3791 }
3792 
3793 static void
3794 init_port(void)
3795 {
3796 	int i;
3797 
3798 	/* Configuration of Ethernet ports. */
3799 	ports = rte_zmalloc("testpmd: ports",
3800 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
3801 			    RTE_CACHE_LINE_SIZE);
3802 	if (ports == NULL) {
3803 		rte_exit(EXIT_FAILURE,
3804 				"rte_zmalloc(%d struct rte_port) failed\n",
3805 				RTE_MAX_ETHPORTS);
3806 	}
3807 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3808 		LIST_INIT(&ports[i].flow_tunnel_list);
3809 	/* Initialize ports NUMA structures */
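	/* (port_numa, rxring_numa and txring_numa are uint8_t arrays, so a
	 * byte-wise memset() with the NUMA_NO_CONFIG sentinel is safe.)
	 */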
3810 	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3811 	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3812 	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
3813 }
3814 
3815 static void
3816 force_quit(void)
3817 {
3818 	pmd_test_exit();
3819 	prompt_exit();
3820 }
3821 
3822 static void
3823 print_stats(void)
3824 {
3825 	uint8_t i;
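	/* ANSI escape sequences: "\033[2J" (clr) clears the screen and
	 * "\033[1;1H" (top_left) moves the cursor to row 1, column 1.
	 */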
3826 	const char clr[] = { 27, '[', '2', 'J', '\0' };
3827 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
3828 
3829 	/* Clear screen and move to top left */
3830 	printf("%s%s", clr, top_left);
3831 
3832 	printf("\nPort statistics ====================================");
3833 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
3834 		nic_stats_display(fwd_ports_ids[i]);
3835 
3836 	fflush(stdout);
3837 }
3838 
3839 static void
3840 signal_handler(int signum)
3841 {
3842 	if (signum == SIGINT || signum == SIGTERM) {
3843 		fprintf(stderr, "\nSignal %d received, preparing to exit...\n",
3844 			signum);
3845 #ifdef RTE_LIB_PDUMP
3846 		/* uninitialize packet capture framework */
3847 		rte_pdump_uninit();
3848 #endif
3849 #ifdef RTE_LIB_LATENCYSTATS
3850 		if (latencystats_enabled != 0)
3851 			rte_latencystats_uninit();
3852 #endif
3853 		force_quit();
3854 		/* Set flag to indicate forced termination. */
3855 		f_quit = 1;
3856 		/* re-raise with the default handler so the process exits with the status expected for this signal */
3857 #ifndef RTE_EXEC_ENV_WINDOWS
3858 		signal(signum, SIG_DFL);
3859 		kill(getpid(), signum);
3860 #endif
3861 	}
3862 }
3863 
3864 int
3865 main(int argc, char** argv)
3866 {
3867 	int diag;
3868 	portid_t port_id;
3869 	uint16_t count;
3870 	int ret;
3871 
3872 	signal(SIGINT, signal_handler);
3873 	signal(SIGTERM, signal_handler);
3874 
3875 	testpmd_logtype = rte_log_register("testpmd");
3876 	if (testpmd_logtype < 0)
3877 		rte_exit(EXIT_FAILURE, "Cannot register log type\n");
3878 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
3879 
3880 	diag = rte_eal_init(argc, argv);
3881 	if (diag < 0)
3882 		rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
3883 			 rte_strerror(rte_errno));
3884 
3885 	ret = register_eth_event_callback();
3886 	if (ret != 0)
3887 		rte_exit(EXIT_FAILURE, "Cannot register for ethdev events\n");
3888 
3889 #ifdef RTE_LIB_PDUMP
3890 	/* initialize packet capture framework */
3891 	rte_pdump_init();
3892 #endif
3893 
3894 	count = 0;
3895 	RTE_ETH_FOREACH_DEV(port_id) {
3896 		ports_ids[count] = port_id;
3897 		count++;
3898 	}
3899 	nb_ports = (portid_t) count;
3900 	if (nb_ports == 0)
3901 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3902 
3903 	/* allocate port structures, and init them */
3904 	init_port();
3905 
3906 	set_def_fwd_config();
3907 	if (nb_lcores == 0)
3908 		rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
3909 			 "Check the core mask argument\n");
3910 
3911 	/* Bitrate/latency stats disabled by default */
3912 #ifdef RTE_LIB_BITRATESTATS
3913 	bitrate_enabled = 0;
3914 #endif
3915 #ifdef RTE_LIB_LATENCYSTATS
3916 	latencystats_enabled = 0;
3917 #endif
3918 
3919 	/* on FreeBSD, mlockall() is disabled by default */
3920 #ifdef RTE_EXEC_ENV_FREEBSD
3921 	do_mlockall = 0;
3922 #else
3923 	do_mlockall = 1;
3924 #endif
3925 
3926 	argc -= diag;
3927 	argv += diag;
3928 	if (argc > 1)
3929 		launch_args_parse(argc, argv);
3930 
3931 #ifndef RTE_EXEC_ENV_WINDOWS
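	/* mlockall(MCL_CURRENT | MCL_FUTURE) locks all current and future
	 * pages of the process in RAM so the forwarding path avoids page
	 * faults; a failure is only logged, since testpmd still works
	 * without locked memory.
	 */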
3932 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3933 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3934 			strerror(errno));
3935 	}
3936 #endif
3937 
3938 	if (tx_first && interactive)
3939 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3940 				"interactive mode.\n");
3941 
3942 	if (tx_first && lsc_interrupt) {
3943 		fprintf(stderr,
3944 			"Warning: lsc_interrupt needs to be off when using tx_first. Disabling.\n");
3945 		lsc_interrupt = 0;
3946 	}
3947 
3948 	if (!nb_rxq && !nb_txq)
3949 		fprintf(stderr,
3950 			"Warning: either the rx or tx queue count should be non-zero\n");
3951 
3952 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3953 		fprintf(stderr,
3954 			"Warning: nb_rxq=%d enables RSS configuration, but nb_txq=%d will prevent it from being fully tested.\n",
3955 			nb_rxq, nb_txq);
3956 
3957 	init_config();
3958 
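	/* Device hotplug support is a three-step setup: enable hotplug
	 * handling in the EAL, start the device event monitor, then
	 * register a callback to be notified of device add/remove events.
	 */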
3959 	if (hot_plug) {
3960 		ret = rte_dev_hotplug_handle_enable();
3961 		if (ret) {
3962 			RTE_LOG(ERR, EAL,
3963 				"failed to enable hotplug handling.\n");
3964 			return -1;
3965 		}
3966 
3967 		ret = rte_dev_event_monitor_start();
3968 		if (ret) {
3969 			RTE_LOG(ERR, EAL,
3970 				"failed to start device event monitoring.\n");
3971 			return -1;
3972 		}
3973 
3974 		ret = rte_dev_event_callback_register(NULL,
3975 			dev_event_callback, NULL);
3976 		if (ret) {
3977 			RTE_LOG(ERR, EAL,
3978 				"failed to register device event callback.\n");
3979 			return -1;
3980 		}
3981 	}
3982 
3983 	if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
3984 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
3985 
3986 	/* set all ports to promiscuous mode by default */
3987 	RTE_ETH_FOREACH_DEV(port_id) {
3988 		ret = rte_eth_promiscuous_enable(port_id);
3989 		if (ret != 0)
3990 			fprintf(stderr,
3991 				"Error during enabling promiscuous mode for port %u: %s - ignore\n",
3992 				port_id, rte_strerror(-ret));
3993 	}
3994 
3995 	/* Init metrics library */
3996 	rte_metrics_init(rte_socket_id());
3997 
3998 #ifdef RTE_LIB_LATENCYSTATS
3999 	if (latencystats_enabled != 0) {
4000 		int ret = rte_latencystats_init(1, NULL);
4001 		if (ret)
4002 			fprintf(stderr,
4003 				"Warning: latencystats init() returned error %d\n",
4004 				ret);
4005 		fprintf(stderr, "Latencystats running on lcore %d\n",
4006 			latencystats_lcore_id);
4007 	}
4008 #endif
4009 
4010 	/* Setup bitrate stats */
4011 #ifdef RTE_LIB_BITRATESTATS
4012 	if (bitrate_enabled != 0) {
4013 		bitrate_data = rte_stats_bitrate_create();
4014 		if (bitrate_data == NULL)
4015 			rte_exit(EXIT_FAILURE,
4016 				"Could not allocate bitrate data.\n");
4017 		rte_stats_bitrate_reg(bitrate_data);
4018 	}
4019 #endif
4020 
4021 #ifdef RTE_LIB_CMDLINE
4022 	if (strlen(cmdline_filename) != 0)
4023 		cmdline_read_from_file(cmdline_filename);
4024 
4025 	if (interactive == 1) {
4026 		if (auto_start) {
4027 			printf("Start automatic packet forwarding\n");
4028 			start_packet_forwarding(0);
4029 		}
4030 		prompt();
4031 		pmd_test_exit();
4032 	} else
4033 #endif
4034 	{
4035 		char c;
4036 		int rc;
4037 
4038 		f_quit = 0;
4039 
4040 		printf("No command line requested, start packet forwarding\n");
4041 		start_packet_forwarding(tx_first);
4042 		if (stats_period != 0) {
4043 			uint64_t prev_time = 0, cur_time, diff_time = 0;
4044 			uint64_t timer_period;
4045 
4046 			/* Convert to number of cycles */
4047 			timer_period = stats_period * rte_get_timer_hz();
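			/* For example, with stats_period == 5 and a 2.5 GHz
			 * timer, timer_period is 12.5e9 cycles, i.e. stats
			 * are printed every five seconds.
			 */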
4048 
4049 			while (f_quit == 0) {
4050 				cur_time = rte_get_timer_cycles();
4051 				diff_time += cur_time - prev_time;
4052 
4053 				if (diff_time >= timer_period) {
4054 					print_stats();
4055 					/* Reset the timer */
4056 					diff_time = 0;
4057 				}
4058 				prev_time = cur_time;
4059 				/* Sleep to avoid unnecessary checks */
4060 				rte_delay_us_sleep(US_PER_S);
4061 			}
4062 		}
4063 
4064 		printf("Press enter to exit\n");
4065 		rc = read(0, &c, 1);
4066 		pmd_test_exit();
4067 		if (rc < 0)
4068 			return 1;
4069 	}
4070 
4071 	ret = rte_eal_cleanup();
4072 	if (ret != 0)
4073 		rte_exit(EXIT_FAILURE,
4074 			 "EAL cleanup failed: %s\n", strerror(-ret));
4075 
4076 	return EXIT_SUCCESS;
4077 }
4078