/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc_heap.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

#ifndef MAP_HUGETLB
/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
#define HUGE_FLAG (0x40000)
#else
#define HUGE_FLAG MAP_HUGETLB
#endif

#ifndef MAP_HUGE_SHIFT
/* older kernels (or FreeBSD) will not have this define */
#define HUGE_SHIFT (26)
#else
#define HUGE_SHIFT MAP_HUGE_SHIFT
#endif
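
/*
 * Note: HUGE_FLAG and HUGE_SHIFT together let alloc_mem() below ask
 * mmap() for a specific huge page size, e.g. for 2 MB pages:
 * MAP_ANONYMOUS | MAP_PRIVATE | HUGE_FLAG | (21 << HUGE_SHIFT).
 */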

#define EXTMEM_HEAP_NAME "extmem"

uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Select mempool allocation type:
 * - native: use regular DPDK memory
 * - anon: use regular DPDK memory to create mempool, but populate using
 *         anonymous memory (may not be IOVA-contiguous)
 * - xmem: use externally allocated hugepage memory
 */
uint8_t mp_alloc_type = MP_ALLOC_NATIVE;

/*
 * Store the sockets on which the memory pools used by the ports
 * are allocated.
 */
uint8_t port_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the RX rings used by the ports
 * are allocated.
 */
uint8_t rxring_numa[RTE_MAX_ETHPORTS];

/*
 * Store the sockets on which the TX rings used by the ports
 * are allocated.
 */
uint8_t txring_numa[RTE_MAX_ETHPORTS];

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC
	&softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container, the process running with the 'stats-period' option cannot
 * be terminated. Set a flag to exit the stats-period loop after
 * SIGINT/SIGTERM is received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* whether the current configuration is in DCB mode; 0 means it is not */
uint8_t dcb_config = 0;

/* whether DCB is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 * Defaults are supplied by drivers via ethdev.
 */
#define RTE_TEST_RX_DESC_DEFAULT 0
#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

uint8_t hot_plug = 0; /**< hotplug disabled by default. */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
/*
 * Decide if all memory is locked for performance.
 */
int do_mlockall = 0;

/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
};

struct rte_eth_txmode tx_mode = {
	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

struct vxlan_encap_conf vxlan_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.vni = "\x00\x00\x00",
	.udp_src = 0,
	.udp_dst = RTE_BE16(4789),
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};

struct nvgre_encap_conf nvgre_encap_conf = {
	.select_ipv4 = 1,
	.select_vlan = 0,
	.tni = "\x00\x00\x00",
	.ipv4_src = IPv4(127, 0, 0, 1),
	.ipv4_dst = IPv4(255, 255, 255, 255),
	.ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x01",
	.ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x11\x11",
	.vlan_tci = 0,
	.eth_src = "\x00\x00\x00\x00\x00\x00",
	.eth_dst = "\xff\xff\xff\xff\xff\xff",
};

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);
static void eth_dev_event_callback(char *device_name,
				enum rte_dev_event_type type,
				void *param);
static int eth_dev_event_callback_register(void);
static int eth_dev_event_callback_unregister(void);


/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket ID is new (not yet discovered).
 * If it is new, return a positive value. If it is already known, return zero.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/* extremely pessimistic estimation of memory required to create a mempool */
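/*
 * Memory is estimated as ceil(nb_mbufs / objects_per_page) pages for the
 * objects themselves, plus a flat 128 MB allowance for mempool metadata,
 * rounded up to the page size.
 */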
static int
calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
{
	unsigned int n_pages, mbuf_per_pg, leftover;
	uint64_t total_mem, mbuf_mem, obj_sz;

	/* there is no good way to predict how much space the mempool will
	 * occupy because it will allocate chunks on the fly, and some of those
	 * will come from default DPDK memory while some will come from our
	 * external memory, so just assume 128MB will be enough for everyone.
	 */
	uint64_t hdr_mem = 128 << 20;

	/* account for possible non-contiguousness */
	obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
	if (obj_sz > pgsz) {
		TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
		return -1;
	}

	mbuf_per_pg = pgsz / obj_sz;
	leftover = (nb_mbufs % mbuf_per_pg) > 0;
	n_pages = (nb_mbufs / mbuf_per_pg) + leftover;

	mbuf_mem = n_pages * pgsz;

	total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);

	if (total_mem > SIZE_MAX) {
		TESTPMD_LOG(ERR, "Memory size too big\n");
		return -1;
	}
	*out = (size_t)total_mem;

	return 0;
}

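/* Bit scan forward: return the index of the least significant set bit of v. */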
static inline uint32_t
bsf64(uint64_t v)
{
	return (uint32_t)__builtin_ctzll(v);
}

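/*
 * Round v up to the next power of two and return its log2;
 * returns 0 for v == 0.
 */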
static inline uint32_t
log2_u64(uint64_t v)
{
	if (v == 0)
		return 0;
	v = rte_align64pow2(v);
	return bsf64(v);
}

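/*
 * Example: for 2 MB pages, log2_u64(2 * 1024 * 1024) == 21, so
 * pagesz_flags() returns (21 << HUGE_SHIFT) for use with mmap().
 */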
static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = log2_u64(page_sz);

	return (log2 << HUGE_SHIFT);
}

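/*
 * Reserve anonymous memory with mmap(); when 'huge' is set, request huge
 * pages of the given page size. Returns NULL on failure.
 */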
static void *
alloc_mem(size_t memsz, size_t pgsz, bool huge)
{
	void *addr;
	int flags;

	/* allocate anonymous hugepages */
	flags = MAP_ANONYMOUS | MAP_PRIVATE;
	if (huge)
		flags |= HUGE_FLAG | pagesz_flags(pgsz);

	addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	return addr;
}

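/* Describes an externally allocated memory area and its per-page IOVA table. */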
struct extmem_param {
	void *addr;
	size_t len;
	size_t pgsz;
	rte_iova_t *iova_table;
	unsigned int iova_table_len;
};

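/*
 * Try each supported page size in turn: estimate the required memory,
 * mmap() an area of that size, and record the IOVA of every page so the
 * area can later be registered with the external heap.
 */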
static int
create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
		bool huge)
{
	uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
			RTE_PGSIZE_16M, RTE_PGSIZE_16G};    /* POWER */
	unsigned int cur_page, n_pages, pgsz_idx;
	size_t mem_sz, cur_pgsz;
	rte_iova_t *iovas = NULL;
	void *addr;
	int ret;

	for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
		/* skip anything that is too big */
		if (pgsizes[pgsz_idx] > SIZE_MAX)
			continue;

		cur_pgsz = pgsizes[pgsz_idx];

		/* if we were told not to allocate hugepages, override */
		if (!huge)
			cur_pgsz = sysconf(_SC_PAGESIZE);

		ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
			return -1;
		}

		/* allocate our memory */
		addr = alloc_mem(mem_sz, cur_pgsz, huge);

		/* if we couldn't allocate memory with a specified page size,
		 * that doesn't mean we can't do it with other page sizes, so
		 * try another one.
		 */
		if (addr == NULL)
			continue;

		/* store IOVA addresses for every page in this memory area */
		n_pages = mem_sz / cur_pgsz;

		iovas = malloc(sizeof(*iovas) * n_pages);

		if (iovas == NULL) {
			TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
			goto fail;
		}
		/* lock memory if it's not huge pages */
		if (!huge)
			mlock(addr, mem_sz);

		/* populate IOVA addresses */
		for (cur_page = 0; cur_page < n_pages; cur_page++) {
			rte_iova_t iova;
			size_t offset;
			void *cur;

			offset = cur_pgsz * cur_page;
			cur = RTE_PTR_ADD(addr, offset);

			/* touch the page before getting its IOVA */
			*(volatile char *)cur = 0;

			iova = rte_mem_virt2iova(cur);

			iovas[cur_page] = iova;
		}

		break;
	}
	/* if we couldn't allocate anything */
	if (iovas == NULL)
		return -1;

	param->addr = addr;
	param->len = mem_sz;
	param->pgsz = cur_pgsz;
	param->iova_table = iovas;
	param->iova_table_len = n_pages;

	return 0;
fail:
	if (iovas)
		free(iovas);
	if (addr)
		munmap(addr, mem_sz);

	return -1;
}

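/*
 * Create the "extmem" heap on first use, allocate an external memory area
 * sized for the mempool, and add it to the heap so that allocations can
 * be served from it by socket ID.
 */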
static int
setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
{
	struct extmem_param param;
	int socket_id, ret;

	memset(&param, 0, sizeof(param));

	/* check if our heap exists */
	socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	if (socket_id < 0) {
		/* create our heap */
		ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
		if (ret < 0) {
			TESTPMD_LOG(ERR, "Cannot create heap\n");
			return -1;
		}
	}

	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot create memory area\n");
		return -1;
	}

	/* we now have a valid memory area, so add it to heap */
	ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
			param.addr, param.len, param.iova_table,
			param.iova_table_len, param.pgsz);

	/* when using VFIO, memory is automatically mapped for DMA by EAL */

	/* not needed any more */
	free(param.iova_table);

	if (ret < 0) {
		TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
		munmap(param.addr, param.len);
		return -1;
	}

	/* success */

	TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
			param.len >> 20);

	return 0;
}

/*
 * Create an mbuf pool for a given socket, honoring the selected mempool
 * allocation mode (mp_alloc_type).
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	TESTPMD_LOG(INFO,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	switch (mp_alloc_type) {
	case MP_ALLOC_NATIVE:
		{
			/* wrapper to rte_mempool_create() */
			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
			break;
		}
	case MP_ALLOC_ANON:
		{
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned int) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			break;
		}
	case MP_ALLOC_XMEM:
	case MP_ALLOC_XMEM_HUGE:
		{
			int heap_socket;
			bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;

			if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
				rte_exit(EXIT_FAILURE, "Could not create external memory\n");

			heap_socket =
				rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
			if (heap_socket < 0)
				rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");

			TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
					rte_mbuf_best_mempool_ops());
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
					mb_mempool_cache, 0, mbuf_seg_size,
					heap_socket);
			break;
		}
	default:
		{
			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket ID is valid in NUMA mode.
 * If valid, return 0; otherwise return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port ID that has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}

/*
 * Check whether the input rxq is valid.
 * The input is valid if it is not greater than the maximum number
 * of RX queues of every port.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq,
		       allowed_max_rxq,
		       pid);
		return -1;
	}
	return 0;
}

/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port ID that has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}

/*
 * Check whether the input txq is valid.
 * The input is valid if it is not greater than the maximum number
 * of TX queues of every port.
 * If valid, return 0; otherwise return -1.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq,
		       allowed_max_txq,
		       pid);
		return -1;
	}
	return 0;
}

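/*
 * One-time initialization: allocate per-lcore contexts, apply the default
 * port configuration, create mbuf pools per socket, and set up the
 * forwarding streams plus the GSO/GRO contexts.
 */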
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;
	int k;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		/* Apply default TxRx configuration for all ports */
		port->dev_conf.txmode = tx_mode;
		port->dev_conf.rxmode = rx_mode;
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (!(port->dev_info.tx_offload_capa &
		      DEV_TX_OFFLOAD_MBUF_FAST_FREE))
			port->dev_conf.txmode.offloads &=
				~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* Apply Rx offloads configuration */
		for (k = 0; k < port->dev_info.max_rx_queues; k++)
			port->rx_conf[k].offloads =
				port->dev_conf.rxmode.offloads;
		/* Apply Tx offloads configuration */
		for (k = 0; k < port->dev_info.max_tx_queues; k++)
			port->tx_conf[k].offloads =
				port->dev_conf.txmode.offloads;

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbufs.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create pools of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
	/*
	 * Record which mbuf pool each logical core uses, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}

#if defined RTE_LIBRTE_PMD_SOFTNIC
	if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
		RTE_ETH_FOREACH_DEV(pid) {
			port = &ports[pid];
			const char *driver = port->dev_info.driver_name;

			if (strcmp(driver, "net_softnic") == 0)
				port->softport.fwd_lcore_arg = fwd_lcores;
		}
	}
#endif

}


void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


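/*
 * (Re)allocate one forwarding stream per (port, queue) pair:
 * nb_ports * max(nb_rxq, nb_txq) streams in total. Existing streams are
 * freed first when the required count changes.
 */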
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket ID according to NUMA support or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
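/*
 * Print the distribution of burst sizes observed on a stream: the two most
 * frequent burst sizes and their share of all recorded bursts.
 */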
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

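/*
 * Drain any stale packets from the RX queues of the forwarding ports.
 * Two passes are made with a 10 ms pause in between, and each queue is
 * polled for at most one second to avoid spinning forever.
 */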
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets, so a timer is added to
				 * exit the loop after the 1 s timeout expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

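/*
 * Main forwarding loop of one logical core: repeatedly run the packet
 * forwarding callback on every stream assigned to this lcore until asked
 * to stop, updating bitrate/latency statistics when they are enabled.
 */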
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint16_t i, cnt_ports;

	cnt_ports = nb_ports;
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (i = 0; i < cnt_ports; i++)
					rte_stats_bitrate_calc(bitrate_data,
						ports_ids[i]);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;
	int move = 0;

	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			move = 1;
		else if (move)
			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
		else
			new_nb_fwd_ports++;
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down failed.\n");
}
1846 
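/*
 * Return 1 if every non-slave port is in the STARTED state, 0 otherwise.
 */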
1847 static int
1848 all_ports_started(void)
1849 {
1850 	portid_t pi;
1851 	struct rte_port *port;
1852 
1853 	RTE_ETH_FOREACH_DEV(pi) {
1854 		port = &ports[pi];
1855 		/* Check if there is a port which is not started */
1856 		if ((port->port_status != RTE_PORT_STARTED) &&
1857 			(port->slave_flag == 0))
1858 			return 0;
1859 	}
1860 
1861 	/* All non-slave ports are started */
1862 	return 1;
1863 }
1864 
1865 int
1866 port_is_stopped(portid_t port_id)
1867 {
1868 	struct rte_port *port = &ports[port_id];
1869 
1870 	if ((port->port_status != RTE_PORT_STOPPED) &&
1871 	    (port->slave_flag == 0))
1872 		return 0;
1873 	return 1;
1874 }
1875 
1876 int
1877 all_ports_stopped(void)
1878 {
1879 	portid_t pi;
1880 
1881 	RTE_ETH_FOREACH_DEV(pi) {
1882 		if (!port_is_stopped(pi))
1883 			return 0;
1884 	}
1885 
1886 	return 1;
1887 }
1888 
1889 int
1890 port_is_started(portid_t port_id)
1891 {
1892 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1893 		return 0;
1894 
1895 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1896 		return 0;
1897 
1898 	return 1;
1899 }
1900 
1901 static int
1902 port_is_closed(portid_t port_id)
1903 {
1904 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1905 		return 0;
1906 
1907 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1908 		return 0;
1909 
1910 	return 1;
1911 }
1912 
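/*
 * Start the given port (or all ports with RTE_PORT_ALL): reconfigure the
 * device and its RX/TX queues if needed, start the device, register the
 * ethdev event callbacks and finally check the link status.
 * Return 0 on success, -1 on a configuration failure.
 */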
1913 int
1914 start_port(portid_t pid)
1915 {
1916 	int diag, need_check_link_status = -1;
1917 	portid_t pi;
1918 	queueid_t qi;
1919 	struct rte_port *port;
1920 	struct ether_addr mac_addr;
1921 	enum rte_eth_event_type event_type;
1922 
1923 	if (port_id_is_invalid(pid, ENABLED_WARN))
1924 		return 0;
1925 
1926 	if (dcb_config)
1927 		dcb_test = 1;
1928 	RTE_ETH_FOREACH_DEV(pi) {
1929 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1930 			continue;
1931 
1932 		need_check_link_status = 0;
1933 		port = &ports[pi];
1934 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1935 						 RTE_PORT_HANDLING) == 0) {
1936 			printf("Port %d is not stopped, skipping\n", pi);
1937 			continue;
1938 		}
1939 
1940 		if (port->need_reconfig > 0) {
1941 			port->need_reconfig = 0;
1942 
1943 			if (flow_isolate_all) {
1944 				int ret = port_flow_isolate(pi, 1);
1945 				if (ret) {
1946 					printf("Failed to apply isolated"
1947 					       " mode on port %d\n", pi);
1948 					return -1;
1949 				}
1950 			}
1951 
1952 			printf("Configuring Port %d (socket %u)\n", pi,
1953 					port->socket_id);
1954 			/* configure port */
1955 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1956 						&(port->dev_conf));
1957 			if (diag != 0) {
1958 				if (rte_atomic16_cmpset(&(port->port_status),
1959 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1960 					printf("Port %d cannot be set back "
1961 							"to stopped\n", pi);
1962 				printf("Failed to configure port %d\n", pi);
1963 				/* try to reconfigure port next time */
1964 				port->need_reconfig = 1;
1965 				return -1;
1966 			}
1967 		}
1968 		if (port->need_reconfig_queues > 0) {
1969 			port->need_reconfig_queues = 0;
1970 			/* setup tx queues */
1971 			for (qi = 0; qi < nb_txq; qi++) {
1972 				if ((numa_support) &&
1973 					(txring_numa[pi] != NUMA_NO_CONFIG))
1974 					diag = rte_eth_tx_queue_setup(pi, qi,
1975 						port->nb_tx_desc[qi],
1976 						txring_numa[pi],
1977 						&(port->tx_conf[qi]));
1978 				else
1979 					diag = rte_eth_tx_queue_setup(pi, qi,
1980 						port->nb_tx_desc[qi],
1981 						port->socket_id,
1982 						&(port->tx_conf[qi]));
1983 
1984 				if (diag == 0)
1985 					continue;
1986 
1987 				/* Fail to setup tx queue, return */
1988 				if (rte_atomic16_cmpset(&(port->port_status),
1989 							RTE_PORT_HANDLING,
1990 							RTE_PORT_STOPPED) == 0)
1991 					printf("Port %d cannot be set back "
1992 							"to stopped\n", pi);
1993 				printf("Failed to configure port %d TX queues\n",
1994 				       pi);
1995 				/* try to reconfigure queues next time */
1996 				port->need_reconfig_queues = 1;
1997 				return -1;
1998 			}
1999 			for (qi = 0; qi < nb_rxq; qi++) {
2000 				/* setup rx queues */
2001 				if ((numa_support) &&
2002 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
2003 					struct rte_mempool *mp =
2004 						mbuf_pool_find(rxring_numa[pi]);
2005 					if (mp == NULL) {
2006 						printf("Failed to setup RX queue: "
2007 							"no mempool allocated"
2008 							" on socket %d\n",
2009 							rxring_numa[pi]);
2010 						return -1;
2011 					}
2012 
2013 					diag = rte_eth_rx_queue_setup(pi, qi,
2014 					     port->nb_rx_desc[qi],
2015 					     rxring_numa[pi],
2016 					     &(port->rx_conf[qi]),
2017 					     mp);
2018 				} else {
2019 					struct rte_mempool *mp =
2020 						mbuf_pool_find(port->socket_id);
2021 					if (mp == NULL) {
2022 						printf("Failed to setup RX queue: "
2023 							"no mempool allocated"
2024 							" on socket %d\n",
2025 							port->socket_id);
2026 						return -1;
2027 					}
2028 					diag = rte_eth_rx_queue_setup(pi, qi,
2029 					     port->nb_rx_desc[qi],
2030 					     port->socket_id,
2031 					     &(port->rx_conf[qi]),
2032 					     mp);
2033 				}
2034 				if (diag == 0)
2035 					continue;
2036 
2037 				/* Fail to setup rx queue, return */
2038 				if (rte_atomic16_cmpset(&(port->port_status),
2039 							RTE_PORT_HANDLING,
2040 							RTE_PORT_STOPPED) == 0)
2041 					printf("Port %d cannot be set back "
2042 							"to stopped\n", pi);
2043 				printf("Failed to configure port %d RX queues\n",
2044 				       pi);
2045 				/* try to reconfigure queues next time */
2046 				port->need_reconfig_queues = 1;
2047 				return -1;
2048 			}
2049 		}
2050 
2051 		/* start port */
2052 		if (rte_eth_dev_start(pi) < 0) {
2053 			printf("Failed to start port %d\n", pi);
2054 
2055 			/* Failed to start port; restore the stopped state */
2056 			if (rte_atomic16_cmpset(&(port->port_status),
2057 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2058 				printf("Port %d cannot be set back to "
2059 							"stopped\n", pi);
2060 			continue;
2061 		}
2062 
2063 		if (rte_atomic16_cmpset(&(port->port_status),
2064 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
2065 			printf("Port %d cannot be set to started\n", pi);
2066 
2067 		rte_eth_macaddr_get(pi, &mac_addr);
2068 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
2069 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
2070 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
2071 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
2072 
2073 		/* at least one port started, need checking link status */
2074 		need_check_link_status = 1;
2075 	}
2076 
2077 	for (event_type = RTE_ETH_EVENT_UNKNOWN;
2078 	     event_type < RTE_ETH_EVENT_MAX;
2079 	     event_type++) {
2080 		diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
2081 						event_type,
2082 						eth_event_callback,
2083 						NULL);
2084 		if (diag) {
2085 			printf("Failed to register event callback for event %d\n",
2086 				event_type);
2087 			return -1;
2088 		}
2089 	}
2090 
2091 	if (need_check_link_status == 1 && !no_link_check)
2092 		check_all_ports_link_status(RTE_PORT_ALL);
2093 	else if (need_check_link_status == 0)
2094 		printf("Please stop the ports first\n");
2095 
2096 	printf("Done\n");
2097 	return 0;
2098 }
2099 
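/*
 * Stop the given port (or all ports with RTE_PORT_ALL), skipping ports
 * that are still forwarding or that are bonding slaves.
 */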
2100 void
2101 stop_port(portid_t pid)
2102 {
2103 	portid_t pi;
2104 	struct rte_port *port;
2105 	int need_check_link_status = 0;
2106 
2107 	if (dcb_test) {
2108 		dcb_test = 0;
2109 		dcb_config = 0;
2110 	}
2111 
2112 	if (port_id_is_invalid(pid, ENABLED_WARN))
2113 		return;
2114 
2115 	printf("Stopping ports...\n");
2116 
2117 	RTE_ETH_FOREACH_DEV(pi) {
2118 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2119 			continue;
2120 
2121 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2122 			printf("Please remove port %d from forwarding configuration.\n", pi);
2123 			continue;
2124 		}
2125 
2126 		if (port_is_bonding_slave(pi)) {
2127 			printf("Please remove port %d from bonded device.\n", pi);
2128 			continue;
2129 		}
2130 
2131 		port = &ports[pi];
2132 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
2133 						RTE_PORT_HANDLING) == 0)
2134 			continue;
2135 
2136 		rte_eth_dev_stop(pi);
2137 
2138 		if (rte_atomic16_cmpset(&(port->port_status),
2139 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
2140 			printf("Port %d cannot be set to stopped\n", pi);
2141 		need_check_link_status = 1;
2142 	}
2143 	if (need_check_link_status && !no_link_check)
2144 		check_all_ports_link_status(RTE_PORT_ALL);
2145 
2146 	printf("Done\n");
2147 }
2148 
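/*
 * Close the given port (or all ports with RTE_PORT_ALL): flush its flow
 * rules and release the device. The port must already be stopped.
 */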
2149 void
2150 close_port(portid_t pid)
2151 {
2152 	portid_t pi;
2153 	struct rte_port *port;
2154 
2155 	if (port_id_is_invalid(pid, ENABLED_WARN))
2156 		return;
2157 
2158 	printf("Closing ports...\n");
2159 
2160 	RTE_ETH_FOREACH_DEV(pi) {
2161 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2162 			continue;
2163 
2164 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2165 			printf("Please remove port %d from forwarding configuration.\n", pi);
2166 			continue;
2167 		}
2168 
2169 		if (port_is_bonding_slave(pi)) {
2170 			printf("Please remove port %d from bonded device.\n", pi);
2171 			continue;
2172 		}
2173 
2174 		port = &ports[pi];
2175 		if (rte_atomic16_cmpset(&(port->port_status),
2176 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
2177 			printf("Port %d is already closed\n", pi);
2178 			continue;
2179 		}
2180 
2181 		if (rte_atomic16_cmpset(&(port->port_status),
2182 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
2183 			printf("Port %d is not stopped, skipping\n", pi);
2184 			continue;
2185 		}
2186 
2187 		if (port->flow_list)
2188 			port_flow_flush(pi);
2189 		rte_eth_dev_close(pi);
2190 
2191 		if (rte_atomic16_cmpset(&(port->port_status),
2192 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
2193 			printf("Port %d cannot be set to closed\n", pi);
2194 	}
2195 
2196 	printf("Done\n");
2197 }
2198 
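/*
 * Reset the given port (or all ports with RTE_PORT_ALL) and mark it for
 * reconfiguration on the next start.
 */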
2199 void
2200 reset_port(portid_t pid)
2201 {
2202 	int diag;
2203 	portid_t pi;
2204 	struct rte_port *port;
2205 
2206 	if (port_id_is_invalid(pid, ENABLED_WARN))
2207 		return;
2208 
2209 	printf("Resetting ports...\n");
2210 
2211 	RTE_ETH_FOREACH_DEV(pi) {
2212 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
2213 			continue;
2214 
2215 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
2216 			printf("Please remove port %d from forwarding "
2217 			       "configuration.\n", pi);
2218 			continue;
2219 		}
2220 
2221 		if (port_is_bonding_slave(pi)) {
2222 			printf("Please remove port %d from bonded device.\n",
2223 			       pi);
2224 			continue;
2225 		}
2226 
2227 		diag = rte_eth_dev_reset(pi);
2228 		if (diag == 0) {
2229 			port = &ports[pi];
2230 			port->need_reconfig = 1;
2231 			port->need_reconfig_queues = 1;
2232 		} else {
2233 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
2234 		}
2235 	}
2236 
2237 	printf("Done\n");
2238 }
2239 
2240 static int
2241 eth_dev_event_callback_register(void)
2242 {
2243 	int ret;
2244 
2245 	/* register the device event callback */
2246 	ret = rte_dev_event_callback_register(NULL,
2247 		eth_dev_event_callback, NULL);
2248 	if (ret) {
2249 		printf("Failed to register device event callback\n");
2250 		return -1;
2251 	}
2252 
2253 	return 0;
2254 }
2255 
2256 
2257 static int
2258 eth_dev_event_callback_unregister(void)
2259 {
2260 	int ret;
2261 
2262 	/* unregister the device event callback */
2263 	ret = rte_dev_event_callback_unregister(NULL,
2264 		eth_dev_event_callback, NULL);
2265 	if (ret < 0) {
2266 		printf("Failed to unregister device event callback\n");
2267 		return -1;
2268 	}
2269 
2270 	return 0;
2271 }
2272 
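/*
 * Hot-plug a new port from a device identifier (e.g. a PCI address or a
 * virtual device string), configure it and enable promiscuous mode.
 */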
2273 void
2274 attach_port(char *identifier)
2275 {
2276 	portid_t pi = 0;
2277 	unsigned int socket_id;
2278 
2279 	printf("Attaching a new port...\n");
2280 
2281 	if (identifier == NULL) {
2282 		printf("Invalid device identifier specified\n");
2283 		return;
2284 	}
2285 
2286 	if (rte_eth_dev_attach(identifier, &pi))
2287 		return;
2288 
2289 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
2290 	/* if socket_id is invalid, set to 0 */
2291 	if (check_socket_id(socket_id) < 0)
2292 		socket_id = 0;
2293 	reconfig(pi, socket_id);
2294 	rte_eth_promiscuous_enable(pi);
2295 
2296 	ports_ids[nb_ports] = pi;
2297 	nb_ports = rte_eth_dev_count_avail();
2298 
2299 	ports[pi].port_status = RTE_PORT_STOPPED;
2300 
2301 	update_fwd_ports(pi);
2302 
2303 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
2304 	printf("Done\n");
2305 }
2306 
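/*
 * Hot-unplug a closed port: flush its flow rules, detach the underlying
 * device and compact the ports_ids[] array.
 */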
2307 void
2308 detach_port(portid_t port_id)
2309 {
2310 	char name[RTE_ETH_NAME_MAX_LEN];
2311 	uint16_t i;
2312 
2313 	printf("Detaching a port...\n");
2314 
2315 	if (!port_is_closed(port_id)) {
2316 		printf("Please close port first\n");
2317 		return;
2318 	}
2319 
2320 	if (ports[port_id].flow_list)
2321 		port_flow_flush(port_id);
2322 
2323 	if (rte_eth_dev_detach(port_id, name)) {
2324 		TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
2325 		return;
2326 	}
2327 
2328 	for (i = 0; i < nb_ports; i++) {
2329 		if (ports_ids[i] == port_id) {
2330 			ports_ids[i] = ports_ids[nb_ports-1];
2331 			ports_ids[nb_ports-1] = 0;
2332 			break;
2333 		}
2334 	}
2335 	nb_ports = rte_eth_dev_count_avail();
2336 
2337 	update_fwd_ports(RTE_MAX_ETHPORTS);
2338 
2339 	printf("Port %u is detached. Total number of ports is now %d\n",
2340 			port_id, nb_ports);
2341 	printf("Done\n");
2343 }
2344 
2345 void
2346 pmd_test_exit(void)
2347 {
2348 	struct rte_device *device;
2349 	portid_t pt_id;
2350 	int ret;
2351 
2352 	if (test_done == 0)
2353 		stop_packet_forwarding();
2354 
2355 	if (ports != NULL) {
2356 		no_link_check = 1;
2357 		RTE_ETH_FOREACH_DEV(pt_id) {
2358 			printf("\nShutting down port %d...\n", pt_id);
2359 			fflush(stdout);
2360 			stop_port(pt_id);
2361 			close_port(pt_id);
2362 
2363 			/*
2364 			 * This is a workaround for a virtio-user issue that
2365 			 * requires calling the clean-up routine to remove the
2366 			 * existing socket.
2367 			 * This workaround is valid only for testpmd; a fix
2368 			 * valid for all applications is still needed.
2369 			 * TODO: Implement proper resource cleanup
2370 			 */
2371 			device = rte_eth_devices[pt_id].device;
2372 			if (device && !strcmp(device->driver->name, "net_virtio_user"))
2373 				detach_port(pt_id);
2374 		}
2375 	}
2376 
2377 	if (hot_plug) {
2378 		ret = rte_dev_event_monitor_stop();
2379 		if (ret)
2380 			RTE_LOG(ERR, EAL,
2381 				"Failed to stop device event monitor.\n");
2382 
2383 		ret = eth_dev_event_callback_unregister();
2384 		if (ret)
2385 			RTE_LOG(ERR, EAL,
2386 				"Failed to unregister all event callbacks.\n");
2387 	}
2388 
2389 	printf("\nBye...\n");
2390 }
2391 
2392 typedef void (*cmd_func_t)(void);
2393 struct pmd_test_command {
2394 	const char *cmd_name;
2395 	cmd_func_t cmd_func;
2396 };
2397 
2398 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
2399 
2400 /* Check the link status of all ports for up to 9 s, and print the final status */
2401 static void
2402 check_all_ports_link_status(uint32_t port_mask)
2403 {
2404 #define CHECK_INTERVAL 100 /* 100ms */
2405 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
2406 	portid_t portid;
2407 	uint8_t count, all_ports_up, print_flag = 0;
2408 	struct rte_eth_link link;
2409 
2410 	printf("Checking link statuses...\n");
2411 	fflush(stdout);
2412 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
2413 		all_ports_up = 1;
2414 		RTE_ETH_FOREACH_DEV(portid) {
2415 			if ((port_mask & (1 << portid)) == 0)
2416 				continue;
2417 			memset(&link, 0, sizeof(link));
2418 			rte_eth_link_get_nowait(portid, &link);
2419 			/* print link status if flag set */
2420 			if (print_flag == 1) {
2421 				if (link.link_status)
2422 					printf(
2423 					"Port %d Link Up. speed %u Mbps - %s\n",
2424 					portid, link.link_speed,
2425 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2426 					("full-duplex") : ("half-duplex"));
2427 				else
2428 					printf("Port %d Link Down\n", portid);
2429 				continue;
2430 			}
2431 			/* clear all_ports_up flag if any link down */
2432 			if (link.link_status == ETH_LINK_DOWN) {
2433 				all_ports_up = 0;
2434 				break;
2435 			}
2436 		}
2437 		/* after finally printing all link status, get out */
2438 		if (print_flag == 1)
2439 			break;
2440 
2441 		if (all_ports_up == 0) {
2442 			fflush(stdout);
2443 			rte_delay_ms(CHECK_INTERVAL);
2444 		}
2445 
2446 		/* set the print_flag if all ports up or timeout */
2447 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2448 			print_flag = 1;
2449 		}
2450 
2451 		if (lsc_interrupt)
2452 			break;
2453 	}
2454 }
2455 
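/*
 * Deferred handler for device removal interrupts: stop forwarding if the
 * removed port was in use, then stop, close and detach the port.
 */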
2456 static void
2457 rmv_event_callback(void *arg)
2458 {
2459 	int need_to_start = 0;
2460 	int org_no_link_check = no_link_check;
2461 	portid_t port_id = (intptr_t)arg;
2462 
2463 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
2464 
2465 	if (!test_done && port_is_forwarding(port_id)) {
2466 		need_to_start = 1;
2467 		stop_packet_forwarding();
2468 	}
2469 	no_link_check = 1;
2470 	stop_port(port_id);
2471 	no_link_check = org_no_link_check;
2472 	close_port(port_id);
2473 	detach_port(port_id);
2474 	if (need_to_start)
2475 		start_packet_forwarding(0);
2476 }
2477 
2478 /* This function is used by the interrupt thread */
2479 static int
2480 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2481 		  void *ret_param)
2482 {
2483 	static const char * const event_desc[] = {
2484 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2485 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
2486 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2487 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2488 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2489 		[RTE_ETH_EVENT_IPSEC] = "IPsec",
2490 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
2491 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
2492 		[RTE_ETH_EVENT_NEW] = "device probed",
2493 		[RTE_ETH_EVENT_DESTROY] = "device released",
2494 		[RTE_ETH_EVENT_MAX] = NULL,
2495 	};
2496 
2497 	RTE_SET_USED(param);
2498 	RTE_SET_USED(ret_param);
2499 
2500 	if (type >= RTE_ETH_EVENT_MAX) {
2501 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2502 			port_id, __func__, type);
2503 		fflush(stderr);
2504 	} else if (event_print_mask & (UINT32_C(1) << type)) {
2505 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
2506 			event_desc[type]);
2507 		fflush(stdout);
2508 	}
2509 
2510 	if (port_id_is_invalid(port_id, DISABLED_WARN))
2511 		return 0;
2512 
2513 	switch (type) {
2514 	case RTE_ETH_EVENT_INTR_RMV:
2515 		if (rte_eal_alarm_set(100000,
2516 				rmv_event_callback, (void *)(intptr_t)port_id))
2517 			fprintf(stderr, "Could not set up deferred device removal\n");
2518 		break;
2519 	default:
2520 		break;
2521 	}
2522 	return 0;
2523 }
2524 
2525 /* This function is used by the interrupt thread */
2526 static void
2527 eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
2528 			     __rte_unused void *arg)
2529 {
2530 	if (type >= RTE_DEV_EVENT_MAX) {
2531 		fprintf(stderr, "%s called upon invalid event %d\n",
2532 			__func__, type);
2533 		fflush(stderr);
2534 	}
2535 
2536 	switch (type) {
2537 	case RTE_DEV_EVENT_REMOVE:
2538 		RTE_LOG(ERR, EAL, "The device %s has been removed!\n",
2539 			device_name);
2540 		/* TODO: After failure handling finishes, stop packet
2541 		 * forwarding, stop the port, close the port and detach it.
2542 		 */
2543 		break;
2544 	case RTE_DEV_EVENT_ADD:
2545 		RTE_LOG(ERR, EAL, "The device %s has been added!\n",
2546 			device_name);
2547 		/* TODO: After the kernel driver binding finishes,
2548 		 * attach the port.
2549 		 */
2550 		break;
2551 	default:
2552 		break;
2553 	}
2554 }
2555 
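/*
 * Apply the user-provided TX queue to statistics counter mappings for the
 * given port, if any.
 */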
2556 static int
2557 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2558 {
2559 	uint16_t i;
2560 	int diag;
2561 	uint8_t mapping_found = 0;
2562 
2563 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2564 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2565 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
2566 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2567 					tx_queue_stats_mappings[i].queue_id,
2568 					tx_queue_stats_mappings[i].stats_counter_id);
2569 			if (diag != 0)
2570 				return diag;
2571 			mapping_found = 1;
2572 		}
2573 	}
2574 	if (mapping_found)
2575 		port->tx_queue_stats_mapping_enabled = 1;
2576 	return 0;
2577 }
2578 
2579 static int
2580 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2581 {
2582 	uint16_t i;
2583 	int diag;
2584 	uint8_t mapping_found = 0;
2585 
2586 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2587 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2588 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
2589 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2590 					rx_queue_stats_mappings[i].queue_id,
2591 					rx_queue_stats_mappings[i].stats_counter_id);
2592 			if (diag != 0)
2593 				return diag;
2594 			mapping_found = 1;
2595 		}
2596 	}
2597 	if (mapping_found)
2598 		port->rx_queue_stats_mapping_enabled = 1;
2599 	return 0;
2600 }
2601 
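/*
 * Program both the TX and RX queue stats mappings of a port, tolerating
 * drivers that do not support the feature (-ENOTSUP).
 */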
2602 static void
2603 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2604 {
2605 	int diag = 0;
2606 
2607 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2608 	if (diag != 0) {
2609 		if (diag == -ENOTSUP) {
2610 			port->tx_queue_stats_mapping_enabled = 0;
2611 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
2612 		}
2613 		else
2614 			rte_exit(EXIT_FAILURE,
2615 					"set_tx_queue_stats_mapping_registers "
2616 					"failed for port id=%d diag=%d\n",
2617 					pi, diag);
2618 	}
2619 
2620 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2621 	if (diag != 0) {
2622 		if (diag == -ENOTSUP) {
2623 			port->rx_queue_stats_mapping_enabled = 0;
2624 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
2625 		}
2626 		else
2627 			rte_exit(EXIT_FAILURE,
2628 					"set_rx_queue_stats_mapping_registers "
2629 					"failed for port id=%d diag=%d\n",
2630 					pi, diag);
2631 	}
2632 }
2633 
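/*
 * Initialize the RX/TX queue configuration of a port from the driver
 * defaults, overriding any threshold that was set on the command line.
 */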
2634 static void
2635 rxtx_port_config(struct rte_port *port)
2636 {
2637 	uint16_t qid;
2638 
2639 	for (qid = 0; qid < nb_rxq; qid++) {
2640 		port->rx_conf[qid] = port->dev_info.default_rxconf;
2641 
2642 		/* Check if any Rx parameters have been passed */
2643 		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2644 			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
2645 
2646 		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2647 			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
2648 
2649 		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2650 			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
2651 
2652 		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2653 			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
2654 
2655 		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2656 			port->rx_conf[qid].rx_drop_en = rx_drop_en;
2657 
2658 		port->nb_rx_desc[qid] = nb_rxd;
2659 	}
2660 
2661 	for (qid = 0; qid < nb_txq; qid++) {
2662 		port->tx_conf[qid] = port->dev_info.default_txconf;
2663 
2664 		/* Check if any Tx parameters have been passed */
2665 		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2666 			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
2667 
2668 		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2669 			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
2670 
2671 		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2672 			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
2673 
2674 		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2675 			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
2676 
2677 		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2678 			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
2679 
2680 		port->nb_tx_desc[qid] = nb_txd;
2681 	}
2682 }
2683 
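/*
 * Set the default configuration of every probed port: RSS (when several
 * RX queues are used), queue parameters, stats mappings and the
 * link-state/removal interrupts.
 */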
2684 void
2685 init_port_config(void)
2686 {
2687 	portid_t pid;
2688 	struct rte_port *port;
2689 
2690 	RTE_ETH_FOREACH_DEV(pid) {
2691 		port = &ports[pid];
2692 		port->dev_conf.fdir_conf = fdir_conf;
2693 		rte_eth_dev_info_get(pid, &port->dev_info);
2694 		if (nb_rxq > 1) {
2695 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2696 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
2697 				rss_hf & port->dev_info.flow_type_rss_offloads;
2698 		} else {
2699 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2700 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2701 		}
2702 
2703 		if (port->dcb_flag == 0) {
2704 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2705 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2706 			else
2707 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2708 		}
2709 
2710 		rxtx_port_config(port);
2711 
2712 		rte_eth_macaddr_get(pid, &port->eth_addr);
2713 
2714 		map_port_queue_stats_mapping_registers(pid, port);
2715 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2716 		rte_pmd_ixgbe_bypass_init(pid);
2717 #endif
2718 
2719 		if (lsc_interrupt &&
2720 		    (rte_eth_devices[pid].data->dev_flags &
2721 		     RTE_ETH_DEV_INTR_LSC))
2722 			port->dev_conf.intr_conf.lsc = 1;
2723 		if (rmv_interrupt &&
2724 		    (rte_eth_devices[pid].data->dev_flags &
2725 		     RTE_ETH_DEV_INTR_RMV))
2726 			port->dev_conf.intr_conf.rmv = 1;
2727 	}
2728 }
2729 
2730 void set_port_slave_flag(portid_t slave_pid)
2731 {
2732 	struct rte_port *port;
2733 
2734 	port = &ports[slave_pid];
2735 	port->slave_flag = 1;
2736 }
2737 
2738 void clear_port_slave_flag(portid_t slave_pid)
2739 {
2740 	struct rte_port *port;
2741 
2742 	port = &ports[slave_pid];
2743 	port->slave_flag = 0;
2744 }
2745 
2746 uint8_t port_is_bonding_slave(portid_t slave_pid)
2747 {
2748 	struct rte_port *port;
2749 
2750 	port = &ports[slave_pid];
2751 	if ((rte_eth_devices[slave_pid].data->dev_flags &
2752 	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2753 		return 1;
2754 	return 0;
2755 }
2756 
2757 const uint16_t vlan_tags[] = {
2758 		0,  1,  2,  3,  4,  5,  6,  7,
2759 		8,  9, 10, 11,  12, 13, 14, 15,
2760 		16, 17, 18, 19, 20, 21, 22, 23,
2761 		24, 25, 26, 27, 28, 29, 30, 31
2762 };
2763 
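/*
 * Fill an rte_eth_conf for DCB, either combined with virtualization
 * (VMDQ+DCB pools) or plain DCB with RSS, and enable priority flow
 * control when requested.
 */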
2764 static int
2765 get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
2766 		 enum dcb_mode_enable dcb_mode,
2767 		 enum rte_eth_nb_tcs num_tcs,
2768 		 uint8_t pfc_en)
2769 {
2770 	uint8_t i;
2771 	int32_t rc;
2772 	struct rte_eth_rss_conf rss_conf;
2773 
2774 	/*
2775 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2776 	 * given above, and the number of traffic classes available for use.
2777 	 */
2778 	if (dcb_mode == DCB_VT_ENABLED) {
2779 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2780 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2781 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2782 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2783 
2784 		/* VMDQ+DCB RX and TX configurations */
2785 		vmdq_rx_conf->enable_default_pool = 0;
2786 		vmdq_rx_conf->default_pool = 0;
2787 		vmdq_rx_conf->nb_queue_pools =
2788 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2789 		vmdq_tx_conf->nb_queue_pools =
2790 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2791 
2792 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2793 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2794 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2795 			vmdq_rx_conf->pool_map[i].pools =
2796 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2797 		}
2798 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2799 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2800 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2801 		}
2802 
2803 		/* set DCB mode of RX and TX of multiple queues */
2804 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2805 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2806 	} else {
2807 		struct rte_eth_dcb_rx_conf *rx_conf =
2808 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2809 		struct rte_eth_dcb_tx_conf *tx_conf =
2810 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2811 
2812 		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
2813 		if (rc != 0)
2814 			return rc;
2815 
2816 		rx_conf->nb_tcs = num_tcs;
2817 		tx_conf->nb_tcs = num_tcs;
2818 
2819 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2820 			rx_conf->dcb_tc[i] = i % num_tcs;
2821 			tx_conf->dcb_tc[i] = i % num_tcs;
2822 		}
2823 
2824 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2825 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
2826 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2827 	}
2828 
2829 	if (pfc_en)
2830 		eth_conf->dcb_capability_en =
2831 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2832 	else
2833 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2834 
2835 	return 0;
2836 }
2837 
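/*
 * Reconfigure a port for DCB: build the DCB configuration, adjust the
 * number of RX/TX queues to the number of traffic classes and program a
 * VLAN filter for each entry of vlan_tags[].
 */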
2838 int
2839 init_port_dcb_config(portid_t pid,
2840 		     enum dcb_mode_enable dcb_mode,
2841 		     enum rte_eth_nb_tcs num_tcs,
2842 		     uint8_t pfc_en)
2843 {
2844 	struct rte_eth_conf port_conf;
2845 	struct rte_port *rte_port;
2846 	int retval;
2847 	uint16_t i;
2848 
2849 	rte_port = &ports[pid];
2850 
2851 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2852 	/* Enter DCB configuration status */
2853 	dcb_config = 1;
2854 
2855 	port_conf.rxmode = rte_port->dev_conf.rxmode;
2856 	port_conf.txmode = rte_port->dev_conf.txmode;
2857 
2858 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2859 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2860 	if (retval < 0)
2861 		return retval;
2862 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2863 
2864 	/* Re-configure the device. */
2865 	rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
2866 
2867 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2868 
2869 	/* If dev_info.vmdq_pool_base is greater than 0,
2870 	 * the queue id of vmdq pools is started after pf queues.
2871 	 */
2872 	if (dcb_mode == DCB_VT_ENABLED &&
2873 	    rte_port->dev_info.vmdq_pool_base > 0) {
2874 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2875 			" for port %d.\n", pid);
2876 		return -1;
2877 	}
2878 
2879 	/* Assume the ports in testpmd have the same DCB capability
2880 	 * and the same number of RX and TX queues in DCB mode.
2881 	 */
2882 	if (dcb_mode == DCB_VT_ENABLED) {
2883 		if (rte_port->dev_info.max_vfs > 0) {
2884 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2885 			nb_txq = rte_port->dev_info.nb_tx_queues;
2886 		} else {
2887 			nb_rxq = rte_port->dev_info.max_rx_queues;
2888 			nb_txq = rte_port->dev_info.max_tx_queues;
2889 		}
2890 	} else {
2891 		/* If VT is disabled, use all PF queues */
2892 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2893 			nb_rxq = rte_port->dev_info.max_rx_queues;
2894 			nb_txq = rte_port->dev_info.max_tx_queues;
2895 		} else {
2896 			nb_rxq = (queueid_t)num_tcs;
2897 			nb_txq = (queueid_t)num_tcs;
2898 
2899 		}
2900 	}
2901 	rx_free_thresh = 64;
2902 
2903 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2904 
2905 	rxtx_port_config(rte_port);
2906 	/* VLAN filter */
2907 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2908 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2909 		rx_vft_set(pid, vlan_tags[i], 1);
2910 
2911 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2912 	map_port_queue_stats_mapping_registers(pid, rte_port);
2913 
2914 	rte_port->dcb_flag = 1;
2915 
2916 	return 0;
2917 }
2918 
2919 static void
2920 init_port(void)
2921 {
2922 	/* Configuration of Ethernet ports. */
2923 	ports = rte_zmalloc("testpmd: ports",
2924 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2925 			    RTE_CACHE_LINE_SIZE);
2926 	if (ports == NULL) {
2927 		rte_exit(EXIT_FAILURE,
2928 				"rte_zmalloc(%d struct rte_port) failed\n",
2929 				RTE_MAX_ETHPORTS);
2930 	}
2931 }
2932 
2933 static void
2934 force_quit(void)
2935 {
2936 	pmd_test_exit();
2937 	prompt_exit();
2938 }
2939 
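/*
 * Clear the screen and display the statistics of every forwarding port.
 */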
2940 static void
2941 print_stats(void)
2942 {
2943 	uint8_t i;
2944 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2945 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2946 
2947 	/* Clear screen and move to top left */
2948 	printf("%s%s", clr, top_left);
2949 
2950 	printf("\nPort statistics ====================================");
2951 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2952 		nic_stats_display(fwd_ports_ids[i]);
2953 }
2954 
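/*
 * SIGINT/SIGTERM handler: release the capture and latency frameworks,
 * then re-raise the signal with the default handler so the process exits
 * with the expected status.
 */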
2955 static void
2956 signal_handler(int signum)
2957 {
2958 	if (signum == SIGINT || signum == SIGTERM) {
2959 		printf("\nSignal %d received, preparing to exit...\n",
2960 				signum);
2961 #ifdef RTE_LIBRTE_PDUMP
2962 		/* uninitialize packet capture framework */
2963 		rte_pdump_uninit();
2964 #endif
2965 #ifdef RTE_LIBRTE_LATENCY_STATS
2966 		rte_latencystats_uninit();
2967 #endif
2968 		force_quit();
2969 		/* Set flag to indicate the force termination. */
2970 		f_quit = 1;
2971 		/* exit with the expected status */
2972 		signal(signum, SIG_DFL);
2973 		kill(getpid(), signum);
2974 	}
2975 }
2976 
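/*
 * Entry point: initialize the EAL, probe the ports, parse the testpmd
 * arguments, start all ports and run either the interactive prompt or
 * non-interactive packet forwarding.
 */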
2977 int
2978 main(int argc, char** argv)
2979 {
2980 	int diag;
2981 	portid_t port_id;
2982 	uint16_t count;
2983 	int ret;
2984 
2985 	signal(SIGINT, signal_handler);
2986 	signal(SIGTERM, signal_handler);
2987 
2988 	diag = rte_eal_init(argc, argv);
2989 	if (diag < 0)
2990 		rte_panic("Cannot init EAL\n");
2991 
2992 	testpmd_logtype = rte_log_register("testpmd");
2993 	if (testpmd_logtype < 0)
2994 		rte_panic("Cannot register log type");
2995 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2996 
2997 #ifdef RTE_LIBRTE_PDUMP
2998 	/* initialize packet capture framework */
2999 	rte_pdump_init(NULL);
3000 #endif
3001 
3002 	count = 0;
3003 	RTE_ETH_FOREACH_DEV(port_id) {
3004 		ports_ids[count] = port_id;
3005 		count++;
3006 	}
3007 	nb_ports = (portid_t) count;
3008 	if (nb_ports == 0)
3009 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
3010 
3011 	/* allocate port structures, and init them */
3012 	init_port();
3013 
3014 	set_def_fwd_config();
3015 	if (nb_lcores == 0)
3016 		rte_panic("Empty set of forwarding logical cores - check the "
3017 			  "core mask supplied in the command parameters\n");
3018 
3019 	/* Bitrate/latency stats disabled by default */
3020 #ifdef RTE_LIBRTE_BITRATE
3021 	bitrate_enabled = 0;
3022 #endif
3023 #ifdef RTE_LIBRTE_LATENCY_STATS
3024 	latencystats_enabled = 0;
3025 #endif
3026 
3027 	/* on FreeBSD, mlockall() is disabled by default */
3028 #ifdef RTE_EXEC_ENV_BSDAPP
3029 	do_mlockall = 0;
3030 #else
3031 	do_mlockall = 1;
3032 #endif
3033 
3034 	argc -= diag;
3035 	argv += diag;
3036 	if (argc > 1)
3037 		launch_args_parse(argc, argv);
3038 
3039 	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
3040 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
3041 			strerror(errno));
3042 	}
3043 
3044 	if (tx_first && interactive)
3045 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
3046 				"interactive mode.\n");
3047 
3048 	if (tx_first && lsc_interrupt) {
3049 		printf("Warning: lsc_interrupt needs to be off when "
3050 				"using tx_first. Disabling.\n");
3051 		lsc_interrupt = 0;
3052 	}
3053 
3054 	if (!nb_rxq && !nb_txq)
3055 		printf("Warning: Either the number of RX or TX queues should be non-zero\n");
3056 
3057 	if (nb_rxq > 1 && nb_rxq > nb_txq)
3058 		printf("Warning: nb_rxq=%d enables RSS configuration, "
3059 		       "but nb_txq=%d will prevent it from being fully tested.\n",
3060 		       nb_rxq, nb_txq);
3061 
3062 	init_config();
3063 
3064 	if (hot_plug) {
3065 		/* enable hot plug monitoring */
3066 		ret = rte_dev_event_monitor_start();
3067 		if (ret) {
3068 			rte_errno = EINVAL;
3069 			return -1;
3070 		}
3071 		eth_dev_event_callback_register();
3072 
3073 	}
3074 
3075 	if (start_port(RTE_PORT_ALL) != 0)
3076 		rte_exit(EXIT_FAILURE, "Failed to start ports\n");
3077 
3078 	/* set all ports to promiscuous mode by default */
3079 	RTE_ETH_FOREACH_DEV(port_id)
3080 		rte_eth_promiscuous_enable(port_id);
3081 
3082 	/* Init metrics library */
3083 	rte_metrics_init(rte_socket_id());
3084 
3085 #ifdef RTE_LIBRTE_LATENCY_STATS
3086 	if (latencystats_enabled != 0) {
3087 		int ret = rte_latencystats_init(1, NULL);
3088 		if (ret)
3089 			printf("Warning: latencystats init()"
3090 				" returned error %d\n", ret);
3091 		printf("Latencystats running on lcore %d\n",
3092 			latencystats_lcore_id);
3093 	}
3094 #endif
3095 
3096 	/* Setup bitrate stats */
3097 #ifdef RTE_LIBRTE_BITRATE
3098 	if (bitrate_enabled != 0) {
3099 		bitrate_data = rte_stats_bitrate_create();
3100 		if (bitrate_data == NULL)
3101 			rte_exit(EXIT_FAILURE,
3102 				"Could not allocate bitrate data.\n");
3103 		rte_stats_bitrate_reg(bitrate_data);
3104 	}
3105 #endif
3106 
3107 #ifdef RTE_LIBRTE_CMDLINE
3108 	if (strlen(cmdline_filename) != 0)
3109 		cmdline_read_from_file(cmdline_filename);
3110 
3111 	if (interactive == 1) {
3112 		if (auto_start) {
3113 			printf("Start automatic packet forwarding\n");
3114 			start_packet_forwarding(0);
3115 		}
3116 		prompt();
3117 		pmd_test_exit();
3118 	} else
3119 #endif
3120 	{
3121 		char c;
3122 		int rc;
3123 
3124 		f_quit = 0;
3125 
3126 		printf("No commandline core given, starting packet forwarding\n");
3127 		start_packet_forwarding(tx_first);
3128 		if (stats_period != 0) {
3129 			uint64_t prev_time = 0, cur_time, diff_time = 0;
3130 			uint64_t timer_period;
3131 
3132 			/* Convert to number of cycles */
3133 			timer_period = stats_period * rte_get_timer_hz();
3134 
3135 			while (f_quit == 0) {
3136 				cur_time = rte_get_timer_cycles();
3137 				diff_time += cur_time - prev_time;
3138 
3139 				if (diff_time >= timer_period) {
3140 					print_stats();
3141 					/* Reset the timer */
3142 					diff_time = 0;
3143 				}
3144 				/* Sleep to avoid unnecessary checks */
3145 				prev_time = cur_time;
3146 				sleep(1);
3147 			}
3148 		}
3149 
3150 		printf("Press enter to exit\n");
3151 		rc = read(0, &c, 1);
3152 		pmd_test_exit();
3153 		if (rc < 0)
3154 			return 1;
3155 	}
3156 
3157 	return 0;
3158 }
3159