xref: /dpdk/app/test-pmd/testpmd.c (revision 384161e00627aaa26f45cfd835004c4e4f377640)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 
16 #include <sys/queue.h>
17 #include <sys/stat.h>
18 
19 #include <stdint.h>
20 #include <unistd.h>
21 #include <inttypes.h>
22 
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
32 #include <rte_eal.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
40 #include <rte_mbuf.h>
41 #include <rte_interrupts.h>
42 #include <rte_pci.h>
43 #include <rte_ether.h>
44 #include <rte_ethdev.h>
45 #include <rte_dev.h>
46 #include <rte_string_fns.h>
47 #ifdef RTE_LIBRTE_IXGBE_PMD
48 #include <rte_pmd_ixgbe.h>
49 #endif
50 #ifdef RTE_LIBRTE_PDUMP
51 #include <rte_pdump.h>
52 #endif
53 #include <rte_flow.h>
54 #include <rte_metrics.h>
55 #ifdef RTE_LIBRTE_BITRATE
56 #include <rte_bitrate.h>
57 #endif
58 #ifdef RTE_LIBRTE_LATENCY_STATS
59 #include <rte_latencystats.h>
60 #endif
61 
62 #include "testpmd.h"
63 
64 uint16_t verbose_level = 0; /**< Silent by default. */
65 int testpmd_logtype; /**< Log type for testpmd logs */
66 
67 /* use master core for command line ? */
68 uint8_t interactive = 0;
69 uint8_t auto_start = 0;
70 uint8_t tx_first;
71 char cmdline_filename[PATH_MAX] = {0};
72 
73 /*
74  * NUMA support configuration.
75  * When set, the NUMA support attempts to dispatch the allocation of the
76  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
77  * probed ports among the CPU sockets 0 and 1.
78  * Otherwise, all memory is allocated from CPU socket 0.
79  */
80 uint8_t numa_support = 1; /**< numa enabled by default */
81 
82 /*
83  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
84  * not configured.
85  */
86 uint8_t socket_num = UMA_NO_CONFIG;
87 
88 /*
89  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
90  */
91 uint8_t mp_anon = 0;
92 
93 /*
94  * Record the Ethernet address of peer target ports to which packets are
95  * forwarded.
96  * Must be instantiated with the ethernet addresses of peer traffic generator
97  * ports.
98  */
99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100 portid_t nb_peer_eth_addrs = 0;
101 
102 /*
103  * Probed Target Environment.
104  */
105 struct rte_port *ports;	       /**< For all probed ethernet ports. */
106 portid_t nb_ports;             /**< Number of probed ethernet ports. */
107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
109 
110 /*
111  * Test Forwarding Configuration.
112  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
114  */
115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
118 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
119 
120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
122 
123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
125 
126 /*
127  * Forwarding engines.
128  */
129 struct fwd_engine * fwd_engines[] = {
130 	&io_fwd_engine,
131 	&mac_fwd_engine,
132 	&mac_swap_engine,
133 	&flow_gen_engine,
134 	&rx_only_engine,
135 	&tx_only_engine,
136 	&csum_fwd_engine,
137 	&icmp_echo_engine,
138 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
139 	&softnic_tm_engine,
140 	&softnic_tm_bypass_engine,
141 #endif
142 #ifdef RTE_LIBRTE_IEEE1588
143 	&ieee1588_fwd_engine,
144 #endif
145 	NULL,
146 };
147 
148 struct fwd_config cur_fwd_config;
149 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
150 uint32_t retry_enabled;
151 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
152 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
153 
154 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
155 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
156                                       * specified on command-line. */
157 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
158 
159 /*
160  * In container, it cannot terminate the process which running with 'stats-period'
161  * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
162  */
163 uint8_t f_quit;
164 
165 /*
166  * Configuration of packet segments used by the "txonly" processing engine.
167  */
168 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
169 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
170 	TXONLY_DEF_PACKET_LEN,
171 };
172 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
173 
174 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
175 /**< Split policy for packets to TX. */
176 
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179 
180 /* current configuration is in DCB or not,0 means it is not in DCB mode */
181 uint8_t dcb_config = 0;
182 
183 /* Whether the dcb is in testing status */
184 uint8_t dcb_test = 0;
185 
186 /*
187  * Configurable number of RX/TX queues.
188  */
189 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
190 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
191 
192 /*
193  * Configurable number of RX/TX ring descriptors.
194  */
195 #define RTE_TEST_RX_DESC_DEFAULT 128
196 #define RTE_TEST_TX_DESC_DEFAULT 512
197 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
198 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
199 
200 #define RTE_PMD_PARAM_UNSET -1
201 /*
202  * Configurable values of RX and TX ring threshold registers.
203  */
204 
205 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
206 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
208 
209 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
210 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
212 
213 /*
214  * Configurable value of RX free threshold.
215  */
216 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
217 
218 /*
219  * Configurable value of RX drop enable.
220  */
221 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
222 
223 /*
224  * Configurable value of TX free threshold.
225  */
226 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
227 
228 /*
229  * Configurable value of TX RS bit threshold.
230  */
231 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
232 
233 /*
234  * Receive Side Scaling (RSS) configuration.
235  */
236 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
237 
238 /*
239  * Port topology configuration
240  */
241 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
242 
243 /*
244  * Avoids to flush all the RX streams before starts forwarding.
245  */
246 uint8_t no_flush_rx = 0; /* flush by default */
247 
248 /*
249  * Flow API isolated mode.
250  */
251 uint8_t flow_isolate_all;
252 
253 /*
254  * Avoids to check link status when starting/stopping a port.
255  */
256 uint8_t no_link_check = 0; /* check by default */
257 
258 /*
259  * Enable link status change notification
260  */
261 uint8_t lsc_interrupt = 1; /* enabled by default */
262 
263 /*
264  * Enable device removal notification.
265  */
266 uint8_t rmv_interrupt = 1; /* enabled by default */
267 
268 /*
269  * Display or mask ether events
270  * Default to all events except VF_MBOX
271  */
272 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
273 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
274 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
275 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
276 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
277 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
278 
279 /*
280  * NIC bypass mode configuration options.
281  */
282 
283 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
284 /* The NIC bypass watchdog timeout. */
285 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
286 #endif
287 
288 
289 #ifdef RTE_LIBRTE_LATENCY_STATS
290 
291 /*
292  * Set when latency stats is enabled in the commandline
293  */
294 uint8_t latencystats_enabled;
295 
296 /*
297  * Lcore ID to serive latency statistics.
298  */
299 lcoreid_t latencystats_lcore_id = -1;
300 
301 #endif
302 
303 /*
304  * Ethernet device configuration.
305  */
306 struct rte_eth_rxmode rx_mode = {
307 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
308 	.offloads = (DEV_RX_OFFLOAD_VLAN_FILTER |
309 		     DEV_RX_OFFLOAD_VLAN_STRIP |
310 		     DEV_RX_OFFLOAD_CRC_STRIP),
311 	.ignore_offload_bitfield = 1,
312 };
313 
314 struct rte_eth_txmode tx_mode;
315 
316 struct rte_fdir_conf fdir_conf = {
317 	.mode = RTE_FDIR_MODE_NONE,
318 	.pballoc = RTE_FDIR_PBALLOC_64K,
319 	.status = RTE_FDIR_REPORT_STATUS,
320 	.mask = {
321 		.vlan_tci_mask = 0x0,
322 		.ipv4_mask     = {
323 			.src_ip = 0xFFFFFFFF,
324 			.dst_ip = 0xFFFFFFFF,
325 		},
326 		.ipv6_mask     = {
327 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
328 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
329 		},
330 		.src_port_mask = 0xFFFF,
331 		.dst_port_mask = 0xFFFF,
332 		.mac_addr_byte_mask = 0xFF,
333 		.tunnel_type_mask = 1,
334 		.tunnel_id_mask = 0xFFFFFFFF,
335 	},
336 	.drop_queue = 127,
337 };
338 
339 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
340 
341 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
342 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
343 
344 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
345 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
346 
347 uint16_t nb_tx_queue_stats_mappings = 0;
348 uint16_t nb_rx_queue_stats_mappings = 0;
349 
350 /*
351  * Display zero values by default for xstats
352  */
353 uint8_t xstats_hide_zero;
354 
355 unsigned int num_sockets = 0;
356 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
357 
358 #ifdef RTE_LIBRTE_BITRATE
359 /* Bitrate statistics */
360 struct rte_stats_bitrates *bitrate_data;
361 lcoreid_t bitrate_lcore_id;
362 uint8_t bitrate_enabled;
363 #endif
364 
365 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
366 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
367 
368 /* Forward function declarations */
369 static void map_port_queue_stats_mapping_registers(portid_t pi,
370 						   struct rte_port *port);
371 static void check_all_ports_link_status(uint32_t port_mask);
372 static int eth_event_callback(portid_t port_id,
373 			      enum rte_eth_event_type type,
374 			      void *param, void *ret_param);
375 
376 /*
377  * Check if all the ports are started.
378  * If yes, return positive value. If not, return zero.
379  */
380 static int all_ports_started(void);
381 
382 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
383 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
384 
385 /*
386  * Helper function to check if socket is already discovered.
387  * If yes, return positive value. If not, return zero.
388  */
389 int
390 new_socket_id(unsigned int socket_id)
391 {
392 	unsigned int i;
393 
394 	for (i = 0; i < num_sockets; i++) {
395 		if (socket_ids[i] == socket_id)
396 			return 0;
397 	}
398 	return 1;
399 }
400 
401 /*
402  * Setup default configuration.
403  */
404 static void
405 set_default_fwd_lcores_config(void)
406 {
407 	unsigned int i;
408 	unsigned int nb_lc;
409 	unsigned int sock_num;
410 
411 	nb_lc = 0;
412 	for (i = 0; i < RTE_MAX_LCORE; i++) {
413 		sock_num = rte_lcore_to_socket_id(i);
414 		if (new_socket_id(sock_num)) {
415 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
416 				rte_exit(EXIT_FAILURE,
417 					 "Total sockets greater than %u\n",
418 					 RTE_MAX_NUMA_NODES);
419 			}
420 			socket_ids[num_sockets++] = sock_num;
421 		}
422 		if (!rte_lcore_is_enabled(i))
423 			continue;
424 		if (i == rte_get_master_lcore())
425 			continue;
426 		fwd_lcores_cpuids[nb_lc++] = i;
427 	}
428 	nb_lcores = (lcoreid_t) nb_lc;
429 	nb_cfg_lcores = nb_lcores;
430 	nb_fwd_lcores = 1;
431 }
432 
433 static void
434 set_def_peer_eth_addrs(void)
435 {
436 	portid_t i;
437 
438 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
439 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
440 		peer_eth_addrs[i].addr_bytes[5] = i;
441 	}
442 }
443 
444 static void
445 set_default_fwd_ports_config(void)
446 {
447 	portid_t pt_id;
448 	int i = 0;
449 
450 	RTE_ETH_FOREACH_DEV(pt_id)
451 		fwd_ports_ids[i++] = pt_id;
452 
453 	nb_cfg_ports = nb_ports;
454 	nb_fwd_ports = nb_ports;
455 }
456 
457 void
458 set_def_fwd_config(void)
459 {
460 	set_default_fwd_lcores_config();
461 	set_def_peer_eth_addrs();
462 	set_default_fwd_ports_config();
463 }
464 
465 /*
466  * Configuration initialisation done once at init time.
467  */
468 static void
469 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
470 		 unsigned int socket_id)
471 {
472 	char pool_name[RTE_MEMPOOL_NAMESIZE];
473 	struct rte_mempool *rte_mp = NULL;
474 	uint32_t mb_size;
475 
476 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
477 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
478 
479 	TESTPMD_LOG(INFO,
480 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
481 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
482 
483 	if (mp_anon != 0) {
484 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
485 			mb_size, (unsigned) mb_mempool_cache,
486 			sizeof(struct rte_pktmbuf_pool_private),
487 			socket_id, 0);
488 		if (rte_mp == NULL)
489 			goto err;
490 
491 		if (rte_mempool_populate_anon(rte_mp) == 0) {
492 			rte_mempool_free(rte_mp);
493 			rte_mp = NULL;
494 			goto err;
495 		}
496 		rte_pktmbuf_pool_init(rte_mp, NULL);
497 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
498 	} else {
499 		/* wrapper to rte_mempool_create() */
500 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
501 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
502 	}
503 
504 err:
505 	if (rte_mp == NULL) {
506 		rte_exit(EXIT_FAILURE,
507 			"Creation of mbuf pool for socket %u failed: %s\n",
508 			socket_id, rte_strerror(rte_errno));
509 	} else if (verbose_level > 0) {
510 		rte_mempool_dump(stdout, rte_mp);
511 	}
512 }
513 
514 /*
515  * Check given socket id is valid or not with NUMA mode,
516  * if valid, return 0, else return -1
517  */
518 static int
519 check_socket_id(const unsigned int socket_id)
520 {
521 	static int warning_once = 0;
522 
523 	if (new_socket_id(socket_id)) {
524 		if (!warning_once && numa_support)
525 			printf("Warning: NUMA should be configured manually by"
526 			       " using --port-numa-config and"
527 			       " --ring-numa-config parameters along with"
528 			       " --numa.\n");
529 		warning_once = 1;
530 		return -1;
531 	}
532 	return 0;
533 }
534 
535 static void
536 init_config(void)
537 {
538 	portid_t pid;
539 	struct rte_port *port;
540 	struct rte_mempool *mbp;
541 	unsigned int nb_mbuf_per_pool;
542 	lcoreid_t  lc_id;
543 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
544 	struct rte_gro_param gro_param;
545 	uint32_t gso_types;
546 
547 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
548 
549 	if (numa_support) {
550 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
551 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
552 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
553 	}
554 
555 	/* Configuration of logical cores. */
556 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
557 				sizeof(struct fwd_lcore *) * nb_lcores,
558 				RTE_CACHE_LINE_SIZE);
559 	if (fwd_lcores == NULL) {
560 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
561 							"failed\n", nb_lcores);
562 	}
563 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
564 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
565 					       sizeof(struct fwd_lcore),
566 					       RTE_CACHE_LINE_SIZE);
567 		if (fwd_lcores[lc_id] == NULL) {
568 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
569 								"failed\n");
570 		}
571 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
572 	}
573 
574 	RTE_ETH_FOREACH_DEV(pid) {
575 		port = &ports[pid];
576 		/* Apply default Tx configuration for all ports */
577 		port->dev_conf.txmode = tx_mode;
578 		port->dev_conf.rxmode = rx_mode;
579 		rte_eth_dev_info_get(pid, &port->dev_info);
580 
581 		if (numa_support) {
582 			if (port_numa[pid] != NUMA_NO_CONFIG)
583 				port_per_socket[port_numa[pid]]++;
584 			else {
585 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
586 
587 				/* if socket_id is invalid, set to 0 */
588 				if (check_socket_id(socket_id) < 0)
589 					socket_id = 0;
590 				port_per_socket[socket_id]++;
591 			}
592 		}
593 
594 		/* set flag to initialize port/queue */
595 		port->need_reconfig = 1;
596 		port->need_reconfig_queues = 1;
597 	}
598 
599 	/*
600 	 * Create pools of mbuf.
601 	 * If NUMA support is disabled, create a single pool of mbuf in
602 	 * socket 0 memory by default.
603 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
604 	 *
605 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
606 	 * nb_txd can be configured at run time.
607 	 */
608 	if (param_total_num_mbufs)
609 		nb_mbuf_per_pool = param_total_num_mbufs;
610 	else {
611 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
612 			(nb_lcores * mb_mempool_cache) +
613 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
614 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
615 	}
616 
617 	if (numa_support) {
618 		uint8_t i;
619 
620 		for (i = 0; i < num_sockets; i++)
621 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
622 					 socket_ids[i]);
623 	} else {
624 		if (socket_num == UMA_NO_CONFIG)
625 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
626 		else
627 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
628 						 socket_num);
629 	}
630 
631 	init_port_config();
632 
633 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
634 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
635 	/*
636 	 * Records which Mbuf pool to use by each logical core, if needed.
637 	 */
638 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
639 		mbp = mbuf_pool_find(
640 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
641 
642 		if (mbp == NULL)
643 			mbp = mbuf_pool_find(0);
644 		fwd_lcores[lc_id]->mbp = mbp;
645 		/* initialize GSO context */
646 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
647 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
648 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
649 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
650 			ETHER_CRC_LEN;
651 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
652 	}
653 
654 	/* Configuration of packet forwarding streams. */
655 	if (init_fwd_streams() < 0)
656 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
657 
658 	fwd_config_setup();
659 
660 	/* create a gro context for each lcore */
661 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
662 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
663 	gro_param.max_item_per_flow = MAX_PKT_BURST;
664 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
665 		gro_param.socket_id = rte_lcore_to_socket_id(
666 				fwd_lcores_cpuids[lc_id]);
667 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
668 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
669 			rte_exit(EXIT_FAILURE,
670 					"rte_gro_ctx_create() failed\n");
671 		}
672 	}
673 }
674 
675 
676 void
677 reconfig(portid_t new_port_id, unsigned socket_id)
678 {
679 	struct rte_port *port;
680 
681 	/* Reconfiguration of Ethernet ports. */
682 	port = &ports[new_port_id];
683 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
684 
685 	/* set flag to initialize port/queue */
686 	port->need_reconfig = 1;
687 	port->need_reconfig_queues = 1;
688 	port->socket_id = socket_id;
689 
690 	init_port_config();
691 }
692 
693 
694 int
695 init_fwd_streams(void)
696 {
697 	portid_t pid;
698 	struct rte_port *port;
699 	streamid_t sm_id, nb_fwd_streams_new;
700 	queueid_t q;
701 
702 	/* set socket id according to numa or not */
703 	RTE_ETH_FOREACH_DEV(pid) {
704 		port = &ports[pid];
705 		if (nb_rxq > port->dev_info.max_rx_queues) {
706 			printf("Fail: nb_rxq(%d) is greater than "
707 				"max_rx_queues(%d)\n", nb_rxq,
708 				port->dev_info.max_rx_queues);
709 			return -1;
710 		}
711 		if (nb_txq > port->dev_info.max_tx_queues) {
712 			printf("Fail: nb_txq(%d) is greater than "
713 				"max_tx_queues(%d)\n", nb_txq,
714 				port->dev_info.max_tx_queues);
715 			return -1;
716 		}
717 		if (numa_support) {
718 			if (port_numa[pid] != NUMA_NO_CONFIG)
719 				port->socket_id = port_numa[pid];
720 			else {
721 				port->socket_id = rte_eth_dev_socket_id(pid);
722 
723 				/* if socket_id is invalid, set to 0 */
724 				if (check_socket_id(port->socket_id) < 0)
725 					port->socket_id = 0;
726 			}
727 		}
728 		else {
729 			if (socket_num == UMA_NO_CONFIG)
730 				port->socket_id = 0;
731 			else
732 				port->socket_id = socket_num;
733 		}
734 	}
735 
736 	q = RTE_MAX(nb_rxq, nb_txq);
737 	if (q == 0) {
738 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
739 		return -1;
740 	}
741 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
742 	if (nb_fwd_streams_new == nb_fwd_streams)
743 		return 0;
744 	/* clear the old */
745 	if (fwd_streams != NULL) {
746 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
747 			if (fwd_streams[sm_id] == NULL)
748 				continue;
749 			rte_free(fwd_streams[sm_id]);
750 			fwd_streams[sm_id] = NULL;
751 		}
752 		rte_free(fwd_streams);
753 		fwd_streams = NULL;
754 	}
755 
756 	/* init new */
757 	nb_fwd_streams = nb_fwd_streams_new;
758 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
759 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
760 	if (fwd_streams == NULL)
761 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
762 						"failed\n", nb_fwd_streams);
763 
764 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
765 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
766 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
767 		if (fwd_streams[sm_id] == NULL)
768 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
769 								" failed\n");
770 	}
771 
772 	return 0;
773 }
774 
775 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
776 static void
777 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
778 {
779 	unsigned int total_burst;
780 	unsigned int nb_burst;
781 	unsigned int burst_stats[3];
782 	uint16_t pktnb_stats[3];
783 	uint16_t nb_pkt;
784 	int burst_percent[3];
785 
786 	/*
787 	 * First compute the total number of packet bursts and the
788 	 * two highest numbers of bursts of the same number of packets.
789 	 */
790 	total_burst = 0;
791 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
792 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
793 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
794 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
795 		if (nb_burst == 0)
796 			continue;
797 		total_burst += nb_burst;
798 		if (nb_burst > burst_stats[0]) {
799 			burst_stats[1] = burst_stats[0];
800 			pktnb_stats[1] = pktnb_stats[0];
801 			burst_stats[0] = nb_burst;
802 			pktnb_stats[0] = nb_pkt;
803 		}
804 	}
805 	if (total_burst == 0)
806 		return;
807 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
808 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
809 	       burst_percent[0], (int) pktnb_stats[0]);
810 	if (burst_stats[0] == total_burst) {
811 		printf("]\n");
812 		return;
813 	}
814 	if (burst_stats[0] + burst_stats[1] == total_burst) {
815 		printf(" + %d%% of %d pkts]\n",
816 		       100 - burst_percent[0], pktnb_stats[1]);
817 		return;
818 	}
819 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
820 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
821 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
822 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
823 		return;
824 	}
825 	printf(" + %d%% of %d pkts + %d%% of others]\n",
826 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
827 }
828 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
829 
830 static void
831 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
832 {
833 	struct rte_port *port;
834 	uint8_t i;
835 
836 	static const char *fwd_stats_border = "----------------------";
837 
838 	port = &ports[port_id];
839 	printf("\n  %s Forward statistics for port %-2d %s\n",
840 	       fwd_stats_border, port_id, fwd_stats_border);
841 
842 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
843 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
844 		       "%-"PRIu64"\n",
845 		       stats->ipackets, stats->imissed,
846 		       (uint64_t) (stats->ipackets + stats->imissed));
847 
848 		if (cur_fwd_eng == &csum_fwd_engine)
849 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
850 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
851 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
852 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
853 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
854 		}
855 
856 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
857 		       "%-"PRIu64"\n",
858 		       stats->opackets, port->tx_dropped,
859 		       (uint64_t) (stats->opackets + port->tx_dropped));
860 	}
861 	else {
862 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
863 		       "%14"PRIu64"\n",
864 		       stats->ipackets, stats->imissed,
865 		       (uint64_t) (stats->ipackets + stats->imissed));
866 
867 		if (cur_fwd_eng == &csum_fwd_engine)
868 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
869 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
870 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
871 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
872 			printf("  RX-nombufs:             %14"PRIu64"\n",
873 			       stats->rx_nombuf);
874 		}
875 
876 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
877 		       "%14"PRIu64"\n",
878 		       stats->opackets, port->tx_dropped,
879 		       (uint64_t) (stats->opackets + port->tx_dropped));
880 	}
881 
882 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
883 	if (port->rx_stream)
884 		pkt_burst_stats_display("RX",
885 			&port->rx_stream->rx_burst_stats);
886 	if (port->tx_stream)
887 		pkt_burst_stats_display("TX",
888 			&port->tx_stream->tx_burst_stats);
889 #endif
890 
891 	if (port->rx_queue_stats_mapping_enabled) {
892 		printf("\n");
893 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
894 			printf("  Stats reg %2d RX-packets:%14"PRIu64
895 			       "     RX-errors:%14"PRIu64
896 			       "    RX-bytes:%14"PRIu64"\n",
897 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
898 		}
899 		printf("\n");
900 	}
901 	if (port->tx_queue_stats_mapping_enabled) {
902 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
903 			printf("  Stats reg %2d TX-packets:%14"PRIu64
904 			       "                                 TX-bytes:%14"PRIu64"\n",
905 			       i, stats->q_opackets[i], stats->q_obytes[i]);
906 		}
907 	}
908 
909 	printf("  %s--------------------------------%s\n",
910 	       fwd_stats_border, fwd_stats_border);
911 }
912 
913 static void
914 fwd_stream_stats_display(streamid_t stream_id)
915 {
916 	struct fwd_stream *fs;
917 	static const char *fwd_top_stats_border = "-------";
918 
919 	fs = fwd_streams[stream_id];
920 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
921 	    (fs->fwd_dropped == 0))
922 		return;
923 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
924 	       "TX Port=%2d/Queue=%2d %s\n",
925 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
926 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
927 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
928 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
929 
930 	/* if checksum mode */
931 	if (cur_fwd_eng == &csum_fwd_engine) {
932 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
933 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
934 	}
935 
936 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
937 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
938 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
939 #endif
940 }
941 
942 static void
943 flush_fwd_rx_queues(void)
944 {
945 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
946 	portid_t  rxp;
947 	portid_t port_id;
948 	queueid_t rxq;
949 	uint16_t  nb_rx;
950 	uint16_t  i;
951 	uint8_t   j;
952 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
953 	uint64_t timer_period;
954 
955 	/* convert to number of cycles */
956 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
957 
958 	for (j = 0; j < 2; j++) {
959 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
960 			for (rxq = 0; rxq < nb_rxq; rxq++) {
961 				port_id = fwd_ports_ids[rxp];
962 				/**
963 				* testpmd can stuck in the below do while loop
964 				* if rte_eth_rx_burst() always returns nonzero
965 				* packets. So timer is added to exit this loop
966 				* after 1sec timer expiry.
967 				*/
968 				prev_tsc = rte_rdtsc();
969 				do {
970 					nb_rx = rte_eth_rx_burst(port_id, rxq,
971 						pkts_burst, MAX_PKT_BURST);
972 					for (i = 0; i < nb_rx; i++)
973 						rte_pktmbuf_free(pkts_burst[i]);
974 
975 					cur_tsc = rte_rdtsc();
976 					diff_tsc = cur_tsc - prev_tsc;
977 					timer_tsc += diff_tsc;
978 				} while ((nb_rx > 0) &&
979 					(timer_tsc < timer_period));
980 				timer_tsc = 0;
981 			}
982 		}
983 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
984 	}
985 }
986 
987 static void
988 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
989 {
990 	struct fwd_stream **fsm;
991 	streamid_t nb_fs;
992 	streamid_t sm_id;
993 #ifdef RTE_LIBRTE_BITRATE
994 	uint64_t tics_per_1sec;
995 	uint64_t tics_datum;
996 	uint64_t tics_current;
997 	uint8_t idx_port, cnt_ports;
998 
999 	cnt_ports = rte_eth_dev_count();
1000 	tics_datum = rte_rdtsc();
1001 	tics_per_1sec = rte_get_timer_hz();
1002 #endif
1003 	fsm = &fwd_streams[fc->stream_idx];
1004 	nb_fs = fc->stream_nb;
1005 	do {
1006 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1007 			(*pkt_fwd)(fsm[sm_id]);
1008 #ifdef RTE_LIBRTE_BITRATE
1009 		if (bitrate_enabled != 0 &&
1010 				bitrate_lcore_id == rte_lcore_id()) {
1011 			tics_current = rte_rdtsc();
1012 			if (tics_current - tics_datum >= tics_per_1sec) {
1013 				/* Periodic bitrate calculation */
1014 				for (idx_port = 0;
1015 						idx_port < cnt_ports;
1016 						idx_port++)
1017 					rte_stats_bitrate_calc(bitrate_data,
1018 						idx_port);
1019 				tics_datum = tics_current;
1020 			}
1021 		}
1022 #endif
1023 #ifdef RTE_LIBRTE_LATENCY_STATS
1024 		if (latencystats_enabled != 0 &&
1025 				latencystats_lcore_id == rte_lcore_id())
1026 			rte_latencystats_update();
1027 #endif
1028 
1029 	} while (! fc->stopped);
1030 }
1031 
1032 static int
1033 start_pkt_forward_on_core(void *fwd_arg)
1034 {
1035 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1036 			     cur_fwd_config.fwd_eng->packet_fwd);
1037 	return 0;
1038 }
1039 
1040 /*
1041  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1042  * Used to start communication flows in network loopback test configurations.
1043  */
1044 static int
1045 run_one_txonly_burst_on_core(void *fwd_arg)
1046 {
1047 	struct fwd_lcore *fwd_lc;
1048 	struct fwd_lcore tmp_lcore;
1049 
1050 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1051 	tmp_lcore = *fwd_lc;
1052 	tmp_lcore.stopped = 1;
1053 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1054 	return 0;
1055 }
1056 
1057 /*
1058  * Launch packet forwarding:
1059  *     - Setup per-port forwarding context.
1060  *     - launch logical cores with their forwarding configuration.
1061  */
1062 static void
1063 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1064 {
1065 	port_fwd_begin_t port_fwd_begin;
1066 	unsigned int i;
1067 	unsigned int lc_id;
1068 	int diag;
1069 
1070 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1071 	if (port_fwd_begin != NULL) {
1072 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1073 			(*port_fwd_begin)(fwd_ports_ids[i]);
1074 	}
1075 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1076 		lc_id = fwd_lcores_cpuids[i];
1077 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1078 			fwd_lcores[i]->stopped = 0;
1079 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1080 						     fwd_lcores[i], lc_id);
1081 			if (diag != 0)
1082 				printf("launch lcore %u failed - diag=%d\n",
1083 				       lc_id, diag);
1084 		}
1085 	}
1086 }
1087 
1088 /*
1089  * Launch packet forwarding configuration.
1090  */
1091 void
1092 start_packet_forwarding(int with_tx_first)
1093 {
1094 	port_fwd_begin_t port_fwd_begin;
1095 	port_fwd_end_t  port_fwd_end;
1096 	struct rte_port *port;
1097 	unsigned int i;
1098 	portid_t   pt_id;
1099 	streamid_t sm_id;
1100 
1101 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1102 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1103 
1104 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1105 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1106 
1107 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1108 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1109 		(!nb_rxq || !nb_txq))
1110 		rte_exit(EXIT_FAILURE,
1111 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
1112 			cur_fwd_eng->fwd_mode_name);
1113 
1114 	if (all_ports_started() == 0) {
1115 		printf("Not all ports were started\n");
1116 		return;
1117 	}
1118 	if (test_done == 0) {
1119 		printf("Packet forwarding already started\n");
1120 		return;
1121 	}
1122 
1123 	if (init_fwd_streams() < 0) {
1124 		printf("Fail from init_fwd_streams()\n");
1125 		return;
1126 	}
1127 
1128 	if(dcb_test) {
1129 		for (i = 0; i < nb_fwd_ports; i++) {
1130 			pt_id = fwd_ports_ids[i];
1131 			port = &ports[pt_id];
1132 			if (!port->dcb_flag) {
1133 				printf("In DCB mode, all forwarding ports must "
1134                                        "be configured in this mode.\n");
1135 				return;
1136 			}
1137 		}
1138 		if (nb_fwd_lcores == 1) {
1139 			printf("In DCB mode,the nb forwarding cores "
1140                                "should be larger than 1.\n");
1141 			return;
1142 		}
1143 	}
1144 	test_done = 0;
1145 
1146 	if(!no_flush_rx)
1147 		flush_fwd_rx_queues();
1148 
1149 	fwd_config_setup();
1150 	pkt_fwd_config_display(&cur_fwd_config);
1151 	rxtx_config_display();
1152 
1153 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1154 		pt_id = fwd_ports_ids[i];
1155 		port = &ports[pt_id];
1156 		rte_eth_stats_get(pt_id, &port->stats);
1157 		port->tx_dropped = 0;
1158 
1159 		map_port_queue_stats_mapping_registers(pt_id, port);
1160 	}
1161 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1162 		fwd_streams[sm_id]->rx_packets = 0;
1163 		fwd_streams[sm_id]->tx_packets = 0;
1164 		fwd_streams[sm_id]->fwd_dropped = 0;
1165 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1166 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1167 
1168 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1169 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1170 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1171 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1172 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1173 #endif
1174 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1175 		fwd_streams[sm_id]->core_cycles = 0;
1176 #endif
1177 	}
1178 	if (with_tx_first) {
1179 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1180 		if (port_fwd_begin != NULL) {
1181 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1182 				(*port_fwd_begin)(fwd_ports_ids[i]);
1183 		}
1184 		while (with_tx_first--) {
1185 			launch_packet_forwarding(
1186 					run_one_txonly_burst_on_core);
1187 			rte_eal_mp_wait_lcore();
1188 		}
1189 		port_fwd_end = tx_only_engine.port_fwd_end;
1190 		if (port_fwd_end != NULL) {
1191 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1192 				(*port_fwd_end)(fwd_ports_ids[i]);
1193 		}
1194 	}
1195 	launch_packet_forwarding(start_pkt_forward_on_core);
1196 }
1197 
1198 void
1199 stop_packet_forwarding(void)
1200 {
1201 	struct rte_eth_stats stats;
1202 	struct rte_port *port;
1203 	port_fwd_end_t  port_fwd_end;
1204 	int i;
1205 	portid_t   pt_id;
1206 	streamid_t sm_id;
1207 	lcoreid_t  lc_id;
1208 	uint64_t total_recv;
1209 	uint64_t total_xmit;
1210 	uint64_t total_rx_dropped;
1211 	uint64_t total_tx_dropped;
1212 	uint64_t total_rx_nombuf;
1213 	uint64_t tx_dropped;
1214 	uint64_t rx_bad_ip_csum;
1215 	uint64_t rx_bad_l4_csum;
1216 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1217 	uint64_t fwd_cycles;
1218 #endif
1219 
1220 	static const char *acc_stats_border = "+++++++++++++++";
1221 
1222 	if (test_done) {
1223 		printf("Packet forwarding not started\n");
1224 		return;
1225 	}
1226 	printf("Telling cores to stop...");
1227 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1228 		fwd_lcores[lc_id]->stopped = 1;
1229 	printf("\nWaiting for lcores to finish...\n");
1230 	rte_eal_mp_wait_lcore();
1231 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1232 	if (port_fwd_end != NULL) {
1233 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1234 			pt_id = fwd_ports_ids[i];
1235 			(*port_fwd_end)(pt_id);
1236 		}
1237 	}
1238 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1239 	fwd_cycles = 0;
1240 #endif
1241 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1242 		if (cur_fwd_config.nb_fwd_streams >
1243 		    cur_fwd_config.nb_fwd_ports) {
1244 			fwd_stream_stats_display(sm_id);
1245 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1246 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1247 		} else {
1248 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1249 				fwd_streams[sm_id];
1250 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1251 				fwd_streams[sm_id];
1252 		}
1253 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1254 		tx_dropped = (uint64_t) (tx_dropped +
1255 					 fwd_streams[sm_id]->fwd_dropped);
1256 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1257 
1258 		rx_bad_ip_csum =
1259 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1260 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1261 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1262 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1263 							rx_bad_ip_csum;
1264 
1265 		rx_bad_l4_csum =
1266 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1267 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1268 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1269 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1270 							rx_bad_l4_csum;
1271 
1272 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1273 		fwd_cycles = (uint64_t) (fwd_cycles +
1274 					 fwd_streams[sm_id]->core_cycles);
1275 #endif
1276 	}
1277 	total_recv = 0;
1278 	total_xmit = 0;
1279 	total_rx_dropped = 0;
1280 	total_tx_dropped = 0;
1281 	total_rx_nombuf  = 0;
1282 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1283 		pt_id = fwd_ports_ids[i];
1284 
1285 		port = &ports[pt_id];
1286 		rte_eth_stats_get(pt_id, &stats);
1287 		stats.ipackets -= port->stats.ipackets;
1288 		port->stats.ipackets = 0;
1289 		stats.opackets -= port->stats.opackets;
1290 		port->stats.opackets = 0;
1291 		stats.ibytes   -= port->stats.ibytes;
1292 		port->stats.ibytes = 0;
1293 		stats.obytes   -= port->stats.obytes;
1294 		port->stats.obytes = 0;
1295 		stats.imissed  -= port->stats.imissed;
1296 		port->stats.imissed = 0;
1297 		stats.oerrors  -= port->stats.oerrors;
1298 		port->stats.oerrors = 0;
1299 		stats.rx_nombuf -= port->stats.rx_nombuf;
1300 		port->stats.rx_nombuf = 0;
1301 
1302 		total_recv += stats.ipackets;
1303 		total_xmit += stats.opackets;
1304 		total_rx_dropped += stats.imissed;
1305 		total_tx_dropped += port->tx_dropped;
1306 		total_rx_nombuf  += stats.rx_nombuf;
1307 
1308 		fwd_port_stats_display(pt_id, &stats);
1309 	}
1310 
1311 	printf("\n  %s Accumulated forward statistics for all ports"
1312 	       "%s\n",
1313 	       acc_stats_border, acc_stats_border);
1314 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1315 	       "%-"PRIu64"\n"
1316 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1317 	       "%-"PRIu64"\n",
1318 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1319 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1320 	if (total_rx_nombuf > 0)
1321 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1322 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1323 	       "%s\n",
1324 	       acc_stats_border, acc_stats_border);
1325 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1326 	if (total_recv > 0)
1327 		printf("\n  CPU cycles/packet=%u (total cycles="
1328 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1329 		       (unsigned int)(fwd_cycles / total_recv),
1330 		       fwd_cycles, total_recv);
1331 #endif
1332 	printf("\nDone.\n");
1333 	test_done = 1;
1334 }
1335 
1336 void
1337 dev_set_link_up(portid_t pid)
1338 {
1339 	if (rte_eth_dev_set_link_up(pid) < 0)
1340 		printf("\nSet link up fail.\n");
1341 }
1342 
1343 void
1344 dev_set_link_down(portid_t pid)
1345 {
1346 	if (rte_eth_dev_set_link_down(pid) < 0)
1347 		printf("\nSet link down fail.\n");
1348 }
1349 
1350 static int
1351 all_ports_started(void)
1352 {
1353 	portid_t pi;
1354 	struct rte_port *port;
1355 
1356 	RTE_ETH_FOREACH_DEV(pi) {
1357 		port = &ports[pi];
1358 		/* Check if there is a port which is not started */
1359 		if ((port->port_status != RTE_PORT_STARTED) &&
1360 			(port->slave_flag == 0))
1361 			return 0;
1362 	}
1363 
1364 	/* No port is not started */
1365 	return 1;
1366 }
1367 
1368 int
1369 port_is_stopped(portid_t port_id)
1370 {
1371 	struct rte_port *port = &ports[port_id];
1372 
1373 	if ((port->port_status != RTE_PORT_STOPPED) &&
1374 	    (port->slave_flag == 0))
1375 		return 0;
1376 	return 1;
1377 }
1378 
1379 int
1380 all_ports_stopped(void)
1381 {
1382 	portid_t pi;
1383 
1384 	RTE_ETH_FOREACH_DEV(pi) {
1385 		if (!port_is_stopped(pi))
1386 			return 0;
1387 	}
1388 
1389 	return 1;
1390 }
1391 
1392 int
1393 port_is_started(portid_t port_id)
1394 {
1395 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1396 		return 0;
1397 
1398 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1399 		return 0;
1400 
1401 	return 1;
1402 }
1403 
1404 static int
1405 port_is_closed(portid_t port_id)
1406 {
1407 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1408 		return 0;
1409 
1410 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1411 		return 0;
1412 
1413 	return 1;
1414 }
1415 
1416 int
1417 start_port(portid_t pid)
1418 {
1419 	int diag, need_check_link_status = -1;
1420 	portid_t pi;
1421 	queueid_t qi;
1422 	struct rte_port *port;
1423 	struct ether_addr mac_addr;
1424 	enum rte_eth_event_type event_type;
1425 
1426 	if (port_id_is_invalid(pid, ENABLED_WARN))
1427 		return 0;
1428 
1429 	if(dcb_config)
1430 		dcb_test = 1;
1431 	RTE_ETH_FOREACH_DEV(pi) {
1432 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1433 			continue;
1434 
1435 		need_check_link_status = 0;
1436 		port = &ports[pi];
1437 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1438 						 RTE_PORT_HANDLING) == 0) {
1439 			printf("Port %d is now not stopped\n", pi);
1440 			continue;
1441 		}
1442 
1443 		if (port->need_reconfig > 0) {
1444 			port->need_reconfig = 0;
1445 
1446 			if (flow_isolate_all) {
1447 				int ret = port_flow_isolate(pi, 1);
1448 				if (ret) {
1449 					printf("Failed to apply isolated"
1450 					       " mode on port %d\n", pi);
1451 					return -1;
1452 				}
1453 			}
1454 
1455 			printf("Configuring Port %d (socket %u)\n", pi,
1456 					port->socket_id);
1457 			/* configure port */
1458 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1459 						&(port->dev_conf));
1460 			if (diag != 0) {
1461 				if (rte_atomic16_cmpset(&(port->port_status),
1462 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1463 					printf("Port %d can not be set back "
1464 							"to stopped\n", pi);
1465 				printf("Fail to configure port %d\n", pi);
1466 				/* try to reconfigure port next time */
1467 				port->need_reconfig = 1;
1468 				return -1;
1469 			}
1470 		}
1471 		if (port->need_reconfig_queues > 0) {
1472 			port->need_reconfig_queues = 0;
1473 			port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
1474 			/* Apply Tx offloads configuration */
1475 			port->tx_conf.offloads = port->dev_conf.txmode.offloads;
1476 			/* setup tx queues */
1477 			for (qi = 0; qi < nb_txq; qi++) {
1478 				if ((numa_support) &&
1479 					(txring_numa[pi] != NUMA_NO_CONFIG))
1480 					diag = rte_eth_tx_queue_setup(pi, qi,
1481 						nb_txd,txring_numa[pi],
1482 						&(port->tx_conf));
1483 				else
1484 					diag = rte_eth_tx_queue_setup(pi, qi,
1485 						nb_txd,port->socket_id,
1486 						&(port->tx_conf));
1487 
1488 				if (diag == 0)
1489 					continue;
1490 
1491 				/* Fail to setup tx queue, return */
1492 				if (rte_atomic16_cmpset(&(port->port_status),
1493 							RTE_PORT_HANDLING,
1494 							RTE_PORT_STOPPED) == 0)
1495 					printf("Port %d can not be set back "
1496 							"to stopped\n", pi);
1497 				printf("Fail to configure port %d tx queues\n", pi);
1498 				/* try to reconfigure queues next time */
1499 				port->need_reconfig_queues = 1;
1500 				return -1;
1501 			}
1502 			/* Apply Rx offloads configuration */
1503 			port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1504 			/* setup rx queues */
1505 			for (qi = 0; qi < nb_rxq; qi++) {
1506 				if ((numa_support) &&
1507 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1508 					struct rte_mempool * mp =
1509 						mbuf_pool_find(rxring_numa[pi]);
1510 					if (mp == NULL) {
1511 						printf("Failed to setup RX queue:"
1512 							"No mempool allocation"
1513 							" on the socket %d\n",
1514 							rxring_numa[pi]);
1515 						return -1;
1516 					}
1517 
1518 					diag = rte_eth_rx_queue_setup(pi, qi,
1519 					     nb_rxd,rxring_numa[pi],
1520 					     &(port->rx_conf),mp);
1521 				} else {
1522 					struct rte_mempool *mp =
1523 						mbuf_pool_find(port->socket_id);
1524 					if (mp == NULL) {
1525 						printf("Failed to setup RX queue:"
1526 							"No mempool allocation"
1527 							" on the socket %d\n",
1528 							port->socket_id);
1529 						return -1;
1530 					}
1531 					diag = rte_eth_rx_queue_setup(pi, qi,
1532 					     nb_rxd,port->socket_id,
1533 					     &(port->rx_conf), mp);
1534 				}
1535 				if (diag == 0)
1536 					continue;
1537 
1538 				/* Fail to setup rx queue, return */
1539 				if (rte_atomic16_cmpset(&(port->port_status),
1540 							RTE_PORT_HANDLING,
1541 							RTE_PORT_STOPPED) == 0)
1542 					printf("Port %d can not be set back "
1543 							"to stopped\n", pi);
1544 				printf("Fail to configure port %d rx queues\n", pi);
1545 				/* try to reconfigure queues next time */
1546 				port->need_reconfig_queues = 1;
1547 				return -1;
1548 			}
1549 		}
1550 
1551 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1552 		     event_type < RTE_ETH_EVENT_MAX;
1553 		     event_type++) {
1554 			diag = rte_eth_dev_callback_register(pi,
1555 							event_type,
1556 							eth_event_callback,
1557 							NULL);
1558 			if (diag) {
1559 				printf("Failed to setup even callback for event %d\n",
1560 					event_type);
1561 				return -1;
1562 			}
1563 		}
1564 
1565 		/* start port */
1566 		if (rte_eth_dev_start(pi) < 0) {
1567 			printf("Fail to start port %d\n", pi);
1568 
1569 			/* Fail to setup rx queue, return */
1570 			if (rte_atomic16_cmpset(&(port->port_status),
1571 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1572 				printf("Port %d can not be set back to "
1573 							"stopped\n", pi);
1574 			continue;
1575 		}
1576 
1577 		if (rte_atomic16_cmpset(&(port->port_status),
1578 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1579 			printf("Port %d can not be set into started\n", pi);
1580 
1581 		rte_eth_macaddr_get(pi, &mac_addr);
1582 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1583 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1584 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1585 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1586 
1587 		/* at least one port started, need checking link status */
1588 		need_check_link_status = 1;
1589 	}
1590 
1591 	if (need_check_link_status == 1 && !no_link_check)
1592 		check_all_ports_link_status(RTE_PORT_ALL);
1593 	else if (need_check_link_status == 0)
1594 		printf("Please stop the ports first\n");
1595 
1596 	printf("Done\n");
1597 	return 0;
1598 }
1599 
1600 void
1601 stop_port(portid_t pid)
1602 {
1603 	portid_t pi;
1604 	struct rte_port *port;
1605 	int need_check_link_status = 0;
1606 
1607 	if (dcb_test) {
1608 		dcb_test = 0;
1609 		dcb_config = 0;
1610 	}
1611 
1612 	if (port_id_is_invalid(pid, ENABLED_WARN))
1613 		return;
1614 
1615 	printf("Stopping ports...\n");
1616 
1617 	RTE_ETH_FOREACH_DEV(pi) {
1618 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1619 			continue;
1620 
1621 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1622 			printf("Please remove port %d from forwarding configuration.\n", pi);
1623 			continue;
1624 		}
1625 
1626 		if (port_is_bonding_slave(pi)) {
1627 			printf("Please remove port %d from bonded device.\n", pi);
1628 			continue;
1629 		}
1630 
1631 		port = &ports[pi];
1632 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1633 						RTE_PORT_HANDLING) == 0)
1634 			continue;
1635 
1636 		rte_eth_dev_stop(pi);
1637 
1638 		if (rte_atomic16_cmpset(&(port->port_status),
1639 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1640 			printf("Port %d can not be set into stopped\n", pi);
1641 		need_check_link_status = 1;
1642 	}
1643 	if (need_check_link_status && !no_link_check)
1644 		check_all_ports_link_status(RTE_PORT_ALL);
1645 
1646 	printf("Done\n");
1647 }
1648 
1649 void
1650 close_port(portid_t pid)
1651 {
1652 	portid_t pi;
1653 	struct rte_port *port;
1654 
1655 	if (port_id_is_invalid(pid, ENABLED_WARN))
1656 		return;
1657 
1658 	printf("Closing ports...\n");
1659 
1660 	RTE_ETH_FOREACH_DEV(pi) {
1661 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1662 			continue;
1663 
1664 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1665 			printf("Please remove port %d from forwarding configuration.\n", pi);
1666 			continue;
1667 		}
1668 
1669 		if (port_is_bonding_slave(pi)) {
1670 			printf("Please remove port %d from bonded device.\n", pi);
1671 			continue;
1672 		}
1673 
1674 		port = &ports[pi];
1675 		if (rte_atomic16_cmpset(&(port->port_status),
1676 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1677 			printf("Port %d is already closed\n", pi);
1678 			continue;
1679 		}
1680 
1681 		if (rte_atomic16_cmpset(&(port->port_status),
1682 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1683 			printf("Port %d is now not stopped\n", pi);
1684 			continue;
1685 		}
1686 
1687 		if (port->flow_list)
1688 			port_flow_flush(pi);
1689 		rte_eth_dev_close(pi);
1690 
1691 		if (rte_atomic16_cmpset(&(port->port_status),
1692 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1693 			printf("Port %d cannot be set to closed\n", pi);
1694 	}
1695 
1696 	printf("Done\n");
1697 }
1698 
1699 void
1700 reset_port(portid_t pid)
1701 {
1702 	int diag;
1703 	portid_t pi;
1704 	struct rte_port *port;
1705 
1706 	if (port_id_is_invalid(pid, ENABLED_WARN))
1707 		return;
1708 
1709 	printf("Resetting ports...\n");
1710 
1711 	RTE_ETH_FOREACH_DEV(pi) {
1712 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1713 			continue;
1714 
1715 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1716 			printf("Please remove port %d from forwarding "
1717 			       "configuration.\n", pi);
1718 			continue;
1719 		}
1720 
1721 		if (port_is_bonding_slave(pi)) {
1722 			printf("Please remove port %d from bonded device.\n",
1723 			       pi);
1724 			continue;
1725 		}
1726 
1727 		diag = rte_eth_dev_reset(pi);
1728 		if (diag == 0) {
1729 			port = &ports[pi];
1730 			port->need_reconfig = 1;
1731 			port->need_reconfig_queues = 1;
1732 		} else {
1733 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1734 		}
1735 	}
1736 
1737 	printf("Done\n");
1738 }
1739 
1740 void
1741 attach_port(char *identifier)
1742 {
1743 	portid_t pi = 0;
1744 	unsigned int socket_id;
1745 
1746 	printf("Attaching a new port...\n");
1747 
1748 	if (identifier == NULL) {
1749 		printf("Invalid parameters are specified\n");
1750 		return;
1751 	}
1752 
1753 	if (rte_eth_dev_attach(identifier, &pi))
1754 		return;
1755 
1756 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1757 	/* if socket_id is invalid, set to 0 */
1758 	if (check_socket_id(socket_id) < 0)
1759 		socket_id = 0;
1760 	reconfig(pi, socket_id);
1761 	rte_eth_promiscuous_enable(pi);
1762 
1763 	nb_ports = rte_eth_dev_count();
1764 
1765 	ports[pi].port_status = RTE_PORT_STOPPED;
1766 
1767 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1768 	printf("Done\n");
1769 }
1770 
1771 void
1772 detach_port(portid_t port_id)
1773 {
1774 	char name[RTE_ETH_NAME_MAX_LEN];
1775 
1776 	printf("Detaching a port...\n");
1777 
1778 	if (!port_is_closed(port_id)) {
1779 		printf("Please close port first\n");
1780 		return;
1781 	}
1782 
1783 	if (ports[port_id].flow_list)
1784 		port_flow_flush(port_id);
1785 
1786 	if (rte_eth_dev_detach(port_id, name)) {
1787 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1788 		return;
1789 	}
1790 
1791 	nb_ports = rte_eth_dev_count();
1792 
1793 	printf("Port '%s' is detached. Now total ports is %d\n",
1794 			name, nb_ports);
1795 	printf("Done\n");
1796 	return;
1797 }
1798 
1799 void
1800 pmd_test_exit(void)
1801 {
1802 	portid_t pt_id;
1803 
1804 	if (test_done == 0)
1805 		stop_packet_forwarding();
1806 
1807 	if (ports != NULL) {
1808 		no_link_check = 1;
1809 		RTE_ETH_FOREACH_DEV(pt_id) {
1810 			printf("\nShutting down port %d...\n", pt_id);
1811 			fflush(stdout);
1812 			stop_port(pt_id);
1813 			close_port(pt_id);
1814 		}
1815 	}
1816 	printf("\nBye...\n");
1817 }
1818 
1819 typedef void (*cmd_func_t)(void);
1820 struct pmd_test_command {
1821 	const char *cmd_name;
1822 	cmd_func_t cmd_func;
1823 };
1824 
1825 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1826 
1827 /* Check the link status of all ports in up to 9s, and print them finally */
1828 static void
1829 check_all_ports_link_status(uint32_t port_mask)
1830 {
1831 #define CHECK_INTERVAL 100 /* 100ms */
1832 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1833 	portid_t portid;
1834 	uint8_t count, all_ports_up, print_flag = 0;
1835 	struct rte_eth_link link;
1836 
1837 	printf("Checking link statuses...\n");
1838 	fflush(stdout);
1839 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1840 		all_ports_up = 1;
1841 		RTE_ETH_FOREACH_DEV(portid) {
1842 			if ((port_mask & (1 << portid)) == 0)
1843 				continue;
1844 			memset(&link, 0, sizeof(link));
1845 			rte_eth_link_get_nowait(portid, &link);
1846 			/* print link status if flag set */
1847 			if (print_flag == 1) {
1848 				if (link.link_status)
1849 					printf(
1850 					"Port%d Link Up. speed %u Mbps- %s\n",
1851 					portid, link.link_speed,
1852 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1853 					("full-duplex") : ("half-duplex\n"));
1854 				else
1855 					printf("Port %d Link Down\n", portid);
1856 				continue;
1857 			}
1858 			/* clear all_ports_up flag if any link down */
1859 			if (link.link_status == ETH_LINK_DOWN) {
1860 				all_ports_up = 0;
1861 				break;
1862 			}
1863 		}
1864 		/* after finally printing all link status, get out */
1865 		if (print_flag == 1)
1866 			break;
1867 
1868 		if (all_ports_up == 0) {
1869 			fflush(stdout);
1870 			rte_delay_ms(CHECK_INTERVAL);
1871 		}
1872 
1873 		/* set the print_flag if all ports up or timeout */
1874 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1875 			print_flag = 1;
1876 		}
1877 
1878 		if (lsc_interrupt)
1879 			break;
1880 	}
1881 }
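
/*
 * Timing sketch for the loop above: it polls every CHECK_INTERVAL
 * (100 ms) for at most MAX_CHECK_TIME iterations, i.e. 90 * 100 ms = 9 s.
 * Once all polled ports report link up, or the timeout expires, one last
 * pass prints the per-port status. A typical caller (illustrative) covers
 * every port with an all-ones mask:
 *
 *	check_all_ports_link_status(RTE_PORT_ALL);
 */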
1882 
1883 static void
1884 rmv_event_callback(void *arg)
1885 {
1886 	struct rte_eth_dev *dev;
1887 	portid_t port_id = (intptr_t)arg;
1888 
1889 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1890 	dev = &rte_eth_devices[port_id];
1891 
1892 	stop_port(port_id);
1893 	close_port(port_id);
1894 	printf("removing device %s\n", dev->device->name);
1895 	if (rte_eal_dev_detach(dev->device))
1896 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1897 			dev->device->name);
1898 }
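
/*
 * Design note (assumption, inferred from the deferral below): the RMV
 * handler runs from an rte_eal_alarm_set() callback rather than straight
 * from the removal interrupt, so the device is not detached from inside
 * its own event callback while the ethdev layer may still be walking the
 * callback list.
 */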
1899 
1900 /* This function is used by the interrupt thread */
1901 static int
1902 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1903 		  void *ret_param)
1904 {
1905 	static const char * const event_desc[] = {
1906 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1907 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1908 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1909 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1910 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1911 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1912 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1913 		[RTE_ETH_EVENT_MAX] = NULL,
1914 	};
1915 
1916 	RTE_SET_USED(param);
1917 	RTE_SET_USED(ret_param);
1918 
1919 	if (type >= RTE_ETH_EVENT_MAX) {
1920 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
1921 			port_id, __func__, type);
1922 		fflush(stderr);
1923 	} else if (event_print_mask & (UINT32_C(1) << type)) {
1924 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
1925 			event_desc[type]);
1926 		fflush(stdout);
1927 	}
1928 
1929 	switch (type) {
1930 	case RTE_ETH_EVENT_INTR_RMV:
1931 		if (rte_eal_alarm_set(100000,
1932 				rmv_event_callback, (void *)(intptr_t)port_id))
1933 			fprintf(stderr, "Could not set up deferred device removal\n");
1934 		break;
1935 	default:
1936 		break;
1937 	}
1938 	return 0;
1939 }
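
/*
 * Registration sketch (assumption; the actual registration is done
 * elsewhere in testpmd): the callback is hooked up once per port for
 * every event type, along the lines of:
 *
 *	enum rte_eth_event_type ev;
 *
 *	for (ev = RTE_ETH_EVENT_UNKNOWN; ev < RTE_ETH_EVENT_MAX; ev++)
 *		rte_eth_dev_callback_register(pi, ev,
 *				eth_event_callback, NULL);
 *
 * event_print_mask then selects which event types get echoed to stdout.
 */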
1940 
1941 static int
1942 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1943 {
1944 	uint16_t i;
1945 	int diag;
1946 	uint8_t mapping_found = 0;
1947 
1948 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1949 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1950 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1951 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1952 					tx_queue_stats_mappings[i].queue_id,
1953 					tx_queue_stats_mappings[i].stats_counter_id);
1954 			if (diag != 0)
1955 				return diag;
1956 			mapping_found = 1;
1957 		}
1958 	}
1959 	if (mapping_found)
1960 		port->tx_queue_stats_mapping_enabled = 1;
1961 	return 0;
1962 }
1963 
1964 static int
1965 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1966 {
1967 	uint16_t i;
1968 	int diag;
1969 	uint8_t mapping_found = 0;
1970 
1971 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1972 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1973 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1974 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1975 					rx_queue_stats_mappings[i].queue_id,
1976 					rx_queue_stats_mappings[i].stats_counter_id);
1977 			if (diag != 0)
1978 				return diag;
1979 			mapping_found = 1;
1980 		}
1981 	}
1982 	if (mapping_found)
1983 		port->rx_queue_stats_mapping_enabled = 1;
1984 	return 0;
1985 }
1986 
1987 static void
1988 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
1989 {
1990 	int diag = 0;
1991 
1992 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1993 	if (diag != 0) {
1994 		if (diag == -ENOTSUP) {
1995 			port->tx_queue_stats_mapping_enabled = 0;
1996 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1997 		}
1998 		else
1999 			rte_exit(EXIT_FAILURE,
2000 					"set_tx_queue_stats_mapping_registers "
2001 					"failed for port id=%d diag=%d\n",
2002 					pi, diag);
2003 	}
2004 
2005 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2006 	if (diag != 0) {
2007 		if (diag == -ENOTSUP) {
2008 			port->rx_queue_stats_mapping_enabled = 0;
2009 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2010 		}
2011 		else
2012 			rte_exit(EXIT_FAILURE,
2013 					"set_rx_queue_stats_mapping_registers "
2014 					"failed for port id=%d diag=%d\n",
2015 					pi, diag);
2016 	}
2017 }
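
/*
 * The mapping tables consumed above come from the command line, e.g.
 * (illustrative values):
 *
 *	--tx-queue-stats-mapping=(0,0,0),(0,1,1)
 *
 * which asks the PMD to count TX queue 0 of port 0 in stats register 0
 * and TX queue 1 in register 1. PMDs without per-queue stats registers
 * return -ENOTSUP, which is tolerated; any other failure is fatal.
 */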
2018 
2019 static void
2020 rxtx_port_config(struct rte_port *port)
2021 {
2022 	port->rx_conf = port->dev_info.default_rxconf;
2023 	port->tx_conf = port->dev_info.default_txconf;
2024 
2025 	/* Check if any RX/TX parameters have been passed */
2026 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2027 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2028 
2029 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2030 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2031 
2032 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2033 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2034 
2035 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2036 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2037 
2038 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2039 		port->rx_conf.rx_drop_en = rx_drop_en;
2040 
2041 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2042 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2043 
2044 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2045 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2046 
2047 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2048 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2049 
2050 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2051 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2052 
2053 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2054 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2055 }
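
/*
 * The RTE_PMD_PARAM_UNSET checks keep the PMD defaults from dev_info
 * unless the user overrode them, e.g. (illustrative):
 *
 *	testpmd ... -- --rxfreet=32 --txfreet=32 --txrst=32
 *
 * would land in rx_free_thresh, tx_free_thresh and tx_rs_thresh here,
 * while the untouched prefetch/host/write-back thresholds keep the
 * driver's default_rxconf/default_txconf values.
 */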
2056 
2057 void
2058 init_port_config(void)
2059 {
2060 	portid_t pid;
2061 	struct rte_port *port;
2062 
2063 	RTE_ETH_FOREACH_DEV(pid) {
2064 		port = &ports[pid];
2065 		port->dev_conf.fdir_conf = fdir_conf;
2066 		port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2067 		if (nb_rxq > 1)
2068 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2069 		else
2070 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2073 
2074 		if (port->dcb_flag == 0) {
2075 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2076 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2077 			else
2078 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2079 		}
2080 
2081 		rxtx_port_config(port);
2082 
2083 		rte_eth_macaddr_get(pid, &port->eth_addr);
2084 
2085 		map_port_queue_stats_mapping_registers(pid, port);
2086 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2087 		rte_pmd_ixgbe_bypass_init(pid);
2088 #endif
2089 
2090 		if (lsc_interrupt &&
2091 		    (rte_eth_devices[pid].data->dev_flags &
2092 		     RTE_ETH_DEV_INTR_LSC))
2093 			port->dev_conf.intr_conf.lsc = 1;
2094 		if (rmv_interrupt &&
2095 		    (rte_eth_devices[pid].data->dev_flags &
2096 		     RTE_ETH_DEV_INTR_RMV))
2097 			port->dev_conf.intr_conf.rmv = 1;
2098 
2099 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2100 		/* Detect softnic port */
2101 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2102 			port->softnic_enable = 1;
2103 			memset(&port->softport, 0, sizeof(struct softnic_port));
2104 
2105 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2106 				port->softport.tm_flag = 1;
2107 		}
2108 #endif
2109 	}
2110 }
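
/*
 * Net effect for a plain (non-DCB) run: with more than one RX queue,
 * e.g. (illustrative) "testpmd ... -- --rxq=4 --txq=4", the port is
 * configured with mq_mode ETH_MQ_RX_RSS and the default RSS key; with a
 * single RX queue, multi-queue RX stays ETH_MQ_RX_NONE.
 */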
2111 
2112 void set_port_slave_flag(portid_t slave_pid)
2113 {
2114 	struct rte_port *port;
2115 
2116 	port = &ports[slave_pid];
2117 	port->slave_flag = 1;
2118 }
2119 
2120 void clear_port_slave_flag(portid_t slave_pid)
2121 {
2122 	struct rte_port *port;
2123 
2124 	port = &ports[slave_pid];
2125 	port->slave_flag = 0;
2126 }
2127 
2128 uint8_t port_is_bonding_slave(portid_t slave_pid)
2129 {
2130 	struct rte_port *port;
2131 
2132 	port = &ports[slave_pid];
2133 	return port->slave_flag;
2134 }
2135 
2136 const uint16_t vlan_tags[] = {
2137 		0,  1,  2,  3,  4,  5,  6,  7,
2138 		8,  9, 10, 11,  12, 13, 14, 15,
2139 		16, 17, 18, 19, 20, 21, 22, 23,
2140 		24, 25, 26, 27, 28, 29, 30, 31
2141 };
2142 
2143 static int
2144 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2145 		 enum dcb_mode_enable dcb_mode,
2146 		 enum rte_eth_nb_tcs num_tcs,
2147 		 uint8_t pfc_en)
2148 {
2149 	uint8_t i;
2150 
2151 	/*
2152 	 * Builds up the correct configuration for DCB+VT based on the vlan_tags array
2153 	 * given above, and the number of traffic classes available for use.
2154 	 */
2155 	if (dcb_mode == DCB_VT_ENABLED) {
2156 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2157 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2158 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2159 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2160 
2161 		/* VMDQ+DCB RX and TX configurations */
2162 		vmdq_rx_conf->enable_default_pool = 0;
2163 		vmdq_rx_conf->default_pool = 0;
2164 		vmdq_rx_conf->nb_queue_pools =
2165 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2166 		vmdq_tx_conf->nb_queue_pools =
2167 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2168 
2169 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2170 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2171 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2172 			vmdq_rx_conf->pool_map[i].pools =
2173 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2174 		}
2175 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2176 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2177 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2178 		}
2179 
2180 		/* set DCB mode of RX and TX of multiple queues */
2181 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2182 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2183 	} else {
2184 		struct rte_eth_dcb_rx_conf *rx_conf =
2185 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2186 		struct rte_eth_dcb_tx_conf *tx_conf =
2187 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2188 
2189 		rx_conf->nb_tcs = num_tcs;
2190 		tx_conf->nb_tcs = num_tcs;
2191 
2192 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2193 			rx_conf->dcb_tc[i] = i % num_tcs;
2194 			tx_conf->dcb_tc[i] = i % num_tcs;
2195 		}
2196 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2197 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2198 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2199 	}
2200 
2201 	if (pfc_en)
2202 		eth_conf->dcb_capability_en =
2203 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2204 	else
2205 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2206 
2207 	return 0;
2208 }
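
/*
 * Worked example of the dcb_tc[] mapping above: with num_tcs == ETH_4_TCS
 * the eight user priorities are spread round-robin over the traffic
 * classes, so priorities 0..7 map to TCs 0,1,2,3,0,1,2,3. In VT mode the
 * 32 vlan_tags[] entries are spread over the VMDq pools the same way,
 * pool_map[i].pools = 1 << (i % nb_queue_pools).
 */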
2209 
2210 int
2211 init_port_dcb_config(portid_t pid,
2212 		     enum dcb_mode_enable dcb_mode,
2213 		     enum rte_eth_nb_tcs num_tcs,
2214 		     uint8_t pfc_en)
2215 {
2216 	struct rte_eth_conf port_conf;
2217 	struct rte_port *rte_port;
2218 	int retval;
2219 	uint16_t i;
2220 
2221 	rte_port = &ports[pid];
2222 
2223 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2224 	/* Enter DCB configuration status */
2225 	dcb_config = 1;
2226 
2227 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2228 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2229 	if (retval < 0)
2230 		return retval;
2231 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2232 
2233 	/**
2234 	 * Write the configuration into the device.
2235 	 * Set the numbers of RX & TX queues to 0, so
2236 	 * the RX & TX queues will not be set up.
2237 	 */
2238 	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
	if (retval < 0)
		return retval;
2239 
2240 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2241 
2242 	/* If dev_info.vmdq_pool_base is greater than 0,
2243 	 * the queue ids of the VMDq pools start after the PF queues.
2244 	 */
2245 	if (dcb_mode == DCB_VT_ENABLED &&
2246 	    rte_port->dev_info.vmdq_pool_base > 0) {
2247 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2248 			" for port %d.", pid);
2249 		return -1;
2250 	}
2251 
2252 	/* Assume the ports in testpmd have the same DCB capability
2253 	 * and the same number of rxq and txq in DCB mode.
2254 	 */
2255 	if (dcb_mode == DCB_VT_ENABLED) {
2256 		if (rte_port->dev_info.max_vfs > 0) {
2257 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2258 			nb_txq = rte_port->dev_info.nb_tx_queues;
2259 		} else {
2260 			nb_rxq = rte_port->dev_info.max_rx_queues;
2261 			nb_txq = rte_port->dev_info.max_tx_queues;
2262 		}
2263 	} else {
2264 		/* If VT is disabled, use all PF queues */
2265 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2266 			nb_rxq = rte_port->dev_info.max_rx_queues;
2267 			nb_txq = rte_port->dev_info.max_tx_queues;
2268 		} else {
2269 			nb_rxq = (queueid_t)num_tcs;
2270 			nb_txq = (queueid_t)num_tcs;
2272 		}
2273 	}
2274 	rx_free_thresh = 64;
2275 
2276 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2277 
2278 	rxtx_port_config(rte_port);
2279 	/* VLAN filter */
2280 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2281 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2282 		rx_vft_set(pid, vlan_tags[i], 1);
2283 
2284 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2285 	map_port_queue_stats_mapping_registers(pid, rte_port);
2286 
2287 	rte_port->dcb_flag = 1;
2288 
2289 	return 0;
2290 }
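
/*
 * Illustrative trigger for this path: the interactive command
 *
 *	port config <port_id> dcb vt (on|off) <num_tcs> pfc (on|off)
 *
 * issued while the port is stopped. The port then has to be restarted
 * for the new queue counts (nb_rxq/nb_txq are overwritten above) to take
 * effect.
 */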
2291 
2292 static void
2293 init_port(void)
2294 {
2295 	/* Configuration of Ethernet ports. */
2296 	ports = rte_zmalloc("testpmd: ports",
2297 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2298 			    RTE_CACHE_LINE_SIZE);
2299 	if (ports == NULL) {
2300 		rte_exit(EXIT_FAILURE,
2301 				"rte_zmalloc(%d struct rte_port) failed\n",
2302 				RTE_MAX_ETHPORTS);
2303 	}
2304 }
2305 
2306 static void
2307 force_quit(void)
2308 {
2309 	pmd_test_exit();
2310 	prompt_exit();
2311 }
2312 
2313 static void
2314 print_stats(void)
2315 {
2316 	portid_t i;
2317 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2318 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2319 
2320 	/* Clear screen and move to top left */
2321 	printf("%s%s", clr, top_left);
2322 
2323 	printf("\nPort statistics ====================================");
2324 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2325 		nic_stats_display(fwd_ports_ids[i]);
2326 }
2327 
2328 static void
2329 signal_handler(int signum)
2330 {
2331 	if (signum == SIGINT || signum == SIGTERM) {
2332 		printf("\nSignal %d received, preparing to exit...\n",
2333 				signum);
2334 #ifdef RTE_LIBRTE_PDUMP
2335 		/* uninitialize packet capture framework */
2336 		rte_pdump_uninit();
2337 #endif
2338 #ifdef RTE_LIBRTE_LATENCY_STATS
2339 		rte_latencystats_uninit();
2340 #endif
2341 		force_quit();
2342 		/* Set flag to indicate forced termination. */
2343 		f_quit = 1;
2344 		/* exit with the expected status */
2345 		signal(signum, SIG_DFL);
2346 		kill(getpid(), signum);
2347 	}
2348 }
2349 
2350 int
2351 main(int argc, char** argv)
2352 {
2353 	int  diag;
2354 	portid_t port_id;
2355 
2356 	signal(SIGINT, signal_handler);
2357 	signal(SIGTERM, signal_handler);
2358 
2359 	diag = rte_eal_init(argc, argv);
2360 	if (diag < 0)
2361 		rte_panic("Cannot init EAL\n");
2362 
2363 	testpmd_logtype = rte_log_register("testpmd");
2364 	if (testpmd_logtype < 0)
2365 		rte_panic("Cannot register log type");
2366 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2367 
2368 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2369 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2370 			strerror(errno));
2371 	}
2372 
2373 #ifdef RTE_LIBRTE_PDUMP
2374 	/* initialize packet capture framework */
2375 	rte_pdump_init(NULL);
2376 #endif
2377 
2378 	nb_ports = (portid_t) rte_eth_dev_count();
2379 	if (nb_ports == 0)
2380 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2381 
2382 	/* allocate port structures, and init them */
2383 	init_port();
2384 
2385 	set_def_fwd_config();
2386 	if (nb_lcores == 0)
2387 		rte_panic("Empty set of forwarding logical cores - check the "
2388 			  "core mask supplied in the command parameters\n");
2389 
2390 	/* Bitrate/latency stats disabled by default */
2391 #ifdef RTE_LIBRTE_BITRATE
2392 	bitrate_enabled = 0;
2393 #endif
2394 #ifdef RTE_LIBRTE_LATENCY_STATS
2395 	latencystats_enabled = 0;
2396 #endif
2397 
2398 	argc -= diag;
2399 	argv += diag;
2400 	if (argc > 1)
2401 		launch_args_parse(argc, argv);
2402 
2403 	if (tx_first && interactive)
2404 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2405 				"interactive mode.\n");
2406 
2407 	if (tx_first && lsc_interrupt) {
2408 		printf("Warning: lsc_interrupt needs to be off when "
2409 				" using tx_first. Disabling.\n");
2410 		lsc_interrupt = 0;
2411 	}
2412 
2413 	if (!nb_rxq && !nb_txq)
2414 		printf("Warning: Either rx or tx queues should be non-zero\n");
2415 
2416 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2417 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2418 		       "but nb_txq=%d will prevent to fully test it.\n",
2419 		       nb_rxq, nb_txq);
2420 
2421 	init_config();
2422 	if (start_port(RTE_PORT_ALL) != 0)
2423 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2424 
2425 	/* set all ports to promiscuous mode by default */
2426 	RTE_ETH_FOREACH_DEV(port_id)
2427 		rte_eth_promiscuous_enable(port_id);
2428 
2429 	/* Init metrics library */
2430 	rte_metrics_init(rte_socket_id());
2431 
2432 #ifdef RTE_LIBRTE_LATENCY_STATS
2433 	if (latencystats_enabled != 0) {
2434 		int ret = rte_latencystats_init(1, NULL);
2435 		if (ret)
2436 			printf("Warning: latencystats init()"
2437 				" returned error %d\n",	ret);
2438 		printf("Latencystats running on lcore %d\n",
2439 			latencystats_lcore_id);
2440 	}
2441 #endif
2442 
2443 	/* Setup bitrate stats */
2444 #ifdef RTE_LIBRTE_BITRATE
2445 	if (bitrate_enabled != 0) {
2446 		bitrate_data = rte_stats_bitrate_create();
2447 		if (bitrate_data == NULL)
2448 			rte_exit(EXIT_FAILURE,
2449 				"Could not allocate bitrate data.\n");
2450 		rte_stats_bitrate_reg(bitrate_data);
2451 	}
2452 #endif
2453 
2454 #ifdef RTE_LIBRTE_CMDLINE
2455 	if (strlen(cmdline_filename) != 0)
2456 		cmdline_read_from_file(cmdline_filename);
2457 
2458 	if (interactive == 1) {
2459 		if (auto_start) {
2460 			printf("Start automatic packet forwarding\n");
2461 			start_packet_forwarding(0);
2462 		}
2463 		prompt();
2464 		pmd_test_exit();
2465 	} else
2466 #endif
2467 	{
2468 		char c;
2469 		int rc;
2470 
2471 		f_quit = 0;
2472 
2473 		printf("No commandline core given, start packet forwarding\n");
2474 		start_packet_forwarding(tx_first);
2475 		if (stats_period != 0) {
2476 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2477 			uint64_t timer_period;
2478 
2479 			/* Convert to number of cycles */
2480 			timer_period = stats_period * rte_get_timer_hz();
2481 
2482 			while (f_quit == 0) {
2483 				cur_time = rte_get_timer_cycles();
2484 				diff_time += cur_time - prev_time;
2485 
2486 				if (diff_time >= timer_period) {
2487 					print_stats();
2488 					/* Reset the timer */
2489 					diff_time = 0;
2490 				}
2491 				/* Sleep to avoid unnecessary checks */
2492 				prev_time = cur_time;
2493 				sleep(1);
2494 			}
2495 		}
2496 
2497 		printf("Press enter to exit\n");
2498 		rc = read(0, &c, 1);
2499 		pmd_test_exit();
2500 		if (rc < 0)
2501 			return 1;
2502 	}
2503 
2504 	return 0;
2505 }
2506
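
/*
 * Illustrative invocation, assuming a typical EAL setup (four lcores,
 * interactive mode, two queue pairs per port):
 *
 *	./testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
 *
 * Everything before "--" is consumed by rte_eal_init(); launch_args_parse()
 * only sees the application arguments, which is why argc/argv are advanced
 * by diag in main() above.
 */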