xref: /dpdk/app/test-pmd/testpmd.c (revision 6018eb8cc8b072a99fa9a2bdff9466947b7367d8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <time.h>
11 #include <fcntl.h>
12 #include <sys/mman.h>
13 #include <sys/types.h>
14 #include <errno.h>
15 
16 #include <sys/queue.h>
17 #include <sys/stat.h>
18 
19 #include <stdint.h>
20 #include <unistd.h>
21 #include <inttypes.h>
22 
23 #include <rte_common.h>
24 #include <rte_errno.h>
25 #include <rte_byteorder.h>
26 #include <rte_log.h>
27 #include <rte_debug.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_memcpy.h>
31 #include <rte_launch.h>
32 #include <rte_eal.h>
33 #include <rte_alarm.h>
34 #include <rte_per_lcore.h>
35 #include <rte_lcore.h>
36 #include <rte_atomic.h>
37 #include <rte_branch_prediction.h>
38 #include <rte_mempool.h>
39 #include <rte_malloc.h>
40 #include <rte_mbuf.h>
41 #include <rte_interrupts.h>
42 #include <rte_pci.h>
43 #include <rte_ether.h>
44 #include <rte_ethdev.h>
45 #include <rte_dev.h>
46 #include <rte_string_fns.h>
47 #ifdef RTE_LIBRTE_IXGBE_PMD
48 #include <rte_pmd_ixgbe.h>
49 #endif
50 #ifdef RTE_LIBRTE_PDUMP
51 #include <rte_pdump.h>
52 #endif
53 #include <rte_flow.h>
54 #include <rte_metrics.h>
55 #ifdef RTE_LIBRTE_BITRATE
56 #include <rte_bitrate.h>
57 #endif
58 #ifdef RTE_LIBRTE_LATENCY_STATS
59 #include <rte_latencystats.h>
60 #endif
61 
62 #include "testpmd.h"
63 
64 uint16_t verbose_level = 0; /**< Silent by default. */
65 int testpmd_logtype; /**< Log type for testpmd logs */
66 
67 /* Use the master core for the command line? */
68 uint8_t interactive = 0;
69 uint8_t auto_start = 0;
70 uint8_t tx_first;
71 char cmdline_filename[PATH_MAX] = {0};
72 
73 /*
74  * NUMA support configuration.
75  * When set, NUMA support dispatches the allocation of the RX and TX
76  * memory rings, and of the DMA memory buffers (mbufs) of the probed
77  * ports, among the CPU sockets 0 and 1.
78  * Otherwise, all memory is allocated from CPU socket 0.
79  */
80 uint8_t numa_support = 1; /**< numa enabled by default */
81 
82 /*
83  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
84  * not configured.
85  */
86 uint8_t socket_num = UMA_NO_CONFIG;
87 
88 /*
89  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
90  */
91 uint8_t mp_anon = 0;
92 
93 /*
94  * Record the Ethernet address of peer target ports to which packets are
95  * forwarded.
96  * Must be instantiated with the Ethernet addresses of peer traffic generator
97  * ports.
98  */
99 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
100 portid_t nb_peer_eth_addrs = 0;
101 
102 /*
103  * Probed Target Environment.
104  */
105 struct rte_port *ports;	       /**< For all probed ethernet ports. */
106 portid_t nb_ports;             /**< Number of probed ethernet ports. */
107 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
108 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
109 
110 /*
111  * Test Forwarding Configuration.
112  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
113  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
114  */
115 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
116 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
117 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
118 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
119 
120 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
121 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
122 
123 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
124 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
125 
126 /*
127  * Forwarding engines.
128  */
129 struct fwd_engine * fwd_engines[] = {
130 	&io_fwd_engine,
131 	&mac_fwd_engine,
132 	&mac_swap_engine,
133 	&flow_gen_engine,
134 	&rx_only_engine,
135 	&tx_only_engine,
136 	&csum_fwd_engine,
137 	&icmp_echo_engine,
138 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
139 	&softnic_tm_engine,
140 	&softnic_tm_bypass_engine,
141 #endif
142 #ifdef RTE_LIBRTE_IEEE1588
143 	&ieee1588_fwd_engine,
144 #endif
145 	NULL,
146 };
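
/*
 * Illustrative sketch, not part of the original file: how an engine can be
 * looked up in the NULL-terminated fwd_engines[] table by its
 * fwd_mode_name. Marked __rte_unused because it is an example only.
 */
static __rte_unused struct fwd_engine *
find_fwd_engine_by_name(const char *name)
{
	unsigned int i;

	for (i = 0; fwd_engines[i] != NULL; i++) {
		if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
			return fwd_engines[i];
	}
	return NULL; /* unknown forwarding mode */
}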
147 
148 struct fwd_config cur_fwd_config;
149 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
150 uint32_t retry_enabled;
151 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
152 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
153 
154 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
155 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
156                                       * specified on command-line. */
157 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
158 
159 /*
160  * In a container, the process running with the 'stats-period' option cannot
161  * be terminated. Set this flag to exit the stats-period loop on SIGINT/SIGTERM.
162  */
163 uint8_t f_quit;
164 
165 /*
166  * Configuration of packet segments used by the "txonly" processing engine.
167  */
168 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
169 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
170 	TXONLY_DEF_PACKET_LEN,
171 };
172 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
173 
174 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
175 /**< Split policy for packets to TX. */
176 
177 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
178 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
179 
180 /* Whether the current configuration is in DCB mode; 0 means it is not. */
181 uint8_t dcb_config = 0;
182 
183 /* Whether DCB is in testing status */
184 uint8_t dcb_test = 0;
185 
186 /*
187  * Configurable number of RX/TX queues.
188  */
189 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
190 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
191 
192 /*
193  * Configurable number of RX/TX ring descriptors.
194  */
195 #define RTE_TEST_RX_DESC_DEFAULT 128
196 #define RTE_TEST_TX_DESC_DEFAULT 512
197 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
198 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
199 
200 #define RTE_PMD_PARAM_UNSET -1
201 /*
202  * Configurable values of RX and TX ring threshold registers.
203  */
204 
205 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
206 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
208 
209 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
210 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
212 
213 /*
214  * Configurable value of RX free threshold.
215  */
216 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
217 
218 /*
219  * Configurable value of RX drop enable.
220  */
221 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
222 
223 /*
224  * Configurable value of TX free threshold.
225  */
226 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
227 
228 /*
229  * Configurable value of TX RS bit threshold.
230  */
231 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
232 
233 /*
234  * Configurable value of TX queue flags.
235  */
236 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
237 
238 /*
239  * Receive Side Scaling (RSS) configuration.
240  */
241 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
242 
243 /*
244  * Port topology configuration
245  */
246 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
247 
248 /*
249  * Avoid flushing all the RX streams before forwarding starts.
250  */
251 uint8_t no_flush_rx = 0; /* flush by default */
252 
253 /*
254  * Flow API isolated mode.
255  */
256 uint8_t flow_isolate_all;
257 
258 /*
259  * Avoid checking the link status when starting/stopping a port.
260  */
261 uint8_t no_link_check = 0; /* check by default */
262 
263 /*
264  * Enable link status change notification
265  */
266 uint8_t lsc_interrupt = 1; /* enabled by default */
267 
268 /*
269  * Enable device removal notification.
270  */
271 uint8_t rmv_interrupt = 1; /* enabled by default */
272 
273 /*
274  * Display or mask Ethernet events.
275  * Default to all events except VF_MBOX
276  */
277 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
278 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
279 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
280 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
281 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
282 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
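
/*
 * Illustrative sketch, not part of the original file: each bit of
 * event_print_mask selects one rte_eth_event_type, which is how
 * eth_event_callback() further below decides whether to print an event.
 */
static __rte_unused int
event_is_displayed(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}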
283 
284 /*
285  * NIC bypass mode configuration options.
286  */
287 
288 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
289 /* The NIC bypass watchdog timeout. */
290 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
291 #endif
292 
293 
294 #ifdef RTE_LIBRTE_LATENCY_STATS
295 
296 /*
297  * Set when latency stats is enabled in the commandline
298  */
299 uint8_t latencystats_enabled;
300 
301 /*
302  * Lcore ID to serve latency statistics.
303  */
304 lcoreid_t latencystats_lcore_id = -1;
305 
306 #endif
307 
308 /*
309  * Ethernet device configuration.
310  */
311 struct rte_eth_rxmode rx_mode = {
312 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
313 	.offloads = (DEV_RX_OFFLOAD_VLAN_FILTER |
314 		     DEV_RX_OFFLOAD_VLAN_STRIP |
315 		     DEV_RX_OFFLOAD_CRC_STRIP),
316 	.ignore_offload_bitfield = 1,
317 };
318 
319 struct rte_fdir_conf fdir_conf = {
320 	.mode = RTE_FDIR_MODE_NONE,
321 	.pballoc = RTE_FDIR_PBALLOC_64K,
322 	.status = RTE_FDIR_REPORT_STATUS,
323 	.mask = {
324 		.vlan_tci_mask = 0x0,
325 		.ipv4_mask     = {
326 			.src_ip = 0xFFFFFFFF,
327 			.dst_ip = 0xFFFFFFFF,
328 		},
329 		.ipv6_mask     = {
330 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
331 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
332 		},
333 		.src_port_mask = 0xFFFF,
334 		.dst_port_mask = 0xFFFF,
335 		.mac_addr_byte_mask = 0xFF,
336 		.tunnel_type_mask = 1,
337 		.tunnel_id_mask = 0xFFFFFFFF,
338 	},
339 	.drop_queue = 127,
340 };
341 
342 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
343 
344 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
345 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
346 
347 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
348 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
349 
350 uint16_t nb_tx_queue_stats_mappings = 0;
351 uint16_t nb_rx_queue_stats_mappings = 0;
352 
353 /*
354  * Display zero values by default for xstats
355  */
356 uint8_t xstats_hide_zero;
357 
358 unsigned int num_sockets = 0;
359 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
360 
361 #ifdef RTE_LIBRTE_BITRATE
362 /* Bitrate statistics */
363 struct rte_stats_bitrates *bitrate_data;
364 lcoreid_t bitrate_lcore_id;
365 uint8_t bitrate_enabled;
366 #endif
367 
368 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
369 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
370 
371 /* Forward function declarations */
372 static void map_port_queue_stats_mapping_registers(portid_t pi,
373 						   struct rte_port *port);
374 static void check_all_ports_link_status(uint32_t port_mask);
375 static int eth_event_callback(portid_t port_id,
376 			      enum rte_eth_event_type type,
377 			      void *param, void *ret_param);
378 
379 /*
380  * Check if all the ports are started.
381  * If yes, return positive value. If not, return zero.
382  */
383 static int all_ports_started(void);
384 
385 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
386 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
387 
388 /*
389  * Helper function to check whether a socket id is new (not discovered yet).
390  * If it is new, return a positive value. Otherwise, return zero.
391  */
392 int
393 new_socket_id(unsigned int socket_id)
394 {
395 	unsigned int i;
396 
397 	for (i = 0; i < num_sockets; i++) {
398 		if (socket_ids[i] == socket_id)
399 			return 0;
400 	}
401 	return 1;
402 }
403 
404 /*
405  * Setup default configuration.
406  */
407 static void
408 set_default_fwd_lcores_config(void)
409 {
410 	unsigned int i;
411 	unsigned int nb_lc;
412 	unsigned int sock_num;
413 
414 	nb_lc = 0;
415 	for (i = 0; i < RTE_MAX_LCORE; i++) {
416 		sock_num = rte_lcore_to_socket_id(i);
417 		if (new_socket_id(sock_num)) {
418 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
419 				rte_exit(EXIT_FAILURE,
420 					 "Total sockets greater than %u\n",
421 					 RTE_MAX_NUMA_NODES);
422 			}
423 			socket_ids[num_sockets++] = sock_num;
424 		}
425 		if (!rte_lcore_is_enabled(i))
426 			continue;
427 		if (i == rte_get_master_lcore())
428 			continue;
429 		fwd_lcores_cpuids[nb_lc++] = i;
430 	}
431 	nb_lcores = (lcoreid_t) nb_lc;
432 	nb_cfg_lcores = nb_lcores;
433 	nb_fwd_lcores = 1;
434 }
435 
436 static void
437 set_def_peer_eth_addrs(void)
438 {
439 	portid_t i;
440 
441 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
442 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
443 		peer_eth_addrs[i].addr_bytes[5] = i;
444 	}
445 }
446 
447 static void
448 set_default_fwd_ports_config(void)
449 {
450 	portid_t pt_id;
451 	int i = 0;
452 
453 	RTE_ETH_FOREACH_DEV(pt_id)
454 		fwd_ports_ids[i++] = pt_id;
455 
456 	nb_cfg_ports = nb_ports;
457 	nb_fwd_ports = nb_ports;
458 }
459 
460 void
461 set_def_fwd_config(void)
462 {
463 	set_default_fwd_lcores_config();
464 	set_def_peer_eth_addrs();
465 	set_default_fwd_ports_config();
466 }
467 
468 /*
469  * Mbuf pool creation, done once per socket at initialisation time.
470  */
471 static void
472 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
473 		 unsigned int socket_id)
474 {
475 	char pool_name[RTE_MEMPOOL_NAMESIZE];
476 	struct rte_mempool *rte_mp = NULL;
477 	uint32_t mb_size;
478 
479 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
480 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
481 
482 	TESTPMD_LOG(INFO,
483 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
484 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
485 
486 	if (mp_anon != 0) {
487 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
488 			mb_size, (unsigned) mb_mempool_cache,
489 			sizeof(struct rte_pktmbuf_pool_private),
490 			socket_id, 0);
491 		if (rte_mp == NULL)
492 			goto err;
493 
494 		if (rte_mempool_populate_anon(rte_mp) == 0) {
495 			rte_mempool_free(rte_mp);
496 			rte_mp = NULL;
497 			goto err;
498 		}
499 		rte_pktmbuf_pool_init(rte_mp, NULL);
500 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
501 	} else {
502 		/* wrapper to rte_mempool_create() */
503 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
504 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
505 	}
506 
507 err:
508 	if (rte_mp == NULL) {
509 		rte_exit(EXIT_FAILURE,
510 			"Creation of mbuf pool for socket %u failed: %s\n",
511 			socket_id, rte_strerror(rte_errno));
512 	} else if (verbose_level > 0) {
513 		rte_mempool_dump(stdout, rte_mp);
514 	}
515 }
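
/*
 * Illustrative usage sketch, not part of the original file: create a pool
 * for socket 0 and fetch it back via the testpmd naming convention
 * (mbuf_poolname_build()/mbuf_pool_find() from testpmd.h). The pool size
 * of 16384 mbufs is an arbitrary example value.
 */
static __rte_unused void
mbuf_pool_create_example(void)
{
	struct rte_mempool *mp;

	mbuf_pool_create(DEFAULT_MBUF_DATA_SIZE, 16384, 0);
	mp = mbuf_pool_find(0);
	if (mp != NULL && verbose_level > 0)
		rte_mempool_dump(stdout, mp);
}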
516 
517 /*
518  * Check whether the given socket ID is valid in NUMA mode;
519  * if valid, return 0, else return -1.
520  */
521 static int
522 check_socket_id(const unsigned int socket_id)
523 {
524 	static int warning_once = 0;
525 
526 	if (new_socket_id(socket_id)) {
527 		if (!warning_once && numa_support)
528 			printf("Warning: NUMA should be configured manually by"
529 			       " using --port-numa-config and"
530 			       " --ring-numa-config parameters along with"
531 			       " --numa.\n");
532 		warning_once = 1;
533 		return -1;
534 	}
535 	return 0;
536 }
537 
538 static void
539 init_config(void)
540 {
541 	portid_t pid;
542 	struct rte_port *port;
543 	struct rte_mempool *mbp;
544 	unsigned int nb_mbuf_per_pool;
545 	lcoreid_t  lc_id;
546 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
547 	struct rte_gro_param gro_param;
548 	uint32_t gso_types;
549 
550 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
551 
552 	if (numa_support) {
553 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
554 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
555 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
556 	}
557 
558 	/* Configuration of logical cores. */
559 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
560 				sizeof(struct fwd_lcore *) * nb_lcores,
561 				RTE_CACHE_LINE_SIZE);
562 	if (fwd_lcores == NULL) {
563 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
564 							"failed\n", nb_lcores);
565 	}
566 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
567 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
568 					       sizeof(struct fwd_lcore),
569 					       RTE_CACHE_LINE_SIZE);
570 		if (fwd_lcores[lc_id] == NULL) {
571 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
572 								"failed\n");
573 		}
574 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
575 	}
576 
577 	RTE_ETH_FOREACH_DEV(pid) {
578 		port = &ports[pid];
579 		rte_eth_dev_info_get(pid, &port->dev_info);
580 
581 		if (numa_support) {
582 			if (port_numa[pid] != NUMA_NO_CONFIG)
583 				port_per_socket[port_numa[pid]]++;
584 			else {
585 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
586 
587 				/* if socket_id is invalid, set to 0 */
588 				if (check_socket_id(socket_id) < 0)
589 					socket_id = 0;
590 				port_per_socket[socket_id]++;
591 			}
592 		}
593 
594 		/* set flag to initialize port/queue */
595 		port->need_reconfig = 1;
596 		port->need_reconfig_queues = 1;
597 	}
598 
599 	/*
600 	 * Create mbuf pools.
601 	 * If NUMA support is disabled, create a single mbuf pool in
602 	 * socket 0 memory by default.
603 	 * Otherwise, create an mbuf pool on each discovered socket.
604 	 *
605 	 * Size the pools with the maximum values of nb_rxd and nb_txd, so that
606 	 * nb_rxd and nb_txd can be reconfigured at run time.
607 	 */
608 	if (param_total_num_mbufs)
609 		nb_mbuf_per_pool = param_total_num_mbufs;
610 	else {
611 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
612 			(nb_lcores * mb_mempool_cache) +
613 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
614 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
615 	}
616 
617 	if (numa_support) {
618 		uint8_t i;
619 
620 		for (i = 0; i < num_sockets; i++)
621 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
622 					 socket_ids[i]);
623 	} else {
624 		if (socket_num == UMA_NO_CONFIG)
625 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
626 		else
627 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
628 						 socket_num);
629 	}
630 
631 	init_port_config();
632 
633 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
634 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
635 	/*
636 	 * Records which Mbuf pool to use by each logical core, if needed.
637 	 */
638 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
639 		mbp = mbuf_pool_find(
640 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
641 
642 		if (mbp == NULL)
643 			mbp = mbuf_pool_find(0);
644 		fwd_lcores[lc_id]->mbp = mbp;
645 		/* initialize GSO context */
646 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
647 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
648 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
649 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
650 			ETHER_CRC_LEN;
651 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
652 	}
653 
654 	/* Configuration of packet forwarding streams. */
655 	if (init_fwd_streams() < 0)
656 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
657 
658 	fwd_config_setup();
659 
660 	/* create a gro context for each lcore */
661 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
662 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
663 	gro_param.max_item_per_flow = MAX_PKT_BURST;
664 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
665 		gro_param.socket_id = rte_lcore_to_socket_id(
666 				fwd_lcores_cpuids[lc_id]);
667 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
668 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
669 			rte_exit(EXIT_FAILURE,
670 					"rte_gro_ctx_create() failed\n");
671 		}
672 	}
673 }
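
/*
 * Illustrative arithmetic, not part of the original file, assuming the
 * usual testpmd values RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
 * MAX_PKT_BURST = 512 and mb_mempool_cache = 250: with 4 forwarding
 * lcores, the default pool size computed in init_config() is
 * (2048 + 4 * 250 + 2048 + 512) * RTE_MAX_ETHPORTS = 5608 * RTE_MAX_ETHPORTS
 * mbufs.
 */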
674 
675 
676 void
677 reconfig(portid_t new_port_id, unsigned socket_id)
678 {
679 	struct rte_port *port;
680 
681 	/* Reconfiguration of Ethernet ports. */
682 	port = &ports[new_port_id];
683 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
684 
685 	/* set flag to initialize port/queue */
686 	port->need_reconfig = 1;
687 	port->need_reconfig_queues = 1;
688 	port->socket_id = socket_id;
689 
690 	init_port_config();
691 }
692 
693 
694 int
695 init_fwd_streams(void)
696 {
697 	portid_t pid;
698 	struct rte_port *port;
699 	streamid_t sm_id, nb_fwd_streams_new;
700 	queueid_t q;
701 
702 	/* set the socket id according to NUMA usage */
703 	RTE_ETH_FOREACH_DEV(pid) {
704 		port = &ports[pid];
705 		if (nb_rxq > port->dev_info.max_rx_queues) {
706 			printf("Fail: nb_rxq(%d) is greater than "
707 				"max_rx_queues(%d)\n", nb_rxq,
708 				port->dev_info.max_rx_queues);
709 			return -1;
710 		}
711 		if (nb_txq > port->dev_info.max_tx_queues) {
712 			printf("Fail: nb_txq(%d) is greater than "
713 				"max_tx_queues(%d)\n", nb_txq,
714 				port->dev_info.max_tx_queues);
715 			return -1;
716 		}
717 		if (numa_support) {
718 			if (port_numa[pid] != NUMA_NO_CONFIG)
719 				port->socket_id = port_numa[pid];
720 			else {
721 				port->socket_id = rte_eth_dev_socket_id(pid);
722 
723 				/* if socket_id is invalid, set to 0 */
724 				if (check_socket_id(port->socket_id) < 0)
725 					port->socket_id = 0;
726 			}
727 		}
728 		else {
729 			if (socket_num == UMA_NO_CONFIG)
730 				port->socket_id = 0;
731 			else
732 				port->socket_id = socket_num;
733 		}
734 	}
735 
736 	q = RTE_MAX(nb_rxq, nb_txq);
737 	if (q == 0) {
738 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
739 		return -1;
740 	}
741 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
742 	if (nb_fwd_streams_new == nb_fwd_streams)
743 		return 0;
744 	/* clear the old */
745 	if (fwd_streams != NULL) {
746 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
747 			if (fwd_streams[sm_id] == NULL)
748 				continue;
749 			rte_free(fwd_streams[sm_id]);
750 			fwd_streams[sm_id] = NULL;
751 		}
752 		rte_free(fwd_streams);
753 		fwd_streams = NULL;
754 	}
755 
756 	/* init new */
757 	nb_fwd_streams = nb_fwd_streams_new;
758 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
759 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
760 	if (fwd_streams == NULL)
761 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
762 						"failed\n", nb_fwd_streams);
763 
764 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
765 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
766 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
767 		if (fwd_streams[sm_id] == NULL)
768 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
769 								" failed\n");
770 	}
771 
772 	return 0;
773 }
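
/*
 * Illustrative example, not part of the original file: with 2 ports,
 * nb_rxq = 4 and nb_txq = 2, q = RTE_MAX(4, 2) = 4 above, so
 * init_fwd_streams() allocates 2 * 4 = 8 fwd_stream slots.
 */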
774 
775 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
776 static void
777 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
778 {
779 	unsigned int total_burst;
780 	unsigned int nb_burst;
781 	unsigned int burst_stats[3];
782 	uint16_t pktnb_stats[3];
783 	uint16_t nb_pkt;
784 	int burst_percent[3];
785 
786 	/*
787 	 * First compute the total number of packet bursts and the
788 	 * two highest numbers of bursts of the same number of packets.
789 	 */
790 	total_burst = 0;
791 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
792 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
793 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
794 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
795 		if (nb_burst == 0)
796 			continue;
797 		total_burst += nb_burst;
798 		if (nb_burst > burst_stats[0]) {
799 			burst_stats[1] = burst_stats[0];
800 			pktnb_stats[1] = pktnb_stats[0];
801 			burst_stats[0] = nb_burst;
802 			pktnb_stats[0] = nb_pkt;
803 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
804 	}
805 	if (total_burst == 0)
806 		return;
807 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
808 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
809 	       burst_percent[0], (int) pktnb_stats[0]);
810 	if (burst_stats[0] == total_burst) {
811 		printf("]\n");
812 		return;
813 	}
814 	if (burst_stats[0] + burst_stats[1] == total_burst) {
815 		printf(" + %d%% of %d pkts]\n",
816 		       100 - burst_percent[0], pktnb_stats[1]);
817 		return;
818 	}
819 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
820 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
821 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
822 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
823 		return;
824 	}
825 	printf(" + %d%% of %d pkts + %d%% of others]\n",
826 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
827 }
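
/*
 * Illustrative sketch, not part of the original file: how a forwarding
 * engine feeds the spread reported by pkt_burst_stats_display(); after
 * each rx/tx burst call, one counter per observed burst size is bumped
 * (index 0 counts empty polls).
 */
static __rte_unused void
record_burst_size(struct pkt_burst_stats *pbs, uint16_t nb_pkts)
{
	pbs->pkt_burst_spread[nb_pkts]++;
}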
828 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
829 
830 static void
831 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
832 {
833 	struct rte_port *port;
834 	uint8_t i;
835 
836 	static const char *fwd_stats_border = "----------------------";
837 
838 	port = &ports[port_id];
839 	printf("\n  %s Forward statistics for port %-2d %s\n",
840 	       fwd_stats_border, port_id, fwd_stats_border);
841 
842 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
843 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
844 		       "%-"PRIu64"\n",
845 		       stats->ipackets, stats->imissed,
846 		       (uint64_t) (stats->ipackets + stats->imissed));
847 
848 		if (cur_fwd_eng == &csum_fwd_engine)
849 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
850 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
851 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
852 			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
853 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
854 		}
855 
856 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
857 		       "%-"PRIu64"\n",
858 		       stats->opackets, port->tx_dropped,
859 		       (uint64_t) (stats->opackets + port->tx_dropped));
860 	}
861 	else {
862 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
863 		       "%14"PRIu64"\n",
864 		       stats->ipackets, stats->imissed,
865 		       (uint64_t) (stats->ipackets + stats->imissed));
866 
867 		if (cur_fwd_eng == &csum_fwd_engine)
868 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
869 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
870 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
871 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
872 			printf("  RX-nombufs:             %14"PRIu64"\n",
873 			       stats->rx_nombuf);
874 		}
875 
876 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
877 		       "%14"PRIu64"\n",
878 		       stats->opackets, port->tx_dropped,
879 		       (uint64_t) (stats->opackets + port->tx_dropped));
880 	}
881 
882 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
883 	if (port->rx_stream)
884 		pkt_burst_stats_display("RX",
885 			&port->rx_stream->rx_burst_stats);
886 	if (port->tx_stream)
887 		pkt_burst_stats_display("TX",
888 			&port->tx_stream->tx_burst_stats);
889 #endif
890 
891 	if (port->rx_queue_stats_mapping_enabled) {
892 		printf("\n");
893 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
894 			printf("  Stats reg %2d RX-packets:%14"PRIu64
895 			       "     RX-errors:%14"PRIu64
896 			       "    RX-bytes:%14"PRIu64"\n",
897 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
898 		}
899 		printf("\n");
900 	}
901 	if (port->tx_queue_stats_mapping_enabled) {
902 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
903 			printf("  Stats reg %2d TX-packets:%14"PRIu64
904 			       "                                 TX-bytes:%14"PRIu64"\n",
905 			       i, stats->q_opackets[i], stats->q_obytes[i]);
906 		}
907 	}
908 
909 	printf("  %s--------------------------------%s\n",
910 	       fwd_stats_border, fwd_stats_border);
911 }
912 
913 static void
914 fwd_stream_stats_display(streamid_t stream_id)
915 {
916 	struct fwd_stream *fs;
917 	static const char *fwd_top_stats_border = "-------";
918 
919 	fs = fwd_streams[stream_id];
920 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
921 	    (fs->fwd_dropped == 0))
922 		return;
923 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
924 	       "TX Port=%2d/Queue=%2d %s\n",
925 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
926 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
927 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
928 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
929 
930 	/* if checksum mode */
931 	if (cur_fwd_eng == &csum_fwd_engine) {
932 	       printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
933 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
934 	}
935 
936 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
937 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
938 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
939 #endif
940 }
941 
942 static void
943 flush_fwd_rx_queues(void)
944 {
945 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
946 	portid_t  rxp;
947 	portid_t port_id;
948 	queueid_t rxq;
949 	uint16_t  nb_rx;
950 	uint16_t  i;
951 	uint8_t   j;
952 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
953 	uint64_t timer_period;
954 
955 	/* convert to number of cycles */
956 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
957 
958 	for (j = 0; j < 2; j++) {
959 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
960 			for (rxq = 0; rxq < nb_rxq; rxq++) {
961 				port_id = fwd_ports_ids[rxp];
962 				/*
963 				 * testpmd can get stuck in the do-while loop
964 				 * below if rte_eth_rx_burst() always returns
965 				 * nonzero packets, so a timer is added to exit
966 				 * the loop after a 1-second expiry.
967 				 */
968 				prev_tsc = rte_rdtsc();
969 				do {
970 					nb_rx = rte_eth_rx_burst(port_id, rxq,
971 						pkts_burst, MAX_PKT_BURST);
972 					for (i = 0; i < nb_rx; i++)
973 						rte_pktmbuf_free(pkts_burst[i]);
974 
975 					cur_tsc = rte_rdtsc();
976 					diff_tsc = cur_tsc - prev_tsc;
977 					timer_tsc += diff_tsc;
978 				} while ((nb_rx > 0) &&
979 					(timer_tsc < timer_period));
980 				timer_tsc = 0;
981 			}
982 		}
983 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
984 	}
985 }
986 
987 static void
988 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
989 {
990 	struct fwd_stream **fsm;
991 	streamid_t nb_fs;
992 	streamid_t sm_id;
993 #ifdef RTE_LIBRTE_BITRATE
994 	uint64_t tics_per_1sec;
995 	uint64_t tics_datum;
996 	uint64_t tics_current;
997 	uint8_t idx_port, cnt_ports;
998 
999 	cnt_ports = rte_eth_dev_count();
1000 	tics_datum = rte_rdtsc();
1001 	tics_per_1sec = rte_get_timer_hz();
1002 #endif
1003 	fsm = &fwd_streams[fc->stream_idx];
1004 	nb_fs = fc->stream_nb;
1005 	do {
1006 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1007 			(*pkt_fwd)(fsm[sm_id]);
1008 #ifdef RTE_LIBRTE_BITRATE
1009 		if (bitrate_enabled != 0 &&
1010 				bitrate_lcore_id == rte_lcore_id()) {
1011 			tics_current = rte_rdtsc();
1012 			if (tics_current - tics_datum >= tics_per_1sec) {
1013 				/* Periodic bitrate calculation */
1014 				for (idx_port = 0;
1015 						idx_port < cnt_ports;
1016 						idx_port++)
1017 					rte_stats_bitrate_calc(bitrate_data,
1018 						idx_port);
1019 				tics_datum = tics_current;
1020 			}
1021 		}
1022 #endif
1023 #ifdef RTE_LIBRTE_LATENCY_STATS
1024 		if (latencystats_enabled != 0 &&
1025 				latencystats_lcore_id == rte_lcore_id())
1026 			rte_latencystats_update();
1027 #endif
1028 
1029 	} while (! fc->stopped);
1030 }
1031 
1032 static int
1033 start_pkt_forward_on_core(void *fwd_arg)
1034 {
1035 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1036 			     cur_fwd_config.fwd_eng->packet_fwd);
1037 	return 0;
1038 }
1039 
1040 /*
1041  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1042  * Used to start communication flows in network loopback test configurations.
1043  */
1044 static int
1045 run_one_txonly_burst_on_core(void *fwd_arg)
1046 {
1047 	struct fwd_lcore *fwd_lc;
1048 	struct fwd_lcore tmp_lcore;
1049 
1050 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1051 	tmp_lcore = *fwd_lc;
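	/*
	 * Setting "stopped" in this local copy makes run_pkt_fwd_on_lcore()
	 * leave its do-while loop after a single pass over the streams,
	 * i.e. after exactly one burst per stream.
	 */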
1052 	tmp_lcore.stopped = 1;
1053 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1054 	return 0;
1055 }
1056 
1057 /*
1058  * Launch packet forwarding:
1059  *     - Setup per-port forwarding context.
1060  *     - launch logical cores with their forwarding configuration.
1061  */
1062 static void
1063 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1064 {
1065 	port_fwd_begin_t port_fwd_begin;
1066 	unsigned int i;
1067 	unsigned int lc_id;
1068 	int diag;
1069 
1070 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1071 	if (port_fwd_begin != NULL) {
1072 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1073 			(*port_fwd_begin)(fwd_ports_ids[i]);
1074 	}
1075 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1076 		lc_id = fwd_lcores_cpuids[i];
1077 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1078 			fwd_lcores[i]->stopped = 0;
1079 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1080 						     fwd_lcores[i], lc_id);
1081 			if (diag != 0)
1082 				printf("launch lcore %u failed - diag=%d\n",
1083 				       lc_id, diag);
1084 		}
1085 	}
1086 }
1087 
1088 /*
1089  * Launch packet forwarding configuration.
1090  */
1091 void
1092 start_packet_forwarding(int with_tx_first)
1093 {
1094 	port_fwd_begin_t port_fwd_begin;
1095 	port_fwd_end_t  port_fwd_end;
1096 	struct rte_port *port;
1097 	unsigned int i;
1098 	portid_t   pt_id;
1099 	streamid_t sm_id;
1100 
1101 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1102 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1103 
1104 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1105 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1106 
1107 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1108 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1109 		(!nb_rxq || !nb_txq))
1110 		rte_exit(EXIT_FAILURE,
1111 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1112 			cur_fwd_eng->fwd_mode_name);
1113 
1114 	if (all_ports_started() == 0) {
1115 		printf("Not all ports were started\n");
1116 		return;
1117 	}
1118 	if (test_done == 0) {
1119 		printf("Packet forwarding already started\n");
1120 		return;
1121 	}
1122 
1123 	if (init_fwd_streams() < 0) {
1124 		printf("Fail from init_fwd_streams()\n");
1125 		return;
1126 	}
1127 
1128 	if(dcb_test) {
1129 		for (i = 0; i < nb_fwd_ports; i++) {
1130 			pt_id = fwd_ports_ids[i];
1131 			port = &ports[pt_id];
1132 			if (!port->dcb_flag) {
1133 				printf("In DCB mode, all forwarding ports must "
1134                                        "be configured in this mode.\n");
1135 				return;
1136 			}
1137 		}
1138 		if (nb_fwd_lcores == 1) {
1139 			printf("In DCB mode, the number of forwarding cores "
1140                                "should be larger than 1.\n");
1141 			return;
1142 		}
1143 	}
1144 	test_done = 0;
1145 
1146 	if(!no_flush_rx)
1147 		flush_fwd_rx_queues();
1148 
1149 	fwd_config_setup();
1150 	pkt_fwd_config_display(&cur_fwd_config);
1151 	rxtx_config_display();
1152 
1153 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1154 		pt_id = fwd_ports_ids[i];
1155 		port = &ports[pt_id];
1156 		rte_eth_stats_get(pt_id, &port->stats);
1157 		port->tx_dropped = 0;
1158 
1159 		map_port_queue_stats_mapping_registers(pt_id, port);
1160 	}
1161 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1162 		fwd_streams[sm_id]->rx_packets = 0;
1163 		fwd_streams[sm_id]->tx_packets = 0;
1164 		fwd_streams[sm_id]->fwd_dropped = 0;
1165 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1166 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1167 
1168 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1169 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1170 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1171 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1172 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1173 #endif
1174 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1175 		fwd_streams[sm_id]->core_cycles = 0;
1176 #endif
1177 	}
1178 	if (with_tx_first) {
1179 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1180 		if (port_fwd_begin != NULL) {
1181 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1182 				(*port_fwd_begin)(fwd_ports_ids[i]);
1183 		}
1184 		while (with_tx_first--) {
1185 			launch_packet_forwarding(
1186 					run_one_txonly_burst_on_core);
1187 			rte_eal_mp_wait_lcore();
1188 		}
1189 		port_fwd_end = tx_only_engine.port_fwd_end;
1190 		if (port_fwd_end != NULL) {
1191 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1192 				(*port_fwd_end)(fwd_ports_ids[i]);
1193 		}
1194 	}
1195 	launch_packet_forwarding(start_pkt_forward_on_core);
1196 }
1197 
1198 void
1199 stop_packet_forwarding(void)
1200 {
1201 	struct rte_eth_stats stats;
1202 	struct rte_port *port;
1203 	port_fwd_end_t  port_fwd_end;
1204 	int i;
1205 	portid_t   pt_id;
1206 	streamid_t sm_id;
1207 	lcoreid_t  lc_id;
1208 	uint64_t total_recv;
1209 	uint64_t total_xmit;
1210 	uint64_t total_rx_dropped;
1211 	uint64_t total_tx_dropped;
1212 	uint64_t total_rx_nombuf;
1213 	uint64_t tx_dropped;
1214 	uint64_t rx_bad_ip_csum;
1215 	uint64_t rx_bad_l4_csum;
1216 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1217 	uint64_t fwd_cycles;
1218 #endif
1219 
1220 	static const char *acc_stats_border = "+++++++++++++++";
1221 
1222 	if (test_done) {
1223 		printf("Packet forwarding not started\n");
1224 		return;
1225 	}
1226 	printf("Telling cores to stop...");
1227 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1228 		fwd_lcores[lc_id]->stopped = 1;
1229 	printf("\nWaiting for lcores to finish...\n");
1230 	rte_eal_mp_wait_lcore();
1231 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1232 	if (port_fwd_end != NULL) {
1233 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1234 			pt_id = fwd_ports_ids[i];
1235 			(*port_fwd_end)(pt_id);
1236 		}
1237 	}
1238 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1239 	fwd_cycles = 0;
1240 #endif
1241 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1242 		if (cur_fwd_config.nb_fwd_streams >
1243 		    cur_fwd_config.nb_fwd_ports) {
1244 			fwd_stream_stats_display(sm_id);
1245 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1246 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1247 		} else {
1248 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1249 				fwd_streams[sm_id];
1250 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1251 				fwd_streams[sm_id];
1252 		}
1253 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1254 		tx_dropped = (uint64_t) (tx_dropped +
1255 					 fwd_streams[sm_id]->fwd_dropped);
1256 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1257 
1258 		rx_bad_ip_csum =
1259 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1260 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1261 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1262 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1263 							rx_bad_ip_csum;
1264 
1265 		rx_bad_l4_csum =
1266 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1267 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1268 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1269 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1270 							rx_bad_l4_csum;
1271 
1272 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1273 		fwd_cycles = (uint64_t) (fwd_cycles +
1274 					 fwd_streams[sm_id]->core_cycles);
1275 #endif
1276 	}
1277 	total_recv = 0;
1278 	total_xmit = 0;
1279 	total_rx_dropped = 0;
1280 	total_tx_dropped = 0;
1281 	total_rx_nombuf  = 0;
1282 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1283 		pt_id = fwd_ports_ids[i];
1284 
1285 		port = &ports[pt_id];
1286 		rte_eth_stats_get(pt_id, &stats);
1287 		stats.ipackets -= port->stats.ipackets;
1288 		port->stats.ipackets = 0;
1289 		stats.opackets -= port->stats.opackets;
1290 		port->stats.opackets = 0;
1291 		stats.ibytes   -= port->stats.ibytes;
1292 		port->stats.ibytes = 0;
1293 		stats.obytes   -= port->stats.obytes;
1294 		port->stats.obytes = 0;
1295 		stats.imissed  -= port->stats.imissed;
1296 		port->stats.imissed = 0;
1297 		stats.oerrors  -= port->stats.oerrors;
1298 		port->stats.oerrors = 0;
1299 		stats.rx_nombuf -= port->stats.rx_nombuf;
1300 		port->stats.rx_nombuf = 0;
1301 
1302 		total_recv += stats.ipackets;
1303 		total_xmit += stats.opackets;
1304 		total_rx_dropped += stats.imissed;
1305 		total_tx_dropped += port->tx_dropped;
1306 		total_rx_nombuf  += stats.rx_nombuf;
1307 
1308 		fwd_port_stats_display(pt_id, &stats);
1309 	}
1310 
1311 	printf("\n  %s Accumulated forward statistics for all ports"
1312 	       "%s\n",
1313 	       acc_stats_border, acc_stats_border);
1314 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1315 	       "%-"PRIu64"\n"
1316 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1317 	       "%-"PRIu64"\n",
1318 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1319 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1320 	if (total_rx_nombuf > 0)
1321 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1322 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1323 	       "%s\n",
1324 	       acc_stats_border, acc_stats_border);
1325 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1326 	if (total_recv > 0)
1327 		printf("\n  CPU cycles/packet=%u (total cycles="
1328 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1329 		       (unsigned int)(fwd_cycles / total_recv),
1330 		       fwd_cycles, total_recv);
1331 #endif
1332 	printf("\nDone.\n");
1333 	test_done = 1;
1334 }
1335 
1336 void
1337 dev_set_link_up(portid_t pid)
1338 {
1339 	if (rte_eth_dev_set_link_up(pid) < 0)
1340 		printf("\nSet link up fail.\n");
1341 }
1342 
1343 void
1344 dev_set_link_down(portid_t pid)
1345 {
1346 	if (rte_eth_dev_set_link_down(pid) < 0)
1347 		printf("\nSet link down fail.\n");
1348 }
1349 
1350 static int
1351 all_ports_started(void)
1352 {
1353 	portid_t pi;
1354 	struct rte_port *port;
1355 
1356 	RTE_ETH_FOREACH_DEV(pi) {
1357 		port = &ports[pi];
1358 		/* Check if there is a port which is not started */
1359 		if ((port->port_status != RTE_PORT_STARTED) &&
1360 			(port->slave_flag == 0))
1361 			return 0;
1362 	}
1363 
1364 	/* All ports are started */
1365 	return 1;
1366 }
1367 
1368 int
1369 port_is_stopped(portid_t port_id)
1370 {
1371 	struct rte_port *port = &ports[port_id];
1372 
1373 	if ((port->port_status != RTE_PORT_STOPPED) &&
1374 	    (port->slave_flag == 0))
1375 		return 0;
1376 	return 1;
1377 }
1378 
1379 int
1380 all_ports_stopped(void)
1381 {
1382 	portid_t pi;
1383 
1384 	RTE_ETH_FOREACH_DEV(pi) {
1385 		if (!port_is_stopped(pi))
1386 			return 0;
1387 	}
1388 
1389 	return 1;
1390 }
1391 
1392 int
1393 port_is_started(portid_t port_id)
1394 {
1395 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1396 		return 0;
1397 
1398 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1399 		return 0;
1400 
1401 	return 1;
1402 }
1403 
1404 static int
1405 port_is_closed(portid_t port_id)
1406 {
1407 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1408 		return 0;
1409 
1410 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1411 		return 0;
1412 
1413 	return 1;
1414 }
1415 
1416 int
1417 start_port(portid_t pid)
1418 {
1419 	int diag, need_check_link_status = -1;
1420 	portid_t pi;
1421 	queueid_t qi;
1422 	struct rte_port *port;
1423 	struct ether_addr mac_addr;
1424 	enum rte_eth_event_type event_type;
1425 
1426 	if (port_id_is_invalid(pid, ENABLED_WARN))
1427 		return 0;
1428 
1429 	if(dcb_config)
1430 		dcb_test = 1;
1431 	RTE_ETH_FOREACH_DEV(pi) {
1432 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1433 			continue;
1434 
1435 		need_check_link_status = 0;
1436 		port = &ports[pi];
1437 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1438 						 RTE_PORT_HANDLING) == 0) {
1439 			printf("Port %d is not stopped\n", pi);
1440 			continue;
1441 		}
1442 
1443 		if (port->need_reconfig > 0) {
1444 			port->need_reconfig = 0;
1445 
1446 			if (flow_isolate_all) {
1447 				int ret = port_flow_isolate(pi, 1);
1448 				if (ret) {
1449 					printf("Failed to apply isolated"
1450 					       " mode on port %d\n", pi);
1451 					return -1;
1452 				}
1453 			}
1454 
1455 			printf("Configuring Port %d (socket %u)\n", pi,
1456 					port->socket_id);
1457 			/* configure port */
1458 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1459 						&(port->dev_conf));
1460 			if (diag != 0) {
1461 				if (rte_atomic16_cmpset(&(port->port_status),
1462 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1463 					printf("Port %d cannot be set back "
1464 							"to stopped\n", pi);
1465 				printf("Failed to configure port %d\n", pi);
1466 				/* try to reconfigure port next time */
1467 				port->need_reconfig = 1;
1468 				return -1;
1469 			}
1470 		}
1471 		if (port->need_reconfig_queues > 0) {
1472 			port->need_reconfig_queues = 0;
1473 			/* setup tx queues */
1474 			for (qi = 0; qi < nb_txq; qi++) {
1475 				if ((numa_support) &&
1476 					(txring_numa[pi] != NUMA_NO_CONFIG))
1477 					diag = rte_eth_tx_queue_setup(pi, qi,
1478 						nb_txd,txring_numa[pi],
1479 						&(port->tx_conf));
1480 				else
1481 					diag = rte_eth_tx_queue_setup(pi, qi,
1482 						nb_txd,port->socket_id,
1483 						&(port->tx_conf));
1484 
1485 				if (diag == 0)
1486 					continue;
1487 
1488 				/* Failed to setup tx queue, return */
1489 				if (rte_atomic16_cmpset(&(port->port_status),
1490 							RTE_PORT_HANDLING,
1491 							RTE_PORT_STOPPED) == 0)
1492 					printf("Port %d cannot be set back "
1493 							"to stopped\n", pi);
1494 				printf("Failed to configure port %d tx queues\n", pi);
1495 				/* try to reconfigure queues next time */
1496 				port->need_reconfig_queues = 1;
1497 				return -1;
1498 			}
1499 			/* Apply Rx offloads configuration */
1500 			port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
1501 			/* setup rx queues */
1502 			for (qi = 0; qi < nb_rxq; qi++) {
1503 				if ((numa_support) &&
1504 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1505 					struct rte_mempool * mp =
1506 						mbuf_pool_find(rxring_numa[pi]);
1507 					if (mp == NULL) {
1508 						printf("Failed to setup RX queue: "
1509 							"no mempool allocation "
1510 							"on socket %d\n",
1511 							rxring_numa[pi]);
1512 						return -1;
1513 					}
1514 
1515 					diag = rte_eth_rx_queue_setup(pi, qi,
1516 					     nb_rxd,rxring_numa[pi],
1517 					     &(port->rx_conf),mp);
1518 				} else {
1519 					struct rte_mempool *mp =
1520 						mbuf_pool_find(port->socket_id);
1521 					if (mp == NULL) {
1522 						printf("Failed to setup RX queue: "
1523 							"no mempool allocation "
1524 							"on socket %d\n",
1525 							port->socket_id);
1526 						return -1;
1527 					}
1528 					diag = rte_eth_rx_queue_setup(pi, qi,
1529 					     nb_rxd,port->socket_id,
1530 					     &(port->rx_conf), mp);
1531 				}
1532 				if (diag == 0)
1533 					continue;
1534 
1535 				/* Failed to setup rx queue, return */
1536 				if (rte_atomic16_cmpset(&(port->port_status),
1537 							RTE_PORT_HANDLING,
1538 							RTE_PORT_STOPPED) == 0)
1539 					printf("Port %d cannot be set back "
1540 							"to stopped\n", pi);
1541 				printf("Failed to configure port %d rx queues\n", pi);
1542 				/* try to reconfigure queues next time */
1543 				port->need_reconfig_queues = 1;
1544 				return -1;
1545 			}
1546 		}
1547 
1548 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1549 		     event_type < RTE_ETH_EVENT_MAX;
1550 		     event_type++) {
1551 			diag = rte_eth_dev_callback_register(pi,
1552 							event_type,
1553 							eth_event_callback,
1554 							NULL);
1555 			if (diag) {
1556 				printf("Failed to setup event callback for event %d\n",
1557 					event_type);
1558 				return -1;
1559 			}
1560 		}
1561 
1562 		/* start port */
1563 		if (rte_eth_dev_start(pi) < 0) {
1564 			printf("Failed to start port %d\n", pi);
1565 
1566 			/* Failed to start the port, set it back to stopped */
1567 			if (rte_atomic16_cmpset(&(port->port_status),
1568 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1569 				printf("Port %d cannot be set back to "
1570 							"stopped\n", pi);
1571 			continue;
1572 		}
1573 
1574 		if (rte_atomic16_cmpset(&(port->port_status),
1575 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1576 			printf("Port %d cannot be set into started\n", pi);
1577 
1578 		rte_eth_macaddr_get(pi, &mac_addr);
1579 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1580 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1581 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1582 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1583 
1584 		/* at least one port started, need to check link status */
1585 		need_check_link_status = 1;
1586 	}
1587 
1588 	if (need_check_link_status == 1 && !no_link_check)
1589 		check_all_ports_link_status(RTE_PORT_ALL);
1590 	else if (need_check_link_status == 0)
1591 		printf("Please stop the ports first\n");
1592 
1593 	printf("Done\n");
1594 	return 0;
1595 }
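
/*
 * Illustrative sketch, not part of the original file: the minimal ethdev
 * bring-up sequence that start_port() wraps with testpmd state tracking
 * and error rollback. The single queue pair and the 128/512 descriptor
 * counts are example values.
 */
static __rte_unused int
simple_port_start(portid_t pi, struct rte_mempool *mp)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	if (rte_eth_dev_configure(pi, 1, 1, &conf) < 0)
		return -1;
	/* a NULL conf asks the driver for its default queue settings */
	if (rte_eth_tx_queue_setup(pi, 0, 512, SOCKET_ID_ANY, NULL) < 0)
		return -1;
	if (rte_eth_rx_queue_setup(pi, 0, 128, SOCKET_ID_ANY, NULL, mp) < 0)
		return -1;
	return rte_eth_dev_start(pi);
}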
1596 
1597 void
1598 stop_port(portid_t pid)
1599 {
1600 	portid_t pi;
1601 	struct rte_port *port;
1602 	int need_check_link_status = 0;
1603 
1604 	if (dcb_test) {
1605 		dcb_test = 0;
1606 		dcb_config = 0;
1607 	}
1608 
1609 	if (port_id_is_invalid(pid, ENABLED_WARN))
1610 		return;
1611 
1612 	printf("Stopping ports...\n");
1613 
1614 	RTE_ETH_FOREACH_DEV(pi) {
1615 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1616 			continue;
1617 
1618 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1619 			printf("Please remove port %d from forwarding configuration.\n", pi);
1620 			continue;
1621 		}
1622 
1623 		if (port_is_bonding_slave(pi)) {
1624 			printf("Please remove port %d from bonded device.\n", pi);
1625 			continue;
1626 		}
1627 
1628 		port = &ports[pi];
1629 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1630 						RTE_PORT_HANDLING) == 0)
1631 			continue;
1632 
1633 		rte_eth_dev_stop(pi);
1634 
1635 		if (rte_atomic16_cmpset(&(port->port_status),
1636 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1637 			printf("Port %d cannot be set into stopped\n", pi);
1638 		need_check_link_status = 1;
1639 	}
1640 	if (need_check_link_status && !no_link_check)
1641 		check_all_ports_link_status(RTE_PORT_ALL);
1642 
1643 	printf("Done\n");
1644 }
1645 
1646 void
1647 close_port(portid_t pid)
1648 {
1649 	portid_t pi;
1650 	struct rte_port *port;
1651 
1652 	if (port_id_is_invalid(pid, ENABLED_WARN))
1653 		return;
1654 
1655 	printf("Closing ports...\n");
1656 
1657 	RTE_ETH_FOREACH_DEV(pi) {
1658 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1659 			continue;
1660 
1661 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1662 			printf("Please remove port %d from forwarding configuration.\n", pi);
1663 			continue;
1664 		}
1665 
1666 		if (port_is_bonding_slave(pi)) {
1667 			printf("Please remove port %d from bonded device.\n", pi);
1668 			continue;
1669 		}
1670 
1671 		port = &ports[pi];
1672 		if (rte_atomic16_cmpset(&(port->port_status),
1673 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1674 			printf("Port %d is already closed\n", pi);
1675 			continue;
1676 		}
1677 
1678 		if (rte_atomic16_cmpset(&(port->port_status),
1679 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1680 			printf("Port %d is not stopped\n", pi);
1681 			continue;
1682 		}
1683 
1684 		if (port->flow_list)
1685 			port_flow_flush(pi);
1686 		rte_eth_dev_close(pi);
1687 
1688 		if (rte_atomic16_cmpset(&(port->port_status),
1689 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1690 			printf("Port %d cannot be set to closed\n", pi);
1691 	}
1692 
1693 	printf("Done\n");
1694 }
1695 
1696 void
1697 reset_port(portid_t pid)
1698 {
1699 	int diag;
1700 	portid_t pi;
1701 	struct rte_port *port;
1702 
1703 	if (port_id_is_invalid(pid, ENABLED_WARN))
1704 		return;
1705 
1706 	printf("Resetting ports...\n");
1707 
1708 	RTE_ETH_FOREACH_DEV(pi) {
1709 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1710 			continue;
1711 
1712 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1713 			printf("Please remove port %d from forwarding "
1714 			       "configuration.\n", pi);
1715 			continue;
1716 		}
1717 
1718 		if (port_is_bonding_slave(pi)) {
1719 			printf("Please remove port %d from bonded device.\n",
1720 			       pi);
1721 			continue;
1722 		}
1723 
1724 		diag = rte_eth_dev_reset(pi);
1725 		if (diag == 0) {
1726 			port = &ports[pi];
1727 			port->need_reconfig = 1;
1728 			port->need_reconfig_queues = 1;
1729 		} else {
1730 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1731 		}
1732 	}
1733 
1734 	printf("Done\n");
1735 }
1736 
1737 void
1738 attach_port(char *identifier)
1739 {
1740 	portid_t pi = 0;
1741 	unsigned int socket_id;
1742 
1743 	printf("Attaching a new port...\n");
1744 
1745 	if (identifier == NULL) {
1746 		printf("Invalid parameters are specified\n");
1747 		return;
1748 	}
1749 
1750 	if (rte_eth_dev_attach(identifier, &pi))
1751 		return;
1752 
1753 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1754 	/* if socket_id is invalid, set to 0 */
1755 	if (check_socket_id(socket_id) < 0)
1756 		socket_id = 0;
1757 	reconfig(pi, socket_id);
1758 	rte_eth_promiscuous_enable(pi);
1759 
1760 	nb_ports = rte_eth_dev_count();
1761 
1762 	ports[pi].port_status = RTE_PORT_STOPPED;
1763 
1764 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1765 	printf("Done\n");
1766 }
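
/*
 * Illustrative usage, not part of the original file: attach_port() takes
 * an EAL device string; the pcap vdev below is a hypothetical example.
 */
static __rte_unused void
attach_port_example(void)
{
	char devargs[] = "net_pcap0,iface=eth0"; /* hypothetical device */

	attach_port(devargs);
}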
1767 
1768 void
1769 detach_port(portid_t port_id)
1770 {
1771 	char name[RTE_ETH_NAME_MAX_LEN];
1772 
1773 	printf("Detaching a port...\n");
1774 
1775 	if (!port_is_closed(port_id)) {
1776 		printf("Please close port first\n");
1777 		return;
1778 	}
1779 
1780 	if (ports[port_id].flow_list)
1781 		port_flow_flush(port_id);
1782 
1783 	if (rte_eth_dev_detach(port_id, name)) {
1784 		TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
1785 		return;
1786 	}
1787 
1788 	nb_ports = rte_eth_dev_count();
1789 
1790 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1791 			name, nb_ports);
1792 	printf("Done\n");
1793 	return;
1794 }
1795 
1796 void
1797 pmd_test_exit(void)
1798 {
1799 	portid_t pt_id;
1800 
1801 	if (test_done == 0)
1802 		stop_packet_forwarding();
1803 
1804 	if (ports != NULL) {
1805 		no_link_check = 1;
1806 		RTE_ETH_FOREACH_DEV(pt_id) {
1807 			printf("\nShutting down port %d...\n", pt_id);
1808 			fflush(stdout);
1809 			stop_port(pt_id);
1810 			close_port(pt_id);
1811 		}
1812 	}
1813 	printf("\nBye...\n");
1814 }
1815 
1816 typedef void (*cmd_func_t)(void);
1817 struct pmd_test_command {
1818 	const char *cmd_name;
1819 	cmd_func_t cmd_func;
1820 };
1821 
1822 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1823 
1824 /* Check the link status of all ports for up to 9 s, and finally print it */
1825 static void
1826 check_all_ports_link_status(uint32_t port_mask)
1827 {
1828 #define CHECK_INTERVAL 100 /* 100ms */
1829 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1830 	portid_t portid;
1831 	uint8_t count, all_ports_up, print_flag = 0;
1832 	struct rte_eth_link link;
1833 
1834 	printf("Checking link statuses...\n");
1835 	fflush(stdout);
1836 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1837 		all_ports_up = 1;
1838 		RTE_ETH_FOREACH_DEV(portid) {
1839 			if ((port_mask & (1 << portid)) == 0)
1840 				continue;
1841 			memset(&link, 0, sizeof(link));
1842 			rte_eth_link_get_nowait(portid, &link);
1843 			/* print link status if flag set */
1844 			if (print_flag == 1) {
1845 				if (link.link_status)
1846 					printf(
1847 					"Port %d Link Up. speed %u Mbps - %s\n",
1848 					portid, link.link_speed,
1849 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1850 					("full-duplex") : ("half-duplex"));
1851 				else
1852 					printf("Port %d Link Down\n", portid);
1853 				continue;
1854 			}
1855 			/* clear all_ports_up flag if any link down */
1856 			if (link.link_status == ETH_LINK_DOWN) {
1857 				all_ports_up = 0;
1858 				break;
1859 			}
1860 		}
1861 		/* after finally printing all link status, get out */
1862 		if (print_flag == 1)
1863 			break;
1864 
1865 		if (all_ports_up == 0) {
1866 			fflush(stdout);
1867 			rte_delay_ms(CHECK_INTERVAL);
1868 		}
1869 
1870 		/* set the print_flag if all ports up or timeout */
1871 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1872 			print_flag = 1;
1873 		}
1874 
1875 		if (lsc_interrupt)
1876 			break;
1877 	}
1878 }
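/*
 * Minimal usage sketch (illustrative): port_mask is a bitmap of port ids
 * to poll. To wait for ports 0 and 1 only:
 *
 *	uint32_t mask = (1u << 0) | (1u << 1);
 *
 *	check_all_ports_link_status(mask);
 *
 * The loop above polls every CHECK_INTERVAL (100 ms) for at most
 * MAX_CHECK_TIME rounds, i.e. roughly 9 seconds, and prints one final
 * status line per selected port.
 */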
1879 
1880 static void
1881 rmv_event_callback(void *arg)
1882 {
1883 	struct rte_eth_dev *dev;
1884 	portid_t port_id = (intptr_t)arg;
1885 
1886 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1887 	dev = &rte_eth_devices[port_id];
1888 
1889 	stop_port(port_id);
1890 	close_port(port_id);
1891 	printf("removing device %s\n", dev->device->name);
1892 	if (rte_eal_dev_detach(dev->device))
1893 		TESTPMD_LOG(ERR, "Failed to detach device %s\n",
1894 			dev->device->name);
1895 }
1896 
1897 /* This function is used by the interrupt thread */
1898 static int
1899 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1900 		  void *ret_param)
1901 {
1902 	static const char * const event_desc[] = {
1903 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1904 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1905 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1906 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1907 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1908 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1909 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1910 		[RTE_ETH_EVENT_MAX] = NULL,
1911 	};
1912 
1913 	RTE_SET_USED(param);
1914 	RTE_SET_USED(ret_param);
1915 
1916 	if (type >= RTE_ETH_EVENT_MAX) {
1917 		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
1918 			port_id, __func__, type);
1919 		fflush(stderr);
1920 	} else if (event_print_mask & (UINT32_C(1) << type)) {
1921 		printf("\nPort %" PRIu16 ": %s event\n", port_id,
1922 			event_desc[type]);
1923 		fflush(stdout);
1924 	}
1925 
1926 	switch (type) {
1927 	case RTE_ETH_EVENT_INTR_RMV:
1928 		if (rte_eal_alarm_set(100000,
1929 				rmv_event_callback, (void *)(intptr_t)port_id))
1930 			fprintf(stderr, "Could not set up deferred device removal\n");
1931 		break;
1932 	default:
1933 		break;
1934 	}
1935 	return 0;
1936 }
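/*
 * Hedged sketch of how this callback gets wired up; the actual registration
 * happens elsewhere in testpmd. With the ethdev API used in this file, a
 * port subscribes to every event type roughly like this:
 *
 *	enum rte_eth_event_type ev;
 *
 *	for (ev = RTE_ETH_EVENT_UNKNOWN; ev < RTE_ETH_EVENT_MAX; ev++)
 *		rte_eth_dev_callback_register(port_id, ev,
 *					      eth_event_callback, NULL);
 *
 * The loop shown here is illustrative, not a quote of the real call site.
 */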
1937 
1938 static int
1939 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1940 {
1941 	uint16_t i;
1942 	int diag;
1943 	uint8_t mapping_found = 0;
1944 
1945 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1946 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1947 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1948 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1949 					tx_queue_stats_mappings[i].queue_id,
1950 					tx_queue_stats_mappings[i].stats_counter_id);
1951 			if (diag != 0)
1952 				return diag;
1953 			mapping_found = 1;
1954 		}
1955 	}
1956 	if (mapping_found)
1957 		port->tx_queue_stats_mapping_enabled = 1;
1958 	return 0;
1959 }
1960 
1961 static int
1962 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1963 {
1964 	uint16_t i;
1965 	int diag;
1966 	uint8_t mapping_found = 0;
1967 
1968 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1969 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1970 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1971 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1972 					rx_queue_stats_mappings[i].queue_id,
1973 					rx_queue_stats_mappings[i].stats_counter_id);
1974 			if (diag != 0)
1975 				return diag;
1976 			mapping_found = 1;
1977 		}
1978 	}
1979 	if (mapping_found)
1980 		port->rx_queue_stats_mapping_enabled = 1;
1981 	return 0;
1982 }
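/*
 * Illustrative sketch (assuming the struct layout used above): entries in
 * rx_queue_stats_mappings / tx_queue_stats_mappings are filled from the
 * --rx-queue-stats-mapping / --tx-queue-stats-mapping command-line options
 * and pair a (port, queue) with a stats counter register, e.g.:
 *
 *	struct queue_stats_mappings map = {
 *		.port_id = 0,		// port whose queue is mapped
 *		.queue_id = 2,		// RX or TX queue on that port
 *		.stats_counter_id = 5,	// counter register to accumulate into
 *	};
 */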
1983 
1984 static void
1985 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
1986 {
1987 	int diag = 0;
1988 
1989 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1990 	if (diag != 0) {
1991 		if (diag == -ENOTSUP) {
1992 			port->tx_queue_stats_mapping_enabled = 0;
1993 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
1994 		}
1995 		else
1996 			rte_exit(EXIT_FAILURE,
1997 					"set_tx_queue_stats_mapping_registers "
1998 					"failed for port id=%d diag=%d\n",
1999 					pi, diag);
2000 	}
2001 
2002 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2003 	if (diag != 0) {
2004 		if (diag == -ENOTSUP) {
2005 			port->rx_queue_stats_mapping_enabled = 0;
2006 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
2007 		}
2008 		else
2009 			rte_exit(EXIT_FAILURE,
2010 					"set_rx_queue_stats_mapping_registers "
2011 					"failed for port id=%d diag=%d\n",
2012 					pi, diag);
2013 	}
2014 }
2015 
2016 static void
2017 rxtx_port_config(struct rte_port *port)
2018 {
2019 	port->rx_conf = port->dev_info.default_rxconf;
2020 	port->tx_conf = port->dev_info.default_txconf;
2021 
2022 	/* Check if any RX/TX parameters have been passed */
2023 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2024 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2025 
2026 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2027 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2028 
2029 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2030 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2031 
2032 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2033 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2034 
2035 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2036 		port->rx_conf.rx_drop_en = rx_drop_en;
2037 
2038 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2039 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2040 
2041 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2042 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2043 
2044 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2045 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2046 
2047 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2048 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2049 
2050 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2051 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2052 
2053 	if (txq_flags != RTE_PMD_PARAM_UNSET)
2054 		port->tx_conf.txq_flags = txq_flags;
2055 }
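/*
 * Hedged note: the RTE_PMD_PARAM_UNSET-guarded globals consumed above are
 * set by testpmd command-line options (e.g. --rxfreet and --txfreet for the
 * free thresholds); anything left unset keeps the PMD defaults copied from
 * dev_info. For example, a launch such as
 *
 *	testpmd -l 0-3 -n 4 -- --rxfreet=32 --txfreet=32
 *
 * would make this function override only rx_free_thresh and tx_free_thresh.
 * The command line above is illustrative only.
 */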
2056 
2057 void
2058 init_port_config(void)
2059 {
2060 	portid_t pid;
2061 	struct rte_port *port;
2062 
2063 	RTE_ETH_FOREACH_DEV(pid) {
2064 		port = &ports[pid];
2065 		port->dev_conf.rxmode = rx_mode;
2066 		port->dev_conf.fdir_conf = fdir_conf;
2067 		if (nb_rxq > 1) {
2068 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2069 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2070 		} else {
2071 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2072 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2073 		}
2074 
2075 		if (port->dcb_flag == 0) {
2076 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2077 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2078 			else
2079 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2080 		}
2081 
2082 		rxtx_port_config(port);
2083 
2084 		rte_eth_macaddr_get(pid, &port->eth_addr);
2085 
2086 		map_port_queue_stats_mapping_registers(pid, port);
2087 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2088 		rte_pmd_ixgbe_bypass_init(pid);
2089 #endif
2090 
2091 		if (lsc_interrupt &&
2092 		    (rte_eth_devices[pid].data->dev_flags &
2093 		     RTE_ETH_DEV_INTR_LSC))
2094 			port->dev_conf.intr_conf.lsc = 1;
2095 		if (rmv_interrupt &&
2096 		    (rte_eth_devices[pid].data->dev_flags &
2097 		     RTE_ETH_DEV_INTR_RMV))
2098 			port->dev_conf.intr_conf.rmv = 1;
2099 
2100 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2101 		/* Detect softnic port */
2102 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2103 			port->softnic_enable = 1;
2104 			memset(&port->softport, 0, sizeof(struct softnic_port));
2105 
2106 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2107 				port->softport.tm_flag = 1;
2108 		}
2109 #endif
2110 	}
2111 }
2112 
2113 void set_port_slave_flag(portid_t slave_pid)
2114 {
2115 	struct rte_port *port;
2116 
2117 	port = &ports[slave_pid];
2118 	port->slave_flag = 1;
2119 }
2120 
2121 void clear_port_slave_flag(portid_t slave_pid)
2122 {
2123 	struct rte_port *port;
2124 
2125 	port = &ports[slave_pid];
2126 	port->slave_flag = 0;
2127 }
2128 
2129 uint8_t port_is_bonding_slave(portid_t slave_pid)
2130 {
2131 	struct rte_port *port;
2132 
2133 	port = &ports[slave_pid];
2134 	return port->slave_flag;
2135 }
2136 
2137 const uint16_t vlan_tags[] = {
2138 		0,  1,  2,  3,  4,  5,  6,  7,
2139 		8,  9, 10, 11,  12, 13, 14, 15,
2140 		16, 17, 18, 19, 20, 21, 22, 23,
2141 		24, 25, 26, 27, 28, 29, 30, 31
2142 };
2143 
2144 static int
2145 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2146 		 enum dcb_mode_enable dcb_mode,
2147 		 enum rte_eth_nb_tcs num_tcs,
2148 		 uint8_t pfc_en)
2149 {
2150 	uint8_t i;
2151 
2152 	/*
2153 	 * Build up the correct configuration for DCB+VT based on the VLAN tags
2154 	 * array given above, and the number of traffic classes available for use.
2155 	 */
2156 	if (dcb_mode == DCB_VT_ENABLED) {
2157 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2158 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2159 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2160 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2161 
2162 		/* VMDQ+DCB RX and TX configurations */
2163 		vmdq_rx_conf->enable_default_pool = 0;
2164 		vmdq_rx_conf->default_pool = 0;
2165 		vmdq_rx_conf->nb_queue_pools =
2166 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2167 		vmdq_tx_conf->nb_queue_pools =
2168 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2169 
2170 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2171 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2172 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2173 			vmdq_rx_conf->pool_map[i].pools =
2174 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2175 		}
2176 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2177 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2178 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2179 		}
2180 
2181 		/* set DCB mode of RX and TX of multiple queues */
2182 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2183 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2184 	} else {
2185 		struct rte_eth_dcb_rx_conf *rx_conf =
2186 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2187 		struct rte_eth_dcb_tx_conf *tx_conf =
2188 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2189 
2190 		rx_conf->nb_tcs = num_tcs;
2191 		tx_conf->nb_tcs = num_tcs;
2192 
2193 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2194 			rx_conf->dcb_tc[i] = i % num_tcs;
2195 			tx_conf->dcb_tc[i] = i % num_tcs;
2196 		}
2197 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2198 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2199 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2200 	}
2201 
2202 	if (pfc_en)
2203 		eth_conf->dcb_capability_en =
2204 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2205 	else
2206 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2207 
2208 	return 0;
2209 }
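/*
 * Minimal usage sketch (illustrative): building a plain DCB (non-VT)
 * configuration with four traffic classes and priority flow control:
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	if (get_eth_dcb_conf(&conf, DCB_ENABLED, ETH_4_TCS, 1) == 0) {
 *		// conf.rxmode.mq_mode is now ETH_MQ_RX_DCB_RSS and
 *		// conf.txmode.mq_mode is ETH_MQ_TX_DCB
 *	}
 */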
2210 
2211 int
2212 init_port_dcb_config(portid_t pid,
2213 		     enum dcb_mode_enable dcb_mode,
2214 		     enum rte_eth_nb_tcs num_tcs,
2215 		     uint8_t pfc_en)
2216 {
2217 	struct rte_eth_conf port_conf;
2218 	struct rte_port *rte_port;
2219 	int retval;
2220 	uint16_t i;
2221 
2222 	rte_port = &ports[pid];
2223 
2224 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2225 	/* Enter DCB configuration status */
2226 	dcb_config = 1;
2227 
2228 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2229 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2230 	if (retval < 0)
2231 		return retval;
2232 	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2233 
2234 	/*
2235 	 * Write the configuration into the device.
2236 	 * Set the numbers of RX & TX queues to 0, so
2237 	 * the RX & TX queues will not be set up.
2238 	 */
2239 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2240 
2241 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2242 
2243 	/* If dev_info.vmdq_pool_base is greater than 0,
2244 	 * the queue ids of the VMDq pools start after the PF queues.
2245 	 */
2246 	if (dcb_mode == DCB_VT_ENABLED &&
2247 	    rte_port->dev_info.vmdq_pool_base > 0) {
2248 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2249 			" for port %d.\n", pid);
2250 		return -1;
2251 	}
2252 
2253 	/* Assume the ports in testpmd have the same DCB capability
2254 	 * and the same number of rxq and txq in DCB mode
2255 	 */
2256 	if (dcb_mode == DCB_VT_ENABLED) {
2257 		if (rte_port->dev_info.max_vfs > 0) {
2258 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2259 			nb_txq = rte_port->dev_info.nb_tx_queues;
2260 		} else {
2261 			nb_rxq = rte_port->dev_info.max_rx_queues;
2262 			nb_txq = rte_port->dev_info.max_tx_queues;
2263 		}
2264 	} else {
2265 		/* If VT is disabled, use all PF queues */
2266 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2267 			nb_rxq = rte_port->dev_info.max_rx_queues;
2268 			nb_txq = rte_port->dev_info.max_tx_queues;
2269 		} else {
2270 			nb_rxq = (queueid_t)num_tcs;
2271 			nb_txq = (queueid_t)num_tcs;
2273 		}
2274 	}
2275 	rx_free_thresh = 64;
2276 
2277 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2278 
2279 	rxtx_port_config(rte_port);
2280 	/* VLAN filter */
2281 	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
2282 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2283 		rx_vft_set(pid, vlan_tags[i], 1);
2284 
2285 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2286 	map_port_queue_stats_mapping_registers(pid, rte_port);
2287 
2288 	rte_port->dcb_flag = 1;
2289 
2290 	return 0;
2291 }
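/*
 * Hedged sketch: the interactive command
 *
 *	port config 0 dcb vt on 4 pfc on
 *
 * is expected to reach this function roughly as
 *
 *	init_port_dcb_config(0, DCB_VT_ENABLED, ETH_4_TCS, 1);
 *
 * i.e. DCB with VT on port 0, four traffic classes, PFC enabled. The exact
 * command syntax lives in cmdline.c; it is shown here for illustration only.
 */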
2292 
2293 static void
2294 init_port(void)
2295 {
2296 	/* Configuration of Ethernet ports. */
2297 	ports = rte_zmalloc("testpmd: ports",
2298 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2299 			    RTE_CACHE_LINE_SIZE);
2300 	if (ports == NULL) {
2301 		rte_exit(EXIT_FAILURE,
2302 				"rte_zmalloc(%d struct rte_port) failed\n",
2303 				RTE_MAX_ETHPORTS);
2304 	}
2305 }
2306 
2307 static void
2308 force_quit(void)
2309 {
2310 	pmd_test_exit();
2311 	prompt_exit();
2312 }
2313 
2314 static void
2315 print_stats(void)
2316 {
2317 	uint8_t i;
2318 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2319 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2320 
2321 	/* Clear screen and move to top left */
2322 	printf("%s%s", clr, top_left);
2323 
2324 	printf("\nPort statistics ====================================");
2325 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2326 		nic_stats_display(fwd_ports_ids[i]);
2327 }
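/*
 * Note: clr and top_left above are the ANSI escape sequences ESC[2J (erase
 * display) and ESC[1;1H (move the cursor to row 1, column 1), so each
 * refresh repaints the statistics at the top of a cleared terminal.
 */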
2328 
2329 static void
2330 signal_handler(int signum)
2331 {
2332 	if (signum == SIGINT || signum == SIGTERM) {
2333 		printf("\nSignal %d received, preparing to exit...\n",
2334 				signum);
2335 #ifdef RTE_LIBRTE_PDUMP
2336 		/* uninitialize packet capture framework */
2337 		rte_pdump_uninit();
2338 #endif
2339 #ifdef RTE_LIBRTE_LATENCY_STATS
2340 		rte_latencystats_uninit();
2341 #endif
2342 		force_quit();
2343 		/* Set flag to indicate forced termination. */
2344 		f_quit = 1;
2345 		/* exit with the expected status */
2346 		signal(signum, SIG_DFL);
2347 		kill(getpid(), signum);
2348 	}
2349 }
2350 
2351 int
2352 main(int argc, char** argv)
2353 {
2354 	int  diag;
2355 	portid_t port_id;
2356 
2357 	signal(SIGINT, signal_handler);
2358 	signal(SIGTERM, signal_handler);
2359 
2360 	diag = rte_eal_init(argc, argv);
2361 	if (diag < 0)
2362 		rte_panic("Cannot init EAL\n");
2363 
2364 	testpmd_logtype = rte_log_register("testpmd");
2365 	if (testpmd_logtype < 0)
2366 		rte_panic("Cannot register log type");
2367 	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
2368 
2369 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2370 		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
2371 			strerror(errno));
2372 	}
2373 
2374 #ifdef RTE_LIBRTE_PDUMP
2375 	/* initialize packet capture framework */
2376 	rte_pdump_init(NULL);
2377 #endif
2378 
2379 	nb_ports = (portid_t) rte_eth_dev_count();
2380 	if (nb_ports == 0)
2381 		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
2382 
2383 	/* allocate port structures, and init them */
2384 	init_port();
2385 
2386 	set_def_fwd_config();
2387 	if (nb_lcores == 0)
2388 		rte_panic("Empty set of forwarding logical cores - check the "
2389 			  "core mask supplied in the command parameters\n");
2390 
2391 	/* Bitrate/latency stats disabled by default */
2392 #ifdef RTE_LIBRTE_BITRATE
2393 	bitrate_enabled = 0;
2394 #endif
2395 #ifdef RTE_LIBRTE_LATENCY_STATS
2396 	latencystats_enabled = 0;
2397 #endif
2398 
2399 	argc -= diag;
2400 	argv += diag;
2401 	if (argc > 1)
2402 		launch_args_parse(argc, argv);
2403 
2404 	if (tx_first && interactive)
2405 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2406 				"interactive mode.\n");
2407 
2408 	if (tx_first && lsc_interrupt) {
2409 		printf("Warning: lsc_interrupt needs to be off when "
2410 				"using tx_first. Disabling.\n");
2411 		lsc_interrupt = 0;
2412 	}
2413 
2414 	if (!nb_rxq && !nb_txq)
2415 		printf("Warning: Either rx or tx queues should be non-zero\n");
2416 
2417 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2418 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2419 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2420 		       nb_rxq, nb_txq);
2421 
2422 	init_config();
2423 	if (start_port(RTE_PORT_ALL) != 0)
2424 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2425 
2426 	/* set all ports to promiscuous mode by default */
2427 	RTE_ETH_FOREACH_DEV(port_id)
2428 		rte_eth_promiscuous_enable(port_id);
2429 
2430 	/* Init metrics library */
2431 	rte_metrics_init(rte_socket_id());
2432 
2433 #ifdef RTE_LIBRTE_LATENCY_STATS
2434 	if (latencystats_enabled != 0) {
2435 		int ret = rte_latencystats_init(1, NULL);
2436 		if (ret)
2437 			printf("Warning: latencystats init()"
2438 				" returned error %d\n", ret);
2439 		printf("Latencystats running on lcore %d\n",
2440 			latencystats_lcore_id);
2441 	}
2442 #endif
2443 
2444 	/* Setup bitrate stats */
2445 #ifdef RTE_LIBRTE_BITRATE
2446 	if (bitrate_enabled != 0) {
2447 		bitrate_data = rte_stats_bitrate_create();
2448 		if (bitrate_data == NULL)
2449 			rte_exit(EXIT_FAILURE,
2450 				"Could not allocate bitrate data.\n");
2451 		rte_stats_bitrate_reg(bitrate_data);
2452 	}
2453 #endif
2454 
2455 #ifdef RTE_LIBRTE_CMDLINE
2456 	if (strlen(cmdline_filename) != 0)
2457 		cmdline_read_from_file(cmdline_filename);
2458 
2459 	if (interactive == 1) {
2460 		if (auto_start) {
2461 			printf("Start automatic packet forwarding\n");
2462 			start_packet_forwarding(0);
2463 		}
2464 		prompt();
2465 		pmd_test_exit();
2466 	} else
2467 #endif
2468 	{
2469 		char c;
2470 		int rc;
2471 
2472 		f_quit = 0;
2473 
2474 		printf("No command line given; starting packet forwarding\n");
2475 		start_packet_forwarding(tx_first);
2476 		if (stats_period != 0) {
2477 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2478 			uint64_t timer_period;
2479 
2480 			/* Convert to number of cycles */
2481 			timer_period = stats_period * rte_get_timer_hz();
2482 
2483 			while (f_quit == 0) {
2484 				cur_time = rte_get_timer_cycles();
2485 				diff_time += cur_time - prev_time;
2486 
2487 				if (diff_time >= timer_period) {
2488 					print_stats();
2489 					/* Reset the timer */
2490 					diff_time = 0;
2491 				}
2492 				/* Sleep to avoid unnecessary checks */
2493 				prev_time = cur_time;
2494 				sleep(1);
2495 			}
2496 		}
2497 
2498 		printf("Press enter to exit\n");
2499 		rc = read(0, &c, 1);
2500 		pmd_test_exit();
2501 		if (rc < 0)
2502 			return 1;
2503 	}
2504 
2505 	return 0;
2506 }
2507