xref: /dpdk/app/test-pmd/testpmd.c (revision b79e4c00af0e7cfb8601ab0208659d226b82bd10)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
78 #endif
79 #ifdef RTE_LIBRTE_PMD_XENVIRT
80 #include <rte_eth_xenvirt.h>
81 #endif
82 #ifdef RTE_LIBRTE_PDUMP
83 #include <rte_pdump.h>
84 #endif
85 #include <rte_flow.h>
86 #include <rte_metrics.h>
87 #ifdef RTE_LIBRTE_BITRATE
88 #include <rte_bitrate.h>
89 #endif
90 #ifdef RTE_LIBRTE_LATENCY_STATS
91 #include <rte_latencystats.h>
92 #endif
93 
94 #include "testpmd.h"
95 
96 uint16_t verbose_level = 0; /**< Silent by default. */
97 
98 /* use the master core for the command line? */
99 uint8_t interactive = 0;
100 uint8_t auto_start = 0;
101 uint8_t tx_first;
102 char cmdline_filename[PATH_MAX] = {0};
103 
104 /*
105  * NUMA support configuration.
106  * When set, NUMA support attempts to dispatch the allocation of the
107  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
108  * probed ports among the CPU sockets 0 and 1.
109  * Otherwise, all memory is allocated from CPU socket 0.
110  */
111 uint8_t numa_support = 1; /**< numa enabled by default */
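/*
 * Illustrative invocation (assuming the usual testpmd options): NUMA
 * support can be turned off from the command line, e.g.
 *     testpmd -l 0-3 -n 4 -- --no-numa
 * in which case all memory is taken from CPU socket 0, as noted above.
 */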
112 
113 /*
114  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
115  * not configured.
116  */
117 uint8_t socket_num = UMA_NO_CONFIG;
118 
119 /*
120  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
121  */
122 uint8_t mp_anon = 0;
123 
124 /*
125  * Record the Ethernet address of peer target ports to which packets are
126  * forwarded.
127  * Must be instantiated with the Ethernet addresses of peer traffic generator
128  * ports.
129  */
130 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
131 portid_t nb_peer_eth_addrs = 0;
132 
133 /*
134  * Probed Target Environment.
135  */
136 struct rte_port *ports;	       /**< For all probed ethernet ports. */
137 portid_t nb_ports;             /**< Number of probed ethernet ports. */
138 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
139 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
140 
141 /*
142  * Test Forwarding Configuration.
143  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
144  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
145  */
146 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
147 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
148 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
149 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
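/*
 * Example (illustrative): with 8 forwarding lcore candidates probed
 * (nb_lcores == 8), the runtime command "set corelist 1,2,3,4" gives
 * nb_cfg_lcores == 4, and "set nbcore 2" then selects nb_fwd_lcores == 2
 * of them, preserving nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores.
 */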
150 
151 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
152 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
153 
154 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
155 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
156 
157 /*
158  * Forwarding engines.
159  */
160 struct fwd_engine * fwd_engines[] = {
161 	&io_fwd_engine,
162 	&mac_fwd_engine,
163 	&mac_swap_engine,
164 	&flow_gen_engine,
165 	&rx_only_engine,
166 	&tx_only_engine,
167 	&csum_fwd_engine,
168 	&icmp_echo_engine,
169 #ifdef RTE_LIBRTE_IEEE1588
170 	&ieee1588_fwd_engine,
171 #endif
172 	NULL,
173 };
174 
175 struct fwd_config cur_fwd_config;
176 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
177 uint32_t retry_enabled;
178 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
179 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
180 
181 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
182 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
183                                       * specified on command-line. */
184 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
185 /*
186  * Configuration of packet segments used by the "txonly" processing engine.
187  */
188 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
189 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
190 	TXONLY_DEF_PACKET_LEN,
191 };
192 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
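/*
 * Example (illustrative): the runtime command "set txpkts 64,64" would
 * yield tx_pkt_nb_segs == 2 with tx_pkt_seg_lengths == {64, 64}, for a
 * total tx_pkt_length of 128 bytes.
 */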
193 
194 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
195 /**< Split policy for packets to TX. */
196 
197 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
198 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
199 
200 /* whether the current configuration is in DCB mode; 0 means it is not */
201 uint8_t dcb_config = 0;
202 
203 /* Whether DCB is in testing status */
204 uint8_t dcb_test = 0;
205 
206 /*
207  * Configurable number of RX/TX queues.
208  */
209 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
210 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
211 
212 /*
213  * Configurable number of RX/TX ring descriptors.
214  */
215 #define RTE_TEST_RX_DESC_DEFAULT 128
216 #define RTE_TEST_TX_DESC_DEFAULT 512
217 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
218 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
219 
220 #define RTE_PMD_PARAM_UNSET -1
221 /*
222  * Configurable values of RX and TX ring threshold registers.
223  */
224 
225 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
228 
229 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
230 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
232 
233 /*
234  * Configurable value of RX free threshold.
235  */
236 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
237 
238 /*
239  * Configurable value of RX drop enable.
240  */
241 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
242 
243 /*
244  * Configurable value of TX free threshold.
245  */
246 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
247 
248 /*
249  * Configurable value of TX RS bit threshold.
250  */
251 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
252 
253 /*
254  * Configurable value of TX queue flags.
255  */
256 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
257 
258 /*
259  * Receive Side Scaling (RSS) configuration.
260  */
261 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
262 
263 /*
264  * Port topology configuration
265  */
266 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
267 
268 /*
269  * Avoid flushing all the RX streams before starting forwarding.
270  */
271 uint8_t no_flush_rx = 0; /* flush by default */
272 
273 /*
274  * Avoid checking the link status when starting/stopping a port.
275  */
276 uint8_t no_link_check = 0; /* check by default */
277 
278 /*
279  * Enable link status change notification
280  */
281 uint8_t lsc_interrupt = 1; /* enabled by default */
282 
283 /*
284  * Enable device removal notification.
285  */
286 uint8_t rmv_interrupt = 1; /* enabled by default */
287 
288 /*
289  * Display or mask ether events
290  * Default to all events except VF_MBOX
291  */
292 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
293 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
294 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
295 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
297 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
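/*
 * The mask is consulted bit by bit in eth_event_callback() below,
 * essentially:
 *     if (event_print_mask & (UINT32_C(1) << type))
 *             printf(...);
 * so VF_MBOX is the only event left unprinted by default.
 */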
298 
299 /*
300  * NIC bypass mode configuration options.
301  */
302 
303 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
304 /* The NIC bypass watchdog timeout. */
305 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
306 #endif
307 
308 
309 #ifdef RTE_LIBRTE_LATENCY_STATS
310 
311 /*
312  * Set when latency stats are enabled on the command line
313  */
314 uint8_t latencystats_enabled;
315 
316 /*
317  * Lcore ID to serve latency statistics.
318  */
319 lcoreid_t latencystats_lcore_id = -1;
320 
321 #endif
322 
323 /*
324  * Ethernet device configuration.
325  */
326 struct rte_eth_rxmode rx_mode = {
327 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
328 	.split_hdr_size = 0,
329 	.header_split   = 0, /**< Header Split disabled. */
330 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
331 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
332 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
333 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
334 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
335 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
336 };
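/*
 * Several of these defaults can be overridden from the command line
 * (illustrative, assuming the usual testpmd parameters): for example,
 * --disable-hw-vlan clears the VLAN filter/strip fields, and
 * --max-pkt-len=N raises max_rx_pkt_len (enabling jumbo_frame when N
 * exceeds ETHER_MAX_LEN).
 */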
337 
338 struct rte_fdir_conf fdir_conf = {
339 	.mode = RTE_FDIR_MODE_NONE,
340 	.pballoc = RTE_FDIR_PBALLOC_64K,
341 	.status = RTE_FDIR_REPORT_STATUS,
342 	.mask = {
343 		.vlan_tci_mask = 0x0,
344 		.ipv4_mask     = {
345 			.src_ip = 0xFFFFFFFF,
346 			.dst_ip = 0xFFFFFFFF,
347 		},
348 		.ipv6_mask     = {
349 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
350 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
351 		},
352 		.src_port_mask = 0xFFFF,
353 		.dst_port_mask = 0xFFFF,
354 		.mac_addr_byte_mask = 0xFF,
355 		.tunnel_type_mask = 1,
356 		.tunnel_id_mask = 0xFFFFFFFF,
357 	},
358 	.drop_queue = 127,
359 };
360 
361 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
362 
363 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
364 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
365 
366 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
367 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
368 
369 uint16_t nb_tx_queue_stats_mappings = 0;
370 uint16_t nb_rx_queue_stats_mappings = 0;
371 
372 unsigned int num_sockets = 0;
373 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
374 
375 #ifdef RTE_LIBRTE_BITRATE
376 /* Bitrate statistics */
377 struct rte_stats_bitrates *bitrate_data;
378 lcoreid_t bitrate_lcore_id;
379 uint8_t bitrate_enabled;
380 #endif
381 
382 /* Forward function declarations */
383 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
384 static void check_all_ports_link_status(uint32_t port_mask);
385 static int eth_event_callback(uint8_t port_id,
386 			      enum rte_eth_event_type type,
387 			      void *param, void *ret_param);
388 
389 /*
390  * Check if all the ports are started.
391  * If yes, return positive value. If not, return zero.
392  */
393 static int all_ports_started(void);
394 
395 /*
396  * Helper function to check if socket is already discovered.
397  * If yes, return positive value. If not, return zero.
398  */
399 int
400 new_socket_id(unsigned int socket_id)
401 {
402 	unsigned int i;
403 
404 	for (i = 0; i < num_sockets; i++) {
405 		if (socket_ids[i] == socket_id)
406 			return 0;
407 	}
408 	return 1;
409 }
410 
411 /*
412  * Setup default configuration.
413  */
414 static void
415 set_default_fwd_lcores_config(void)
416 {
417 	unsigned int i;
418 	unsigned int nb_lc;
419 	unsigned int sock_num;
420 
421 	nb_lc = 0;
422 	for (i = 0; i < RTE_MAX_LCORE; i++) {
423 		sock_num = rte_lcore_to_socket_id(i);
424 		if (new_socket_id(sock_num)) {
425 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
426 				rte_exit(EXIT_FAILURE,
427 					 "Total sockets greater than %u\n",
428 					 RTE_MAX_NUMA_NODES);
429 			}
430 			socket_ids[num_sockets++] = sock_num;
431 		}
432 		if (!rte_lcore_is_enabled(i))
433 			continue;
434 		if (i == rte_get_master_lcore())
435 			continue;
436 		fwd_lcores_cpuids[nb_lc++] = i;
437 	}
438 	nb_lcores = (lcoreid_t) nb_lc;
439 	nb_cfg_lcores = nb_lcores;
440 	nb_fwd_lcores = 1;
441 }
442 
443 static void
444 set_def_peer_eth_addrs(void)
445 {
446 	portid_t i;
447 
448 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
449 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
450 		peer_eth_addrs[i].addr_bytes[5] = i;
451 	}
452 }
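/*
 * With ETHER_LOCAL_ADMIN_ADDR (0x02) in the first byte, the default
 * peer address of port 3, for example, becomes 02:00:00:00:00:03.
 */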
453 
454 static void
455 set_default_fwd_ports_config(void)
456 {
457 	portid_t pt_id;
458 
459 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
460 		fwd_ports_ids[pt_id] = pt_id;
461 
462 	nb_cfg_ports = nb_ports;
463 	nb_fwd_ports = nb_ports;
464 }
465 
466 void
467 set_def_fwd_config(void)
468 {
469 	set_default_fwd_lcores_config();
470 	set_def_peer_eth_addrs();
471 	set_default_fwd_ports_config();
472 }
473 
474 /*
475  * Configuration initialisation done once at init time.
476  */
477 static void
478 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
479 		 unsigned int socket_id)
480 {
481 	char pool_name[RTE_MEMPOOL_NAMESIZE];
482 	struct rte_mempool *rte_mp = NULL;
483 	uint32_t mb_size;
484 
485 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
486 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
487 
488 	RTE_LOG(INFO, USER1,
489 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
490 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
491 
492 #ifdef RTE_LIBRTE_PMD_XENVIRT
493 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
494 		(unsigned) mb_mempool_cache,
495 		sizeof(struct rte_pktmbuf_pool_private),
496 		rte_pktmbuf_pool_init, NULL,
497 		rte_pktmbuf_init, NULL,
498 		socket_id, 0);
499 #endif
500 
501 	/* if the former XEN allocation failed, fall back to normal allocation */
502 	if (rte_mp == NULL) {
503 		if (mp_anon != 0) {
504 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
505 				mb_size, (unsigned) mb_mempool_cache,
506 				sizeof(struct rte_pktmbuf_pool_private),
507 				socket_id, 0);
508 			if (rte_mp == NULL)
509 				goto err;
510 
511 			if (rte_mempool_populate_anon(rte_mp) == 0) {
512 				rte_mempool_free(rte_mp);
513 				rte_mp = NULL;
514 				goto err;
515 			}
516 			rte_pktmbuf_pool_init(rte_mp, NULL);
517 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
518 		} else {
519 			/* wrapper to rte_mempool_create() */
520 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
521 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
522 		}
523 	}
524 
525 err:
526 	if (rte_mp == NULL) {
527 		rte_exit(EXIT_FAILURE,
528 			"Creation of mbuf pool for socket %u failed: %s\n",
529 			socket_id, rte_strerror(rte_errno));
530 	} else if (verbose_level > 0) {
531 		rte_mempool_dump(stdout, rte_mp);
532 	}
533 }
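/*
 * Typical call, as issued from init_config() below:
 *     mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, socket_ids[i]);
 * i.e. one pool is created per discovered NUMA socket.
 */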
534 
535 /*
536  * Check whether a given socket id is valid in NUMA mode;
537  * if valid, return 0, else return -1.
538  */
539 static int
540 check_socket_id(const unsigned int socket_id)
541 {
542 	static int warning_once = 0;
543 
544 	if (new_socket_id(socket_id)) {
545 		if (!warning_once && numa_support)
546 			printf("Warning: NUMA should be configured manually by"
547 			       " using --port-numa-config and"
548 			       " --ring-numa-config parameters along with"
549 			       " --numa.\n");
550 		warning_once = 1;
551 		return -1;
552 	}
553 	return 0;
554 }
555 
556 static void
557 init_config(void)
558 {
559 	portid_t pid;
560 	struct rte_port *port;
561 	struct rte_mempool *mbp;
562 	unsigned int nb_mbuf_per_pool;
563 	lcoreid_t  lc_id;
564 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
565 
566 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
567 
568 	if (numa_support) {
569 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
570 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
571 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
572 	}
573 
574 	/* Configuration of logical cores. */
575 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
576 				sizeof(struct fwd_lcore *) * nb_lcores,
577 				RTE_CACHE_LINE_SIZE);
578 	if (fwd_lcores == NULL) {
579 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
580 							"failed\n", nb_lcores);
581 	}
582 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
583 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
584 					       sizeof(struct fwd_lcore),
585 					       RTE_CACHE_LINE_SIZE);
586 		if (fwd_lcores[lc_id] == NULL) {
587 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
588 								"failed\n");
589 		}
590 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
591 	}
592 
593 	RTE_ETH_FOREACH_DEV(pid) {
594 		port = &ports[pid];
595 		rte_eth_dev_info_get(pid, &port->dev_info);
596 
597 		if (numa_support) {
598 			if (port_numa[pid] != NUMA_NO_CONFIG)
599 				port_per_socket[port_numa[pid]]++;
600 			else {
601 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
602 
603 				/* if socket_id is invalid, set to 0 */
604 				if (check_socket_id(socket_id) < 0)
605 					socket_id = 0;
606 				port_per_socket[socket_id]++;
607 			}
608 		}
609 
610 		/* set flag to initialize port/queue */
611 		port->need_reconfig = 1;
612 		port->need_reconfig_queues = 1;
613 	}
614 
615 	/*
616 	 * Create pools of mbuf.
617 	 * If NUMA support is disabled, create a single pool of mbuf in
618 	 * socket 0 memory by default.
619 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
620 	 *
621 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
622 	 * nb_txd can be configured at run time.
623 	 */
624 	if (param_total_num_mbufs)
625 		nb_mbuf_per_pool = param_total_num_mbufs;
626 	else {
627 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
628 			(nb_lcores * mb_mempool_cache) +
629 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
630 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
631 	}
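	/*
	 * Worked example (illustrative, assuming the usual testpmd.h values
	 * RTE_TEST_RX_DESC_MAX == RTE_TEST_TX_DESC_MAX == 2048,
	 * MAX_PKT_BURST == 512, mb_mempool_cache == 250 and
	 * RTE_MAX_ETHPORTS == 32): with 4 lcores,
	 * nb_mbuf_per_pool == (2048 + 4 * 250 + 2048 + 512) * 32 == 179456.
	 */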
632 
633 	if (numa_support) {
634 		uint8_t i;
635 
636 		for (i = 0; i < num_sockets; i++)
637 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
638 					 socket_ids[i]);
639 	} else {
640 		if (socket_num == UMA_NO_CONFIG)
641 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
642 		else
643 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
644 						 socket_num);
645 	}
646 
647 	init_port_config();
648 
649 	/*
650 	 * Records which Mbuf pool to use by each logical core, if needed.
651 	 */
652 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
653 		mbp = mbuf_pool_find(
654 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
655 
656 		if (mbp == NULL)
657 			mbp = mbuf_pool_find(0);
658 		fwd_lcores[lc_id]->mbp = mbp;
659 	}
660 
661 	/* Configuration of packet forwarding streams. */
662 	if (init_fwd_streams() < 0)
663 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
664 
665 	fwd_config_setup();
666 }
667 
668 
669 void
670 reconfig(portid_t new_port_id, unsigned socket_id)
671 {
672 	struct rte_port *port;
673 
674 	/* Reconfiguration of Ethernet ports. */
675 	port = &ports[new_port_id];
676 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
677 
678 	/* set flag to initialize port/queue */
679 	port->need_reconfig = 1;
680 	port->need_reconfig_queues = 1;
681 	port->socket_id = socket_id;
682 
683 	init_port_config();
684 }
685 
686 
687 int
688 init_fwd_streams(void)
689 {
690 	portid_t pid;
691 	struct rte_port *port;
692 	streamid_t sm_id, nb_fwd_streams_new;
693 	queueid_t q;
694 
695 	/* set socket id according to numa or not */
696 	RTE_ETH_FOREACH_DEV(pid) {
697 		port = &ports[pid];
698 		if (nb_rxq > port->dev_info.max_rx_queues) {
699 			printf("Fail: nb_rxq(%d) is greater than "
700 				"max_rx_queues(%d)\n", nb_rxq,
701 				port->dev_info.max_rx_queues);
702 			return -1;
703 		}
704 		if (nb_txq > port->dev_info.max_tx_queues) {
705 			printf("Fail: nb_txq(%d) is greater than "
706 				"max_tx_queues(%d)\n", nb_txq,
707 				port->dev_info.max_tx_queues);
708 			return -1;
709 		}
710 		if (numa_support) {
711 			if (port_numa[pid] != NUMA_NO_CONFIG)
712 				port->socket_id = port_numa[pid];
713 			else {
714 				port->socket_id = rte_eth_dev_socket_id(pid);
715 
716 				/* if socket_id is invalid, set to 0 */
717 				if (check_socket_id(port->socket_id) < 0)
718 					port->socket_id = 0;
719 			}
720 		}
721 		else {
722 			if (socket_num == UMA_NO_CONFIG)
723 				port->socket_id = 0;
724 			else
725 				port->socket_id = socket_num;
726 		}
727 	}
728 
729 	q = RTE_MAX(nb_rxq, nb_txq);
730 	if (q == 0) {
731 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
732 		return -1;
733 	}
734 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
735 	if (nb_fwd_streams_new == nb_fwd_streams)
736 		return 0;
737 	/* clear the old */
738 	if (fwd_streams != NULL) {
739 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
740 			if (fwd_streams[sm_id] == NULL)
741 				continue;
742 			rte_free(fwd_streams[sm_id]);
743 			fwd_streams[sm_id] = NULL;
744 		}
745 		rte_free(fwd_streams);
746 		fwd_streams = NULL;
747 	}
748 
749 	/* init new */
750 	nb_fwd_streams = nb_fwd_streams_new;
751 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
752 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
753 	if (fwd_streams == NULL)
754 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
755 						"failed\n", nb_fwd_streams);
756 
757 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
758 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
759 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
760 		if (fwd_streams[sm_id] == NULL)
761 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
762 								" failed\n");
763 	}
764 
765 	return 0;
766 }
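/*
 * Illustrative sizing: with nb_ports == 2, nb_rxq == 4 and nb_txq == 2,
 * q == 4 above and nb_fwd_streams == 2 * 4 == 8 streams are allocated.
 */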
767 
768 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
769 static void
770 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
771 {
772 	unsigned int total_burst;
773 	unsigned int nb_burst;
774 	unsigned int burst_stats[3];
775 	uint16_t pktnb_stats[3];
776 	uint16_t nb_pkt;
777 	int burst_percent[3];
778 
779 	/*
780 	 * First compute the total number of packet bursts and the
781 	 * two highest numbers of bursts of the same number of packets.
782 	 */
783 	total_burst = 0;
784 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
785 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
786 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
787 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
788 		if (nb_burst == 0)
789 			continue;
790 		total_burst += nb_burst;
791 		if (nb_burst > burst_stats[0]) {
792 			burst_stats[1] = burst_stats[0];
793 			pktnb_stats[1] = pktnb_stats[0];
794 			burst_stats[0] = nb_burst;
795 			pktnb_stats[0] = nb_pkt;
796 		} else if (nb_burst > burst_stats[1]) {
			/* also track the second-highest burst count,
			 * as the comment above promises */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
797 	}
798 	if (total_burst == 0)
799 		return;
800 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
801 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
802 	       burst_percent[0], (int) pktnb_stats[0]);
803 	if (burst_stats[0] == total_burst) {
804 		printf("]\n");
805 		return;
806 	}
807 	if (burst_stats[0] + burst_stats[1] == total_burst) {
808 		printf(" + %d%% of %d pkts]\n",
809 		       100 - burst_percent[0], pktnb_stats[1]);
810 		return;
811 	}
812 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
813 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
814 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
815 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
816 		return;
817 	}
818 	printf(" + %d%% of %d pkts + %d%% of others]\n",
819 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
820 }
821 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
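/*
 * Illustrative output of pkt_burst_stats_display(), given the formats
 * above:
 *     RX-bursts : 1000 [90% of 32 pkts + 10% of 16 pkts]
 */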
822 
823 static void
824 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
825 {
826 	struct rte_port *port;
827 	uint8_t i;
828 
829 	static const char *fwd_stats_border = "----------------------";
830 
831 	port = &ports[port_id];
832 	printf("\n  %s Forward statistics for port %-2d %s\n",
833 	       fwd_stats_border, port_id, fwd_stats_border);
834 
835 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
836 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
837 		       "%-"PRIu64"\n",
838 		       stats->ipackets, stats->imissed,
839 		       (uint64_t) (stats->ipackets + stats->imissed));
840 
841 		if (cur_fwd_eng == &csum_fwd_engine)
842 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
843 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
844 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
845 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
846 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
847 		}
848 
849 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
850 		       "%-"PRIu64"\n",
851 		       stats->opackets, port->tx_dropped,
852 		       (uint64_t) (stats->opackets + port->tx_dropped));
853 	}
854 	else {
855 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
856 		       "%14"PRIu64"\n",
857 		       stats->ipackets, stats->imissed,
858 		       (uint64_t) (stats->ipackets + stats->imissed));
859 
860 		if (cur_fwd_eng == &csum_fwd_engine)
861 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
862 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
863 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
864 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
865 			printf("  RX-nombufs:             %14"PRIu64"\n",
866 			       stats->rx_nombuf);
867 		}
868 
869 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
870 		       "%14"PRIu64"\n",
871 		       stats->opackets, port->tx_dropped,
872 		       (uint64_t) (stats->opackets + port->tx_dropped));
873 	}
874 
875 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
876 	if (port->rx_stream)
877 		pkt_burst_stats_display("RX",
878 			&port->rx_stream->rx_burst_stats);
879 	if (port->tx_stream)
880 		pkt_burst_stats_display("TX",
881 			&port->tx_stream->tx_burst_stats);
882 #endif
883 
884 	if (port->rx_queue_stats_mapping_enabled) {
885 		printf("\n");
886 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
887 			printf("  Stats reg %2d RX-packets:%14"PRIu64
888 			       "     RX-errors:%14"PRIu64
889 			       "    RX-bytes:%14"PRIu64"\n",
890 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
891 		}
892 		printf("\n");
893 	}
894 	if (port->tx_queue_stats_mapping_enabled) {
895 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
896 			printf("  Stats reg %2d TX-packets:%14"PRIu64
897 			       "                                 TX-bytes:%14"PRIu64"\n",
898 			       i, stats->q_opackets[i], stats->q_obytes[i]);
899 		}
900 	}
901 
902 	printf("  %s--------------------------------%s\n",
903 	       fwd_stats_border, fwd_stats_border);
904 }
905 
906 static void
907 fwd_stream_stats_display(streamid_t stream_id)
908 {
909 	struct fwd_stream *fs;
910 	static const char *fwd_top_stats_border = "-------";
911 
912 	fs = fwd_streams[stream_id];
913 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
914 	    (fs->fwd_dropped == 0))
915 		return;
916 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
917 	       "TX Port=%2d/Queue=%2d %s\n",
918 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
919 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
920 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
921 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
922 
923 	/* if checksum mode */
924 	if (cur_fwd_eng == &csum_fwd_engine) {
925 		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
926 		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
927 	}
928 
929 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
930 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
931 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
932 #endif
933 }
934 
935 static void
936 flush_fwd_rx_queues(void)
937 {
938 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
939 	portid_t  rxp;
940 	portid_t port_id;
941 	queueid_t rxq;
942 	uint16_t  nb_rx;
943 	uint16_t  i;
944 	uint8_t   j;
945 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
946 	uint64_t timer_period;
947 
948 	/* convert to number of cycles */
949 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
950 
951 	for (j = 0; j < 2; j++) {
952 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
953 			for (rxq = 0; rxq < nb_rxq; rxq++) {
954 				port_id = fwd_ports_ids[rxp];
955 				/*
956 				 * testpmd can get stuck in the do/while loop
957 				 * below if rte_eth_rx_burst() always returns
958 				 * nonzero packets, so a timer is added to
959 				 * exit this loop after the 1-second timeout.
960 				 */
961 				prev_tsc = rte_rdtsc();
962 				do {
963 					nb_rx = rte_eth_rx_burst(port_id, rxq,
964 						pkts_burst, MAX_PKT_BURST);
965 					for (i = 0; i < nb_rx; i++)
966 						rte_pktmbuf_free(pkts_burst[i]);
967 
968 					cur_tsc = rte_rdtsc();
969 					diff_tsc = cur_tsc - prev_tsc;
970 					timer_tsc += diff_tsc;
971 				} while ((nb_rx > 0) &&
972 					(timer_tsc < timer_period));
973 				timer_tsc = 0;
974 			}
975 		}
976 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
977 	}
978 }
979 
980 static void
981 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
982 {
983 	struct fwd_stream **fsm;
984 	streamid_t nb_fs;
985 	streamid_t sm_id;
986 #ifdef RTE_LIBRTE_BITRATE
987 	uint64_t tics_per_1sec;
988 	uint64_t tics_datum;
989 	uint64_t tics_current;
990 	uint8_t idx_port, cnt_ports;
991 
992 	cnt_ports = rte_eth_dev_count();
993 	tics_datum = rte_rdtsc();
994 	tics_per_1sec = rte_get_timer_hz();
995 #endif
996 	fsm = &fwd_streams[fc->stream_idx];
997 	nb_fs = fc->stream_nb;
998 	do {
999 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1000 			(*pkt_fwd)(fsm[sm_id]);
1001 #ifdef RTE_LIBRTE_BITRATE
1002 		if (bitrate_enabled != 0 &&
1003 				bitrate_lcore_id == rte_lcore_id()) {
1004 			tics_current = rte_rdtsc();
1005 			if (tics_current - tics_datum >= tics_per_1sec) {
1006 				/* Periodic bitrate calculation */
1007 				for (idx_port = 0;
1008 						idx_port < cnt_ports;
1009 						idx_port++)
1010 					rte_stats_bitrate_calc(bitrate_data,
1011 						idx_port);
1012 				tics_datum = tics_current;
1013 			}
1014 		}
1015 #endif
1016 #ifdef RTE_LIBRTE_LATENCY_STATS
1017 		if (latencystats_enabled != 0 &&
1018 				latencystats_lcore_id == rte_lcore_id())
1019 			rte_latencystats_update();
1020 #endif
1021 
1022 	} while (! fc->stopped);
1023 }
1024 
1025 static int
1026 start_pkt_forward_on_core(void *fwd_arg)
1027 {
1028 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1029 			     cur_fwd_config.fwd_eng->packet_fwd);
1030 	return 0;
1031 }
1032 
1033 /*
1034  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1035  * Used to start communication flows in network loopback test configurations.
1036  */
1037 static int
1038 run_one_txonly_burst_on_core(void *fwd_arg)
1039 {
1040 	struct fwd_lcore *fwd_lc;
1041 	struct fwd_lcore tmp_lcore;
1042 
1043 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1044 	tmp_lcore = *fwd_lc;
1045 	tmp_lcore.stopped = 1;
1046 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1047 	return 0;
1048 }
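/*
 * Because tmp_lcore.stopped is set to 1 before the call, the do/while
 * loop in run_pkt_fwd_on_lcore() exits after a single pass, i.e. exactly
 * one burst is sent per stream.
 */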
1049 
1050 /*
1051  * Launch packet forwarding:
1052  *     - Setup per-port forwarding context.
1053  *     - launch logical cores with their forwarding configuration.
1054  */
1055 static void
1056 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1057 {
1058 	port_fwd_begin_t port_fwd_begin;
1059 	unsigned int i;
1060 	unsigned int lc_id;
1061 	int diag;
1062 
1063 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1064 	if (port_fwd_begin != NULL) {
1065 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1066 			(*port_fwd_begin)(fwd_ports_ids[i]);
1067 	}
1068 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1069 		lc_id = fwd_lcores_cpuids[i];
1070 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1071 			fwd_lcores[i]->stopped = 0;
1072 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1073 						     fwd_lcores[i], lc_id);
1074 			if (diag != 0)
1075 				printf("launch lcore %u failed - diag=%d\n",
1076 				       lc_id, diag);
1077 		}
1078 	}
1079 }
1080 
1081 /*
1082  * Launch packet forwarding configuration.
1083  */
1084 void
1085 start_packet_forwarding(int with_tx_first)
1086 {
1087 	port_fwd_begin_t port_fwd_begin;
1088 	port_fwd_end_t  port_fwd_end;
1089 	struct rte_port *port;
1090 	unsigned int i;
1091 	portid_t   pt_id;
1092 	streamid_t sm_id;
1093 
1094 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1095 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1096 
1097 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1098 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1099 
1100 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1101 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1102 		(!nb_rxq || !nb_txq))
1103 		rte_exit(EXIT_FAILURE,
1104 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1105 			cur_fwd_eng->fwd_mode_name);
1106 
1107 	if (all_ports_started() == 0) {
1108 		printf("Not all ports were started\n");
1109 		return;
1110 	}
1111 	if (test_done == 0) {
1112 		printf("Packet forwarding already started\n");
1113 		return;
1114 	}
1115 
1116 	if (init_fwd_streams() < 0) {
1117 		printf("Fail from init_fwd_streams()\n");
1118 		return;
1119 	}
1120 
1121 	if (dcb_test) {
1122 		for (i = 0; i < nb_fwd_ports; i++) {
1123 			pt_id = fwd_ports_ids[i];
1124 			port = &ports[pt_id];
1125 			if (!port->dcb_flag) {
1126 				printf("In DCB mode, all forwarding ports must "
1127 				printf("In DCB mode, all forwarding ports must "
1128 				       "be configured in this mode.\n");
1129 			}
1130 		}
1131 		if (nb_fwd_lcores == 1) {
1132 			printf("In DCB mode, the number of forwarding "
1133 			       "cores should be larger than 1.\n");
1134 			return;
1135 		}
1136 	}
1137 	test_done = 0;
1138 
1139 	if (!no_flush_rx)
1140 		flush_fwd_rx_queues();
1141 
1142 	fwd_config_setup();
1143 	pkt_fwd_config_display(&cur_fwd_config);
1144 	rxtx_config_display();
1145 
1146 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1147 		pt_id = fwd_ports_ids[i];
1148 		port = &ports[pt_id];
1149 		rte_eth_stats_get(pt_id, &port->stats);
1150 		port->tx_dropped = 0;
1151 
1152 		map_port_queue_stats_mapping_registers(pt_id, port);
1153 	}
1154 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1155 		fwd_streams[sm_id]->rx_packets = 0;
1156 		fwd_streams[sm_id]->tx_packets = 0;
1157 		fwd_streams[sm_id]->fwd_dropped = 0;
1158 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1159 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1160 
1161 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1162 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1163 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1164 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1165 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1166 #endif
1167 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1168 		fwd_streams[sm_id]->core_cycles = 0;
1169 #endif
1170 	}
1171 	if (with_tx_first) {
1172 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1173 		if (port_fwd_begin != NULL) {
1174 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1175 				(*port_fwd_begin)(fwd_ports_ids[i]);
1176 		}
1177 		while (with_tx_first--) {
1178 			launch_packet_forwarding(
1179 					run_one_txonly_burst_on_core);
1180 			rte_eal_mp_wait_lcore();
1181 		}
1182 		port_fwd_end = tx_only_engine.port_fwd_end;
1183 		if (port_fwd_end != NULL) {
1184 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1185 				(*port_fwd_end)(fwd_ports_ids[i]);
1186 		}
1187 	}
1188 	launch_packet_forwarding(start_pkt_forward_on_core);
1189 }
1190 
1191 void
1192 stop_packet_forwarding(void)
1193 {
1194 	struct rte_eth_stats stats;
1195 	struct rte_port *port;
1196 	port_fwd_end_t  port_fwd_end;
1197 	int i;
1198 	portid_t   pt_id;
1199 	streamid_t sm_id;
1200 	lcoreid_t  lc_id;
1201 	uint64_t total_recv;
1202 	uint64_t total_xmit;
1203 	uint64_t total_rx_dropped;
1204 	uint64_t total_tx_dropped;
1205 	uint64_t total_rx_nombuf;
1206 	uint64_t tx_dropped;
1207 	uint64_t rx_bad_ip_csum;
1208 	uint64_t rx_bad_l4_csum;
1209 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1210 	uint64_t fwd_cycles;
1211 #endif
1212 	static const char *acc_stats_border = "+++++++++++++++";
1213 
1214 	if (test_done) {
1215 		printf("Packet forwarding not started\n");
1216 		return;
1217 	}
1218 	printf("Telling cores to stop...");
1219 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1220 		fwd_lcores[lc_id]->stopped = 1;
1221 	printf("\nWaiting for lcores to finish...\n");
1222 	rte_eal_mp_wait_lcore();
1223 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1224 	if (port_fwd_end != NULL) {
1225 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1226 			pt_id = fwd_ports_ids[i];
1227 			(*port_fwd_end)(pt_id);
1228 		}
1229 	}
1230 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1231 	fwd_cycles = 0;
1232 #endif
1233 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1234 		if (cur_fwd_config.nb_fwd_streams >
1235 		    cur_fwd_config.nb_fwd_ports) {
1236 			fwd_stream_stats_display(sm_id);
1237 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1238 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1239 		} else {
1240 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1241 				fwd_streams[sm_id];
1242 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1243 				fwd_streams[sm_id];
1244 		}
1245 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1246 		tx_dropped = (uint64_t) (tx_dropped +
1247 					 fwd_streams[sm_id]->fwd_dropped);
1248 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1249 
1250 		rx_bad_ip_csum =
1251 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1252 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1253 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1254 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1255 							rx_bad_ip_csum;
1256 
1257 		rx_bad_l4_csum =
1258 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1259 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1260 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1261 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1262 							rx_bad_l4_csum;
1263 
1264 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1265 		fwd_cycles = (uint64_t) (fwd_cycles +
1266 					 fwd_streams[sm_id]->core_cycles);
1267 #endif
1268 	}
1269 	total_recv = 0;
1270 	total_xmit = 0;
1271 	total_rx_dropped = 0;
1272 	total_tx_dropped = 0;
1273 	total_rx_nombuf  = 0;
1274 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1275 		pt_id = fwd_ports_ids[i];
1276 
1277 		port = &ports[pt_id];
1278 		rte_eth_stats_get(pt_id, &stats);
1279 		stats.ipackets -= port->stats.ipackets;
1280 		port->stats.ipackets = 0;
1281 		stats.opackets -= port->stats.opackets;
1282 		port->stats.opackets = 0;
1283 		stats.ibytes   -= port->stats.ibytes;
1284 		port->stats.ibytes = 0;
1285 		stats.obytes   -= port->stats.obytes;
1286 		port->stats.obytes = 0;
1287 		stats.imissed  -= port->stats.imissed;
1288 		port->stats.imissed = 0;
1289 		stats.oerrors  -= port->stats.oerrors;
1290 		port->stats.oerrors = 0;
1291 		stats.rx_nombuf -= port->stats.rx_nombuf;
1292 		port->stats.rx_nombuf = 0;
1293 
1294 		total_recv += stats.ipackets;
1295 		total_xmit += stats.opackets;
1296 		total_rx_dropped += stats.imissed;
1297 		total_tx_dropped += port->tx_dropped;
1298 		total_rx_nombuf  += stats.rx_nombuf;
1299 
1300 		fwd_port_stats_display(pt_id, &stats);
1301 	}
1302 	printf("\n  %s Accumulated forward statistics for all ports"
1303 	       "%s\n",
1304 	       acc_stats_border, acc_stats_border);
1305 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1306 	       "%-"PRIu64"\n"
1307 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1308 	       "%-"PRIu64"\n",
1309 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1310 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1311 	if (total_rx_nombuf > 0)
1312 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1313 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1314 	       "%s\n",
1315 	       acc_stats_border, acc_stats_border);
1316 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1317 	if (total_recv > 0)
1318 		printf("\n  CPU cycles/packet=%u (total cycles="
1319 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1320 		       (unsigned int)(fwd_cycles / total_recv),
1321 		       fwd_cycles, total_recv);
1322 #endif
1323 	printf("\nDone.\n");
1324 	test_done = 1;
1325 }
1326 
1327 void
1328 dev_set_link_up(portid_t pid)
1329 {
1330 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1331 		printf("\nSet link up failed.\n");
1332 }
1333 
1334 void
1335 dev_set_link_down(portid_t pid)
1336 {
1337 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1338 		printf("\nSet link down failed.\n");
1339 }
1340 
1341 static int
1342 all_ports_started(void)
1343 {
1344 	portid_t pi;
1345 	struct rte_port *port;
1346 
1347 	RTE_ETH_FOREACH_DEV(pi) {
1348 		port = &ports[pi];
1349 		/* Check if there is a port which is not started */
1350 		if ((port->port_status != RTE_PORT_STARTED) &&
1351 			(port->slave_flag == 0))
1352 			return 0;
1353 	}
1354 
1355 	/* all ports are started */
1356 	return 1;
1357 }
1358 
1359 int
1360 all_ports_stopped(void)
1361 {
1362 	portid_t pi;
1363 	struct rte_port *port;
1364 
1365 	RTE_ETH_FOREACH_DEV(pi) {
1366 		port = &ports[pi];
1367 		if ((port->port_status != RTE_PORT_STOPPED) &&
1368 			(port->slave_flag == 0))
1369 			return 0;
1370 	}
1371 
1372 	return 1;
1373 }
1374 
1375 int
1376 port_is_started(portid_t port_id)
1377 {
1378 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1379 		return 0;
1380 
1381 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1382 		return 0;
1383 
1384 	return 1;
1385 }
1386 
1387 static int
1388 port_is_closed(portid_t port_id)
1389 {
1390 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1391 		return 0;
1392 
1393 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1394 		return 0;
1395 
1396 	return 1;
1397 }
1398 
1399 int
1400 start_port(portid_t pid)
1401 {
1402 	int diag, need_check_link_status = -1;
1403 	portid_t pi;
1404 	queueid_t qi;
1405 	struct rte_port *port;
1406 	struct ether_addr mac_addr;
1407 	enum rte_eth_event_type event_type;
1408 
1409 	if (port_id_is_invalid(pid, ENABLED_WARN))
1410 		return 0;
1411 
1412 	if (dcb_config)
1413 		dcb_test = 1;
1414 	RTE_ETH_FOREACH_DEV(pi) {
1415 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1416 			continue;
1417 
1418 		need_check_link_status = 0;
1419 		port = &ports[pi];
1420 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1421 						 RTE_PORT_HANDLING) == 0) {
1422 			printf("Port %d is not stopped\n", pi);
1423 			continue;
1424 		}
1425 
1426 		if (port->need_reconfig > 0) {
1427 			port->need_reconfig = 0;
1428 
1429 			printf("Configuring Port %d (socket %u)\n", pi,
1430 					port->socket_id);
1431 			/* configure port */
1432 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1433 						&(port->dev_conf));
1434 			if (diag != 0) {
1435 				if (rte_atomic16_cmpset(&(port->port_status),
1436 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1437 					printf("Port %d cannot be set back "
1438 							"to stopped\n", pi);
1439 				printf("Fail to configure port %d\n", pi);
1440 				/* try to reconfigure port next time */
1441 				port->need_reconfig = 1;
1442 				return -1;
1443 			}
1444 		}
1445 		if (port->need_reconfig_queues > 0) {
1446 			port->need_reconfig_queues = 0;
1447 			/* setup tx queues */
1448 			for (qi = 0; qi < nb_txq; qi++) {
1449 				if ((numa_support) &&
1450 					(txring_numa[pi] != NUMA_NO_CONFIG))
1451 					diag = rte_eth_tx_queue_setup(pi, qi,
1452 						nb_txd, txring_numa[pi],
1453 						&(port->tx_conf));
1454 				else
1455 					diag = rte_eth_tx_queue_setup(pi, qi,
1456 						nb_txd, port->socket_id,
1457 						&(port->tx_conf));
1458 
1459 				if (diag == 0)
1460 					continue;
1461 
1462 				/* Fail to setup tx queue, return */
1463 				if (rte_atomic16_cmpset(&(port->port_status),
1464 							RTE_PORT_HANDLING,
1465 							RTE_PORT_STOPPED) == 0)
1466 					printf("Port %d cannot be set back "
1467 							"to stopped\n", pi);
1468 				printf("Fail to configure port %d tx queues\n", pi);
1469 				/* try to reconfigure queues next time */
1470 				port->need_reconfig_queues = 1;
1471 				return -1;
1472 			}
1473 			/* setup rx queues */
1474 			for (qi = 0; qi < nb_rxq; qi++) {
1475 				if ((numa_support) &&
1476 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1477 					struct rte_mempool * mp =
1478 						mbuf_pool_find(rxring_numa[pi]);
1479 					if (mp == NULL) {
1480 						printf("Failed to setup RX queue:"
1481 							" No mempool allocation"
1482 							" on socket %d\n",
1483 							rxring_numa[pi]);
1484 						return -1;
1485 					}
1486 
1487 					diag = rte_eth_rx_queue_setup(pi, qi,
1488 					     nb_rxd, rxring_numa[pi],
1489 					     &(port->rx_conf), mp);
1490 				} else {
1491 					struct rte_mempool *mp =
1492 						mbuf_pool_find(port->socket_id);
1493 					if (mp == NULL) {
1494 						printf("Failed to setup RX queue:"
1495 							" No mempool allocation"
1496 							" on socket %d\n",
1497 							port->socket_id);
1498 						return -1;
1499 					}
1500 					diag = rte_eth_rx_queue_setup(pi, qi,
1501 					     nb_rxd, port->socket_id,
1502 					     &(port->rx_conf), mp);
1503 				}
1504 				if (diag == 0)
1505 					continue;
1506 
1507 				/* Fail to setup rx queue, return */
1508 				if (rte_atomic16_cmpset(&(port->port_status),
1509 							RTE_PORT_HANDLING,
1510 							RTE_PORT_STOPPED) == 0)
1511 					printf("Port %d cannot be set back "
1512 							"to stopped\n", pi);
1513 				printf("Fail to configure port %d rx queues\n", pi);
1514 				/* try to reconfigure queues next time */
1515 				port->need_reconfig_queues = 1;
1516 				return -1;
1517 			}
1518 		}
1519 
1520 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1521 		     event_type < RTE_ETH_EVENT_MAX;
1522 		     event_type++) {
1523 			diag = rte_eth_dev_callback_register(pi,
1524 							event_type,
1525 							eth_event_callback,
1526 							NULL);
1527 			if (diag) {
1528 				printf("Failed to setup event callback for event %d\n",
1529 					event_type);
1530 				return -1;
1531 			}
1532 		}
1533 
1534 		/* start port */
1535 		if (rte_eth_dev_start(pi) < 0) {
1536 			printf("Fail to start port %d\n", pi);
1537 
1538 			/* failed to start the port, set it back to stopped */
1539 			if (rte_atomic16_cmpset(&(port->port_status),
1540 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1541 				printf("Port %d cannot be set back to "
1542 							"stopped\n", pi);
1543 			continue;
1544 		}
1545 
1546 		if (rte_atomic16_cmpset(&(port->port_status),
1547 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1548 			printf("Port %d cannot be set to started\n", pi);
1549 
1550 		rte_eth_macaddr_get(pi, &mac_addr);
1551 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1552 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1553 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1554 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1555 
1556 		/* at least one port started, need to check link status */
1557 		need_check_link_status = 1;
1558 	}
1559 
1560 	if (need_check_link_status == 1 && !no_link_check)
1561 		check_all_ports_link_status(RTE_PORT_ALL);
1562 	else if (need_check_link_status == 0)
1563 		printf("Please stop the ports first\n");
1564 
1565 	printf("Done\n");
1566 	return 0;
1567 }
1568 
1569 void
1570 stop_port(portid_t pid)
1571 {
1572 	portid_t pi;
1573 	struct rte_port *port;
1574 	int need_check_link_status = 0;
1575 
1576 	if (dcb_test) {
1577 		dcb_test = 0;
1578 		dcb_config = 0;
1579 	}
1580 
1581 	if (port_id_is_invalid(pid, ENABLED_WARN))
1582 		return;
1583 
1584 	printf("Stopping ports...\n");
1585 
1586 	RTE_ETH_FOREACH_DEV(pi) {
1587 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1588 			continue;
1589 
1590 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1591 			printf("Please remove port %d from forwarding configuration.\n", pi);
1592 			continue;
1593 		}
1594 
1595 		if (port_is_bonding_slave(pi)) {
1596 			printf("Please remove port %d from bonded device.\n", pi);
1597 			continue;
1598 		}
1599 
1600 		port = &ports[pi];
1601 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1602 						RTE_PORT_HANDLING) == 0)
1603 			continue;
1604 
1605 		rte_eth_dev_stop(pi);
1606 
1607 		if (rte_atomic16_cmpset(&(port->port_status),
1608 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1609 			printf("Port %d cannot be set to stopped\n", pi);
1610 		need_check_link_status = 1;
1611 	}
1612 	if (need_check_link_status && !no_link_check)
1613 		check_all_ports_link_status(RTE_PORT_ALL);
1614 
1615 	printf("Done\n");
1616 }
1617 
1618 void
1619 close_port(portid_t pid)
1620 {
1621 	portid_t pi;
1622 	struct rte_port *port;
1623 
1624 	if (port_id_is_invalid(pid, ENABLED_WARN))
1625 		return;
1626 
1627 	printf("Closing ports...\n");
1628 
1629 	RTE_ETH_FOREACH_DEV(pi) {
1630 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1631 			continue;
1632 
1633 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1634 			printf("Please remove port %d from forwarding configuration.\n", pi);
1635 			continue;
1636 		}
1637 
1638 		if (port_is_bonding_slave(pi)) {
1639 			printf("Please remove port %d from bonded device.\n", pi);
1640 			continue;
1641 		}
1642 
1643 		port = &ports[pi];
1644 		if (rte_atomic16_cmpset(&(port->port_status),
1645 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1646 			printf("Port %d is already closed\n", pi);
1647 			continue;
1648 		}
1649 
1650 		if (rte_atomic16_cmpset(&(port->port_status),
1651 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1652 			printf("Port %d is not stopped\n", pi);
1653 			continue;
1654 		}
1655 
1656 		if (port->flow_list)
1657 			port_flow_flush(pi);
1658 		rte_eth_dev_close(pi);
1659 
1660 		if (rte_atomic16_cmpset(&(port->port_status),
1661 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1662 			printf("Port %d cannot be set to closed\n", pi);
1663 	}
1664 
1665 	printf("Done\n");
1666 }
1667 
1668 void
1669 attach_port(char *identifier)
1670 {
1671 	portid_t pi = 0;
1672 	unsigned int socket_id;
1673 
1674 	printf("Attaching a new port...\n");
1675 
1676 	if (identifier == NULL) {
1677 		printf("Invalid parameters are specified\n");
1678 		return;
1679 	}
1680 
1681 	if (rte_eth_dev_attach(identifier, &pi))
1682 		return;
1683 
1684 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1685 	/* if socket_id is invalid, set to 0 */
1686 	if (check_socket_id(socket_id) < 0)
1687 		socket_id = 0;
1688 	reconfig(pi, socket_id);
1689 	rte_eth_promiscuous_enable(pi);
1690 
1691 	nb_ports = rte_eth_dev_count();
1692 
1693 	ports[pi].port_status = RTE_PORT_STOPPED;
1694 
1695 	printf("Port %d is attached. Total ports is now %d\n", pi, nb_ports);
1696 	printf("Done\n");
1697 }
1698 
1699 void
1700 detach_port(uint8_t port_id)
1701 {
1702 	char name[RTE_ETH_NAME_MAX_LEN];
1703 
1704 	printf("Detaching a port...\n");
1705 
1706 	if (!port_is_closed(port_id)) {
1707 		printf("Please close port first\n");
1708 		return;
1709 	}
1710 
1711 	if (ports[port_id].flow_list)
1712 		port_flow_flush(port_id);
1713 
1714 	if (rte_eth_dev_detach(port_id, name))
1715 		return;
1716 
1717 	nb_ports = rte_eth_dev_count();
1718 
1719 	printf("Port '%s' is detached. Total ports is now %d\n",
1720 			name, nb_ports);
1721 	printf("Done\n");
1722 	return;
1723 }
1724 
1725 void
1726 pmd_test_exit(void)
1727 {
1728 	portid_t pt_id;
1729 
1730 	if (test_done == 0)
1731 		stop_packet_forwarding();
1732 
1733 	if (ports != NULL) {
1734 		no_link_check = 1;
1735 		RTE_ETH_FOREACH_DEV(pt_id) {
1736 			printf("\nShutting down port %d...\n", pt_id);
1737 			fflush(stdout);
1738 			stop_port(pt_id);
1739 			close_port(pt_id);
1740 		}
1741 	}
1742 	printf("\nBye...\n");
1743 }
1744 
1745 typedef void (*cmd_func_t)(void);
1746 struct pmd_test_command {
1747 	const char *cmd_name;
1748 	cmd_func_t cmd_func;
1749 };
1750 
1751 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1752 
1753 /* Check the link status of all ports for up to 9 s, and print the final status */
1754 static void
1755 check_all_ports_link_status(uint32_t port_mask)
1756 {
1757 #define CHECK_INTERVAL 100 /* 100ms */
1758 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1759 	uint8_t portid, count, all_ports_up, print_flag = 0;
1760 	struct rte_eth_link link;
1761 
1762 	printf("Checking link statuses...\n");
1763 	fflush(stdout);
1764 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1765 		all_ports_up = 1;
1766 		RTE_ETH_FOREACH_DEV(portid) {
1767 			if ((port_mask & (1 << portid)) == 0)
1768 				continue;
1769 			memset(&link, 0, sizeof(link));
1770 			rte_eth_link_get_nowait(portid, &link);
1771 			/* print link status if flag set */
1772 			if (print_flag == 1) {
1773 				if (link.link_status)
1774 					printf("Port %d Link Up - speed %u "
1775 						"Mbps - %s\n", (uint8_t)portid,
1776 						(unsigned)link.link_speed,
1777 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1778 					("full-duplex") : ("half-duplex"));
1779 				else
1780 					printf("Port %d Link Down\n",
1781 						(uint8_t)portid);
1782 				continue;
1783 			}
1784 			/* clear all_ports_up flag if any link down */
1785 			if (link.link_status == ETH_LINK_DOWN) {
1786 				all_ports_up = 0;
1787 				break;
1788 			}
1789 		}
1790 		/* after finally printing all link status, get out */
1791 		if (print_flag == 1)
1792 			break;
1793 
1794 		if (all_ports_up == 0) {
1795 			fflush(stdout);
1796 			rte_delay_ms(CHECK_INTERVAL);
1797 		}
1798 
1799 		/* set the print_flag if all ports up or timeout */
1800 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1801 			print_flag = 1;
1802 		}
1803 
1804 		if (lsc_interrupt)
1805 			break;
1806 	}
1807 }
1808 
1809 static void
1810 rmv_event_callback(void *arg)
1811 {
1812 	struct rte_eth_dev *dev;
1813 	struct rte_devargs *da;
1814 	char name[32] = "";
1815 	uint8_t port_id = (intptr_t)arg;
1816 
1817 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1818 	dev = &rte_eth_devices[port_id];
1819 	da = dev->device->devargs;
1820 
1821 	stop_port(port_id);
1822 	close_port(port_id);
1823 	if (da->type == RTE_DEVTYPE_VIRTUAL)
1824 		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1825 	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1826 		rte_pci_device_name(&da->pci.addr, name, sizeof(name));
1827 	printf("removing device %s\n", name);
1828 	rte_eal_dev_detach(dev->device);
1829 	dev->state = RTE_ETH_DEV_UNUSED;
1830 }
1831 
1832 /* This function is used by the interrupt thread */
1833 static int
1834 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
1835 		  void *ret_param)
1836 {
1837 	static const char * const event_desc[] = {
1838 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1839 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1840 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1841 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1842 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1843 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1844 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1845 		[RTE_ETH_EVENT_MAX] = NULL,
1846 	};
1847 
1848 	RTE_SET_USED(param);
1849 	RTE_SET_USED(ret_param);
1850 
1851 	if (type >= RTE_ETH_EVENT_MAX) {
1852 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1853 			port_id, __func__, type);
1854 		fflush(stderr);
1855 	} else if (event_print_mask & (UINT32_C(1) << type)) {
1856 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1857 			event_desc[type]);
1858 		fflush(stdout);
1859 	}
1860 
1861 	switch (type) {
1862 	case RTE_ETH_EVENT_INTR_RMV:
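		/*
		 * Detaching the device must not happen in the interrupt
		 * thread delivering this event, so defer rmv_event_callback()
		 * by 100000 us (100 ms) through the EAL alarm API.
		 */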
1863 		if (rte_eal_alarm_set(100000,
1864 				rmv_event_callback, (void *)(intptr_t)port_id))
1865 			fprintf(stderr, "Could not set up deferred device removal\n");
1866 		break;
1867 	default:
1868 		break;
1869 	}
1870 	return 0;
1871 }
1872 
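/*
 * Apply the user-supplied TX queue-to-stats-counter mappings to one port.
 * Returns 0 on success, marking the port if at least one mapping applied,
 * or the first non-zero diagnostic from the ethdev layer.
 */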
1873 static int
1874 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1875 {
1876 	uint16_t i;
1877 	int diag;
1878 	uint8_t mapping_found = 0;
1879 
1880 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1881 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1882 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1883 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1884 					tx_queue_stats_mappings[i].queue_id,
1885 					tx_queue_stats_mappings[i].stats_counter_id);
1886 			if (diag != 0)
1887 				return diag;
1888 			mapping_found = 1;
1889 		}
1890 	}
1891 	if (mapping_found)
1892 		port->tx_queue_stats_mapping_enabled = 1;
1893 	return 0;
1894 }
1895 
1896 static int
1897 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1898 {
1899 	uint16_t i;
1900 	int diag;
1901 	uint8_t mapping_found = 0;
1902 
1903 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1904 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1905 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1906 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1907 					rx_queue_stats_mappings[i].queue_id,
1908 					rx_queue_stats_mappings[i].stats_counter_id);
1909 			if (diag != 0)
1910 				return diag;
1911 			mapping_found = 1;
1912 		}
1913 	}
1914 	if (mapping_found)
1915 		port->rx_queue_stats_mapping_enabled = 1;
1916 	return 0;
1917 }
1918 
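/*
 * Program both TX and RX stats mappings for a port. -ENOTSUP from the PMD
 * is tolerated (the feature is simply disabled); any other failure is fatal.
 */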
1919 static void
1920 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1921 {
1922 	int diag = 0;
1923 
1924 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1925 	if (diag != 0) {
1926 		if (diag == -ENOTSUP) {
1927 			port->tx_queue_stats_mapping_enabled = 0;
1928 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
1929 		}
1930 		else
1931 			rte_exit(EXIT_FAILURE,
1932 					"set_tx_queue_stats_mapping_registers "
1933 					"failed for port id=%d diag=%d\n",
1934 					pi, diag);
1935 	}
1936 
1937 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1938 	if (diag != 0) {
1939 		if (diag == -ENOTSUP) {
1940 			port->rx_queue_stats_mapping_enabled = 0;
1941 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
1942 		}
1943 		else
1944 			rte_exit(EXIT_FAILURE,
1945 					"set_rx_queue_stats_mapping_registers "
1946 					"failed for port id=%d diag=%d\n",
1947 					pi, diag);
1948 	}
1949 }
1950 
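/*
 * Start from the PMD's default RX/TX ring configuration reported in
 * dev_info and override only the knobs the user set on the command line.
 */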
1951 static void
1952 rxtx_port_config(struct rte_port *port)
1953 {
1954 	port->rx_conf = port->dev_info.default_rxconf;
1955 	port->tx_conf = port->dev_info.default_txconf;
1956 
1957 	/* Check if any RX/TX parameters have been passed */
1958 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1959 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1960 
1961 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1962 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1963 
1964 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1965 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1966 
1967 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1968 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1969 
1970 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1971 		port->rx_conf.rx_drop_en = rx_drop_en;
1972 
1973 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1974 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1975 
1976 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1977 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1978 
1979 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1980 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1981 
1982 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1983 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1984 
1985 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1986 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1987 
1988 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1989 		port->tx_conf.txq_flags = txq_flags;
1990 }
1991 
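/*
 * Build the default configuration of every probed port: RSS is enabled
 * whenever more than one RX queue is requested, and LSC/RMV interrupts
 * are enabled only when both requested and advertised by the device.
 */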
1992 void
1993 init_port_config(void)
1994 {
1995 	portid_t pid;
1996 	struct rte_port *port;
1997 
1998 	RTE_ETH_FOREACH_DEV(pid) {
1999 		port = &ports[pid];
2000 		port->dev_conf.rxmode = rx_mode;
2001 		port->dev_conf.fdir_conf = fdir_conf;
2002 		if (nb_rxq > 1) {
2003 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2004 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2005 		} else {
2006 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2007 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2008 		}
2009 
2010 		if (port->dcb_flag == 0) {
2011 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2012 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2013 			else
2014 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2015 		}
2016 
2017 		rxtx_port_config(port);
2018 
2019 		rte_eth_macaddr_get(pid, &port->eth_addr);
2020 
2021 		map_port_queue_stats_mapping_registers(pid, port);
2022 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2023 		rte_pmd_ixgbe_bypass_init(pid);
2024 #endif
2025 
2026 		if (lsc_interrupt &&
2027 		    (rte_eth_devices[pid].data->dev_flags &
2028 		     RTE_ETH_DEV_INTR_LSC))
2029 			port->dev_conf.intr_conf.lsc = 1;
2030 		if (rmv_interrupt &&
2031 		    (rte_eth_devices[pid].data->dev_flags &
2032 		     RTE_ETH_DEV_INTR_RMV))
2033 			port->dev_conf.intr_conf.rmv = 1;
2034 	}
2035 }
2036 
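/* Helpers tracking which ports act as bonding slaves. */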
2037 void set_port_slave_flag(portid_t slave_pid)
2038 {
2039 	struct rte_port *port;
2040 
2041 	port = &ports[slave_pid];
2042 	port->slave_flag = 1;
2043 }
2044 
2045 void clear_port_slave_flag(portid_t slave_pid)
2046 {
2047 	struct rte_port *port;
2048 
2049 	port = &ports[slave_pid];
2050 	port->slave_flag = 0;
2051 }
2052 
2053 uint8_t port_is_bonding_slave(portid_t slave_pid)
2054 {
2055 	struct rte_port *port;
2056 
2057 	port = &ports[slave_pid];
2058 	return port->slave_flag;
2059 }
2060 
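/*
 * VLAN IDs inserted into the VMDQ+DCB pool map below; one tag per pool,
 * which is why the table matches the largest supported pool count (32).
 */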
2061 const uint16_t vlan_tags[] = {
2062 		0,  1,  2,  3,  4,  5,  6,  7,
2063 		8,  9, 10, 11,  12, 13, 14, 15,
2064 		16, 17, 18, 19, 20, 21, 22, 23,
2065 		24, 25, 26, 27, 28, 29, 30, 31
2066 };
2067 
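/*
 * Translate testpmd's DCB test parameters into a rte_eth_conf, either as
 * VMDQ+DCB (VT enabled) or as plain DCB with RSS (VT disabled).
 */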
2068 static int
2069 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2070 		 enum dcb_mode_enable dcb_mode,
2071 		 enum rte_eth_nb_tcs num_tcs,
2072 		 uint8_t pfc_en)
2073 {
2074 	uint8_t i;
2075 
2076 	/*
2077 	 * Builds up the correct configuration for DCB+VT based on the VLAN tags array
2078 	 * given above, and the number of traffic classes available for use.
2079 	 */
2080 	if (dcb_mode == DCB_VT_ENABLED) {
2081 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2082 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2083 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2084 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2085 
2086 		/* VMDQ+DCB RX and TX configurations */
2087 		vmdq_rx_conf->enable_default_pool = 0;
2088 		vmdq_rx_conf->default_pool = 0;
2089 		vmdq_rx_conf->nb_queue_pools =
2090 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2091 		vmdq_tx_conf->nb_queue_pools =
2092 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2093 
2094 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2095 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2096 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2097 			vmdq_rx_conf->pool_map[i].pools =
2098 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2099 		}
2100 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2101 			vmdq_rx_conf->dcb_tc[i] = i;
2102 			vmdq_tx_conf->dcb_tc[i] = i;
2103 		}
2104 
2105 		/* set DCB mode of RX and TX of multiple queues */
2106 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2107 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2108 	} else {
2109 		struct rte_eth_dcb_rx_conf *rx_conf =
2110 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2111 		struct rte_eth_dcb_tx_conf *tx_conf =
2112 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2113 
2114 		rx_conf->nb_tcs = num_tcs;
2115 		tx_conf->nb_tcs = num_tcs;
2116 
2117 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2118 			rx_conf->dcb_tc[i] = i % num_tcs;
2119 			tx_conf->dcb_tc[i] = i % num_tcs;
2120 		}
2121 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2122 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2123 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2124 	}
2125 
2126 	if (pfc_en)
2127 		eth_conf->dcb_capability_en =
2128 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2129 	else
2130 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2131 
2132 	return 0;
2133 }
2134 
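/*
 * Reconfigure one port for a DCB test run. A minimal sketch of a caller,
 * assuming an already-stopped port 0, 4 traffic classes, and priority flow
 * control enabled (hypothetical values, for illustration only):
 *
 *	if (init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1) != 0)
 *		printf("Cannot init DCB for port 0\n");
 *
 * The port must be reconfigured and restarted afterwards, since nb_rxq and
 * nb_txq are adjusted here.
 */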
2135 int
2136 init_port_dcb_config(portid_t pid,
2137 		     enum dcb_mode_enable dcb_mode,
2138 		     enum rte_eth_nb_tcs num_tcs,
2139 		     uint8_t pfc_en)
2140 {
2141 	struct rte_eth_conf port_conf;
2142 	struct rte_port *rte_port;
2143 	int retval;
2144 	uint16_t i;
2145 
2146 	rte_port = &ports[pid];
2147 
2148 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2149 	/* Enter DCB configuration status */
2150 	dcb_config = 1;
2151 
2152 	/* Set the configuration of DCB in VT mode and DCB in non-VT mode */
2153 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2154 	if (retval < 0)
2155 		return retval;
2156 	port_conf.rxmode.hw_vlan_filter = 1;
2157 
2158 	/**
2159 	 * Write the configuration into the device.
2160 	 * Set the numbers of RX & TX queues to 0, so
2161 	 * the RX & TX queues will not be set up.
2162 	 */
2163 	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2164 
2165 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2166 
2167 	/* If dev_info.vmdq_pool_base is greater than 0,
2168 	 * the queue id of vmdq pools is started after pf queues.
2169 	 */
2170 	if (dcb_mode == DCB_VT_ENABLED &&
2171 	    rte_port->dev_info.vmdq_pool_base > 0) {
2172 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2173 			" for port %d.\n", pid);
2174 		return -1;
2175 	}
2176 
2177 	/* Assume all ports in testpmd have the same DCB capability
2178 	 * and the same number of rxq and txq in DCB mode.
2179 	 */
2180 	if (dcb_mode == DCB_VT_ENABLED) {
2181 		if (rte_port->dev_info.max_vfs > 0) {
2182 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2183 			nb_txq = rte_port->dev_info.nb_tx_queues;
2184 		} else {
2185 			nb_rxq = rte_port->dev_info.max_rx_queues;
2186 			nb_txq = rte_port->dev_info.max_tx_queues;
2187 		}
2188 	} else {
2189 		/* if VT is disabled, use all PF queues */
2190 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2191 			nb_rxq = rte_port->dev_info.max_rx_queues;
2192 			nb_txq = rte_port->dev_info.max_tx_queues;
2193 		} else {
2194 			nb_rxq = (queueid_t)num_tcs;
2195 			nb_txq = (queueid_t)num_tcs;
2196 
2197 		}
2198 	}
2199 	rx_free_thresh = 64;
2200 
2201 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2202 
2203 	rxtx_port_config(rte_port);
2204 	/* VLAN filter */
2205 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2206 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2207 		rx_vft_set(pid, vlan_tags[i], 1);
2208 
2209 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2210 	map_port_queue_stats_mapping_registers(pid, rte_port);
2211 
2212 	rte_port->dcb_flag = 1;
2213 
2214 	return 0;
2215 }
2216 
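/*
 * Allocate the port array up front for RTE_MAX_ETHPORTS entries, so that
 * ports attached later by hot-plug fit without reallocation.
 */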
2217 static void
2218 init_port(void)
2219 {
2220 	/* Configuration of Ethernet ports. */
2221 	ports = rte_zmalloc("testpmd: ports",
2222 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2223 			    RTE_CACHE_LINE_SIZE);
2224 	if (ports == NULL) {
2225 		rte_exit(EXIT_FAILURE,
2226 				"rte_zmalloc(%d struct rte_port) failed\n",
2227 				RTE_MAX_ETHPORTS);
2228 	}
2229 }
2230 
2231 static void
2232 force_quit(void)
2233 {
2234 	pmd_test_exit();
2235 	prompt_exit();
2236 }
2237 
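/*
 * Periodic statistics display: clear the terminal with ANSI escape
 * sequences (ESC[2J, ESC[1;1H), then dump the stats of every forwarding
 * port.
 */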
2238 static void
2239 print_stats(void)
2240 {
2241 	uint8_t i;
2242 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2243 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2244 
2245 	/* Clear screen and move to top left */
2246 	printf("%s%s", clr, top_left);
2247 
2248 	printf("\nPort statistics ====================================");
2249 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2250 		nic_stats_display(fwd_ports_ids[i]);
2251 }
2252 
2253 static void
2254 signal_handler(int signum)
2255 {
2256 	if (signum == SIGINT || signum == SIGTERM) {
2257 		printf("\nSignal %d received, preparing to exit...\n",
2258 				signum);
2259 #ifdef RTE_LIBRTE_PDUMP
2260 		/* uninitialize packet capture framework */
2261 		rte_pdump_uninit();
2262 #endif
2263 #ifdef RTE_LIBRTE_LATENCY_STATS
2264 		rte_latencystats_uninit();
2265 #endif
2266 		force_quit();
2267 		/* exit with the expected status */
2268 		signal(signum, SIG_DFL);
2269 		kill(getpid(), signum);
2270 	}
2271 }
2272 
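/*
 * Startup sequence: init the EAL, allocate port structures, parse the
 * testpmd arguments, configure and start all ports in promiscuous mode,
 * then either hand control to the interactive prompt or start forwarding
 * and loop printing statistics until the user exits.
 */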
2273 int
2274 main(int argc, char** argv)
2275 {
2276 	int  diag;
2277 	uint8_t port_id;
2278 
2279 	signal(SIGINT, signal_handler);
2280 	signal(SIGTERM, signal_handler);
2281 
2282 	diag = rte_eal_init(argc, argv);
2283 	if (diag < 0)
2284 		rte_panic("Cannot init EAL\n");
2285 
2286 #ifdef RTE_LIBRTE_PDUMP
2287 	/* initialize packet capture framework */
2288 	rte_pdump_init(NULL);
2289 #endif
2290 
2291 	nb_ports = (portid_t) rte_eth_dev_count();
2292 	if (nb_ports == 0)
2293 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2294 
2295 	/* allocate port structures, and init them */
2296 	init_port();
2297 
2298 	set_def_fwd_config();
2299 	if (nb_lcores == 0)
2300 		rte_panic("Empty set of forwarding logical cores - check the "
2301 			  "core mask supplied in the command parameters\n");
2302 
2303 	/* Bitrate/latency stats disabled by default */
2304 #ifdef RTE_LIBRTE_BITRATE
2305 	bitrate_enabled = 0;
2306 #endif
2307 #ifdef RTE_LIBRTE_LATENCY_STATS
2308 	latencystats_enabled = 0;
2309 #endif
2310 
2311 	argc -= diag;
2312 	argv += diag;
2313 	if (argc > 1)
2314 		launch_args_parse(argc, argv);
2315 
2316 	if (tx_first && interactive)
2317 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2318 				"interactive mode.\n");
2319 	if (!nb_rxq && !nb_txq)
2320 		printf("Warning: Either rx or tx queues should be non-zero\n");
2321 
2322 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2323 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2324 		       "but nb_txq=%d will prevent RSS from being fully tested.\n",
2325 		       nb_rxq, nb_txq);
2326 
2327 	init_config();
2328 	if (start_port(RTE_PORT_ALL) != 0)
2329 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2330 
2331 	/* set all ports to promiscuous mode by default */
2332 	RTE_ETH_FOREACH_DEV(port_id)
2333 		rte_eth_promiscuous_enable(port_id);
2334 
2335 	/* Init metrics library */
2336 	rte_metrics_init(rte_socket_id());
2337 
2338 #ifdef RTE_LIBRTE_LATENCY_STATS
2339 	if (latencystats_enabled != 0) {
2340 		int ret = rte_latencystats_init(1, NULL);
2341 		if (ret)
2342 			printf("Warning: latencystats init()"
2343 				" returned error %d\n", ret);
2344 		printf("Latencystats running on lcore %d\n",
2345 			latencystats_lcore_id);
2346 	}
2347 #endif
2348 
2349 	/* Setup bitrate stats */
2350 #ifdef RTE_LIBRTE_BITRATE
2351 	if (bitrate_enabled != 0) {
2352 		bitrate_data = rte_stats_bitrate_create();
2353 		if (bitrate_data == NULL)
2354 			rte_exit(EXIT_FAILURE,
2355 				"Could not allocate bitrate data.\n");
2356 		rte_stats_bitrate_reg(bitrate_data);
2357 	}
2358 #endif
2359 
2360 #ifdef RTE_LIBRTE_CMDLINE
2361 	if (strlen(cmdline_filename) != 0)
2362 		cmdline_read_from_file(cmdline_filename);
2363 
2364 	if (interactive == 1) {
2365 		if (auto_start) {
2366 			printf("Start automatic packet forwarding\n");
2367 			start_packet_forwarding(0);
2368 		}
2369 		prompt();
2370 		pmd_test_exit();
2371 	} else
2372 #endif
2373 	{
2374 		char c;
2375 		int rc;
2376 
2377 		printf("No command line given: starting packet forwarding\n");
2378 		start_packet_forwarding(tx_first);
2379 		if (stats_period != 0) {
2380 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2381 			uint64_t timer_period;
2382 
2383 			/* Convert to number of cycles */
2384 			timer_period = stats_period * rte_get_timer_hz();
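			/*
			 * rte_get_timer_hz() returns timer cycles per second,
			 * so e.g. stats_period = 5 on a 2 GHz timer (an
			 * illustrative figure) yields 10e9 cycles between
			 * refreshes.
			 */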
2385 
2386 			while (1) {
2387 				cur_time = rte_get_timer_cycles();
2388 				diff_time += cur_time - prev_time;
2389 
2390 				if (diff_time >= timer_period) {
2391 					print_stats();
2392 					/* Reset the timer */
2393 					diff_time = 0;
2394 				}
2395 				/* Sleep to avoid unnecessary checks */
2396 				prev_time = cur_time;
2397 				sleep(1);
2398 			}
2399 		}
2400 
2401 		printf("Press enter to exit\n");
2402 		rc = read(0, &c, 1);
2403 		pmd_test_exit();
2404 		if (rc < 0)
2405 			return 1;
2406 	}
2407 
2408 	return 0;
2409 }
2410