xref: /dpdk/app/test-pmd/testpmd.c (revision fcee050aa1d74b3e65ea349f401728ece7cbdc50)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
78 #endif
79 #ifdef RTE_LIBRTE_PMD_XENVIRT
80 #include <rte_eth_xenvirt.h>
81 #endif
82 #ifdef RTE_LIBRTE_PDUMP
83 #include <rte_pdump.h>
84 #endif
85 #include <rte_flow.h>
86 #include <rte_metrics.h>
87 #ifdef RTE_LIBRTE_BITRATE
88 #include <rte_bitrate.h>
89 #endif
90 #ifdef RTE_LIBRTE_LATENCY_STATS
91 #include <rte_latencystats.h>
92 #endif
93 #include <rte_gro.h>
94 
95 #include "testpmd.h"
96 
97 uint16_t verbose_level = 0; /**< Silent by default. */
98 
99 /* use master lcore for command line? */
100 uint8_t interactive = 0;
101 uint8_t auto_start = 0;
102 uint8_t tx_first;
103 char cmdline_filename[PATH_MAX] = {0};
104 
105 /*
106  * NUMA support configuration.
107  * When set, the NUMA support attempts to dispatch the allocation of the
108  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
109  * probed ports among CPU sockets 0 and 1.
110  * Otherwise, all memory is allocated from CPU socket 0.
111  */
112 uint8_t numa_support = 1; /**< numa enabled by default */
113 
114 /*
115  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
116  * not configured.
117  */
118 uint8_t socket_num = UMA_NO_CONFIG;
119 
120 /*
121  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
122  */
123 uint8_t mp_anon = 0;
124 
125 /*
126  * Record the Ethernet address of peer target ports to which packets are
127  * forwarded.
128  * Must be instantiated with the Ethernet addresses of peer traffic generator
129  * ports.
130  */
131 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
132 portid_t nb_peer_eth_addrs = 0;
133 
134 /*
135  * Probed Target Environment.
136  */
137 struct rte_port *ports;	       /**< For all probed ethernet ports. */
138 portid_t nb_ports;             /**< Number of probed ethernet ports. */
139 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
140 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
141 
142 /*
143  * Test Forwarding Configuration.
144  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
145  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
146  */
147 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
148 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
149 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
150 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
151 
152 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
153 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
154 
155 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
156 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
157 
158 /*
159  * Forwarding engines.
160  */
161 struct fwd_engine *fwd_engines[] = {
162 	&io_fwd_engine,
163 	&mac_fwd_engine,
164 	&mac_swap_engine,
165 	&flow_gen_engine,
166 	&rx_only_engine,
167 	&tx_only_engine,
168 	&csum_fwd_engine,
169 	&icmp_echo_engine,
170 #ifdef RTE_LIBRTE_IEEE1588
171 	&ieee1588_fwd_engine,
172 #endif
173 	NULL,
174 };
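/*
 * Illustrative sketch (not part of this file's revision): because
 * fwd_engines[] is NULL-terminated, an engine can be found by its mode
 * name with a plain linear scan. The helper below is hypothetical and
 * only assumes the declarations above.
 */
#if 0
static struct fwd_engine *
lookup_fwd_engine(const char *mode_name)
{
	unsigned int i;

	for (i = 0; fwd_engines[i] != NULL; i++) {
		if (strcmp(fwd_engines[i]->fwd_mode_name, mode_name) == 0)
			return fwd_engines[i];
	}
	return NULL; /* unknown forwarding mode */
}
#endif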
175 
176 struct fwd_config cur_fwd_config;
177 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
178 uint32_t retry_enabled;
179 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
180 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
181 
182 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
183 uint32_t param_total_num_mbufs = 0;  /**< Number of mbufs in all pools, if
184                                       * specified on the command line. */
185 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
186 /*
187  * Configuration of packet segments used by the "txonly" processing engine.
188  */
189 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
190 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
191 	TXONLY_DEF_PACKET_LEN,
192 };
193 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
194 
195 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
196 /**< Split policy for packets to TX. */
197 
198 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
199 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
200 
201 /* whether the current configuration is in DCB mode; 0 means not in DCB mode */
202 uint8_t dcb_config = 0;
203 
204 /* Whether DCB is in testing state */
205 uint8_t dcb_test = 0;
206 
207 /*
208  * Configurable number of RX/TX queues.
209  */
210 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
211 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
212 
213 /*
214  * Configurable number of RX/TX ring descriptors.
215  */
216 #define RTE_TEST_RX_DESC_DEFAULT 128
217 #define RTE_TEST_TX_DESC_DEFAULT 512
218 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
219 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
220 
221 #define RTE_PMD_PARAM_UNSET -1
222 /*
223  * Configurable values of RX and TX ring threshold registers.
224  */
225 
226 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
227 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
228 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
229 
230 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
231 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
232 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
233 
234 /*
235  * Configurable value of RX free threshold.
236  */
237 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
238 
239 /*
240  * Configurable value of RX drop enable.
241  */
242 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
243 
244 /*
245  * Configurable value of TX free threshold.
246  */
247 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
248 
249 /*
250  * Configurable value of TX RS bit threshold.
251  */
252 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
253 
254 /*
255  * Configurable value of TX queue flags.
256  */
257 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
258 
259 /*
260  * Receive Side Scaling (RSS) configuration.
261  */
262 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
263 
264 /*
265  * Port topology configuration
266  */
267 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
268 
269 /*
270  * Avoid flushing all the RX streams before starting forwarding.
271  */
272 uint8_t no_flush_rx = 0; /* flush by default */
273 
274 /*
275  * Avoid checking the link status when starting/stopping a port.
276  */
277 uint8_t no_link_check = 0; /* check by default */
278 
279 /*
280  * Enable link status change notification
281  */
282 uint8_t lsc_interrupt = 1; /* enabled by default */
283 
284 /*
285  * Enable device removal notification.
286  */
287 uint8_t rmv_interrupt = 1; /* enabled by default */
288 
289 /*
290  * Display or mask ether events
291  * Default to all events except VF_MBOX
292  */
293 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
294 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
295 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
297 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
298 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
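/*
 * Illustrative sketch (not part of this file's revision): whether an
 * event type is displayed reduces to testing one bit of
 * event_print_mask, which is exactly the check performed later in
 * eth_event_callback().
 */
#if 0
static int
event_is_displayed(enum rte_eth_event_type type)
{
	return (event_print_mask & (UINT32_C(1) << type)) != 0;
}
#endif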
299 
300 /*
301  * NIC bypass mode configuration options.
302  */
303 
304 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
305 /* The NIC bypass watchdog timeout. */
306 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
307 #endif
308 
309 
310 #ifdef RTE_LIBRTE_LATENCY_STATS
311 
312 /*
313  * Set when latency stats are enabled on the command line.
314  */
315 uint8_t latencystats_enabled;
316 
317 /*
318  * Lcore ID to service latency statistics.
319  */
320 lcoreid_t latencystats_lcore_id = -1;
321 
322 #endif
323 
324 /*
325  * Ethernet device configuration.
326  */
327 struct rte_eth_rxmode rx_mode = {
328 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
329 	.split_hdr_size = 0,
330 	.header_split   = 0, /**< Header Split disabled. */
331 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
332 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
333 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
334 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
335 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
336 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
337 };
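/*
 * Illustrative sketch (not part of this file's revision): rx_mode is
 * meant to be embedded in a struct rte_eth_conf passed to
 * rte_eth_dev_configure(). A minimal, hypothetical use with a single
 * RX and TX queue:
 */
#if 0
static int
example_configure_port(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode = rx_mode; /* the defaults defined above */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
#endif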
338 
339 struct rte_fdir_conf fdir_conf = {
340 	.mode = RTE_FDIR_MODE_NONE,
341 	.pballoc = RTE_FDIR_PBALLOC_64K,
342 	.status = RTE_FDIR_REPORT_STATUS,
343 	.mask = {
344 		.vlan_tci_mask = 0x0,
345 		.ipv4_mask     = {
346 			.src_ip = 0xFFFFFFFF,
347 			.dst_ip = 0xFFFFFFFF,
348 		},
349 		.ipv6_mask     = {
350 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
351 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
352 		},
353 		.src_port_mask = 0xFFFF,
354 		.dst_port_mask = 0xFFFF,
355 		.mac_addr_byte_mask = 0xFF,
356 		.tunnel_type_mask = 1,
357 		.tunnel_id_mask = 0xFFFFFFFF,
358 	},
359 	.drop_queue = 127,
360 };
361 
362 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
363 
364 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
365 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
366 
367 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
368 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
369 
370 uint16_t nb_tx_queue_stats_mappings = 0;
371 uint16_t nb_rx_queue_stats_mappings = 0;
372 
373 unsigned int num_sockets = 0;
374 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
375 
376 #ifdef RTE_LIBRTE_BITRATE
377 /* Bitrate statistics */
378 struct rte_stats_bitrates *bitrate_data;
379 lcoreid_t bitrate_lcore_id;
380 uint8_t bitrate_enabled;
381 #endif
382 
383 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
384 
385 /* Forward function declarations */
386 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
387 static void check_all_ports_link_status(uint32_t port_mask);
388 static int eth_event_callback(uint8_t port_id,
389 			      enum rte_eth_event_type type,
390 			      void *param, void *ret_param);
391 
392 /*
393  * Check if all the ports are started.
394  * If yes, return positive value. If not, return zero.
395  */
396 static int all_ports_started(void);
397 
398 /*
399  * Helper function to check whether a socket ID is newly discovered.
400  * If new, return a positive value; if already discovered, return zero.
401  */
402 int
403 new_socket_id(unsigned int socket_id)
404 {
405 	unsigned int i;
406 
407 	for (i = 0; i < num_sockets; i++) {
408 		if (socket_ids[i] == socket_id)
409 			return 0;
410 	}
411 	return 1;
412 }
413 
414 /*
415  * Setup default configuration.
416  */
417 static void
418 set_default_fwd_lcores_config(void)
419 {
420 	unsigned int i;
421 	unsigned int nb_lc;
422 	unsigned int sock_num;
423 
424 	nb_lc = 0;
425 	for (i = 0; i < RTE_MAX_LCORE; i++) {
426 		sock_num = rte_lcore_to_socket_id(i);
427 		if (new_socket_id(sock_num)) {
428 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
429 				rte_exit(EXIT_FAILURE,
430 					 "Total sockets greater than %u\n",
431 					 RTE_MAX_NUMA_NODES);
432 			}
433 			socket_ids[num_sockets++] = sock_num;
434 		}
435 		if (!rte_lcore_is_enabled(i))
436 			continue;
437 		if (i == rte_get_master_lcore())
438 			continue;
439 		fwd_lcores_cpuids[nb_lc++] = i;
440 	}
441 	nb_lcores = (lcoreid_t) nb_lc;
442 	nb_cfg_lcores = nb_lcores;
443 	nb_fwd_lcores = 1;
444 }
445 
446 static void
447 set_def_peer_eth_addrs(void)
448 {
449 	portid_t i;
450 
451 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
452 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
453 		peer_eth_addrs[i].addr_bytes[5] = i;
454 	}
455 }
456 
457 static void
458 set_default_fwd_ports_config(void)
459 {
460 	portid_t pt_id;
461 
462 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
463 		fwd_ports_ids[pt_id] = pt_id;
464 
465 	nb_cfg_ports = nb_ports;
466 	nb_fwd_ports = nb_ports;
467 }
468 
469 void
470 set_def_fwd_config(void)
471 {
472 	set_default_fwd_lcores_config();
473 	set_def_peer_eth_addrs();
474 	set_default_fwd_ports_config();
475 }
476 
477 /*
478  * Configuration initialisation done once at init time.
479  */
480 static void
481 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
482 		 unsigned int socket_id)
483 {
484 	char pool_name[RTE_MEMPOOL_NAMESIZE];
485 	struct rte_mempool *rte_mp = NULL;
486 	uint32_t mb_size;
487 
488 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
489 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
490 
491 	RTE_LOG(INFO, USER1,
492 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
493 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
494 
495 #ifdef RTE_LIBRTE_PMD_XENVIRT
496 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
497 		(unsigned) mb_mempool_cache,
498 		sizeof(struct rte_pktmbuf_pool_private),
499 		rte_pktmbuf_pool_init, NULL,
500 		rte_pktmbuf_init, NULL,
501 		socket_id, 0);
502 #endif
503 
504 	/* if the former Xen allocation failed, fall back to normal allocation */
505 	if (rte_mp == NULL) {
506 		if (mp_anon != 0) {
507 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
508 				mb_size, (unsigned) mb_mempool_cache,
509 				sizeof(struct rte_pktmbuf_pool_private),
510 				socket_id, 0);
511 			if (rte_mp == NULL)
512 				goto err;
513 
514 			if (rte_mempool_populate_anon(rte_mp) == 0) {
515 				rte_mempool_free(rte_mp);
516 				rte_mp = NULL;
517 				goto err;
518 			}
519 			rte_pktmbuf_pool_init(rte_mp, NULL);
520 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
521 		} else {
522 			/* wrapper to rte_mempool_create() */
523 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
524 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
525 		}
526 	}
527 
528 err:
529 	if (rte_mp == NULL) {
530 		rte_exit(EXIT_FAILURE,
531 			"Creation of mbuf pool for socket %u failed: %s\n",
532 			socket_id, rte_strerror(rte_errno));
533 	} else if (verbose_level > 0) {
534 		rte_mempool_dump(stdout, rte_mp);
535 	}
536 }
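/*
 * Illustrative sketch (not part of this file's revision): in the common
 * path above (no Xen, no anonymous memory), pool creation reduces to a
 * single rte_pktmbuf_pool_create() call. The pool name and mbuf count
 * below are arbitrary; the cache and data room sizes reuse the defaults
 * defined earlier in this file.
 */
#if 0
static struct rte_mempool *
example_create_pool(unsigned int socket_id)
{
	return rte_pktmbuf_pool_create("example_pool", 8192,
			DEF_MBUF_CACHE, 0, DEFAULT_MBUF_DATA_SIZE, socket_id);
}
#endif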
537 
538 /*
539  * Check whether the given socket ID is valid in NUMA mode;
540  * return 0 if valid, -1 otherwise.
541  */
542 static int
543 check_socket_id(const unsigned int socket_id)
544 {
545 	static int warning_once = 0;
546 
547 	if (new_socket_id(socket_id)) {
548 		if (!warning_once && numa_support)
549 			printf("Warning: NUMA should be configured manually by"
550 			       " using --port-numa-config and"
551 			       " --ring-numa-config parameters along with"
552 			       " --numa.\n");
553 		warning_once = 1;
554 		return -1;
555 	}
556 	return 0;
557 }
558 
559 static void
560 init_config(void)
561 {
562 	portid_t pid;
563 	struct rte_port *port;
564 	struct rte_mempool *mbp;
565 	unsigned int nb_mbuf_per_pool;
566 	lcoreid_t  lc_id;
567 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
568 
569 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
570 
571 	if (numa_support) {
572 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
573 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
574 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
575 	}
576 
577 	/* Configuration of logical cores. */
578 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
579 				sizeof(struct fwd_lcore *) * nb_lcores,
580 				RTE_CACHE_LINE_SIZE);
581 	if (fwd_lcores == NULL) {
582 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
583 							"failed\n", nb_lcores);
584 	}
585 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
586 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
587 					       sizeof(struct fwd_lcore),
588 					       RTE_CACHE_LINE_SIZE);
589 		if (fwd_lcores[lc_id] == NULL) {
590 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
591 								"failed\n");
592 		}
593 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
594 	}
595 
596 	RTE_ETH_FOREACH_DEV(pid) {
597 		port = &ports[pid];
598 		rte_eth_dev_info_get(pid, &port->dev_info);
599 
600 		if (numa_support) {
601 			if (port_numa[pid] != NUMA_NO_CONFIG)
602 				port_per_socket[port_numa[pid]]++;
603 			else {
604 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
605 
606 				/* if socket_id is invalid, set to 0 */
607 				if (check_socket_id(socket_id) < 0)
608 					socket_id = 0;
609 				port_per_socket[socket_id]++;
610 			}
611 		}
612 
613 		/* set flag to initialize port/queue */
614 		port->need_reconfig = 1;
615 		port->need_reconfig_queues = 1;
616 	}
617 
618 	/*
619 	 * Create mbuf pools.
620 	 * If NUMA support is disabled, create a single mbuf pool in
621 	 * socket 0 memory by default.
622 	 * Otherwise, create an mbuf pool on each discovered socket.
623 	 *
624 	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
625 	 * and nb_txd can be reconfigured at run time.
626 	 */
627 	if (param_total_num_mbufs)
628 		nb_mbuf_per_pool = param_total_num_mbufs;
629 	else {
630 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
631 			(nb_lcores * mb_mempool_cache) +
632 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
633 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
634 	}
635 
636 	if (numa_support) {
637 		uint8_t i;
638 
639 		for (i = 0; i < num_sockets; i++)
640 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
641 					 socket_ids[i]);
642 	} else {
643 		if (socket_num == UMA_NO_CONFIG)
644 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
645 		else
646 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
647 						 socket_num);
648 	}
649 
650 	init_port_config();
651 
652 	/*
653 	 * Record which mbuf pool each logical core should use, if needed.
654 	 */
655 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
656 		mbp = mbuf_pool_find(
657 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
658 
659 		if (mbp == NULL)
660 			mbp = mbuf_pool_find(0);
661 		fwd_lcores[lc_id]->mbp = mbp;
662 	}
663 
664 	/* Configuration of packet forwarding streams. */
665 	if (init_fwd_streams() < 0)
666 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
667 
668 	fwd_config_setup();
669 }
670 
671 
672 void
673 reconfig(portid_t new_port_id, unsigned socket_id)
674 {
675 	struct rte_port *port;
676 
677 	/* Reconfiguration of Ethernet ports. */
678 	port = &ports[new_port_id];
679 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
680 
681 	/* set flag to initialize port/queue */
682 	port->need_reconfig = 1;
683 	port->need_reconfig_queues = 1;
684 	port->socket_id = socket_id;
685 
686 	init_port_config();
687 }
688 
689 
690 int
691 init_fwd_streams(void)
692 {
693 	portid_t pid;
694 	struct rte_port *port;
695 	streamid_t sm_id, nb_fwd_streams_new;
696 	queueid_t q;
697 
698 	/* set the socket ID according to whether NUMA is enabled */
699 	RTE_ETH_FOREACH_DEV(pid) {
700 		port = &ports[pid];
701 		if (nb_rxq > port->dev_info.max_rx_queues) {
702 			printf("Fail: nb_rxq(%d) is greater than "
703 				"max_rx_queues(%d)\n", nb_rxq,
704 				port->dev_info.max_rx_queues);
705 			return -1;
706 		}
707 		if (nb_txq > port->dev_info.max_tx_queues) {
708 			printf("Fail: nb_txq(%d) is greater than "
709 				"max_tx_queues(%d)\n", nb_txq,
710 				port->dev_info.max_tx_queues);
711 			return -1;
712 		}
713 		if (numa_support) {
714 			if (port_numa[pid] != NUMA_NO_CONFIG)
715 				port->socket_id = port_numa[pid];
716 			else {
717 				port->socket_id = rte_eth_dev_socket_id(pid);
718 
719 				/* if socket_id is invalid, set to 0 */
720 				if (check_socket_id(port->socket_id) < 0)
721 					port->socket_id = 0;
722 			}
723 		}
724 		else {
725 			if (socket_num == UMA_NO_CONFIG)
726 				port->socket_id = 0;
727 			else
728 				port->socket_id = socket_num;
729 		}
730 	}
731 
732 	q = RTE_MAX(nb_rxq, nb_txq);
733 	if (q == 0) {
734 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
735 		return -1;
736 	}
737 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
738 	if (nb_fwd_streams_new == nb_fwd_streams)
739 		return 0;
740 	/* clear the old */
741 	if (fwd_streams != NULL) {
742 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
743 			if (fwd_streams[sm_id] == NULL)
744 				continue;
745 			rte_free(fwd_streams[sm_id]);
746 			fwd_streams[sm_id] = NULL;
747 		}
748 		rte_free(fwd_streams);
749 		fwd_streams = NULL;
750 	}
751 
752 	/* init new */
753 	nb_fwd_streams = nb_fwd_streams_new;
754 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
755 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
756 	if (fwd_streams == NULL)
757 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
758 						"failed\n", nb_fwd_streams);
759 
760 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
761 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
762 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
763 		if (fwd_streams[sm_id] == NULL)
764 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
765 								" failed\n");
766 	}
767 
768 	return 0;
769 }
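/*
 * Illustrative example (not part of this file's revision): the stream
 * count set up above is nb_ports * max(nb_rxq, nb_txq). For instance,
 * with 2 ports, nb_rxq = 4 and nb_txq = 2, init_fwd_streams() allocates
 * 2 * max(4, 2) = 8 forwarding streams.
 */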
770 
771 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
772 static void
773 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
774 {
775 	unsigned int total_burst;
776 	unsigned int nb_burst;
777 	unsigned int burst_stats[3];
778 	uint16_t pktnb_stats[3];
779 	uint16_t nb_pkt;
780 	int burst_percent[3];
781 
782 	/*
783 	 * First compute the total number of packet bursts and the
784 	 * two highest numbers of bursts of the same number of packets.
785 	 */
786 	total_burst = 0;
787 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
788 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
789 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
790 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
791 		if (nb_burst == 0)
792 			continue;
793 		total_burst += nb_burst;
794 		if (nb_burst > burst_stats[0]) {
795 			burst_stats[1] = burst_stats[0];
796 			pktnb_stats[1] = pktnb_stats[0];
797 			burst_stats[0] = nb_burst;
798 			pktnb_stats[0] = nb_pkt;
799 		}
800 	}
801 	if (total_burst == 0)
802 		return;
803 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
804 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
805 	       burst_percent[0], (int) pktnb_stats[0]);
806 	if (burst_stats[0] == total_burst) {
807 		printf("]\n");
808 		return;
809 	}
810 	if (burst_stats[0] + burst_stats[1] == total_burst) {
811 		printf(" + %d%% of %d pkts]\n",
812 		       100 - burst_percent[0], pktnb_stats[1]);
813 		return;
814 	}
815 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
816 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
817 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
818 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
819 		return;
820 	}
821 	printf(" + %d%% of %d pkts + %d%% of others]\n",
822 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
823 }
824 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
825 
826 static void
827 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
828 {
829 	struct rte_port *port;
830 	uint8_t i;
831 
832 	static const char *fwd_stats_border = "----------------------";
833 
834 	port = &ports[port_id];
835 	printf("\n  %s Forward statistics for port %-2d %s\n",
836 	       fwd_stats_border, port_id, fwd_stats_border);
837 
838 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
839 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
840 		       "%-"PRIu64"\n",
841 		       stats->ipackets, stats->imissed,
842 		       (uint64_t) (stats->ipackets + stats->imissed));
843 
844 		if (cur_fwd_eng == &csum_fwd_engine)
845 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
846 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
847 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
848 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
849 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
850 		}
851 
852 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
853 		       "%-"PRIu64"\n",
854 		       stats->opackets, port->tx_dropped,
855 		       (uint64_t) (stats->opackets + port->tx_dropped));
856 	}
857 	else {
858 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
859 		       "%14"PRIu64"\n",
860 		       stats->ipackets, stats->imissed,
861 		       (uint64_t) (stats->ipackets + stats->imissed));
862 
863 		if (cur_fwd_eng == &csum_fwd_engine)
864 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
865 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
866 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
867 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
868 			printf("  RX-nombufs:             %14"PRIu64"\n",
869 			       stats->rx_nombuf);
870 		}
871 
872 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
873 		       "%14"PRIu64"\n",
874 		       stats->opackets, port->tx_dropped,
875 		       (uint64_t) (stats->opackets + port->tx_dropped));
876 	}
877 
878 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
879 	if (port->rx_stream)
880 		pkt_burst_stats_display("RX",
881 			&port->rx_stream->rx_burst_stats);
882 	if (port->tx_stream)
883 		pkt_burst_stats_display("TX",
884 			&port->tx_stream->tx_burst_stats);
885 #endif
886 
887 	if (port->rx_queue_stats_mapping_enabled) {
888 		printf("\n");
889 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
890 			printf("  Stats reg %2d RX-packets:%14"PRIu64
891 			       "     RX-errors:%14"PRIu64
892 			       "    RX-bytes:%14"PRIu64"\n",
893 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
894 		}
895 		printf("\n");
896 	}
897 	if (port->tx_queue_stats_mapping_enabled) {
898 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
899 			printf("  Stats reg %2d TX-packets:%14"PRIu64
900 			       "                                 TX-bytes:%14"PRIu64"\n",
901 			       i, stats->q_opackets[i], stats->q_obytes[i]);
902 		}
903 	}
904 
905 	printf("  %s--------------------------------%s\n",
906 	       fwd_stats_border, fwd_stats_border);
907 }
908 
909 static void
910 fwd_stream_stats_display(streamid_t stream_id)
911 {
912 	struct fwd_stream *fs;
913 	static const char *fwd_top_stats_border = "-------";
914 
915 	fs = fwd_streams[stream_id];
916 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
917 	    (fs->fwd_dropped == 0))
918 		return;
919 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
920 	       "TX Port=%2d/Queue=%2d %s\n",
921 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
922 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
923 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
924 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
925 
926 	/* if in checksum forwarding mode */
927 	if (cur_fwd_eng == &csum_fwd_engine) {
928 	       printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
929 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
930 	}
931 
932 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
933 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
934 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
935 #endif
936 }
937 
938 static void
939 flush_fwd_rx_queues(void)
940 {
941 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
942 	portid_t  rxp;
943 	portid_t port_id;
944 	queueid_t rxq;
945 	uint16_t  nb_rx;
946 	uint16_t  i;
947 	uint8_t   j;
948 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
949 	uint64_t timer_period;
950 
951 	/* convert to number of cycles */
952 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
953 
954 	for (j = 0; j < 2; j++) {
955 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
956 			for (rxq = 0; rxq < nb_rxq; rxq++) {
957 				port_id = fwd_ports_ids[rxp];
958 				/*
959 				 * testpmd can get stuck in the do/while loop
960 				 * below if rte_eth_rx_burst() always returns
961 				 * nonzero packets, so a timer is used to exit
962 				 * the loop after a one-second timeout.
963 				 */
964 				prev_tsc = rte_rdtsc();
965 				do {
966 					nb_rx = rte_eth_rx_burst(port_id, rxq,
967 						pkts_burst, MAX_PKT_BURST);
968 					for (i = 0; i < nb_rx; i++)
969 						rte_pktmbuf_free(pkts_burst[i]);
970 
971 					cur_tsc = rte_rdtsc();
972 					diff_tsc = cur_tsc - prev_tsc;
973 					timer_tsc += diff_tsc;
974 				} while ((nb_rx > 0) &&
975 					(timer_tsc < timer_period));
976 				timer_tsc = 0;
977 			}
978 		}
979 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
980 	}
981 }
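/*
 * Illustrative sketch (not part of this file's revision): the drain
 * pattern used above -- receive a burst, free every mbuf, stop once the
 * queue is empty or a timeout expires -- in its minimal single-queue
 * form:
 */
#if 0
static void
example_drain_rx_queue(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *burst[MAX_PKT_BURST];
	uint64_t deadline = rte_rdtsc() + rte_get_timer_hz(); /* ~1 second */
	uint16_t nb_rx, i;

	do {
		nb_rx = rte_eth_rx_burst(port_id, queue_id,
					 burst, MAX_PKT_BURST);
		for (i = 0; i < nb_rx; i++)
			rte_pktmbuf_free(burst[i]);
	} while (nb_rx > 0 && rte_rdtsc() < deadline);
}
#endif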
982 
983 static void
984 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
985 {
986 	struct fwd_stream **fsm;
987 	streamid_t nb_fs;
988 	streamid_t sm_id;
989 #ifdef RTE_LIBRTE_BITRATE
990 	uint64_t tics_per_1sec;
991 	uint64_t tics_datum;
992 	uint64_t tics_current;
993 	uint8_t idx_port, cnt_ports;
994 
995 	cnt_ports = rte_eth_dev_count();
996 	tics_datum = rte_rdtsc();
997 	tics_per_1sec = rte_get_timer_hz();
998 #endif
999 	fsm = &fwd_streams[fc->stream_idx];
1000 	nb_fs = fc->stream_nb;
1001 	do {
1002 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1003 			(*pkt_fwd)(fsm[sm_id]);
1004 #ifdef RTE_LIBRTE_BITRATE
1005 		if (bitrate_enabled != 0 &&
1006 				bitrate_lcore_id == rte_lcore_id()) {
1007 			tics_current = rte_rdtsc();
1008 			if (tics_current - tics_datum >= tics_per_1sec) {
1009 				/* Periodic bitrate calculation */
1010 				for (idx_port = 0;
1011 						idx_port < cnt_ports;
1012 						idx_port++)
1013 					rte_stats_bitrate_calc(bitrate_data,
1014 						idx_port);
1015 				tics_datum = tics_current;
1016 			}
1017 		}
1018 #endif
1019 #ifdef RTE_LIBRTE_LATENCY_STATS
1020 		if (latencystats_enabled != 0 &&
1021 				latencystats_lcore_id == rte_lcore_id())
1022 			rte_latencystats_update();
1023 #endif
1024 
1025 	} while (!fc->stopped);
1026 }
1027 
1028 static int
1029 start_pkt_forward_on_core(void *fwd_arg)
1030 {
1031 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1032 			     cur_fwd_config.fwd_eng->packet_fwd);
1033 	return 0;
1034 }
1035 
1036 /*
1037  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1038  * Used to start communication flows in network loopback test configurations.
1039  */
1040 static int
1041 run_one_txonly_burst_on_core(void *fwd_arg)
1042 {
1043 	struct fwd_lcore *fwd_lc;
1044 	struct fwd_lcore tmp_lcore;
1045 
1046 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1047 	tmp_lcore = *fwd_lc;
1048 	tmp_lcore.stopped = 1;
1049 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1050 	return 0;
1051 }
1052 
1053 /*
1054  * Launch packet forwarding:
1055  *     - Setup per-port forwarding context.
1056  *     - launch logical cores with their forwarding configuration.
1057  */
1058 static void
1059 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1060 {
1061 	port_fwd_begin_t port_fwd_begin;
1062 	unsigned int i;
1063 	unsigned int lc_id;
1064 	int diag;
1065 
1066 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1067 	if (port_fwd_begin != NULL) {
1068 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1069 			(*port_fwd_begin)(fwd_ports_ids[i]);
1070 	}
1071 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1072 		lc_id = fwd_lcores_cpuids[i];
1073 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1074 			fwd_lcores[i]->stopped = 0;
1075 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1076 						     fwd_lcores[i], lc_id);
1077 			if (diag != 0)
1078 				printf("launch lcore %u failed - diag=%d\n",
1079 				       lc_id, diag);
1080 		}
1081 	}
1082 }
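/*
 * Illustrative sketch (not part of this file's revision): the launch
 * above relies on rte_eal_remote_launch(); the matching join is
 * rte_eal_mp_wait_lcore(), as called from start_packet_forwarding() and
 * stop_packet_forwarding(). A minimal launch/join pair with a
 * hypothetical worker:
 */
#if 0
static int
example_worker(void *arg)
{
	(void)arg; /* unused */
	return 0;
}

static void
example_launch_and_join(unsigned int lcore_id)
{
	if (rte_eal_remote_launch(example_worker, NULL, lcore_id) != 0)
		printf("launch on lcore %u failed\n", lcore_id);
	rte_eal_mp_wait_lcore(); /* block until all worker lcores return */
}
#endif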
1083 
1084 /*
1085  * Launch packet forwarding configuration.
1086  */
1087 void
1088 start_packet_forwarding(int with_tx_first)
1089 {
1090 	port_fwd_begin_t port_fwd_begin;
1091 	port_fwd_end_t  port_fwd_end;
1092 	struct rte_port *port;
1093 	unsigned int i;
1094 	portid_t   pt_id;
1095 	streamid_t sm_id;
1096 
1097 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1098 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1099 
1100 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1101 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1102 
1103 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1104 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1105 		(!nb_rxq || !nb_txq))
1106 		rte_exit(EXIT_FAILURE,
1107 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1108 			cur_fwd_eng->fwd_mode_name);
1109 
1110 	if (all_ports_started() == 0) {
1111 		printf("Not all ports were started\n");
1112 		return;
1113 	}
1114 	if (test_done == 0) {
1115 		printf("Packet forwarding already started\n");
1116 		return;
1117 	}
1118 
1119 	if (init_fwd_streams() < 0) {
1120 		printf("Fail from init_fwd_streams()\n");
1121 		return;
1122 	}
1123 
1124 	if (dcb_test) {
1125 		for (i = 0; i < nb_fwd_ports; i++) {
1126 			pt_id = fwd_ports_ids[i];
1127 			port = &ports[pt_id];
1128 			if (!port->dcb_flag) {
1129 				printf("In DCB mode, all forwarding ports must "
1130 				       "be configured in this mode.\n");
1131 				return;
1132 			}
1133 		}
1134 		if (nb_fwd_lcores == 1) {
1135 			printf("In DCB mode, the number of forwarding cores "
1136 			       "should be larger than 1.\n");
1137 			return;
1138 		}
1139 	}
1140 	test_done = 0;
1141 
1142 	if (!no_flush_rx)
1143 		flush_fwd_rx_queues();
1144 
1145 	fwd_config_setup();
1146 	pkt_fwd_config_display(&cur_fwd_config);
1147 	rxtx_config_display();
1148 
1149 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1150 		pt_id = fwd_ports_ids[i];
1151 		port = &ports[pt_id];
1152 		rte_eth_stats_get(pt_id, &port->stats);
1153 		port->tx_dropped = 0;
1154 
1155 		map_port_queue_stats_mapping_registers(pt_id, port);
1156 	}
1157 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1158 		fwd_streams[sm_id]->rx_packets = 0;
1159 		fwd_streams[sm_id]->tx_packets = 0;
1160 		fwd_streams[sm_id]->fwd_dropped = 0;
1161 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1162 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1163 
1164 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1165 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1166 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1167 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1168 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1169 #endif
1170 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1171 		fwd_streams[sm_id]->core_cycles = 0;
1172 #endif
1173 	}
1174 	if (with_tx_first) {
1175 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1176 		if (port_fwd_begin != NULL) {
1177 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1178 				(*port_fwd_begin)(fwd_ports_ids[i]);
1179 		}
1180 		while (with_tx_first--) {
1181 			launch_packet_forwarding(
1182 					run_one_txonly_burst_on_core);
1183 			rte_eal_mp_wait_lcore();
1184 		}
1185 		port_fwd_end = tx_only_engine.port_fwd_end;
1186 		if (port_fwd_end != NULL) {
1187 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1188 				(*port_fwd_end)(fwd_ports_ids[i]);
1189 		}
1190 	}
1191 	launch_packet_forwarding(start_pkt_forward_on_core);
1192 }
1193 
1194 void
1195 stop_packet_forwarding(void)
1196 {
1197 	struct rte_eth_stats stats;
1198 	struct rte_port *port;
1199 	port_fwd_end_t  port_fwd_end;
1200 	int i;
1201 	portid_t   pt_id;
1202 	streamid_t sm_id;
1203 	lcoreid_t  lc_id;
1204 	uint64_t total_recv;
1205 	uint64_t total_xmit;
1206 	uint64_t total_rx_dropped;
1207 	uint64_t total_tx_dropped;
1208 	uint64_t total_rx_nombuf;
1209 	uint64_t tx_dropped;
1210 	uint64_t rx_bad_ip_csum;
1211 	uint64_t rx_bad_l4_csum;
1212 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1213 	uint64_t fwd_cycles;
1214 #endif
1215 	static const char *acc_stats_border = "+++++++++++++++";
1216 
1217 	if (test_done) {
1218 		printf("Packet forwarding not started\n");
1219 		return;
1220 	}
1221 	printf("Telling cores to stop...");
1222 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1223 		fwd_lcores[lc_id]->stopped = 1;
1224 	printf("\nWaiting for lcores to finish...\n");
1225 	rte_eal_mp_wait_lcore();
1226 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1227 	if (port_fwd_end != NULL) {
1228 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1229 			pt_id = fwd_ports_ids[i];
1230 			(*port_fwd_end)(pt_id);
1231 		}
1232 	}
1233 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1234 	fwd_cycles = 0;
1235 #endif
1236 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1237 		if (cur_fwd_config.nb_fwd_streams >
1238 		    cur_fwd_config.nb_fwd_ports) {
1239 			fwd_stream_stats_display(sm_id);
1240 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1241 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1242 		} else {
1243 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1244 				fwd_streams[sm_id];
1245 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1246 				fwd_streams[sm_id];
1247 		}
1248 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1249 		tx_dropped = (uint64_t) (tx_dropped +
1250 					 fwd_streams[sm_id]->fwd_dropped);
1251 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1252 
1253 		rx_bad_ip_csum =
1254 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1255 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1256 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1257 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1258 							rx_bad_ip_csum;
1259 
1260 		rx_bad_l4_csum =
1261 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1262 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1263 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1264 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1265 							rx_bad_l4_csum;
1266 
1267 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1268 		fwd_cycles = (uint64_t) (fwd_cycles +
1269 					 fwd_streams[sm_id]->core_cycles);
1270 #endif
1271 	}
1272 	total_recv = 0;
1273 	total_xmit = 0;
1274 	total_rx_dropped = 0;
1275 	total_tx_dropped = 0;
1276 	total_rx_nombuf  = 0;
1277 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1278 		pt_id = fwd_ports_ids[i];
1279 
1280 		port = &ports[pt_id];
1281 		rte_eth_stats_get(pt_id, &stats);
1282 		stats.ipackets -= port->stats.ipackets;
1283 		port->stats.ipackets = 0;
1284 		stats.opackets -= port->stats.opackets;
1285 		port->stats.opackets = 0;
1286 		stats.ibytes   -= port->stats.ibytes;
1287 		port->stats.ibytes = 0;
1288 		stats.obytes   -= port->stats.obytes;
1289 		port->stats.obytes = 0;
1290 		stats.imissed  -= port->stats.imissed;
1291 		port->stats.imissed = 0;
1292 		stats.oerrors  -= port->stats.oerrors;
1293 		port->stats.oerrors = 0;
1294 		stats.rx_nombuf -= port->stats.rx_nombuf;
1295 		port->stats.rx_nombuf = 0;
1296 
1297 		total_recv += stats.ipackets;
1298 		total_xmit += stats.opackets;
1299 		total_rx_dropped += stats.imissed;
1300 		total_tx_dropped += port->tx_dropped;
1301 		total_rx_nombuf  += stats.rx_nombuf;
1302 
1303 		fwd_port_stats_display(pt_id, &stats);
1304 	}
1305 	printf("\n  %s Accumulated forward statistics for all ports"
1306 	       "%s\n",
1307 	       acc_stats_border, acc_stats_border);
1308 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1309 	       "%-"PRIu64"\n"
1310 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1311 	       "%-"PRIu64"\n",
1312 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1313 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1314 	if (total_rx_nombuf > 0)
1315 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1316 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1317 	       "%s\n",
1318 	       acc_stats_border, acc_stats_border);
1319 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1320 	if (total_recv > 0)
1321 		printf("\n  CPU cycles/packet=%u (total cycles="
1322 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1323 		       (unsigned int)(fwd_cycles / total_recv),
1324 		       fwd_cycles, total_recv);
1325 #endif
1326 	printf("\nDone.\n");
1327 	test_done = 1;
1328 }
1329 
1330 void
1331 dev_set_link_up(portid_t pid)
1332 {
1333 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1334 		printf("\nSet link up fail.\n");
1335 }
1336 
1337 void
1338 dev_set_link_down(portid_t pid)
1339 {
1340 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1341 		printf("\nSet link down fail.\n");
1342 }
1343 
1344 static int
1345 all_ports_started(void)
1346 {
1347 	portid_t pi;
1348 	struct rte_port *port;
1349 
1350 	RTE_ETH_FOREACH_DEV(pi) {
1351 		port = &ports[pi];
1352 		/* Check if there is a port which is not started */
1353 		if ((port->port_status != RTE_PORT_STARTED) &&
1354 			(port->slave_flag == 0))
1355 			return 0;
1356 	}
1357 
1358 	/* All ports are started */
1359 	return 1;
1360 }
1361 
1362 int
1363 all_ports_stopped(void)
1364 {
1365 	portid_t pi;
1366 	struct rte_port *port;
1367 
1368 	RTE_ETH_FOREACH_DEV(pi) {
1369 		port = &ports[pi];
1370 		if ((port->port_status != RTE_PORT_STOPPED) &&
1371 			(port->slave_flag == 0))
1372 			return 0;
1373 	}
1374 
1375 	return 1;
1376 }
1377 
1378 int
1379 port_is_started(portid_t port_id)
1380 {
1381 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1382 		return 0;
1383 
1384 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1385 		return 0;
1386 
1387 	return 1;
1388 }
1389 
1390 static int
1391 port_is_closed(portid_t port_id)
1392 {
1393 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1394 		return 0;
1395 
1396 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1397 		return 0;
1398 
1399 	return 1;
1400 }
1401 
1402 int
1403 start_port(portid_t pid)
1404 {
1405 	int diag, need_check_link_status = -1;
1406 	portid_t pi;
1407 	queueid_t qi;
1408 	struct rte_port *port;
1409 	struct ether_addr mac_addr;
1410 	enum rte_eth_event_type event_type;
1411 
1412 	if (port_id_is_invalid(pid, ENABLED_WARN))
1413 		return 0;
1414 
1415 	if (dcb_config)
1416 		dcb_test = 1;
1417 	RTE_ETH_FOREACH_DEV(pi) {
1418 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1419 			continue;
1420 
1421 		need_check_link_status = 0;
1422 		port = &ports[pi];
1423 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1424 						 RTE_PORT_HANDLING) == 0) {
1425 			printf("Port %d is not stopped\n", pi);
1426 			continue;
1427 		}
1428 
1429 		if (port->need_reconfig > 0) {
1430 			port->need_reconfig = 0;
1431 
1432 			printf("Configuring Port %d (socket %u)\n", pi,
1433 					port->socket_id);
1434 			/* configure port */
1435 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1436 						&(port->dev_conf));
1437 			if (diag != 0) {
1438 				if (rte_atomic16_cmpset(&(port->port_status),
1439 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1440 					printf("Port %d cannot be set back "
1441 							"to stopped\n", pi);
1442 				printf("Fail to configure port %d\n", pi);
1443 				/* try to reconfigure port next time */
1444 				port->need_reconfig = 1;
1445 				return -1;
1446 			}
1447 		}
1448 		if (port->need_reconfig_queues > 0) {
1449 			port->need_reconfig_queues = 0;
1450 			/* setup tx queues */
1451 			for (qi = 0; qi < nb_txq; qi++) {
1452 				if ((numa_support) &&
1453 					(txring_numa[pi] != NUMA_NO_CONFIG))
1454 					diag = rte_eth_tx_queue_setup(pi, qi,
1455 						nb_txd, txring_numa[pi],
1456 						&(port->tx_conf));
1457 				else
1458 					diag = rte_eth_tx_queue_setup(pi, qi,
1459 						nb_txd, port->socket_id,
1460 						&(port->tx_conf));
1461 
1462 				if (diag == 0)
1463 					continue;
1464 
1465 				/* Fail to setup tx queue, return */
1466 				if (rte_atomic16_cmpset(&(port->port_status),
1467 							RTE_PORT_HANDLING,
1468 							RTE_PORT_STOPPED) == 0)
1469 					printf("Port %d cannot be set back "
1470 							"to stopped\n", pi);
1471 				printf("Fail to configure port %d tx queues\n", pi);
1472 				/* try to reconfigure queues next time */
1473 				port->need_reconfig_queues = 1;
1474 				return -1;
1475 			}
1476 			/* setup rx queues */
1477 			for (qi = 0; qi < nb_rxq; qi++) {
1478 				if ((numa_support) &&
1479 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1480 					struct rte_mempool *mp =
1481 						mbuf_pool_find(rxring_numa[pi]);
1482 					if (mp == NULL) {
1483 						printf("Failed to setup RX queue:"
1484 							" no mempool allocation"
1485 							" on socket %d\n",
1486 							rxring_numa[pi]);
1487 						return -1;
1488 					}
1489 
1490 					diag = rte_eth_rx_queue_setup(pi, qi,
1491 					     nb_rxd, rxring_numa[pi],
1492 					     &(port->rx_conf), mp);
1493 				} else {
1494 					struct rte_mempool *mp =
1495 						mbuf_pool_find(port->socket_id);
1496 					if (mp == NULL) {
1497 						printf("Failed to setup RX queue:"
1498 							" no mempool allocation"
1499 							" on socket %d\n",
1500 							port->socket_id);
1501 						return -1;
1502 					}
1503 					diag = rte_eth_rx_queue_setup(pi, qi,
1504 					     nb_rxd, port->socket_id,
1505 					     &(port->rx_conf), mp);
1506 				}
1507 				if (diag == 0)
1508 					continue;
1509 
1510 				/* Fail to setup rx queue, return */
1511 				if (rte_atomic16_cmpset(&(port->port_status),
1512 							RTE_PORT_HANDLING,
1513 							RTE_PORT_STOPPED) == 0)
1514 					printf("Port %d cannot be set back "
1515 							"to stopped\n", pi);
1516 				printf("Fail to configure port %d rx queues\n", pi);
1517 				/* try to reconfigure queues next time */
1518 				port->need_reconfig_queues = 1;
1519 				return -1;
1520 			}
1521 		}
1522 
1523 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1524 		     event_type < RTE_ETH_EVENT_MAX;
1525 		     event_type++) {
1526 			diag = rte_eth_dev_callback_register(pi,
1527 							event_type,
1528 							eth_event_callback,
1529 							NULL);
1530 			if (diag) {
1531 				printf("Failed to setup event callback for event %d\n",
1532 					event_type);
1533 				return -1;
1534 			}
1535 		}
1536 
1537 		/* start port */
1538 		if (rte_eth_dev_start(pi) < 0) {
1539 			printf("Fail to start port %d\n", pi);
1540 
1541 			/* Fail to setup rx queue, return */
1542 			if (rte_atomic16_cmpset(&(port->port_status),
1543 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1544 				printf("Port %d cannot be set back to "
1545 							"stopped\n", pi);
1546 			continue;
1547 		}
1548 
1549 		if (rte_atomic16_cmpset(&(port->port_status),
1550 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1551 			printf("Port %d cannot be set to started\n", pi);
1552 
1553 		rte_eth_macaddr_get(pi, &mac_addr);
1554 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1555 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1556 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1557 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1558 
1559 		/* at least one port started, need to check link status */
1560 		need_check_link_status = 1;
1561 	}
1562 
1563 	if (need_check_link_status == 1 && !no_link_check)
1564 		check_all_ports_link_status(RTE_PORT_ALL);
1565 	else if (need_check_link_status == 0)
1566 		printf("Please stop the ports first\n");
1567 
1568 	printf("Done\n");
1569 	return 0;
1570 }
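/*
 * Illustrative sketch (not part of this file's revision): stripped of
 * the port state machine and NUMA handling, start_port() performs the
 * canonical ethdev bring-up -- configure, set up each queue, start. A
 * hypothetical minimal version for one port with one queue pair (NULL
 * queue configs request driver defaults; error handling is reduced to
 * early returns):
 */
#if 0
static int
example_port_bringup(uint8_t pi, struct rte_mempool *mp)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode = rx_mode;
	if (rte_eth_dev_configure(pi, 1, 1, &conf) != 0)
		return -1;
	if (rte_eth_tx_queue_setup(pi, 0, nb_txd,
			rte_eth_dev_socket_id(pi), NULL) != 0)
		return -1;
	if (rte_eth_rx_queue_setup(pi, 0, nb_rxd,
			rte_eth_dev_socket_id(pi), NULL, mp) != 0)
		return -1;
	return rte_eth_dev_start(pi);
}
#endif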
1571 
1572 void
1573 stop_port(portid_t pid)
1574 {
1575 	portid_t pi;
1576 	struct rte_port *port;
1577 	int need_check_link_status = 0;
1578 
1579 	if (dcb_test) {
1580 		dcb_test = 0;
1581 		dcb_config = 0;
1582 	}
1583 
1584 	if (port_id_is_invalid(pid, ENABLED_WARN))
1585 		return;
1586 
1587 	printf("Stopping ports...\n");
1588 
1589 	RTE_ETH_FOREACH_DEV(pi) {
1590 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1591 			continue;
1592 
1593 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1594 			printf("Please remove port %d from forwarding configuration.\n", pi);
1595 			continue;
1596 		}
1597 
1598 		if (port_is_bonding_slave(pi)) {
1599 			printf("Please remove port %d from bonded device.\n", pi);
1600 			continue;
1601 		}
1602 
1603 		port = &ports[pi];
1604 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1605 						RTE_PORT_HANDLING) == 0)
1606 			continue;
1607 
1608 		rte_eth_dev_stop(pi);
1609 
1610 		if (rte_atomic16_cmpset(&(port->port_status),
1611 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1612 			printf("Port %d cannot be set to stopped\n", pi);
1613 		need_check_link_status = 1;
1614 	}
1615 	if (need_check_link_status && !no_link_check)
1616 		check_all_ports_link_status(RTE_PORT_ALL);
1617 
1618 	printf("Done\n");
1619 }
1620 
1621 void
1622 close_port(portid_t pid)
1623 {
1624 	portid_t pi;
1625 	struct rte_port *port;
1626 
1627 	if (port_id_is_invalid(pid, ENABLED_WARN))
1628 		return;
1629 
1630 	printf("Closing ports...\n");
1631 
1632 	RTE_ETH_FOREACH_DEV(pi) {
1633 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1634 			continue;
1635 
1636 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1637 			printf("Please remove port %d from forwarding configuration.\n", pi);
1638 			continue;
1639 		}
1640 
1641 		if (port_is_bonding_slave(pi)) {
1642 			printf("Please remove port %d from bonded device.\n", pi);
1643 			continue;
1644 		}
1645 
1646 		port = &ports[pi];
1647 		if (rte_atomic16_cmpset(&(port->port_status),
1648 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1649 			printf("Port %d is already closed\n", pi);
1650 			continue;
1651 		}
1652 
1653 		if (rte_atomic16_cmpset(&(port->port_status),
1654 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1655 			printf("Port %d is not stopped\n", pi);
1656 			continue;
1657 		}
1658 
1659 		if (port->flow_list)
1660 			port_flow_flush(pi);
1661 		rte_eth_dev_close(pi);
1662 
1663 		if (rte_atomic16_cmpset(&(port->port_status),
1664 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1665 			printf("Port %d cannot be set to closed\n", pi);
1666 	}
1667 
1668 	printf("Done\n");
1669 }
1670 
1671 void
1672 attach_port(char *identifier)
1673 {
1674 	portid_t pi = 0;
1675 	unsigned int socket_id;
1676 
1677 	printf("Attaching a new port...\n");
1678 
1679 	if (identifier == NULL) {
1680 		printf("Invalid parameters are specified\n");
1681 		return;
1682 	}
1683 
1684 	if (rte_eth_dev_attach(identifier, &pi))
1685 		return;
1686 
1687 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1688 	/* if socket_id is invalid, set to 0 */
1689 	if (check_socket_id(socket_id) < 0)
1690 		socket_id = 0;
1691 	reconfig(pi, socket_id);
1692 	rte_eth_promiscuous_enable(pi);
1693 
1694 	nb_ports = rte_eth_dev_count();
1695 
1696 	ports[pi].port_status = RTE_PORT_STOPPED;
1697 
1698 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1699 	printf("Done\n");
1700 }
1701 
1702 void
1703 detach_port(uint8_t port_id)
1704 {
1705 	char name[RTE_ETH_NAME_MAX_LEN];
1706 
1707 	printf("Detaching a port...\n");
1708 
1709 	if (!port_is_closed(port_id)) {
1710 		printf("Please close port first\n");
1711 		return;
1712 	}
1713 
1714 	if (ports[port_id].flow_list)
1715 		port_flow_flush(port_id);
1716 
1717 	if (rte_eth_dev_detach(port_id, name))
1718 		return;
1719 
1720 	nb_ports = rte_eth_dev_count();
1721 
1722 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1723 			name, nb_ports);
1724 	printf("Done\n");
1725 	return;
1726 }
1727 
1728 void
1729 pmd_test_exit(void)
1730 {
1731 	portid_t pt_id;
1732 
1733 	if (test_done == 0)
1734 		stop_packet_forwarding();
1735 
1736 	if (ports != NULL) {
1737 		no_link_check = 1;
1738 		RTE_ETH_FOREACH_DEV(pt_id) {
1739 			printf("\nShutting down port %d...\n", pt_id);
1740 			fflush(stdout);
1741 			stop_port(pt_id);
1742 			close_port(pt_id);
1743 		}
1744 	}
1745 	printf("\nBye...\n");
1746 }
1747 
1748 typedef void (*cmd_func_t)(void);
1749 struct pmd_test_command {
1750 	const char *cmd_name;
1751 	cmd_func_t cmd_func;
1752 };
1753 
1754 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1755 
1756 /* Check the link status of all ports in up to 9s, and print the status at the end */
1757 static void
1758 check_all_ports_link_status(uint32_t port_mask)
1759 {
1760 #define CHECK_INTERVAL 100 /* 100ms */
1761 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1762 	uint8_t portid, count, all_ports_up, print_flag = 0;
1763 	struct rte_eth_link link;
1764 
1765 	printf("Checking link statuses...\n");
1766 	fflush(stdout);
1767 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1768 		all_ports_up = 1;
1769 		RTE_ETH_FOREACH_DEV(portid) {
1770 			if ((port_mask & (1 << portid)) == 0)
1771 				continue;
1772 			memset(&link, 0, sizeof(link));
1773 			rte_eth_link_get_nowait(portid, &link);
1774 			/* print link status if flag set */
1775 			if (print_flag == 1) {
1776 				if (link.link_status)
1777 					printf("Port %d Link Up - speed %u "
1778 						"Mbps - %s\n", (uint8_t)portid,
1779 						(unsigned)link.link_speed,
1780 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1781 					("full-duplex") : ("half-duplex"));
1782 				else
1783 					printf("Port %d Link Down\n",
1784 						(uint8_t)portid);
1785 				continue;
1786 			}
1787 			/* clear all_ports_up flag if any link down */
1788 			if (link.link_status == ETH_LINK_DOWN) {
1789 				all_ports_up = 0;
1790 				break;
1791 			}
1792 		}
1793 		/* after finally printing all link status, get out */
1794 		if (print_flag == 1)
1795 			break;
1796 
1797 		if (all_ports_up == 0) {
1798 			fflush(stdout);
1799 			rte_delay_ms(CHECK_INTERVAL);
1800 		}
1801 
1802 		/* set the print_flag if all ports up or timeout */
1803 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1804 			print_flag = 1;
1805 		}
1806 
1807 		if (lsc_interrupt)
1808 			break;
1809 	}
1810 }
1811 
1812 static void
1813 rmv_event_callback(void *arg)
1814 {
1815 	struct rte_eth_dev *dev;
1816 	uint8_t port_id = (intptr_t)arg;
1817 
1818 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1819 	dev = &rte_eth_devices[port_id];
1820 
1821 	stop_port(port_id);
1822 	close_port(port_id);
1823 	printf("removing device %s\n", dev->device->name);
1824 	rte_eal_dev_detach(dev->device);
1825 	dev->state = RTE_ETH_DEV_UNUSED;
1826 }
1827 
1828 /* This function is used by the interrupt thread */
1829 static int
1830 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
1831 		  void *ret_param)
1832 {
1833 	static const char * const event_desc[] = {
1834 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1835 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1836 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1837 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1838 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1839 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1840 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1841 		[RTE_ETH_EVENT_MAX] = NULL,
1842 	};
1843 
1844 	RTE_SET_USED(param);
1845 	RTE_SET_USED(ret_param);
1846 
1847 	if (type >= RTE_ETH_EVENT_MAX) {
1848 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1849 			port_id, __func__, type);
1850 		fflush(stderr);
1851 	} else if (event_print_mask & (UINT32_C(1) << type)) {
1852 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1853 			event_desc[type]);
1854 		fflush(stdout);
1855 	}
1856 
1857 	switch (type) {
1858 	case RTE_ETH_EVENT_INTR_RMV:
1859 		if (rte_eal_alarm_set(100000,
1860 				rmv_event_callback, (void *)(intptr_t)port_id))
1861 			fprintf(stderr, "Could not set up deferred device removal\n");
1862 		break;
1863 	default:
1864 		break;
1865 	}
1866 	return 0;
1867 }
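
/*
 * A registration sketch, assuming the usual ethdev API of this era; the
 * callback is tied to one port and one event type (LSC here, illustratively):
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *			eth_event_callback, NULL);
 */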
1868 
1869 static int
1870 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1871 {
1872 	uint16_t i;
1873 	int diag;
1874 	uint8_t mapping_found = 0;
1875 
1876 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1877 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1878 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1879 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1880 					tx_queue_stats_mappings[i].queue_id,
1881 					tx_queue_stats_mappings[i].stats_counter_id);
1882 			if (diag != 0)
1883 				return diag;
1884 			mapping_found = 1;
1885 		}
1886 	}
1887 	if (mapping_found)
1888 		port->tx_queue_stats_mapping_enabled = 1;
1889 	return 0;
1890 }
1891 
1892 static int
1893 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1894 {
1895 	uint16_t i;
1896 	int diag;
1897 	uint8_t mapping_found = 0;
1898 
1899 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1900 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1901 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1902 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1903 					rx_queue_stats_mappings[i].queue_id,
1904 					rx_queue_stats_mappings[i].stats_counter_id);
1905 			if (diag != 0)
1906 				return diag;
1907 			mapping_found = 1;
1908 		}
1909 	}
1910 	if (mapping_found)
1911 		port->rx_queue_stats_mapping_enabled = 1;
1912 	return 0;
1913 }
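
/*
 * The mapping tables walked above are normally filled from the command line;
 * a sketch of the option syntax (option name from testpmd's parameter list,
 * values illustrative):
 *
 *	--rx-queue-stats-mapping=(0,2,5)
 *
 * would request that port 0, RX queue 2 be counted in stats register 5,
 * i.e. an entry with port_id=0, queue_id=2, stats_counter_id=5.
 */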
1914 
1915 static void
1916 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1917 {
1918 	int diag = 0;
1919 
1920 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1921 	if (diag != 0) {
1922 		if (diag == -ENOTSUP) {
1923 			port->tx_queue_stats_mapping_enabled = 0;
1924 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1925 		}
1926 		else
1927 			rte_exit(EXIT_FAILURE,
1928 					"set_tx_queue_stats_mapping_registers "
1929 					"failed for port id=%d diag=%d\n",
1930 					pi, diag);
1931 	}
1932 
1933 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1934 	if (diag != 0) {
1935 		if (diag == -ENOTSUP) {
1936 			port->rx_queue_stats_mapping_enabled = 0;
1937 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1938 		}
1939 		else
1940 			rte_exit(EXIT_FAILURE,
1941 					"set_rx_queue_stats_mapping_registers "
1942 					"failed for port id=%d diag=%d\n",
1943 					pi, diag);
1944 	}
1945 }
1946 
1947 static void
1948 rxtx_port_config(struct rte_port *port)
1949 {
1950 	port->rx_conf = port->dev_info.default_rxconf;
1951 	port->tx_conf = port->dev_info.default_txconf;
1952 
1953 	/* Check if any RX/TX parameters have been passed */
1954 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1955 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1956 
1957 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1958 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1959 
1960 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1961 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1962 
1963 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1964 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1965 
1966 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1967 		port->rx_conf.rx_drop_en = rx_drop_en;
1968 
1969 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1970 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1971 
1972 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1973 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1974 
1975 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1976 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1977 
1978 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1979 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1980 
1981 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1982 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1983 
1984 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1985 		port->tx_conf.txq_flags = txq_flags;
1986 }
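
/*
 * The RTE_PMD_PARAM_UNSET checks above correspond to testpmd command-line
 * overrides; an illustrative fragment (option names from testpmd's parameter
 * list, the values are made up):
 *
 *	--rxpt=8 --rxht=8 --rxwt=0 --rxfreet=32 --txfreet=32 --txrst=32
 */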
1987 
1988 void
1989 init_port_config(void)
1990 {
1991 	portid_t pid;
1992 	struct rte_port *port;
1993 
1994 	RTE_ETH_FOREACH_DEV(pid) {
1995 		port = &ports[pid];
1996 		port->dev_conf.rxmode = rx_mode;
1997 		port->dev_conf.fdir_conf = fdir_conf;
1998 		if (nb_rxq > 1) {
1999 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2000 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2001 		} else {
2002 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2003 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2004 		}
2005 
2006 		if (port->dcb_flag == 0) {
2007 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2008 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2009 			else
2010 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2011 		}
2012 
2013 		rxtx_port_config(port);
2014 
2015 		rte_eth_macaddr_get(pid, &port->eth_addr);
2016 
2017 		map_port_queue_stats_mapping_registers(pid, port);
2018 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2019 		rte_pmd_ixgbe_bypass_init(pid);
2020 #endif
2021 
2022 		if (lsc_interrupt &&
2023 		    (rte_eth_devices[pid].data->dev_flags &
2024 		     RTE_ETH_DEV_INTR_LSC))
2025 			port->dev_conf.intr_conf.lsc = 1;
2026 		if (rmv_interrupt &&
2027 		    (rte_eth_devices[pid].data->dev_flags &
2028 		     RTE_ETH_DEV_INTR_RMV))
2029 			port->dev_conf.intr_conf.rmv = 1;
2030 	}
2031 }
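
/*
 * rss_hf used above is a bitmask of ETH_RSS_* protocol flags; a minimal
 * sketch of narrowing the hash to IP and UDP traffic (illustrative, not
 * necessarily the default):
 *
 *	rss_hf = ETH_RSS_IP | ETH_RSS_UDP;
 */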
2032 
2033 void set_port_slave_flag(portid_t slave_pid)
2034 {
2035 	struct rte_port *port;
2036 
2037 	port = &ports[slave_pid];
2038 	port->slave_flag = 1;
2039 }
2040 
2041 void clear_port_slave_flag(portid_t slave_pid)
2042 {
2043 	struct rte_port *port;
2044 
2045 	port = &ports[slave_pid];
2046 	port->slave_flag = 0;
2047 }
2048 
2049 uint8_t port_is_bonding_slave(portid_t slave_pid)
2050 {
2051 	struct rte_port *port;
2052 
2053 	port = &ports[slave_pid];
2054 	return port->slave_flag;
2055 }
2056 
2057 const uint16_t vlan_tags[] = {
2058 		0,  1,  2,  3,  4,  5,  6,  7,
2059 		8,  9, 10, 11,  12, 13, 14, 15,
2060 		16, 17, 18, 19, 20, 21, 22, 23,
2061 		24, 25, 26, 27, 28, 29, 30, 31
2062 };
2063 
2064 static  int
2065 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2066 		 enum dcb_mode_enable dcb_mode,
2067 		 enum rte_eth_nb_tcs num_tcs,
2068 		 uint8_t pfc_en)
2069 {
2070 	uint8_t i;
2071 
2072 	/*
2073 	 * Build the correct configuration for DCB+VT based on the vlan_tags[]
2074 	 * array given above and the number of traffic classes available for use.
2075 	 */
2076 	if (dcb_mode == DCB_VT_ENABLED) {
2077 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2078 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2079 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2080 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2081 
2082 		/* VMDQ+DCB RX and TX configurations */
2083 		vmdq_rx_conf->enable_default_pool = 0;
2084 		vmdq_rx_conf->default_pool = 0;
2085 		vmdq_rx_conf->nb_queue_pools =
2086 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2087 		vmdq_tx_conf->nb_queue_pools =
2088 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2089 
2090 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2091 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2092 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2093 			vmdq_rx_conf->pool_map[i].pools =
2094 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2095 		}
2096 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2097 			vmdq_rx_conf->dcb_tc[i] = i;
2098 			vmdq_tx_conf->dcb_tc[i] = i;
2099 		}
2100 
2101 		/* Set the RX and TX multi-queue modes to VMDq+DCB */
2102 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2103 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2104 	} else {
2105 		struct rte_eth_dcb_rx_conf *rx_conf =
2106 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2107 		struct rte_eth_dcb_tx_conf *tx_conf =
2108 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2109 
2110 		rx_conf->nb_tcs = num_tcs;
2111 		tx_conf->nb_tcs = num_tcs;
2112 
2113 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2114 			rx_conf->dcb_tc[i] = i % num_tcs;
2115 			tx_conf->dcb_tc[i] = i % num_tcs;
2116 		}
2117 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2118 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2119 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2120 	}
2121 
2122 	if (pfc_en)
2123 		eth_conf->dcb_capability_en =
2124 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2125 	else
2126 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2127 
2128 	return 0;
2129 }
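
/*
 * Worked example for the VT branch above: num_tcs == ETH_4_TCS selects
 * ETH_32_POOLS, so nb_pool_maps is 32 and every vlan_tags[i] entry steers
 * its VLAN to pool i, since pool_map[i].pools == 1 << (i % 32) == 1 << i.
 */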
2130 
2131 int
2132 init_port_dcb_config(portid_t pid,
2133 		     enum dcb_mode_enable dcb_mode,
2134 		     enum rte_eth_nb_tcs num_tcs,
2135 		     uint8_t pfc_en)
2136 {
2137 	struct rte_eth_conf port_conf;
2138 	struct rte_port *rte_port;
2139 	int retval;
2140 	uint16_t i;
2141 
2142 	rte_port = &ports[pid];
2143 
2144 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2145 	/* Enter DCB configuration status */
2146 	dcb_config = 1;
2147 
2148 	/* Set the DCB configuration for both VT and non-VT mode */
2149 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2150 	if (retval < 0)
2151 		return retval;
2152 	port_conf.rxmode.hw_vlan_filter = 1;
2153 
2154 	/**
2155 	 * Write the configuration into the device.
2156 	 * Set the numbers of RX & TX queues to 0, so
2157 	 * the RX & TX queues will not be setup.
2158 	 */
2159 	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2160 
2161 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2162 
2163 	/* If dev_info.vmdq_pool_base is greater than 0,
2164 	 * the queue IDs of the VMDq pools start after the PF queues.
2165 	 */
2166 	if (dcb_mode == DCB_VT_ENABLED &&
2167 	    rte_port->dev_info.vmdq_pool_base > 0) {
2168 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2169 			" for port %d.", pid);
2170 		return -1;
2171 	}
2172 
2173 	/* Assume all ports in testpmd have the same DCB capability
2174 	 * and the same number of RX and TX queues in DCB mode.
2175 	 */
2176 	if (dcb_mode == DCB_VT_ENABLED) {
2177 		if (rte_port->dev_info.max_vfs > 0) {
2178 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2179 			nb_txq = rte_port->dev_info.nb_tx_queues;
2180 		} else {
2181 			nb_rxq = rte_port->dev_info.max_rx_queues;
2182 			nb_txq = rte_port->dev_info.max_tx_queues;
2183 		}
2184 	} else {
2185 		/* If VT is disabled, use all PF queues */
2186 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2187 			nb_rxq = rte_port->dev_info.max_rx_queues;
2188 			nb_txq = rte_port->dev_info.max_tx_queues;
2189 		} else {
2190 			nb_rxq = (queueid_t)num_tcs;
2191 			nb_txq = (queueid_t)num_tcs;
2192 
2193 		}
2194 	}
2195 	rx_free_thresh = 64;
2196 
2197 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2198 
2199 	rxtx_port_config(rte_port);
2200 	/* VLAN filter */
2201 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2202 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2203 		rx_vft_set(pid, vlan_tags[i], 1);
2204 
2205 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2206 	map_port_queue_stats_mapping_registers(pid, rte_port);
2207 
2208 	rte_port->dcb_flag = 1;
2209 
2210 	return 0;
2211 }
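
/*
 * A sketch of how this function is reached from testpmd's interactive
 * prompt (the DCB config command requires a stopped port; port number and
 * TC count are illustrative):
 *
 *	testpmd> port stop 0
 *	testpmd> port config 0 dcb vt off 4 pfc on
 *	testpmd> port start 0
 */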
2212 
2213 static void
2214 init_port(void)
2215 {
2216 	/* Configuration of Ethernet ports. */
2217 	ports = rte_zmalloc("testpmd: ports",
2218 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2219 			    RTE_CACHE_LINE_SIZE);
2220 	if (ports == NULL) {
2221 		rte_exit(EXIT_FAILURE,
2222 				"rte_zmalloc(%d struct rte_port) failed\n",
2223 				RTE_MAX_ETHPORTS);
2224 	}
2225 }
2226 
2227 static void
2228 force_quit(void)
2229 {
2230 	pmd_test_exit();
2231 	prompt_exit();
2232 }
2233 
2234 static void
2235 print_stats(void)
2236 {
2237 	uint8_t i;
2238 	const char clr[] = { 27, '[', '2', 'J', '\0' };	/* ANSI "ESC[2J": clear screen */
2239 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };	/* ANSI "ESC[1;1H": cursor to top left */
2240 
2241 	/* Clear screen and move to top left */
2242 	printf("%s%s", clr, top_left);
2243 
2244 	printf("\nPort statistics ====================================");
2245 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2246 		nic_stats_display(fwd_ports_ids[i]);
2247 }
2248 
2249 static void
2250 signal_handler(int signum)
2251 {
2252 	if (signum == SIGINT || signum == SIGTERM) {
2253 		printf("\nSignal %d received, preparing to exit...\n",
2254 				signum);
2255 #ifdef RTE_LIBRTE_PDUMP
2256 		/* uninitialize packet capture framework */
2257 		rte_pdump_uninit();
2258 #endif
2259 #ifdef RTE_LIBRTE_LATENCY_STATS
2260 		rte_latencystats_uninit();
2261 #endif
2262 		force_quit();
2263 		/* exit with the expected status */
2264 		signal(signum, SIG_DFL);
2265 		kill(getpid(), signum);
2266 	}
2267 }
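
/*
 * Note on the idiom above: restoring SIG_DFL and re-sending the signal to
 * ourselves lets the process terminate with the status a parent expects for
 * that signal; raise(signum) would be an equivalent way to re-send it here.
 */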
2268 
2269 int
2270 main(int argc, char** argv)
2271 {
2272 	int  diag;
2273 	uint8_t port_id;
2274 
2275 	signal(SIGINT, signal_handler);
2276 	signal(SIGTERM, signal_handler);
2277 
2278 	diag = rte_eal_init(argc, argv);
2279 	if (diag < 0)
2280 		rte_panic("Cannot init EAL\n");
2281 
2282 #ifdef RTE_LIBRTE_PDUMP
2283 	/* initialize packet capture framework */
2284 	rte_pdump_init(NULL);
2285 #endif
2286 
2287 	nb_ports = (portid_t) rte_eth_dev_count();
2288 	if (nb_ports == 0)
2289 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2290 
2291 	/* allocate port structures, and init them */
2292 	init_port();
2293 
2294 	set_def_fwd_config();
2295 	if (nb_lcores == 0)
2296 		rte_panic("Empty set of forwarding logical cores - check the "
2297 			  "core mask supplied in the command parameters\n");
2298 
2299 	/* Bitrate/latency stats disabled by default */
2300 #ifdef RTE_LIBRTE_BITRATE
2301 	bitrate_enabled = 0;
2302 #endif
2303 #ifdef RTE_LIBRTE_LATENCY_STATS
2304 	latencystats_enabled = 0;
2305 #endif
2306 
2307 	argc -= diag;
2308 	argv += diag;
2309 	if (argc > 1)
2310 		launch_args_parse(argc, argv);
2311 
2312 	if (tx_first && interactive)
2313 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2314 				"interactive mode.\n");
2315 	if (!nb_rxq && !nb_txq)
2316 		printf("Warning: Either rx or tx queues should be non-zero\n");
2317 
2318 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2319 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2320 		       "but nb_txq=%d will prevent to fully test it.\n",
2321 		       nb_rxq, nb_txq);
2322 
2323 	init_config();
2324 	if (start_port(RTE_PORT_ALL) != 0)
2325 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2326 
2327 	/* set all ports to promiscuous mode by default */
2328 	RTE_ETH_FOREACH_DEV(port_id)
2329 		rte_eth_promiscuous_enable(port_id);
2330 
2331 	/* Init metrics library */
2332 	rte_metrics_init(rte_socket_id());
2333 
2334 #ifdef RTE_LIBRTE_LATENCY_STATS
2335 	if (latencystats_enabled != 0) {
2336 		int ret = rte_latencystats_init(1, NULL);
2337 		if (ret)
2338 			printf("Warning: latencystats init() returned error %d\n", ret);
2339 		else
2340 			printf("Latencystats running on lcore %d\n",
2341 				latencystats_lcore_id);
2342 	}
2343 #endif
2344 
2345 	/* Setup bitrate stats */
2346 #ifdef RTE_LIBRTE_BITRATE
2347 	if (bitrate_enabled != 0) {
2348 		bitrate_data = rte_stats_bitrate_create();
2349 		if (bitrate_data == NULL)
2350 			rte_exit(EXIT_FAILURE,
2351 				"Could not allocate bitrate data.\n");
2352 		rte_stats_bitrate_reg(bitrate_data);
2353 	}
2354 #endif
2355 
2356 #ifdef RTE_LIBRTE_CMDLINE
2357 	if (strlen(cmdline_filename) != 0)
2358 		cmdline_read_from_file(cmdline_filename);
2359 
2360 	if (interactive == 1) {
2361 		if (auto_start) {
2362 			printf("Start automatic packet forwarding\n");
2363 			start_packet_forwarding(0);
2364 		}
2365 		prompt();
2366 		pmd_test_exit();
2367 	} else
2368 #endif
2369 	{
2370 		char c;
2371 		int rc;
2372 
2373 		printf("No commandline core given, start packet forwarding\n");
2374 		start_packet_forwarding(tx_first);
2375 		if (stats_period != 0) {
2376 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2377 			uint64_t timer_period;
2378 
2379 			/* Convert to number of cycles */
2380 			timer_period = stats_period * rte_get_timer_hz();
2381 
2382 			while (1) {
2383 				cur_time = rte_get_timer_cycles();
2384 				diff_time += cur_time - prev_time;
2385 
2386 				if (diff_time >= timer_period) {
2387 					print_stats();
2388 					/* Reset the timer */
2389 					diff_time = 0;
2390 				}
2391 				prev_time = cur_time;
2392 				/* Sleep to avoid unnecessary checks */
2393 				sleep(1);
2394 			}
2395 		}
2396 
2397 		printf("Press enter to exit\n");
2398 		rc = read(0, &c, 1);
2399 		pmd_test_exit();
2400 		if (rc < 0)
2401 			return 1;
2402 	}
2403 
2404 	return 0;
2405 }
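
/*
 * Invocation sketches (EAL arguments before "--", testpmd arguments after;
 * core, channel and queue counts are illustrative):
 *
 *	./testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
 *	./testpmd -l 0-3 -n 4 -- --tx-first --stats-period=1
 */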
2406