1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90 #include <rte_gro.h>
91 
92 #include "testpmd.h"
93 
94 uint16_t verbose_level = 0; /**< Silent by default. */
95 
96 /* Use the master core for the command line? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
99 uint8_t tx_first;
100 char cmdline_filename[PATH_MAX] = {0};
101 
102 /*
103  * NUMA support configuration.
104  * When set, NUMA support attempts to spread the allocation of the
105  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
106  * probed ports, across CPU sockets 0 and 1.
107  * Otherwise, all memory is allocated from CPU socket 0.
108  */
109 uint8_t numa_support = 1; /**< numa enabled by default */
110 
111 /*
112  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
113  * not configured.
114  */
115 uint8_t socket_num = UMA_NO_CONFIG;
116 
117 /*
118  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
119  */
120 uint8_t mp_anon = 0;
121 
122 /*
123  * Record the Ethernet address of peer target ports to which packets are
124  * forwarded.
125  * Must be instantiated with the Ethernet addresses of peer traffic generator
126  * ports.
127  */
128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
129 portid_t nb_peer_eth_addrs = 0;
130 
131 /*
132  * Probed Target Environment.
133  */
134 struct rte_port *ports;	       /**< For all probed ethernet ports. */
135 portid_t nb_ports;             /**< Number of probed ethernet ports. */
136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
137 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
138 
139 /*
140  * Test Forwarding Configuration.
141  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
142  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
143  */
144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
146 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
147 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
148 
149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
151 
152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
153 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
154 
155 /*
156  * Forwarding engines.
157  */
158 struct fwd_engine * fwd_engines[] = {
159 	&io_fwd_engine,
160 	&mac_fwd_engine,
161 	&mac_swap_engine,
162 	&flow_gen_engine,
163 	&rx_only_engine,
164 	&tx_only_engine,
165 	&csum_fwd_engine,
166 	&icmp_echo_engine,
167 #ifdef RTE_LIBRTE_IEEE1588
168 	&ieee1588_fwd_engine,
169 #endif
170 	NULL,
171 };
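/*
 * Illustrative sketch (not part of the original file): fwd_engines[] is
 * NULL-terminated, so an engine can be looked up by its fwd_mode_name with
 * a linear scan. The helper name below is hypothetical; testpmd's
 * command-line handler for "set fwd <mode>" performs an equivalent scan.
 */
static struct fwd_engine *
find_fwd_engine(const char *mode_name)
{
	unsigned int i;

	for (i = 0; fwd_engines[i] != NULL; i++) {
		if (strcmp(fwd_engines[i]->fwd_mode_name, mode_name) == 0)
			return fwd_engines[i];
	}
	return NULL; /* unknown forwarding mode */
}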
172 
173 struct fwd_config cur_fwd_config;
174 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
175 uint32_t retry_enabled;
176 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
177 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
178 
179 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
180 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
181                                       * specified on command-line. */
182 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
183 /*
184  * Configuration of packet segments used by the "txonly" processing engine.
185  */
186 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
187 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
188 	TXONLY_DEF_PACKET_LEN,
189 };
190 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
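/*
 * Example (illustrative): the runtime command "set txpkts 64,64" results in
 * tx_pkt_seg_lengths = {64, 64}, tx_pkt_nb_segs = 2 and tx_pkt_length = 128,
 * i.e. each TXONLY packet is built from two 64-byte segments.
 */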
191 
192 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
193 /**< Split policy for packets to TX. */
194 
195 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
196 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
197 
198 /* Whether the current configuration is in DCB mode; 0 means it is not. */
199 uint8_t dcb_config = 0;
200 
201 /* Whether DCB is in testing status. */
202 uint8_t dcb_test = 0;
203 
204 /*
205  * Configurable number of RX/TX queues.
206  */
207 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
208 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
209 
210 /*
211  * Configurable number of RX/TX ring descriptors.
212  */
213 #define RTE_TEST_RX_DESC_DEFAULT 128
214 #define RTE_TEST_TX_DESC_DEFAULT 512
215 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
216 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
217 
218 #define RTE_PMD_PARAM_UNSET -1
219 /*
220  * Configurable values of RX and TX ring threshold registers.
221  */
222 
223 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
224 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
225 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
226 
227 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
228 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
230 
231 /*
232  * Configurable value of RX free threshold.
233  */
234 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
235 
236 /*
237  * Configurable value of RX drop enable.
238  */
239 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
240 
241 /*
242  * Configurable value of TX free threshold.
243  */
244 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
245 
246 /*
247  * Configurable value of TX RS bit threshold.
248  */
249 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
250 
251 /*
252  * Configurable value of TX queue flags.
253  */
254 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
255 
256 /*
257  * Receive Side Scaling (RSS) configuration.
258  */
259 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
260 
261 /*
262  * Port topology configuration
263  */
264 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
265 
266 /*
267  * Avoid flushing all the RX queues before starting forwarding.
268  */
269 uint8_t no_flush_rx = 0; /* flush by default */
270 
271 /*
272  * Flow API isolated mode.
273  */
274 uint8_t flow_isolate_all;
275 
276 /*
277  * Avoid checking the link status when starting/stopping a port.
278  */
279 uint8_t no_link_check = 0; /* check by default */
280 
281 /*
282  * Enable link status change notification
283  */
284 uint8_t lsc_interrupt = 1; /* enabled by default */
285 
286 /*
287  * Enable device removal notification.
288  */
289 uint8_t rmv_interrupt = 1; /* enabled by default */
290 
291 /*
292  * Display or mask ether events
293  * Default to all events except VF_MBOX
294  */
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
300 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
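/*
 * Illustrative sketch: a category can be silenced by clearing its bit,
 * e.g. to stop printing MACsec events:
 *
 *	event_print_mask &= ~(UINT32_C(1) << RTE_ETH_EVENT_MACSEC);
 *
 * eth_event_callback() below tests this mask before printing an event.
 */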
301 
302 /*
303  * NIC bypass mode configuration options.
304  */
305 
306 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
307 /* The NIC bypass watchdog timeout. */
308 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
309 #endif
310 
311 
312 #ifdef RTE_LIBRTE_LATENCY_STATS
313 
314 /*
315  * Set when latency stats are enabled on the command line.
316  */
317 uint8_t latencystats_enabled;
318 
319 /*
320  * Lcore ID to serve latency statistics.
321  */
322 lcoreid_t latencystats_lcore_id = -1;
323 
324 #endif
325 
326 /*
327  * Ethernet device configuration.
328  */
329 struct rte_eth_rxmode rx_mode = {
330 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
331 	.split_hdr_size = 0,
332 	.header_split   = 0, /**< Header Split disabled. */
333 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
334 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
335 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
336 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
337 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
338 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
339 };
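/*
 * Example (illustrative): enabling jumbo frames amounts to adjusting two
 * fields of this structure before the ports are (re)configured:
 *
 *	rx_mode.jumbo_frame = 1;
 *	rx_mode.max_rx_pkt_len = 9000;
 *
 * which is roughly what the --max-pkt-len command-line option does for
 * values above ETHER_MAX_LEN.
 */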
340 
341 struct rte_fdir_conf fdir_conf = {
342 	.mode = RTE_FDIR_MODE_NONE,
343 	.pballoc = RTE_FDIR_PBALLOC_64K,
344 	.status = RTE_FDIR_REPORT_STATUS,
345 	.mask = {
346 		.vlan_tci_mask = 0x0,
347 		.ipv4_mask     = {
348 			.src_ip = 0xFFFFFFFF,
349 			.dst_ip = 0xFFFFFFFF,
350 		},
351 		.ipv6_mask     = {
352 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
353 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
354 		},
355 		.src_port_mask = 0xFFFF,
356 		.dst_port_mask = 0xFFFF,
357 		.mac_addr_byte_mask = 0xFF,
358 		.tunnel_type_mask = 1,
359 		.tunnel_id_mask = 0xFFFFFFFF,
360 	},
361 	.drop_queue = 127,
362 };
363 
364 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
365 
366 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
367 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
368 
369 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
370 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
371 
372 uint16_t nb_tx_queue_stats_mappings = 0;
373 uint16_t nb_rx_queue_stats_mappings = 0;
374 
375 unsigned int num_sockets = 0;
376 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
377 
378 #ifdef RTE_LIBRTE_BITRATE
379 /* Bitrate statistics */
380 struct rte_stats_bitrates *bitrate_data;
381 lcoreid_t bitrate_lcore_id;
382 uint8_t bitrate_enabled;
383 #endif
384 
385 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
386 
387 /* Forward function declarations */
388 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
389 static void check_all_ports_link_status(uint32_t port_mask);
390 static int eth_event_callback(portid_t port_id,
391 			      enum rte_eth_event_type type,
392 			      void *param, void *ret_param);
393 
394 /*
395  * Check if all the ports are started.
396  * If yes, return positive value. If not, return zero.
397  */
398 static int all_ports_started(void);
399 
400 /*
401  * Helper function to check if socket is already discovered.
402  * If yes, return positive value. If not, return zero.
403  */
404 int
405 new_socket_id(unsigned int socket_id)
406 {
407 	unsigned int i;
408 
409 	for (i = 0; i < num_sockets; i++) {
410 		if (socket_ids[i] == socket_id)
411 			return 0;
412 	}
413 	return 1;
414 }
415 
416 /*
417  * Setup default configuration.
418  */
419 static void
420 set_default_fwd_lcores_config(void)
421 {
422 	unsigned int i;
423 	unsigned int nb_lc;
424 	unsigned int sock_num;
425 
426 	nb_lc = 0;
427 	for (i = 0; i < RTE_MAX_LCORE; i++) {
428 		sock_num = rte_lcore_to_socket_id(i);
429 		if (new_socket_id(sock_num)) {
430 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
431 				rte_exit(EXIT_FAILURE,
432 					 "Total sockets greater than %u\n",
433 					 RTE_MAX_NUMA_NODES);
434 			}
435 			socket_ids[num_sockets++] = sock_num;
436 		}
437 		if (!rte_lcore_is_enabled(i))
438 			continue;
439 		if (i == rte_get_master_lcore())
440 			continue;
441 		fwd_lcores_cpuids[nb_lc++] = i;
442 	}
443 	nb_lcores = (lcoreid_t) nb_lc;
444 	nb_cfg_lcores = nb_lcores;
445 	nb_fwd_lcores = 1;
446 }
447 
448 static void
449 set_def_peer_eth_addrs(void)
450 {
451 	portid_t i;
452 
453 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
454 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
455 		peer_eth_addrs[i].addr_bytes[5] = i;
456 	}
457 }
458 
459 static void
460 set_default_fwd_ports_config(void)
461 {
462 	portid_t pt_id;
463 
464 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
465 		fwd_ports_ids[pt_id] = pt_id;
466 
467 	nb_cfg_ports = nb_ports;
468 	nb_fwd_ports = nb_ports;
469 }
470 
471 void
472 set_def_fwd_config(void)
473 {
474 	set_default_fwd_lcores_config();
475 	set_def_peer_eth_addrs();
476 	set_default_fwd_ports_config();
477 }
478 
479 /*
480  * Configuration initialisation done once at init time.
481  */
482 static void
483 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
484 		 unsigned int socket_id)
485 {
486 	char pool_name[RTE_MEMPOOL_NAMESIZE];
487 	struct rte_mempool *rte_mp = NULL;
488 	uint32_t mb_size;
489 
490 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
491 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
492 
493 	RTE_LOG(INFO, USER1,
494 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
495 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
496 
497 	if (mp_anon != 0) {
498 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
499 			mb_size, (unsigned) mb_mempool_cache,
500 			sizeof(struct rte_pktmbuf_pool_private),
501 			socket_id, 0);
502 		if (rte_mp == NULL)
503 			goto err;
504 
505 		if (rte_mempool_populate_anon(rte_mp) == 0) {
506 			rte_mempool_free(rte_mp);
507 			rte_mp = NULL;
508 			goto err;
509 		}
510 		rte_pktmbuf_pool_init(rte_mp, NULL);
511 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
512 	} else {
513 		/* wrapper to rte_mempool_create() */
514 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
515 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
516 	}
517 
518 err:
519 	if (rte_mp == NULL) {
520 		rte_exit(EXIT_FAILURE,
521 			"Creation of mbuf pool for socket %u failed: %s\n",
522 			socket_id, rte_strerror(rte_errno));
523 	} else if (verbose_level > 0) {
524 		rte_mempool_dump(stdout, rte_mp);
525 	}
526 }
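/*
 * Illustrative sketch (not part of the original file): once created above,
 * a pool is retrieved by socket ID through mbuf_pool_find() and mbufs are
 * allocated from it, e.g.:
 *
 *	struct rte_mempool *mp = mbuf_pool_find(0);	(pool on socket 0)
 *	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
 *	if (m != NULL)
 *		rte_pktmbuf_free(m);
 */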
527 
528 /*
529  * Check whether the given socket ID is valid in NUMA mode;
530  * return 0 if valid, -1 otherwise.
531  */
532 static int
533 check_socket_id(const unsigned int socket_id)
534 {
535 	static int warning_once = 0;
536 
537 	if (new_socket_id(socket_id)) {
538 		if (!warning_once && numa_support)
539 			printf("Warning: NUMA should be configured manually by"
540 			       " using --port-numa-config and"
541 			       " --ring-numa-config parameters along with"
542 			       " --numa.\n");
543 		warning_once = 1;
544 		return -1;
545 	}
546 	return 0;
547 }
548 
549 static void
550 init_config(void)
551 {
552 	portid_t pid;
553 	struct rte_port *port;
554 	struct rte_mempool *mbp;
555 	unsigned int nb_mbuf_per_pool;
556 	lcoreid_t  lc_id;
557 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
558 
559 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
560 
561 	if (numa_support) {
562 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
563 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
564 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
565 	}
566 
567 	/* Configuration of logical cores. */
568 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
569 				sizeof(struct fwd_lcore *) * nb_lcores,
570 				RTE_CACHE_LINE_SIZE);
571 	if (fwd_lcores == NULL) {
572 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
573 							"failed\n", nb_lcores);
574 	}
575 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
576 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
577 					       sizeof(struct fwd_lcore),
578 					       RTE_CACHE_LINE_SIZE);
579 		if (fwd_lcores[lc_id] == NULL) {
580 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
581 								"failed\n");
582 		}
583 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
584 	}
585 
586 	RTE_ETH_FOREACH_DEV(pid) {
587 		port = &ports[pid];
588 		rte_eth_dev_info_get(pid, &port->dev_info);
589 
590 		if (numa_support) {
591 			if (port_numa[pid] != NUMA_NO_CONFIG)
592 				port_per_socket[port_numa[pid]]++;
593 			else {
594 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
595 
596 				/* if socket_id is invalid, set to 0 */
597 				if (check_socket_id(socket_id) < 0)
598 					socket_id = 0;
599 				port_per_socket[socket_id]++;
600 			}
601 		}
602 
603 		/* set flag to initialize port/queue */
604 		port->need_reconfig = 1;
605 		port->need_reconfig_queues = 1;
606 	}
607 
608 	/*
609 	 * Create mbuf pools.
610 	 * If NUMA support is disabled, create a single mbuf pool in
611 	 * socket 0 memory by default.
612 	 * Otherwise, create an mbuf pool in the memory of each discovered socket.
613 	 *
614 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
615 	 * nb_txd can be configured at run time.
616 	 */
617 	if (param_total_num_mbufs)
618 		nb_mbuf_per_pool = param_total_num_mbufs;
619 	else {
620 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
621 			(nb_lcores * mb_mempool_cache) +
622 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
623 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
624 	}
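	/*
	 * Worked example with assumed header values (illustrative only):
	 * with RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, 4 forwarding lcores and the default cache
	 * of 250, this gives 2048 + (4 * 250) + 2048 + 512 = 5608 mbufs
	 * per pool, multiplied by RTE_MAX_ETHPORTS.
	 */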
625 
626 	if (numa_support) {
627 		uint8_t i;
628 
629 		for (i = 0; i < num_sockets; i++)
630 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
631 					 socket_ids[i]);
632 	} else {
633 		if (socket_num == UMA_NO_CONFIG)
634 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
635 		else
636 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
637 						 socket_num);
638 	}
639 
640 	init_port_config();
641 
642 	/*
643 	 * Record which mbuf pool each logical core should use, if needed.
644 	 */
645 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
646 		mbp = mbuf_pool_find(
647 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
648 
649 		if (mbp == NULL)
650 			mbp = mbuf_pool_find(0);
651 		fwd_lcores[lc_id]->mbp = mbp;
652 	}
653 
654 	/* Configuration of packet forwarding streams. */
655 	if (init_fwd_streams() < 0)
656 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
657 
658 	fwd_config_setup();
659 }
660 
661 
662 void
663 reconfig(portid_t new_port_id, unsigned socket_id)
664 {
665 	struct rte_port *port;
666 
667 	/* Reconfiguration of Ethernet ports. */
668 	port = &ports[new_port_id];
669 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
670 
671 	/* set flag to initialize port/queue */
672 	port->need_reconfig = 1;
673 	port->need_reconfig_queues = 1;
674 	port->socket_id = socket_id;
675 
676 	init_port_config();
677 }
678 
679 
680 int
681 init_fwd_streams(void)
682 {
683 	portid_t pid;
684 	struct rte_port *port;
685 	streamid_t sm_id, nb_fwd_streams_new;
686 	queueid_t q;
687 
688 	/* Set the socket ID according to whether NUMA is enabled. */
689 	RTE_ETH_FOREACH_DEV(pid) {
690 		port = &ports[pid];
691 		if (nb_rxq > port->dev_info.max_rx_queues) {
692 			printf("Fail: nb_rxq(%d) is greater than "
693 				"max_rx_queues(%d)\n", nb_rxq,
694 				port->dev_info.max_rx_queues);
695 			return -1;
696 		}
697 		if (nb_txq > port->dev_info.max_tx_queues) {
698 			printf("Fail: nb_txq(%d) is greater than "
699 				"max_tx_queues(%d)\n", nb_txq,
700 				port->dev_info.max_tx_queues);
701 			return -1;
702 		}
703 		if (numa_support) {
704 			if (port_numa[pid] != NUMA_NO_CONFIG)
705 				port->socket_id = port_numa[pid];
706 			else {
707 				port->socket_id = rte_eth_dev_socket_id(pid);
708 
709 				/* if socket_id is invalid, set to 0 */
710 				if (check_socket_id(port->socket_id) < 0)
711 					port->socket_id = 0;
712 			}
713 		}
714 		else {
715 			if (socket_num == UMA_NO_CONFIG)
716 				port->socket_id = 0;
717 			else
718 				port->socket_id = socket_num;
719 		}
720 	}
721 
722 	q = RTE_MAX(nb_rxq, nb_txq);
723 	if (q == 0) {
724 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
725 		return -1;
726 	}
727 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
728 	if (nb_fwd_streams_new == nb_fwd_streams)
729 		return 0;
730 	/* clear the old */
731 	if (fwd_streams != NULL) {
732 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
733 			if (fwd_streams[sm_id] == NULL)
734 				continue;
735 			rte_free(fwd_streams[sm_id]);
736 			fwd_streams[sm_id] = NULL;
737 		}
738 		rte_free(fwd_streams);
739 		fwd_streams = NULL;
740 	}
741 
742 	/* init new */
743 	nb_fwd_streams = nb_fwd_streams_new;
744 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
745 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
746 	if (fwd_streams == NULL)
747 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
748 						"failed\n", nb_fwd_streams);
749 
750 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
751 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
752 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
753 		if (fwd_streams[sm_id] == NULL)
754 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
755 								" failed\n");
756 	}
757 
758 	return 0;
759 }
760 
761 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
762 static void
763 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
764 {
765 	unsigned int total_burst;
766 	unsigned int nb_burst;
767 	unsigned int burst_stats[3];
768 	uint16_t pktnb_stats[3];
769 	uint16_t nb_pkt;
770 	int burst_percent[3];
771 
772 	/*
773 	 * First compute the total number of packet bursts and the
774 	 * two highest numbers of bursts of the same number of packets.
775 	 */
776 	total_burst = 0;
777 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
778 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
779 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
780 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
781 		if (nb_burst == 0)
782 			continue;
783 		total_burst += nb_burst;
784 		if (nb_burst > burst_stats[0]) {
785 			burst_stats[1] = burst_stats[0];
786 			pktnb_stats[1] = pktnb_stats[0];
787 			burst_stats[0] = nb_burst;
788 			pktnb_stats[0] = nb_pkt;
789 		} else if (nb_burst > burst_stats[1]) {
			/* also track the second-highest burst size */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
790 	}
791 	if (total_burst == 0)
792 		return;
793 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
794 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
795 	       burst_percent[0], (int) pktnb_stats[0]);
796 	if (burst_stats[0] == total_burst) {
797 		printf("]\n");
798 		return;
799 	}
800 	if (burst_stats[0] + burst_stats[1] == total_burst) {
801 		printf(" + %d%% of %d pkts]\n",
802 		       100 - burst_percent[0], pktnb_stats[1]);
803 		return;
804 	}
805 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
806 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
807 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
808 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
809 		return;
810 	}
811 	printf(" + %d%% of %d pkts + %d%% of others]\n",
812 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
813 }
814 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
815 
816 static void
817 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
818 {
819 	struct rte_port *port;
820 	uint8_t i;
821 
822 	static const char *fwd_stats_border = "----------------------";
823 
824 	port = &ports[port_id];
825 	printf("\n  %s Forward statistics for port %-2d %s\n",
826 	       fwd_stats_border, port_id, fwd_stats_border);
827 
828 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
829 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
830 		       "%-"PRIu64"\n",
831 		       stats->ipackets, stats->imissed,
832 		       (uint64_t) (stats->ipackets + stats->imissed));
833 
834 		if (cur_fwd_eng == &csum_fwd_engine)
835 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
836 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
837 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
838 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
839 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
840 		}
841 
842 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
843 		       "%-"PRIu64"\n",
844 		       stats->opackets, port->tx_dropped,
845 		       (uint64_t) (stats->opackets + port->tx_dropped));
846 	}
847 	else {
848 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
849 		       "%14"PRIu64"\n",
850 		       stats->ipackets, stats->imissed,
851 		       (uint64_t) (stats->ipackets + stats->imissed));
852 
853 		if (cur_fwd_eng == &csum_fwd_engine)
854 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
855 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
856 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
857 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
858 			printf("  RX-nombufs:             %14"PRIu64"\n",
859 			       stats->rx_nombuf);
860 		}
861 
862 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
863 		       "%14"PRIu64"\n",
864 		       stats->opackets, port->tx_dropped,
865 		       (uint64_t) (stats->opackets + port->tx_dropped));
866 	}
867 
868 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
869 	if (port->rx_stream)
870 		pkt_burst_stats_display("RX",
871 			&port->rx_stream->rx_burst_stats);
872 	if (port->tx_stream)
873 		pkt_burst_stats_display("TX",
874 			&port->tx_stream->tx_burst_stats);
875 #endif
876 
877 	if (port->rx_queue_stats_mapping_enabled) {
878 		printf("\n");
879 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
880 			printf("  Stats reg %2d RX-packets:%14"PRIu64
881 			       "     RX-errors:%14"PRIu64
882 			       "    RX-bytes:%14"PRIu64"\n",
883 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
884 		}
885 		printf("\n");
886 	}
887 	if (port->tx_queue_stats_mapping_enabled) {
888 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
889 			printf("  Stats reg %2d TX-packets:%14"PRIu64
890 			       "                                 TX-bytes:%14"PRIu64"\n",
891 			       i, stats->q_opackets[i], stats->q_obytes[i]);
892 		}
893 	}
894 
895 	printf("  %s--------------------------------%s\n",
896 	       fwd_stats_border, fwd_stats_border);
897 }
898 
899 static void
900 fwd_stream_stats_display(streamid_t stream_id)
901 {
902 	struct fwd_stream *fs;
903 	static const char *fwd_top_stats_border = "-------";
904 
905 	fs = fwd_streams[stream_id];
906 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
907 	    (fs->fwd_dropped == 0))
908 		return;
909 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
910 	       "TX Port=%2d/Queue=%2d %s\n",
911 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
912 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
913 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
914 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
915 
916 	/* if checksum mode */
917 	if (cur_fwd_eng == &csum_fwd_engine) {
918 		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
919 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
920 	}
921 
922 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
923 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
924 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
925 #endif
926 }
927 
928 static void
929 flush_fwd_rx_queues(void)
930 {
931 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
932 	portid_t  rxp;
933 	portid_t port_id;
934 	queueid_t rxq;
935 	uint16_t  nb_rx;
936 	uint16_t  i;
937 	uint8_t   j;
938 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
939 	uint64_t timer_period;
940 
941 	/* convert to number of cycles */
942 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
943 
944 	for (j = 0; j < 2; j++) {
945 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
946 			for (rxq = 0; rxq < nb_rxq; rxq++) {
947 				port_id = fwd_ports_ids[rxp];
948 				/*
949 				 * testpmd can get stuck in the do/while loop below
950 				 * if rte_eth_rx_burst() always returns a nonzero
951 				 * number of packets, so a timer is used to exit
952 				 * the loop after a 1-second timeout.
953 				 */
954 				prev_tsc = rte_rdtsc();
955 				do {
956 					nb_rx = rte_eth_rx_burst(port_id, rxq,
957 						pkts_burst, MAX_PKT_BURST);
958 					for (i = 0; i < nb_rx; i++)
959 						rte_pktmbuf_free(pkts_burst[i]);
960 
961 					cur_tsc = rte_rdtsc();
962 					diff_tsc = cur_tsc - prev_tsc;
963 					timer_tsc += diff_tsc;
964 				} while ((nb_rx > 0) &&
965 					(timer_tsc < timer_period));
966 				timer_tsc = 0;
967 			}
968 		}
969 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
970 	}
971 }
972 
973 static void
974 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
975 {
976 	struct fwd_stream **fsm;
977 	streamid_t nb_fs;
978 	streamid_t sm_id;
979 #ifdef RTE_LIBRTE_BITRATE
980 	uint64_t tics_per_1sec;
981 	uint64_t tics_datum;
982 	uint64_t tics_current;
983 	uint8_t idx_port, cnt_ports;
984 
985 	cnt_ports = rte_eth_dev_count();
986 	tics_datum = rte_rdtsc();
987 	tics_per_1sec = rte_get_timer_hz();
988 #endif
989 	fsm = &fwd_streams[fc->stream_idx];
990 	nb_fs = fc->stream_nb;
991 	do {
992 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
993 			(*pkt_fwd)(fsm[sm_id]);
994 #ifdef RTE_LIBRTE_BITRATE
995 		if (bitrate_enabled != 0 &&
996 				bitrate_lcore_id == rte_lcore_id()) {
997 			tics_current = rte_rdtsc();
998 			if (tics_current - tics_datum >= tics_per_1sec) {
999 				/* Periodic bitrate calculation */
1000 				for (idx_port = 0;
1001 						idx_port < cnt_ports;
1002 						idx_port++)
1003 					rte_stats_bitrate_calc(bitrate_data,
1004 						idx_port);
1005 				tics_datum = tics_current;
1006 			}
1007 		}
1008 #endif
1009 #ifdef RTE_LIBRTE_LATENCY_STATS
1010 		if (latencystats_enabled != 0 &&
1011 				latencystats_lcore_id == rte_lcore_id())
1012 			rte_latencystats_update();
1013 #endif
1014 
1015 	} while (! fc->stopped);
1016 }
1017 
1018 static int
1019 start_pkt_forward_on_core(void *fwd_arg)
1020 {
1021 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1022 			     cur_fwd_config.fwd_eng->packet_fwd);
1023 	return 0;
1024 }
1025 
1026 /*
1027  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1028  * Used to start communication flows in network loopback test configurations.
1029  */
1030 static int
1031 run_one_txonly_burst_on_core(void *fwd_arg)
1032 {
1033 	struct fwd_lcore *fwd_lc;
1034 	struct fwd_lcore tmp_lcore;
1035 
1036 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1037 	tmp_lcore = *fwd_lc;
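	/*
	 * Run on a copy whose "stopped" flag is already set: the do/while
	 * loop in run_pkt_fwd_on_lcore() then executes exactly one pass,
	 * i.e. a single burst per stream.
	 */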
1038 	tmp_lcore.stopped = 1;
1039 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1040 	return 0;
1041 }
1042 
1043 /*
1044  * Launch packet forwarding:
1045  *     - Set up the per-port forwarding context.
1046  *     - Launch logical cores with their forwarding configuration.
1047  */
1048 static void
1049 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1050 {
1051 	port_fwd_begin_t port_fwd_begin;
1052 	unsigned int i;
1053 	unsigned int lc_id;
1054 	int diag;
1055 
1056 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1057 	if (port_fwd_begin != NULL) {
1058 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1059 			(*port_fwd_begin)(fwd_ports_ids[i]);
1060 	}
1061 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1062 		lc_id = fwd_lcores_cpuids[i];
1063 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1064 			fwd_lcores[i]->stopped = 0;
1065 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1066 						     fwd_lcores[i], lc_id);
1067 			if (diag != 0)
1068 				printf("launch lcore %u failed - diag=%d\n",
1069 				       lc_id, diag);
1070 		}
1071 	}
1072 }
1073 
1074 /*
1075  * Launch packet forwarding configuration.
1076  */
1077 void
1078 start_packet_forwarding(int with_tx_first)
1079 {
1080 	port_fwd_begin_t port_fwd_begin;
1081 	port_fwd_end_t  port_fwd_end;
1082 	struct rte_port *port;
1083 	unsigned int i;
1084 	portid_t   pt_id;
1085 	streamid_t sm_id;
1086 
1087 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1088 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1089 
1090 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1091 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1092 
1093 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1094 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1095 		(!nb_rxq || !nb_txq))
1096 		rte_exit(EXIT_FAILURE,
1097 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
1098 			cur_fwd_eng->fwd_mode_name);
1099 
1100 	if (all_ports_started() == 0) {
1101 		printf("Not all ports were started\n");
1102 		return;
1103 	}
1104 	if (test_done == 0) {
1105 		printf("Packet forwarding already started\n");
1106 		return;
1107 	}
1108 
1109 	if (init_fwd_streams() < 0) {
1110 		printf("Fail from init_fwd_streams()\n");
1111 		return;
1112 	}
1113 
1114 	if (dcb_test) {
1115 		for (i = 0; i < nb_fwd_ports; i++) {
1116 			pt_id = fwd_ports_ids[i];
1117 			port = &ports[pt_id];
1118 			if (!port->dcb_flag) {
1119 				printf("In DCB mode, all forwarding ports must "
1120 				       "be configured in this mode.\n");
1121 				return;
1122 			}
1123 		}
1124 		if (nb_fwd_lcores == 1) {
1125 			printf("In DCB mode,the nb forwarding cores "
1126                                "should be larger than 1.\n");
1127 			return;
1128 		}
1129 	}
1130 	test_done = 0;
1131 
1132 	if (!no_flush_rx)
1133 		flush_fwd_rx_queues();
1134 
1135 	fwd_config_setup();
1136 	pkt_fwd_config_display(&cur_fwd_config);
1137 	rxtx_config_display();
1138 
1139 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1140 		pt_id = fwd_ports_ids[i];
1141 		port = &ports[pt_id];
1142 		rte_eth_stats_get(pt_id, &port->stats);
1143 		port->tx_dropped = 0;
1144 
1145 		map_port_queue_stats_mapping_registers(pt_id, port);
1146 	}
1147 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1148 		fwd_streams[sm_id]->rx_packets = 0;
1149 		fwd_streams[sm_id]->tx_packets = 0;
1150 		fwd_streams[sm_id]->fwd_dropped = 0;
1151 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1152 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1153 
1154 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1155 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1156 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1157 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1158 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1159 #endif
1160 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1161 		fwd_streams[sm_id]->core_cycles = 0;
1162 #endif
1163 	}
1164 	if (with_tx_first) {
1165 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1166 		if (port_fwd_begin != NULL) {
1167 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1168 				(*port_fwd_begin)(fwd_ports_ids[i]);
1169 		}
1170 		while (with_tx_first--) {
1171 			launch_packet_forwarding(
1172 					run_one_txonly_burst_on_core);
1173 			rte_eal_mp_wait_lcore();
1174 		}
1175 		port_fwd_end = tx_only_engine.port_fwd_end;
1176 		if (port_fwd_end != NULL) {
1177 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1178 				(*port_fwd_end)(fwd_ports_ids[i]);
1179 		}
1180 	}
1181 	launch_packet_forwarding(start_pkt_forward_on_core);
1182 }
1183 
1184 void
1185 stop_packet_forwarding(void)
1186 {
1187 	struct rte_eth_stats stats;
1188 	struct rte_port *port;
1189 	port_fwd_end_t  port_fwd_end;
1190 	int i;
1191 	portid_t   pt_id;
1192 	streamid_t sm_id;
1193 	lcoreid_t  lc_id;
1194 	uint64_t total_recv;
1195 	uint64_t total_xmit;
1196 	uint64_t total_rx_dropped;
1197 	uint64_t total_tx_dropped;
1198 	uint64_t total_rx_nombuf;
1199 	uint64_t tx_dropped;
1200 	uint64_t rx_bad_ip_csum;
1201 	uint64_t rx_bad_l4_csum;
1202 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1203 	uint64_t fwd_cycles;
1204 #endif
1205 	static const char *acc_stats_border = "+++++++++++++++";
1206 
1207 	if (test_done) {
1208 		printf("Packet forwarding not started\n");
1209 		return;
1210 	}
1211 	printf("Telling cores to stop...");
1212 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1213 		fwd_lcores[lc_id]->stopped = 1;
1214 	printf("\nWaiting for lcores to finish...\n");
1215 	rte_eal_mp_wait_lcore();
1216 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1217 	if (port_fwd_end != NULL) {
1218 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1219 			pt_id = fwd_ports_ids[i];
1220 			(*port_fwd_end)(pt_id);
1221 		}
1222 	}
1223 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1224 	fwd_cycles = 0;
1225 #endif
1226 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1227 		if (cur_fwd_config.nb_fwd_streams >
1228 		    cur_fwd_config.nb_fwd_ports) {
1229 			fwd_stream_stats_display(sm_id);
1230 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1231 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1232 		} else {
1233 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1234 				fwd_streams[sm_id];
1235 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1236 				fwd_streams[sm_id];
1237 		}
1238 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1239 		tx_dropped = (uint64_t) (tx_dropped +
1240 					 fwd_streams[sm_id]->fwd_dropped);
1241 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1242 
1243 		rx_bad_ip_csum =
1244 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1245 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1246 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1247 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1248 							rx_bad_ip_csum;
1249 
1250 		rx_bad_l4_csum =
1251 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1252 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1253 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1254 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1255 							rx_bad_l4_csum;
1256 
1257 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1258 		fwd_cycles = (uint64_t) (fwd_cycles +
1259 					 fwd_streams[sm_id]->core_cycles);
1260 #endif
1261 	}
1262 	total_recv = 0;
1263 	total_xmit = 0;
1264 	total_rx_dropped = 0;
1265 	total_tx_dropped = 0;
1266 	total_rx_nombuf  = 0;
1267 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1268 		pt_id = fwd_ports_ids[i];
1269 
1270 		port = &ports[pt_id];
1271 		rte_eth_stats_get(pt_id, &stats);
1272 		stats.ipackets -= port->stats.ipackets;
1273 		port->stats.ipackets = 0;
1274 		stats.opackets -= port->stats.opackets;
1275 		port->stats.opackets = 0;
1276 		stats.ibytes   -= port->stats.ibytes;
1277 		port->stats.ibytes = 0;
1278 		stats.obytes   -= port->stats.obytes;
1279 		port->stats.obytes = 0;
1280 		stats.imissed  -= port->stats.imissed;
1281 		port->stats.imissed = 0;
1282 		stats.oerrors  -= port->stats.oerrors;
1283 		port->stats.oerrors = 0;
1284 		stats.rx_nombuf -= port->stats.rx_nombuf;
1285 		port->stats.rx_nombuf = 0;
1286 
1287 		total_recv += stats.ipackets;
1288 		total_xmit += stats.opackets;
1289 		total_rx_dropped += stats.imissed;
1290 		total_tx_dropped += port->tx_dropped;
1291 		total_rx_nombuf  += stats.rx_nombuf;
1292 
1293 		fwd_port_stats_display(pt_id, &stats);
1294 	}
1295 	printf("\n  %s Accumulated forward statistics for all ports"
1296 	       "%s\n",
1297 	       acc_stats_border, acc_stats_border);
1298 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1299 	       "%-"PRIu64"\n"
1300 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1301 	       "%-"PRIu64"\n",
1302 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1303 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1304 	if (total_rx_nombuf > 0)
1305 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1306 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1307 	       "%s\n",
1308 	       acc_stats_border, acc_stats_border);
1309 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1310 	if (total_recv > 0)
1311 		printf("\n  CPU cycles/packet=%u (total cycles="
1312 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1313 		       (unsigned int)(fwd_cycles / total_recv),
1314 		       fwd_cycles, total_recv);
1315 #endif
1316 	printf("\nDone.\n");
1317 	test_done = 1;
1318 }
1319 
1320 void
1321 dev_set_link_up(portid_t pid)
1322 {
1323 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1324 		printf("\nSet link up fail.\n");
1325 }
1326 
1327 void
1328 dev_set_link_down(portid_t pid)
1329 {
1330 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1331 		printf("\nSet link down fail.\n");
1332 }
1333 
1334 static int
1335 all_ports_started(void)
1336 {
1337 	portid_t pi;
1338 	struct rte_port *port;
1339 
1340 	RTE_ETH_FOREACH_DEV(pi) {
1341 		port = &ports[pi];
1342 		/* Check if there is a port which is not started */
1343 		if ((port->port_status != RTE_PORT_STARTED) &&
1344 			(port->slave_flag == 0))
1345 			return 0;
1346 	}
1347 
1348 	/* All ports are started. */
1349 	return 1;
1350 }
1351 
1352 int
1353 all_ports_stopped(void)
1354 {
1355 	portid_t pi;
1356 	struct rte_port *port;
1357 
1358 	RTE_ETH_FOREACH_DEV(pi) {
1359 		port = &ports[pi];
1360 		if ((port->port_status != RTE_PORT_STOPPED) &&
1361 			(port->slave_flag == 0))
1362 			return 0;
1363 	}
1364 
1365 	return 1;
1366 }
1367 
1368 int
1369 port_is_started(portid_t port_id)
1370 {
1371 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1372 		return 0;
1373 
1374 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1375 		return 0;
1376 
1377 	return 1;
1378 }
1379 
1380 static int
1381 port_is_closed(portid_t port_id)
1382 {
1383 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1384 		return 0;
1385 
1386 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1387 		return 0;
1388 
1389 	return 1;
1390 }
1391 
1392 int
1393 start_port(portid_t pid)
1394 {
1395 	int diag, need_check_link_status = -1;
1396 	portid_t pi;
1397 	queueid_t qi;
1398 	struct rte_port *port;
1399 	struct ether_addr mac_addr;
1400 	enum rte_eth_event_type event_type;
1401 
1402 	if (port_id_is_invalid(pid, ENABLED_WARN))
1403 		return 0;
1404 
1405 	if (dcb_config)
1406 		dcb_test = 1;
1407 	RTE_ETH_FOREACH_DEV(pi) {
1408 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1409 			continue;
1410 
1411 		need_check_link_status = 0;
1412 		port = &ports[pi];
1413 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1414 						 RTE_PORT_HANDLING) == 0) {
1415 			printf("Port %d is now not stopped\n", pi);
1416 			continue;
1417 		}
1418 
1419 		if (port->need_reconfig > 0) {
1420 			port->need_reconfig = 0;
1421 
1422 			if (flow_isolate_all) {
1423 				int ret = port_flow_isolate(pi, 1);
1424 				if (ret) {
1425 					printf("Failed to apply isolated"
1426 					       " mode on port %d\n", pi);
1427 					return -1;
1428 				}
1429 			}
1430 
1431 			printf("Configuring Port %d (socket %u)\n", pi,
1432 					port->socket_id);
1433 			/* configure port */
1434 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1435 						&(port->dev_conf));
1436 			if (diag != 0) {
1437 				if (rte_atomic16_cmpset(&(port->port_status),
1438 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1439 					printf("Port %d can not be set back "
1440 							"to stopped\n", pi);
1441 				printf("Fail to configure port %d\n", pi);
1442 				/* try to reconfigure port next time */
1443 				port->need_reconfig = 1;
1444 				return -1;
1445 			}
1446 		}
1447 		if (port->need_reconfig_queues > 0) {
1448 			port->need_reconfig_queues = 0;
1449 			/* setup tx queues */
1450 			for (qi = 0; qi < nb_txq; qi++) {
1451 				if ((numa_support) &&
1452 					(txring_numa[pi] != NUMA_NO_CONFIG))
1453 					diag = rte_eth_tx_queue_setup(pi, qi,
1454 						nb_txd,txring_numa[pi],
1455 						&(port->tx_conf));
1456 				else
1457 					diag = rte_eth_tx_queue_setup(pi, qi,
1458 						nb_txd,port->socket_id,
1459 						&(port->tx_conf));
1460 
1461 				if (diag == 0)
1462 					continue;
1463 
1464 				/* Fail to setup tx queue, return */
1465 				if (rte_atomic16_cmpset(&(port->port_status),
1466 							RTE_PORT_HANDLING,
1467 							RTE_PORT_STOPPED) == 0)
1468 					printf("Port %d can not be set back "
1469 							"to stopped\n", pi);
1470 				printf("Fail to configure port %d tx queues\n", pi);
1471 				/* try to reconfigure queues next time */
1472 				port->need_reconfig_queues = 1;
1473 				return -1;
1474 			}
1475 			/* setup rx queues */
1476 			for (qi = 0; qi < nb_rxq; qi++) {
1477 				if ((numa_support) &&
1478 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1479 					struct rte_mempool * mp =
1480 						mbuf_pool_find(rxring_numa[pi]);
1481 					if (mp == NULL) {
1482 						printf("Failed to setup RX queue:"
1483 							" no mempool allocated"
1484 							" on socket %d\n",
1485 							rxring_numa[pi]);
1486 						return -1;
1487 					}
1488 
1489 					diag = rte_eth_rx_queue_setup(pi, qi,
1490 					     nb_rxd,rxring_numa[pi],
1491 					     &(port->rx_conf),mp);
1492 				} else {
1493 					struct rte_mempool *mp =
1494 						mbuf_pool_find(port->socket_id);
1495 					if (mp == NULL) {
1496 						printf("Failed to setup RX queue:"
1497 							" no mempool allocated"
1498 							" on socket %d\n",
1499 							port->socket_id);
1500 						return -1;
1501 					}
1502 					diag = rte_eth_rx_queue_setup(pi, qi,
1503 					     nb_rxd,port->socket_id,
1504 					     &(port->rx_conf), mp);
1505 				}
1506 				if (diag == 0)
1507 					continue;
1508 
1509 				/* Fail to setup rx queue, return */
1510 				if (rte_atomic16_cmpset(&(port->port_status),
1511 							RTE_PORT_HANDLING,
1512 							RTE_PORT_STOPPED) == 0)
1513 					printf("Port %d can not be set back "
1514 							"to stopped\n", pi);
1515 				printf("Fail to configure port %d rx queues\n", pi);
1516 				/* try to reconfigure queues next time */
1517 				port->need_reconfig_queues = 1;
1518 				return -1;
1519 			}
1520 		}
1521 
1522 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1523 		     event_type < RTE_ETH_EVENT_MAX;
1524 		     event_type++) {
1525 			diag = rte_eth_dev_callback_register(pi,
1526 							event_type,
1527 							eth_event_callback,
1528 							NULL);
1529 			if (diag) {
1530 				printf("Failed to setup even callback for event %d\n",
1531 					event_type);
1532 				return -1;
1533 			}
1534 		}
1535 
1536 		/* start port */
1537 		if (rte_eth_dev_start(pi) < 0) {
1538 			printf("Fail to start port %d\n", pi);
1539 
1540 			/* Failed to start the port; set it back to stopped */
1541 			if (rte_atomic16_cmpset(&(port->port_status),
1542 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1543 				printf("Port %d can not be set back to "
1544 							"stopped\n", pi);
1545 			continue;
1546 		}
1547 
1548 		if (rte_atomic16_cmpset(&(port->port_status),
1549 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1550 			printf("Port %d can not be set into started\n", pi);
1551 
1552 		rte_eth_macaddr_get(pi, &mac_addr);
1553 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1554 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1555 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1556 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1557 
1558 		/* At least one port started; need to check link status. */
1559 		need_check_link_status = 1;
1560 	}
1561 
1562 	if (need_check_link_status == 1 && !no_link_check)
1563 		check_all_ports_link_status(RTE_PORT_ALL);
1564 	else if (need_check_link_status == 0)
1565 		printf("Please stop the ports first\n");
1566 
1567 	printf("Done\n");
1568 	return 0;
1569 }
1570 
1571 void
1572 stop_port(portid_t pid)
1573 {
1574 	portid_t pi;
1575 	struct rte_port *port;
1576 	int need_check_link_status = 0;
1577 
1578 	if (dcb_test) {
1579 		dcb_test = 0;
1580 		dcb_config = 0;
1581 	}
1582 
1583 	if (port_id_is_invalid(pid, ENABLED_WARN))
1584 		return;
1585 
1586 	printf("Stopping ports...\n");
1587 
1588 	RTE_ETH_FOREACH_DEV(pi) {
1589 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1590 			continue;
1591 
1592 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1593 			printf("Please remove port %d from forwarding configuration.\n", pi);
1594 			continue;
1595 		}
1596 
1597 		if (port_is_bonding_slave(pi)) {
1598 			printf("Please remove port %d from bonded device.\n", pi);
1599 			continue;
1600 		}
1601 
1602 		port = &ports[pi];
1603 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1604 						RTE_PORT_HANDLING) == 0)
1605 			continue;
1606 
1607 		rte_eth_dev_stop(pi);
1608 
1609 		if (rte_atomic16_cmpset(&(port->port_status),
1610 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1611 			printf("Port %d can not be set into stopped\n", pi);
1612 		need_check_link_status = 1;
1613 	}
1614 	if (need_check_link_status && !no_link_check)
1615 		check_all_ports_link_status(RTE_PORT_ALL);
1616 
1617 	printf("Done\n");
1618 }
1619 
1620 void
1621 close_port(portid_t pid)
1622 {
1623 	portid_t pi;
1624 	struct rte_port *port;
1625 
1626 	if (port_id_is_invalid(pid, ENABLED_WARN))
1627 		return;
1628 
1629 	printf("Closing ports...\n");
1630 
1631 	RTE_ETH_FOREACH_DEV(pi) {
1632 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1633 			continue;
1634 
1635 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1636 			printf("Please remove port %d from forwarding configuration.\n", pi);
1637 			continue;
1638 		}
1639 
1640 		if (port_is_bonding_slave(pi)) {
1641 			printf("Please remove port %d from bonded device.\n", pi);
1642 			continue;
1643 		}
1644 
1645 		port = &ports[pi];
1646 		if (rte_atomic16_cmpset(&(port->port_status),
1647 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1648 			printf("Port %d is already closed\n", pi);
1649 			continue;
1650 		}
1651 
1652 		if (rte_atomic16_cmpset(&(port->port_status),
1653 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1654 			printf("Port %d is now not stopped\n", pi);
1655 			continue;
1656 		}
1657 
1658 		if (port->flow_list)
1659 			port_flow_flush(pi);
1660 		rte_eth_dev_close(pi);
1661 
1662 		if (rte_atomic16_cmpset(&(port->port_status),
1663 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1664 			printf("Port %d cannot be set to closed\n", pi);
1665 	}
1666 
1667 	printf("Done\n");
1668 }
1669 
1670 void
1671 reset_port(portid_t pid)
1672 {
1673 	int diag;
1674 	portid_t pi;
1675 	struct rte_port *port;
1676 
1677 	if (port_id_is_invalid(pid, ENABLED_WARN))
1678 		return;
1679 
1680 	printf("Resetting ports...\n");
1681 
1682 	RTE_ETH_FOREACH_DEV(pi) {
1683 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1684 			continue;
1685 
1686 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1687 			printf("Please remove port %d from forwarding "
1688 			       "configuration.\n", pi);
1689 			continue;
1690 		}
1691 
1692 		if (port_is_bonding_slave(pi)) {
1693 			printf("Please remove port %d from bonded device.\n",
1694 			       pi);
1695 			continue;
1696 		}
1697 
1698 		diag = rte_eth_dev_reset(pi);
1699 		if (diag == 0) {
1700 			port = &ports[pi];
1701 			port->need_reconfig = 1;
1702 			port->need_reconfig_queues = 1;
1703 		} else {
1704 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1705 		}
1706 	}
1707 
1708 	printf("Done\n");
1709 }
1710 
1711 void
1712 attach_port(char *identifier)
1713 {
1714 	portid_t pi = 0;
1715 	unsigned int socket_id;
1716 
1717 	printf("Attaching a new port...\n");
1718 
1719 	if (identifier == NULL) {
1720 		printf("Invalid parameters are specified\n");
1721 		return;
1722 	}
1723 
1724 	if (rte_eth_dev_attach(identifier, &pi))
1725 		return;
1726 
1727 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1728 	/* if socket_id is invalid, set to 0 */
1729 	if (check_socket_id(socket_id) < 0)
1730 		socket_id = 0;
1731 	reconfig(pi, socket_id);
1732 	rte_eth_promiscuous_enable(pi);
1733 
1734 	nb_ports = rte_eth_dev_count();
1735 
1736 	ports[pi].port_status = RTE_PORT_STOPPED;
1737 
1738 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1739 	printf("Done\n");
1740 }
1741 
1742 void
1743 detach_port(uint8_t port_id)
1744 {
1745 	char name[RTE_ETH_NAME_MAX_LEN];
1746 
1747 	printf("Detaching a port...\n");
1748 
1749 	if (!port_is_closed(port_id)) {
1750 		printf("Please close port first\n");
1751 		return;
1752 	}
1753 
1754 	if (ports[port_id].flow_list)
1755 		port_flow_flush(port_id);
1756 
1757 	if (rte_eth_dev_detach(port_id, name)) {
1758 		RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1759 		return;
1760 	}
1761 
1762 	nb_ports = rte_eth_dev_count();
1763 
1764 	printf("Port '%s' is detached. Now total ports is %d\n",
1765 			name, nb_ports);
1766 	printf("Done\n");
1767 	return;
1768 }
1769 
1770 void
1771 pmd_test_exit(void)
1772 {
1773 	portid_t pt_id;
1774 
1775 	if (test_done == 0)
1776 		stop_packet_forwarding();
1777 
1778 	if (ports != NULL) {
1779 		no_link_check = 1;
1780 		RTE_ETH_FOREACH_DEV(pt_id) {
1781 			printf("\nShutting down port %d...\n", pt_id);
1782 			fflush(stdout);
1783 			stop_port(pt_id);
1784 			close_port(pt_id);
1785 		}
1786 	}
1787 	printf("\nBye...\n");
1788 }
1789 
1790 typedef void (*cmd_func_t)(void);
1791 struct pmd_test_command {
1792 	const char *cmd_name;
1793 	cmd_func_t cmd_func;
1794 };
1795 
1796 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1797 
1798 /* Check the link status of all ports for up to 9 s, and finally print it. */
1799 static void
1800 check_all_ports_link_status(uint32_t port_mask)
1801 {
1802 #define CHECK_INTERVAL 100 /* 100ms */
1803 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1804 	portid_t portid;
1805 	uint8_t count, all_ports_up, print_flag = 0;
1806 	struct rte_eth_link link;
1807 
1808 	printf("Checking link statuses...\n");
1809 	fflush(stdout);
1810 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1811 		all_ports_up = 1;
1812 		RTE_ETH_FOREACH_DEV(portid) {
1813 			if ((port_mask & (1 << portid)) == 0)
1814 				continue;
1815 			memset(&link, 0, sizeof(link));
1816 			rte_eth_link_get_nowait(portid, &link);
1817 			/* print link status if flag set */
1818 			if (print_flag == 1) {
1819 				if (link.link_status)
1820 					printf(
1821 					"Port%d Link Up. speed %u Mbps- %s\n",
1822 					portid, link.link_speed,
1823 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1824 					("full-duplex") : ("half-duplex\n"));
1825 				else
1826 					printf("Port %d Link Down\n", portid);
1827 				continue;
1828 			}
1829 			/* clear all_ports_up flag if any link down */
1830 			if (link.link_status == ETH_LINK_DOWN) {
1831 				all_ports_up = 0;
1832 				break;
1833 			}
1834 		}
1835 		/* after finally printing all link status, get out */
1836 		if (print_flag == 1)
1837 			break;
1838 
1839 		if (all_ports_up == 0) {
1840 			fflush(stdout);
1841 			rte_delay_ms(CHECK_INTERVAL);
1842 		}
1843 
1844 		/* set the print_flag if all ports up or timeout */
1845 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1846 			print_flag = 1;
1847 		}
1848 
1849 		if (lsc_interrupt)
1850 			break;
1851 	}
1852 }
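
/*
 * Editorial note: rte_eth_link_get_nowait() reports the instantaneous link
 * state, hence the explicit 100 ms polling loop above. A minimal sketch of
 * the blocking alternative, which waits internally for link resolution:
 *
 *	struct rte_eth_link link;
 *	rte_eth_link_get(portid, &link);	(may block during autonegotiation)
 */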
1853 
1854 static void
1855 rmv_event_callback(void *arg)
1856 {
1857 	struct rte_eth_dev *dev;
1858 	uint8_t port_id = (intptr_t)arg;
1859 
1860 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1861 	dev = &rte_eth_devices[port_id];
1862 
1863 	stop_port(port_id);
1864 	close_port(port_id);
1865 	printf("removing device %s\n", dev->device->name);
1866 	if (rte_eal_dev_detach(dev->device))
1867 		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1868 			dev->device->name);
1869 }
1870 
1871 /* This function is used by the interrupt thread */
1872 static int
1873 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1874 		  void *ret_param)
1875 {
1876 	static const char * const event_desc[] = {
1877 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1878 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1879 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1880 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1881 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1882 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1883 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1884 		[RTE_ETH_EVENT_MAX] = NULL,
1885 	};
1886 
1887 	RTE_SET_USED(param);
1888 	RTE_SET_USED(ret_param);
1889 
1890 	if (type >= RTE_ETH_EVENT_MAX) {
1891 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1892 			port_id, __func__, type);
1893 		fflush(stderr);
1894 	} else if (event_print_mask & (UINT32_C(1) << type)) {
1895 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1896 			event_desc[type]);
1897 		fflush(stdout);
1898 	}
1899 
1900 	switch (type) {
1901 	case RTE_ETH_EVENT_INTR_RMV:
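		/*
		 * Defer the detach by 100 ms via an EAL alarm: this callback
		 * runs in the interrupt thread, where tearing the device
		 * down directly is not safe.
		 */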
1902 		if (rte_eal_alarm_set(100000,
1903 				rmv_event_callback, (void *)(intptr_t)port_id))
1904 			fprintf(stderr, "Could not set up deferred device removal\n");
1905 		break;
1906 	default:
1907 		break;
1908 	}
1909 	return 0;
1910 }
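
/*
 * Editorial sketch (registration happens elsewhere in testpmd; the event
 * type shown is illustrative): the callback above is attached to a port
 * with something like
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RMV,
 *			eth_event_callback, NULL);
 */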
1911 
1912 static int
1913 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1914 {
1915 	uint16_t i;
1916 	int diag;
1917 	uint8_t mapping_found = 0;
1918 
1919 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1920 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1921 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1922 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1923 					tx_queue_stats_mappings[i].queue_id,
1924 					tx_queue_stats_mappings[i].stats_counter_id);
1925 			if (diag != 0)
1926 				return diag;
1927 			mapping_found = 1;
1928 		}
1929 	}
1930 	if (mapping_found)
1931 		port->tx_queue_stats_mapping_enabled = 1;
1932 	return 0;
1933 }
1934 
1935 static int
1936 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1937 {
1938 	uint16_t i;
1939 	int diag;
1940 	uint8_t mapping_found = 0;
1941 
1942 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1943 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1944 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1945 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1946 					rx_queue_stats_mappings[i].queue_id,
1947 					rx_queue_stats_mappings[i].stats_counter_id);
1948 			if (diag != 0)
1949 				return diag;
1950 			mapping_found = 1;
1951 		}
1952 	}
1953 	if (mapping_found)
1954 		port->rx_queue_stats_mapping_enabled = 1;
1955 	return 0;
1956 }
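
/*
 * Editorial note: the mappings consumed by the two functions above come
 * from testpmd's command line, e.g. (illustrative syntax)
 * --tx-queue-stats-mapping=(port,queue,counter)[,...] and the RX
 * equivalent. Some NICs expose only a few per-queue stats counters, so
 * queues must be mapped onto them explicitly.
 */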
1957 
1958 static void
1959 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1960 {
1961 	int diag = 0;
1962 
1963 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1964 	if (diag != 0) {
1965 		if (diag == -ENOTSUP) {
1966 			port->tx_queue_stats_mapping_enabled = 0;
1967 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
1968 		} else
1970 			rte_exit(EXIT_FAILURE,
1971 					"set_tx_queue_stats_mapping_registers "
1972 					"failed for port id=%d diag=%d\n",
1973 					pi, diag);
1974 	}
1975 
1976 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1977 	if (diag != 0) {
1978 		if (diag == -ENOTSUP) {
1979 			port->rx_queue_stats_mapping_enabled = 0;
1980 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
1981 		} else
1983 			rte_exit(EXIT_FAILURE,
1984 					"set_rx_queue_stats_mapping_registers "
1985 					"failed for port id=%d diag=%d\n",
1986 					pi, diag);
1987 	}
1988 }
1989 
1990 static void
1991 rxtx_port_config(struct rte_port *port)
1992 {
1993 	port->rx_conf = port->dev_info.default_rxconf;
1994 	port->tx_conf = port->dev_info.default_txconf;
1995 
1996 	/* Check if any RX/TX parameters have been passed */
1997 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1998 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1999 
2000 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2001 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2002 
2003 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2004 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2005 
2006 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2007 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2008 
2009 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2010 		port->rx_conf.rx_drop_en = rx_drop_en;
2011 
2012 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2013 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2014 
2015 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2016 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2017 
2018 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2019 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2020 
2021 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2022 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2023 
2024 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2025 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2026 
2027 	if (txq_flags != RTE_PMD_PARAM_UNSET)
2028 		port->tx_conf.txq_flags = txq_flags;
2029 }
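
/*
 * Editorial note: every RTE_PMD_PARAM_UNSET test above corresponds to a
 * command-line override (illustratively --rxpt/--rxht/--rxwt and
 * --txpt/--txht/--txwt for the prefetch/host/write-back thresholds,
 * --rxfreet/--txfreet/--txrst for the free and RS thresholds). Anything
 * left unset keeps the driver's default_rxconf/default_txconf value.
 */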
2030 
2031 void
2032 init_port_config(void)
2033 {
2034 	portid_t pid;
2035 	struct rte_port *port;
2036 
2037 	RTE_ETH_FOREACH_DEV(pid) {
2038 		port = &ports[pid];
2039 		port->dev_conf.rxmode = rx_mode;
2040 		port->dev_conf.fdir_conf = fdir_conf;
2041 		if (nb_rxq > 1) {
2042 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2043 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2044 		} else {
2045 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2046 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2047 		}
2048 
2049 		if (port->dcb_flag == 0) {
2050 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2051 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2052 			else
2053 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2054 		}
2055 
2056 		rxtx_port_config(port);
2057 
2058 		rte_eth_macaddr_get(pid, &port->eth_addr);
2059 
2060 		map_port_queue_stats_mapping_registers(pid, port);
2061 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2062 		rte_pmd_ixgbe_bypass_init(pid);
2063 #endif
2064 
2065 		if (lsc_interrupt &&
2066 		    (rte_eth_devices[pid].data->dev_flags &
2067 		     RTE_ETH_DEV_INTR_LSC))
2068 			port->dev_conf.intr_conf.lsc = 1;
2069 		if (rmv_interrupt &&
2070 		    (rte_eth_devices[pid].data->dev_flags &
2071 		     RTE_ETH_DEV_INTR_RMV))
2072 			port->dev_conf.intr_conf.rmv = 1;
2073 	}
2074 }
2075 
2076 void set_port_slave_flag(portid_t slave_pid)
2077 {
2078 	struct rte_port *port;
2079 
2080 	port = &ports[slave_pid];
2081 	port->slave_flag = 1;
2082 }
2083 
2084 void clear_port_slave_flag(portid_t slave_pid)
2085 {
2086 	struct rte_port *port;
2087 
2088 	port = &ports[slave_pid];
2089 	port->slave_flag = 0;
2090 }
2091 
2092 uint8_t port_is_bonding_slave(portid_t slave_pid)
2093 {
2094 	struct rte_port *port;
2095 
2096 	port = &ports[slave_pid];
2097 	return port->slave_flag;
2098 }
2099 
2100 const uint16_t vlan_tags[] = {
2101 		0,  1,  2,  3,  4,  5,  6,  7,
2102 		8,  9, 10, 11,  12, 13, 14, 15,
2103 		16, 17, 18, 19, 20, 21, 22, 23,
2104 		24, 25, 26, 27, 28, 29, 30, 31
2105 };
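
/* 32 tags: one VLAN ID per VMDq pool at the ETH_32_POOLS maximum used below. */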
2106 
2107 static int
2108 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2109 		 enum dcb_mode_enable dcb_mode,
2110 		 enum rte_eth_nb_tcs num_tcs,
2111 		 uint8_t pfc_en)
2112 {
2113 	uint8_t i;
2114 
2115 	/*
2116 	 * Build the DCB (+VT) configuration from the vlan_tags array given
2117 	 * above and the number of traffic classes available for use.
2118 	 */
2119 	if (dcb_mode == DCB_VT_ENABLED) {
2120 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2121 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2122 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2123 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2124 
2125 		/* VMDQ+DCB RX and TX configurations */
2126 		vmdq_rx_conf->enable_default_pool = 0;
2127 		vmdq_rx_conf->default_pool = 0;
2128 		vmdq_rx_conf->nb_queue_pools =
2129 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2130 		vmdq_tx_conf->nb_queue_pools =
2131 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
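		/*
		 * Pool/TC arithmetic: 4 TCs x 32 pools and 8 TCs x 16 pools
		 * both consume a full 128-queue budget, as on ixgbe-class
		 * devices (assumption; other NICs may differ).
		 */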
2132 
2133 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2134 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2135 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2136 			vmdq_rx_conf->pool_map[i].pools =
2137 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2138 		}
2139 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2140 			vmdq_rx_conf->dcb_tc[i] = i;
2141 			vmdq_tx_conf->dcb_tc[i] = i;
2142 		}
2143 
2144 		/* set DCB mode of RX and TX of multiple queues */
2145 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2146 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2147 	} else {
2148 		struct rte_eth_dcb_rx_conf *rx_conf =
2149 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2150 		struct rte_eth_dcb_tx_conf *tx_conf =
2151 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2152 
2153 		rx_conf->nb_tcs = num_tcs;
2154 		tx_conf->nb_tcs = num_tcs;
2155 
2156 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2157 			rx_conf->dcb_tc[i] = i % num_tcs;
2158 			tx_conf->dcb_tc[i] = i % num_tcs;
2159 		}
2160 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2161 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2162 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2163 	}
2164 
2165 	if (pfc_en)
2166 		eth_conf->dcb_capability_en =
2167 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2168 	else
2169 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2170 
2171 	return 0;
2172 }
2173 
2174 int
2175 init_port_dcb_config(portid_t pid,
2176 		     enum dcb_mode_enable dcb_mode,
2177 		     enum rte_eth_nb_tcs num_tcs,
2178 		     uint8_t pfc_en)
2179 {
2180 	struct rte_eth_conf port_conf;
2181 	struct rte_port *rte_port;
2182 	int retval;
2183 	uint16_t i;
2184 
2185 	rte_port = &ports[pid];
2186 
2187 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2188 	/* Enter DCB configuration status */
2189 	dcb_config = 1;
2190 
2191 	/* Set the DCB configuration (VT mode or non-VT mode) */
2192 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2193 	if (retval < 0)
2194 		return retval;
2195 	port_conf.rxmode.hw_vlan_filter = 1;
2196 
2197 	/*
2198 	 * Write the configuration into the device.
2199 	 * Set the number of RX & TX queues to 0, so that
2200 	 * the RX & TX queues are not set up here.
2201 	 */
2202 	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
2203 	if (retval < 0)
2204 		return retval;
2203 
2204 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2205 
2206 	/* If dev_info.vmdq_pool_base is greater than 0,
2207 	 * the queue IDs of the VMDq pools start after the PF queues.
2208 	 */
2209 	if (dcb_mode == DCB_VT_ENABLED &&
2210 	    rte_port->dev_info.vmdq_pool_base > 0) {
2211 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2212 			" for port %d.\n", pid);
2213 		return -1;
2214 	}
2215 
2216 	/* Assume the ports in testpmd have the same DCB capability
2217 	 * and the same number of rxq and txq in DCB mode.
2218 	 */
2219 	if (dcb_mode == DCB_VT_ENABLED) {
2220 		if (rte_port->dev_info.max_vfs > 0) {
2221 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2222 			nb_txq = rte_port->dev_info.nb_tx_queues;
2223 		} else {
2224 			nb_rxq = rte_port->dev_info.max_rx_queues;
2225 			nb_txq = rte_port->dev_info.max_tx_queues;
2226 		}
2227 	} else {
2228 		/* If VT is disabled, use all PF queues */
2229 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2230 			nb_rxq = rte_port->dev_info.max_rx_queues;
2231 			nb_txq = rte_port->dev_info.max_tx_queues;
2232 		} else {
2233 			nb_rxq = (queueid_t)num_tcs;
2234 			nb_txq = (queueid_t)num_tcs;
2236 		}
2237 	}
2238 	rx_free_thresh = 64; /* fixed RX free threshold used for the DCB setup */
2239 
2240 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2241 
2242 	rxtx_port_config(rte_port);
2243 	/* VLAN filter */
2244 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2245 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2246 		rx_vft_set(pid, vlan_tags[i], 1);
2247 
2248 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2249 	map_port_queue_stats_mapping_registers(pid, rte_port);
2250 
2251 	rte_port->dcb_flag = 1;
2252 
2253 	return 0;
2254 }
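
/*
 * Editorial note: this is driven from the interactive command line, roughly
 * "port config <port> dcb vt (on|off) <num_tcs> pfc (on|off)" (illustrative
 * syntax); the port must be stopped first because the queue counts change.
 */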
2255 
2256 static void
2257 init_port(void)
2258 {
2259 	/* Configuration of Ethernet ports. */
2260 	ports = rte_zmalloc("testpmd: ports",
2261 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2262 			    RTE_CACHE_LINE_SIZE);
2263 	if (ports == NULL) {
2264 		rte_exit(EXIT_FAILURE,
2265 				"rte_zmalloc(%d struct rte_port) failed\n",
2266 				RTE_MAX_ETHPORTS);
2267 	}
2268 }
2269 
2270 static void
2271 force_quit(void)
2272 {
2273 	pmd_test_exit();
2274 	prompt_exit();
2275 }
2276 
2277 static void
2278 print_stats(void)
2279 {
2280 	uint8_t i;
2281 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2282 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
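	/* ANSI escape sequences: clr is ESC[2J (clear screen) and
	 * top_left is ESC[1;1H (move cursor to row 1, column 1). */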
2283 
2284 	/* Clear screen and move to top left */
2285 	printf("%s%s", clr, top_left);
2286 
2287 	printf("\nPort statistics ====================================");
2288 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2289 		nic_stats_display(fwd_ports_ids[i]);
2290 }
2291 
2292 static void
2293 signal_handler(int signum)
2294 {
2295 	if (signum == SIGINT || signum == SIGTERM) {
2296 		printf("\nSignal %d received, preparing to exit...\n",
2297 				signum);
2298 #ifdef RTE_LIBRTE_PDUMP
2299 		/* uninitialize packet capture framework */
2300 		rte_pdump_uninit();
2301 #endif
2302 #ifdef RTE_LIBRTE_LATENCY_STATS
2303 		rte_latencystats_uninit();
2304 #endif
2305 		force_quit();
2306 		/* Re-raise so the process exits with the conventional signal status */
2307 		signal(signum, SIG_DFL);
2308 		kill(getpid(), signum);
2309 	}
2310 }
2311 
2312 int
2313 main(int argc, char** argv)
2314 {
2315 	int  diag;
2316 	portid_t port_id;
2317 
2318 	signal(SIGINT, signal_handler);
2319 	signal(SIGTERM, signal_handler);
2320 
2321 	diag = rte_eal_init(argc, argv);
2322 	if (diag < 0)
2323 		rte_panic("Cannot init EAL\n");
2324 
2325 #ifdef RTE_LIBRTE_PDUMP
2326 	/* initialize packet capture framework */
2327 	rte_pdump_init(NULL);
2328 #endif
2329 
2330 	nb_ports = (portid_t) rte_eth_dev_count();
2331 	if (nb_ports == 0)
2332 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2333 
2334 	/* allocate port structures, and init them */
2335 	init_port();
2336 
2337 	set_def_fwd_config();
2338 	if (nb_lcores == 0)
2339 		rte_panic("Empty set of forwarding logical cores - check the "
2340 			  "core mask supplied in the command parameters\n");
2341 
2342 	/* Bitrate/latency stats disabled by default */
2343 #ifdef RTE_LIBRTE_BITRATE
2344 	bitrate_enabled = 0;
2345 #endif
2346 #ifdef RTE_LIBRTE_LATENCY_STATS
2347 	latencystats_enabled = 0;
2348 #endif
2349 
2350 	argc -= diag;
2351 	argv += diag;
2352 	if (argc > 1)
2353 		launch_args_parse(argc, argv);
2354 
2355 	if (tx_first && interactive)
2356 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2357 				"interactive mode.\n");
2358 
2359 	if (tx_first && lsc_interrupt) {
2360 		printf("Warning: lsc_interrupt needs to be off when "
2361 				"using tx_first. Disabling.\n");
2362 		lsc_interrupt = 0;
2363 	}
2364 
2365 	if (!nb_rxq && !nb_txq)
2366 		printf("Warning: rxq and txq are both zero; at least one should be non-zero\n");
2367 
2368 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2369 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2370 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2371 		       nb_rxq, nb_txq);
2372 
2373 	init_config();
2374 	if (start_port(RTE_PORT_ALL) != 0)
2375 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2376 
2377 	/* set all ports to promiscuous mode by default */
2378 	RTE_ETH_FOREACH_DEV(port_id)
2379 		rte_eth_promiscuous_enable(port_id);
2380 
2381 	/* Init metrics library */
2382 	rte_metrics_init(rte_socket_id());
2383 
2384 #ifdef RTE_LIBRTE_LATENCY_STATS
2385 	if (latencystats_enabled != 0) {
2386 		int ret = rte_latencystats_init(1, NULL);
2387 		if (ret)
2388 			printf("Warning: latencystats init()"
2389 				" returned error %d\n", ret);
2390 		printf("Latencystats running on lcore %d\n",
2391 			latencystats_lcore_id);
2392 	}
2393 #endif
2394 
2395 	/* Setup bitrate stats */
2396 #ifdef RTE_LIBRTE_BITRATE
2397 	if (bitrate_enabled != 0) {
2398 		bitrate_data = rte_stats_bitrate_create();
2399 		if (bitrate_data == NULL)
2400 			rte_exit(EXIT_FAILURE,
2401 				"Could not allocate bitrate data.\n");
2402 		rte_stats_bitrate_reg(bitrate_data);
2403 	}
2404 #endif
2405 
2406 #ifdef RTE_LIBRTE_CMDLINE
2407 	if (strlen(cmdline_filename) != 0)
2408 		cmdline_read_from_file(cmdline_filename);
2409 
2410 	if (interactive == 1) {
2411 		if (auto_start) {
2412 			printf("Start automatic packet forwarding\n");
2413 			start_packet_forwarding(0);
2414 		}
2415 		prompt();
2416 		pmd_test_exit();
2417 	} else
2418 #endif
2419 	{
2420 		char c;
2421 		int rc;
2422 
2423 		printf("Non-interactive mode: starting packet forwarding\n");
2424 		start_packet_forwarding(tx_first);
2425 		if (stats_period != 0) {
2426 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2427 			uint64_t timer_period;
2428 
2429 			/* Convert to number of cycles */
2430 			timer_period = stats_period * rte_get_timer_hz();
2431 
2432 			while (1) {
2433 				cur_time = rte_get_timer_cycles();
2434 				diff_time += cur_time - prev_time;
2435 
2436 				if (diff_time >= timer_period) {
2437 					print_stats();
2438 					/* Reset the timer */
2439 					diff_time = 0;
2440 				}
2441 				/* Sleep to avoid unnecessary checks */
2442 				prev_time = cur_time;
2443 				sleep(1);
2444 			}
2445 		}
2446 
2447 		printf("Press enter to exit\n");
2448 		rc = read(0, &c, 1);
2449 		pmd_test_exit();
2450 		if (rc < 0)
2451 			return 1;
2452 	}
2453 
2454 	return 0;
2455 }
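
/*
 * Editorial usage sketch (assumed typical invocation; adjust cores and
 * flags to your setup): run interactively with two forwarding cores and
 * two queues per port:
 *
 *	./testpmd -l 0-2 -n 4 -- -i --nb-cores=2 --rxq=2 --txq=2
 */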
2456