xref: /dpdk/app/test-pmd/testpmd.c (revision b7091f1dcfbc62f358b4c1882d51c434471d51da)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44 
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47 
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51 
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_memzone.h>
61 #include <rte_launch.h>
62 #include <rte_eal.h>
63 #include <rte_alarm.h>
64 #include <rte_per_lcore.h>
65 #include <rte_lcore.h>
66 #include <rte_atomic.h>
67 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
70 #include <rte_mbuf.h>
71 #include <rte_interrupts.h>
72 #include <rte_pci.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_dev.h>
76 #include <rte_string_fns.h>
77 #ifdef RTE_LIBRTE_IXGBE_PMD
78 #include <rte_pmd_ixgbe.h>
79 #endif
80 #ifdef RTE_LIBRTE_PDUMP
81 #include <rte_pdump.h>
82 #endif
83 #include <rte_flow.h>
84 #include <rte_metrics.h>
85 #ifdef RTE_LIBRTE_BITRATE
86 #include <rte_bitrate.h>
87 #endif
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
90 #endif
91 
92 #include "testpmd.h"
93 
94 uint16_t verbose_level = 0; /**< Silent by default. */
95 
96 /* Use the master core for the command line? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
99 uint8_t tx_first;
100 char cmdline_filename[PATH_MAX] = {0};
101 
102 /*
103  * NUMA support configuration.
104  * When set, the NUMA support attempts to dispatch the allocation of the
105  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
106  * probed ports among the CPU sockets 0 and 1.
107  * Otherwise, all memory is allocated from CPU socket 0.
108  */
109 uint8_t numa_support = 1; /**< NUMA enabled by default */
110 
111 /*
112  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
113  * not configured.
114  */
115 uint8_t socket_num = UMA_NO_CONFIG;
116 
117 /*
118  * Use ANONYMOUS mapped memory (it might not be physically contiguous) for mbufs.
119  */
120 uint8_t mp_anon = 0;
121 
122 /*
123  * Record the Ethernet address of peer target ports to which packets are
124  * forwarded.
125  * Must be instantiated with the Ethernet addresses of peer traffic generator
126  * ports.
127  */
128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
129 portid_t nb_peer_eth_addrs = 0;
130 
131 /*
132  * Probed Target Environment.
133  */
134 struct rte_port *ports;	       /**< For all probed ethernet ports. */
135 portid_t nb_ports;             /**< Number of probed ethernet ports. */
136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
137 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
138 
139 /*
140  * Test Forwarding Configuration.
141  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
142  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
143  */
144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
146 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
147 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
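/*
 * Worked example (illustrative, not from the sources): with nb_lcores = 8
 * probed forwarding lcores, the interactive command "set corelist 1,2,3,4"
 * gives nb_cfg_lcores = 4, and "set nbcore 2" then uses only
 * nb_fwd_lcores = 2 of them, preserving
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores.
 */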
148 
149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
151 
152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
153 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
154 
155 /*
156  * Forwarding engines.
157  */
158 struct fwd_engine *fwd_engines[] = {
159 	&io_fwd_engine,
160 	&mac_fwd_engine,
161 	&mac_swap_engine,
162 	&flow_gen_engine,
163 	&rx_only_engine,
164 	&tx_only_engine,
165 	&csum_fwd_engine,
166 	&icmp_echo_engine,
167 #ifdef RTE_LIBRTE_IEEE1588
168 	&ieee1588_fwd_engine,
169 #endif
170 	NULL,
171 };
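/*
 * Illustrative sketch (hypothetical helper, kept out of the build with
 * #if 0): how a forwarding engine can be looked up by name in the
 * NULL-terminated fwd_engines[] array above, as the "set fwd <mode>"
 * command does in config.c.
 */
#if 0
static struct fwd_engine *
find_fwd_engine(const char *fwd_mode_name)
{
	unsigned int i;

	for (i = 0; fwd_engines[i] != NULL; i++) {
		if (strcmp(fwd_engines[i]->fwd_mode_name,
			   fwd_mode_name) == 0)
			return fwd_engines[i];
	}
	return NULL; /* unknown forwarding mode */
}
#endif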
172 
173 struct fwd_config cur_fwd_config;
174 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
175 uint32_t retry_enabled;
176 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
177 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
178 
179 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
180 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
181                                       * specified on command-line. */
182 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
183 /*
184  * Configuration of packet segments used by the "txonly" processing engine.
185  */
186 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
187 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
188 	TXONLY_DEF_PACKET_LEN,
189 };
190 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
191 
192 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
193 /**< Split policy for packets to TX. */
194 
195 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
196 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
197 
198 /* Whether the current configuration is in DCB mode; 0 means it is not. */
199 uint8_t dcb_config = 0;
200 
201 /* Whether DCB is in testing status. */
202 uint8_t dcb_test = 0;
203 
204 /*
205  * Configurable number of RX/TX queues.
206  */
207 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
208 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
209 
210 /*
211  * Configurable number of RX/TX ring descriptors.
212  */
213 #define RTE_TEST_RX_DESC_DEFAULT 128
214 #define RTE_TEST_TX_DESC_DEFAULT 512
215 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
216 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
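/*
 * Illustrative usage note: these defaults can be overridden at startup with
 * the --rxd=N and --txd=N command-line options and, once the ports are
 * stopped, with the interactive commands "port config all rxd <value>" /
 * "port config all txd <value>".
 */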
217 
218 #define RTE_PMD_PARAM_UNSET -1
219 /*
220  * Configurable values of RX and TX ring threshold registers.
221  */
222 
223 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
224 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
225 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
226 
227 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
228 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
229 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
230 
231 /*
232  * Configurable value of RX free threshold.
233  */
234 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
235 
236 /*
237  * Configurable value of RX drop enable.
238  */
239 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
240 
241 /*
242  * Configurable value of TX free threshold.
243  */
244 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
245 
246 /*
247  * Configurable value of TX RS bit threshold.
248  */
249 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
250 
251 /*
252  * Configurable value of TX queue flags.
253  */
254 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
255 
256 /*
257  * Receive Side Scaling (RSS) configuration.
258  */
259 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
260 
261 /*
262  * Port topology configuration
263  */
264 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
265 
266 /*
267  * Avoid flushing all the RX streams before starting forwarding.
268  */
269 uint8_t no_flush_rx = 0; /* flush by default */
270 
271 /*
272  * Flow API isolated mode.
273  */
274 uint8_t flow_isolate_all;
275 
276 /*
277  * Avoid checking the link status when starting/stopping a port.
278  */
279 uint8_t no_link_check = 0; /* check by default */
280 
281 /*
282  * Enable link status change notification
283  */
284 uint8_t lsc_interrupt = 1; /* enabled by default */
285 
286 /*
287  * Enable device removal notification.
288  */
289 uint8_t rmv_interrupt = 1; /* enabled by default */
290 
291 /*
292  * Display or mask ether events
293  * Default to all events except VF_MBOX
294  */
295 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
296 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
297 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
298 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
299 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
300 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
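/*
 * Sketch of how this mask is consumed: event printing is gated with
 * "event_print_mask & (UINT32_C(1) << type)" in eth_event_callback()
 * below. For example, to also silence LSC events:
 *
 *	event_print_mask &= ~(UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC);
 */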
301 
302 /*
303  * NIC bypass mode configuration options.
304  */
305 
306 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
307 /* The NIC bypass watchdog timeout. */
308 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
309 #endif
310 
311 
312 #ifdef RTE_LIBRTE_LATENCY_STATS
313 
314 /*
315  * Set when latency stats are enabled on the command line.
316  */
317 uint8_t latencystats_enabled;
318 
319 /*
320  * Lcore ID to service latency statistics.
321  */
322 lcoreid_t latencystats_lcore_id = -1;
323 
324 #endif
325 
326 /*
327  * Ethernet device configuration.
328  */
329 struct rte_eth_rxmode rx_mode = {
330 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
331 	.split_hdr_size = 0,
332 	.header_split   = 0, /**< Header Split disabled. */
333 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
334 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
335 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
336 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
337 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
338 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
339 };
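/*
 * Illustrative example (a sketch of what the --max-pkt-len command-line
 * option does in parameters.c): to accept 9000-byte jumbo frames, the two
 * related fields are set consistently, e.g.
 *
 *	rx_mode.max_rx_pkt_len = 9000;
 *	rx_mode.jumbo_frame = 1;	(set when max_rx_pkt_len > ETHER_MAX_LEN)
 */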
340 
341 struct rte_fdir_conf fdir_conf = {
342 	.mode = RTE_FDIR_MODE_NONE,
343 	.pballoc = RTE_FDIR_PBALLOC_64K,
344 	.status = RTE_FDIR_REPORT_STATUS,
345 	.mask = {
346 		.vlan_tci_mask = 0x0,
347 		.ipv4_mask     = {
348 			.src_ip = 0xFFFFFFFF,
349 			.dst_ip = 0xFFFFFFFF,
350 		},
351 		.ipv6_mask     = {
352 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
353 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
354 		},
355 		.src_port_mask = 0xFFFF,
356 		.dst_port_mask = 0xFFFF,
357 		.mac_addr_byte_mask = 0xFF,
358 		.tunnel_type_mask = 1,
359 		.tunnel_id_mask = 0xFFFFFFFF,
360 	},
361 	.drop_queue = 127,
362 };
363 
364 volatile int test_done = 1; /* packet forwarding is stopped when set to 1. */
365 
366 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
367 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
368 
369 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
370 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
371 
372 uint16_t nb_tx_queue_stats_mappings = 0;
373 uint16_t nb_rx_queue_stats_mappings = 0;
374 
375 unsigned int num_sockets = 0;
376 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
377 
378 #ifdef RTE_LIBRTE_BITRATE
379 /* Bitrate statistics */
380 struct rte_stats_bitrates *bitrate_data;
381 lcoreid_t bitrate_lcore_id;
382 uint8_t bitrate_enabled;
383 #endif
384 
385 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
386 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
387 
388 /* Forward function declarations */
389 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
390 static void check_all_ports_link_status(uint32_t port_mask);
391 static int eth_event_callback(portid_t port_id,
392 			      enum rte_eth_event_type type,
393 			      void *param, void *ret_param);
394 
395 /*
396  * Check if all the ports are started.
397  * If yes, return positive value. If not, return zero.
398  */
399 static int all_ports_started(void);
400 
401 /*
402  * Helper function to check whether a socket ID has already been discovered.
403  * Return a positive value if the socket ID is new, zero if it is already known.
404  */
405 int
406 new_socket_id(unsigned int socket_id)
407 {
408 	unsigned int i;
409 
410 	for (i = 0; i < num_sockets; i++) {
411 		if (socket_ids[i] == socket_id)
412 			return 0;
413 	}
414 	return 1;
415 }
416 
417 /*
418  * Setup default configuration.
419  */
420 static void
421 set_default_fwd_lcores_config(void)
422 {
423 	unsigned int i;
424 	unsigned int nb_lc;
425 	unsigned int sock_num;
426 
427 	nb_lc = 0;
428 	for (i = 0; i < RTE_MAX_LCORE; i++) {
429 		sock_num = rte_lcore_to_socket_id(i);
430 		if (new_socket_id(sock_num)) {
431 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
432 				rte_exit(EXIT_FAILURE,
433 					 "Total sockets greater than %u\n",
434 					 RTE_MAX_NUMA_NODES);
435 			}
436 			socket_ids[num_sockets++] = sock_num;
437 		}
438 		if (!rte_lcore_is_enabled(i))
439 			continue;
440 		if (i == rte_get_master_lcore())
441 			continue;
442 		fwd_lcores_cpuids[nb_lc++] = i;
443 	}
444 	nb_lcores = (lcoreid_t) nb_lc;
445 	nb_cfg_lcores = nb_lcores;
446 	nb_fwd_lcores = 1;
447 }
448 
449 static void
450 set_def_peer_eth_addrs(void)
451 {
452 	portid_t i;
453 
454 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
455 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
456 		peer_eth_addrs[i].addr_bytes[5] = i;
457 	}
458 }
459 
460 static void
461 set_default_fwd_ports_config(void)
462 {
463 	portid_t pt_id;
464 
465 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
466 		fwd_ports_ids[pt_id] = pt_id;
467 
468 	nb_cfg_ports = nb_ports;
469 	nb_fwd_ports = nb_ports;
470 }
471 
472 void
473 set_def_fwd_config(void)
474 {
475 	set_default_fwd_lcores_config();
476 	set_def_peer_eth_addrs();
477 	set_default_fwd_ports_config();
478 }
479 
480 /*
481  * Creation of an mbuf pool, done once per socket at init time.
482  */
483 static void
484 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
485 		 unsigned int socket_id)
486 {
487 	char pool_name[RTE_MEMPOOL_NAMESIZE];
488 	struct rte_mempool *rte_mp = NULL;
489 	uint32_t mb_size;
490 
491 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
492 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
493 
494 	RTE_LOG(INFO, USER1,
495 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
496 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
497 
498 	if (mp_anon != 0) {
499 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
500 			mb_size, (unsigned) mb_mempool_cache,
501 			sizeof(struct rte_pktmbuf_pool_private),
502 			socket_id, 0);
503 		if (rte_mp == NULL)
504 			goto err;
505 
506 		if (rte_mempool_populate_anon(rte_mp) == 0) {
507 			rte_mempool_free(rte_mp);
508 			rte_mp = NULL;
509 			goto err;
510 		}
511 		rte_pktmbuf_pool_init(rte_mp, NULL);
512 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
513 	} else {
514 		/* wrapper to rte_mempool_create() */
515 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
516 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
517 	}
518 
519 err:
520 	if (rte_mp == NULL) {
521 		rte_exit(EXIT_FAILURE,
522 			"Creation of mbuf pool for socket %u failed: %s\n",
523 			socket_id, rte_strerror(rte_errno));
524 	} else if (verbose_level > 0) {
525 		rte_mempool_dump(stdout, rte_mp);
526 	}
527 }
528 
529 /*
530  * Check whether the given socket ID is valid in NUMA mode.
531  * Return 0 if valid, -1 otherwise.
532  */
533 static int
534 check_socket_id(const unsigned int socket_id)
535 {
536 	static int warning_once = 0;
537 
538 	if (new_socket_id(socket_id)) {
539 		if (!warning_once && numa_support)
540 			printf("Warning: NUMA should be configured manually by"
541 			       " using --port-numa-config and"
542 			       " --ring-numa-config parameters along with"
543 			       " --numa.\n");
544 		warning_once = 1;
545 		return -1;
546 	}
547 	return 0;
548 }
549 
550 static void
551 init_config(void)
552 {
553 	portid_t pid;
554 	struct rte_port *port;
555 	struct rte_mempool *mbp;
556 	unsigned int nb_mbuf_per_pool;
557 	lcoreid_t  lc_id;
558 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
559 	struct rte_gro_param gro_param;
560 
561 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
562 
563 	if (numa_support) {
564 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
565 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
566 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
567 	}
568 
569 	/* Configuration of logical cores. */
570 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
571 				sizeof(struct fwd_lcore *) * nb_lcores,
572 				RTE_CACHE_LINE_SIZE);
573 	if (fwd_lcores == NULL) {
574 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
575 							"failed\n", nb_lcores);
576 	}
577 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
578 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
579 					       sizeof(struct fwd_lcore),
580 					       RTE_CACHE_LINE_SIZE);
581 		if (fwd_lcores[lc_id] == NULL) {
582 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
583 								"failed\n");
584 		}
585 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
586 	}
587 
588 	RTE_ETH_FOREACH_DEV(pid) {
589 		port = &ports[pid];
590 		rte_eth_dev_info_get(pid, &port->dev_info);
591 
592 		if (numa_support) {
593 			if (port_numa[pid] != NUMA_NO_CONFIG)
594 				port_per_socket[port_numa[pid]]++;
595 			else {
596 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
597 
598 				/* if socket_id is invalid, set to 0 */
599 				if (check_socket_id(socket_id) < 0)
600 					socket_id = 0;
601 				port_per_socket[socket_id]++;
602 			}
603 		}
604 
605 		/* set flag to initialize port/queue */
606 		port->need_reconfig = 1;
607 		port->need_reconfig_queues = 1;
608 	}
609 
610 	/*
611 	 * Create mbuf pools.
612 	 * If NUMA support is disabled, create a single mbuf pool in
613 	 * socket 0 memory by default.
614 	 * Otherwise, create an mbuf pool on each discovered CPU socket.
615 	 *
616 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
617 	 * nb_txd can be configured at run time.
618 	 */
619 	if (param_total_num_mbufs)
620 		nb_mbuf_per_pool = param_total_num_mbufs;
621 	else {
622 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
623 			(nb_lcores * mb_mempool_cache) +
624 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
625 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
626 	}
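	/*
	 * Worked example of the default sizing above, assuming the usual
	 * testpmd.h values (RTE_TEST_RX_DESC_MAX = 2048,
	 * RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512) and 4 lcores
	 * with mb_mempool_cache = 250:
	 *	base: 2048 + (4 * 250) + 2048 + 512 = 5608 mbufs
	 *	pool: 5608 * RTE_MAX_ETHPORTS mbufs (e.g. 179456 for 32 ports)
	 */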
627 
628 	if (numa_support) {
629 		uint8_t i;
630 
631 		for (i = 0; i < num_sockets; i++)
632 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
633 					 socket_ids[i]);
634 	} else {
635 		if (socket_num == UMA_NO_CONFIG)
636 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
637 		else
638 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
639 						 socket_num);
640 	}
641 
642 	init_port_config();
643 
644 	/*
645 	 * Record which mbuf pool each logical core should use, if needed.
646 	 */
647 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
648 		mbp = mbuf_pool_find(
649 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
650 
651 		if (mbp == NULL)
652 			mbp = mbuf_pool_find(0);
653 		fwd_lcores[lc_id]->mbp = mbp;
654 	}
655 
656 	/* Configuration of packet forwarding streams. */
657 	if (init_fwd_streams() < 0)
658 		rte_exit(EXIT_FAILURE, "init_fwd_streams() failed\n");
659 
660 	fwd_config_setup();
661 
662 	/* create a gro context for each lcore */
663 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
664 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
665 	gro_param.max_item_per_flow = MAX_PKT_BURST;
666 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
667 		gro_param.socket_id = rte_lcore_to_socket_id(
668 				fwd_lcores_cpuids[lc_id]);
669 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
670 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
671 			rte_exit(EXIT_FAILURE,
672 					"rte_gro_ctx_create() failed\n");
673 		}
674 	}
675 }
676 
677 
678 void
679 reconfig(portid_t new_port_id, unsigned socket_id)
680 {
681 	struct rte_port *port;
682 
683 	/* Reconfiguration of Ethernet ports. */
684 	port = &ports[new_port_id];
685 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
686 
687 	/* set flag to initialize port/queue */
688 	port->need_reconfig = 1;
689 	port->need_reconfig_queues = 1;
690 	port->socket_id = socket_id;
691 
692 	init_port_config();
693 }
694 
695 
696 int
697 init_fwd_streams(void)
698 {
699 	portid_t pid;
700 	struct rte_port *port;
701 	streamid_t sm_id, nb_fwd_streams_new;
702 	queueid_t q;
703 
704 	/* Set the socket ID according to NUMA support. */
705 	RTE_ETH_FOREACH_DEV(pid) {
706 		port = &ports[pid];
707 		if (nb_rxq > port->dev_info.max_rx_queues) {
708 			printf("Fail: nb_rxq(%d) is greater than "
709 				"max_rx_queues(%d)\n", nb_rxq,
710 				port->dev_info.max_rx_queues);
711 			return -1;
712 		}
713 		if (nb_txq > port->dev_info.max_tx_queues) {
714 			printf("Fail: nb_txq(%d) is greater than "
715 				"max_tx_queues(%d)\n", nb_txq,
716 				port->dev_info.max_tx_queues);
717 			return -1;
718 		}
719 		if (numa_support) {
720 			if (port_numa[pid] != NUMA_NO_CONFIG)
721 				port->socket_id = port_numa[pid];
722 			else {
723 				port->socket_id = rte_eth_dev_socket_id(pid);
724 
725 				/* if socket_id is invalid, set to 0 */
726 				if (check_socket_id(port->socket_id) < 0)
727 					port->socket_id = 0;
728 			}
729 		}
730 		else {
731 			if (socket_num == UMA_NO_CONFIG)
732 				port->socket_id = 0;
733 			else
734 				port->socket_id = socket_num;
735 		}
736 	}
737 
738 	q = RTE_MAX(nb_rxq, nb_txq);
739 	if (q == 0) {
740 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
741 		return -1;
742 	}
743 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
744 	if (nb_fwd_streams_new == nb_fwd_streams)
745 		return 0;
746 	/* clear the old */
747 	if (fwd_streams != NULL) {
748 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
749 			if (fwd_streams[sm_id] == NULL)
750 				continue;
751 			rte_free(fwd_streams[sm_id]);
752 			fwd_streams[sm_id] = NULL;
753 		}
754 		rte_free(fwd_streams);
755 		fwd_streams = NULL;
756 	}
757 
758 	/* init new */
759 	nb_fwd_streams = nb_fwd_streams_new;
760 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
761 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
762 	if (fwd_streams == NULL)
763 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
764 						"failed\n", nb_fwd_streams);
765 
766 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
767 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
768 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
769 		if (fwd_streams[sm_id] == NULL)
770 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
771 								" failed\n");
772 	}
773 
774 	return 0;
775 }
776 
777 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
778 static void
779 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
780 {
781 	unsigned int total_burst;
782 	unsigned int nb_burst;
783 	unsigned int burst_stats[3];
784 	uint16_t pktnb_stats[3];
785 	uint16_t nb_pkt;
786 	int burst_percent[3];
787 
788 	/*
789 	 * First compute the total number of packet bursts and the
790 	 * two highest numbers of bursts of the same number of packets.
791 	 */
792 	total_burst = 0;
793 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
794 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
795 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
796 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
797 		if (nb_burst == 0)
798 			continue;
799 		total_burst += nb_burst;
800 		if (nb_burst > burst_stats[0]) {
801 			burst_stats[1] = burst_stats[0];
802 			pktnb_stats[1] = pktnb_stats[0];
803 			burst_stats[0] = nb_burst;
804 			pktnb_stats[0] = nb_pkt;
805 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
806 	}
807 	if (total_burst == 0)
808 		return;
809 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
810 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
811 	       burst_percent[0], (int) pktnb_stats[0]);
812 	if (burst_stats[0] == total_burst) {
813 		printf("]\n");
814 		return;
815 	}
816 	if (burst_stats[0] + burst_stats[1] == total_burst) {
817 		printf(" + %d%% of %d pkts]\n",
818 		       100 - burst_percent[0], pktnb_stats[1]);
819 		return;
820 	}
821 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
822 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
823 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
824 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
825 		return;
826 	}
827 	printf(" + %d%% of %d pkts + %d%% of others]\n",
828 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
829 }
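/*
 * Worked example of the display logic above (hypothetical numbers): with a
 * burst spread of 900 bursts of 32 packets, 80 bursts of 16 packets and
 * 20 bursts of 8 packets, total_burst = 1000 and the output reads:
 *
 *   RX-bursts : 1000 [90% of 32 pkts + 8% of 16 pkts + 2% of others]
 */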
830 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
831 
832 static void
833 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
834 {
835 	struct rte_port *port;
836 	uint8_t i;
837 
838 	static const char *fwd_stats_border = "----------------------";
839 
840 	port = &ports[port_id];
841 	printf("\n  %s Forward statistics for port %-2d %s\n",
842 	       fwd_stats_border, port_id, fwd_stats_border);
843 
844 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
845 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
846 		       "%-"PRIu64"\n",
847 		       stats->ipackets, stats->imissed,
848 		       (uint64_t) (stats->ipackets + stats->imissed));
849 
850 		if (cur_fwd_eng == &csum_fwd_engine)
851 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
852 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
853 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
854 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
855 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
856 		}
857 
858 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
859 		       "%-"PRIu64"\n",
860 		       stats->opackets, port->tx_dropped,
861 		       (uint64_t) (stats->opackets + port->tx_dropped));
862 	}
863 	else {
864 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
865 		       "%14"PRIu64"\n",
866 		       stats->ipackets, stats->imissed,
867 		       (uint64_t) (stats->ipackets + stats->imissed));
868 
869 		if (cur_fwd_eng == &csum_fwd_engine)
870 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
871 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
872 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
873 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
874 			printf("  RX-nombufs:             %14"PRIu64"\n",
875 			       stats->rx_nombuf);
876 		}
877 
878 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
879 		       "%14"PRIu64"\n",
880 		       stats->opackets, port->tx_dropped,
881 		       (uint64_t) (stats->opackets + port->tx_dropped));
882 	}
883 
884 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
885 	if (port->rx_stream)
886 		pkt_burst_stats_display("RX",
887 			&port->rx_stream->rx_burst_stats);
888 	if (port->tx_stream)
889 		pkt_burst_stats_display("TX",
890 			&port->tx_stream->tx_burst_stats);
891 #endif
892 
893 	if (port->rx_queue_stats_mapping_enabled) {
894 		printf("\n");
895 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
896 			printf("  Stats reg %2d RX-packets:%14"PRIu64
897 			       "     RX-errors:%14"PRIu64
898 			       "    RX-bytes:%14"PRIu64"\n",
899 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
900 		}
901 		printf("\n");
902 	}
903 	if (port->tx_queue_stats_mapping_enabled) {
904 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
905 			printf("  Stats reg %2d TX-packets:%14"PRIu64
906 			       "                                 TX-bytes:%14"PRIu64"\n",
907 			       i, stats->q_opackets[i], stats->q_obytes[i]);
908 		}
909 	}
910 
911 	printf("  %s--------------------------------%s\n",
912 	       fwd_stats_border, fwd_stats_border);
913 }
914 
915 static void
916 fwd_stream_stats_display(streamid_t stream_id)
917 {
918 	struct fwd_stream *fs;
919 	static const char *fwd_top_stats_border = "-------";
920 
921 	fs = fwd_streams[stream_id];
922 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
923 	    (fs->fwd_dropped == 0))
924 		return;
925 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
926 	       "TX Port=%2d/Queue=%2d %s\n",
927 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
928 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
929 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
930 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
931 
932 	/* if checksum mode */
933 	if (cur_fwd_eng == &csum_fwd_engine) {
934 		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
935 		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
936 	}
937 
938 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
939 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
940 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
941 #endif
942 }
943 
944 static void
945 flush_fwd_rx_queues(void)
946 {
947 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
948 	portid_t  rxp;
949 	portid_t port_id;
950 	queueid_t rxq;
951 	uint16_t  nb_rx;
952 	uint16_t  i;
953 	uint8_t   j;
954 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
955 	uint64_t timer_period;
956 
957 	/* convert to number of cycles */
958 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
959 
960 	for (j = 0; j < 2; j++) {
961 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
962 			for (rxq = 0; rxq < nb_rxq; rxq++) {
963 				port_id = fwd_ports_ids[rxp];
964 				/*
965 				 * testpmd can get stuck in the do-while loop
966 				 * below if rte_eth_rx_burst() always returns
967 				 * nonzero packets, so a timer is added to exit
968 				 * the loop after the 1-second timer expires.
969 				 */
970 				prev_tsc = rte_rdtsc();
971 				do {
972 					nb_rx = rte_eth_rx_burst(port_id, rxq,
973 						pkts_burst, MAX_PKT_BURST);
974 					for (i = 0; i < nb_rx; i++)
975 						rte_pktmbuf_free(pkts_burst[i]);
976 
977 					cur_tsc = rte_rdtsc();
978 					diff_tsc = cur_tsc - prev_tsc;
979 					timer_tsc += diff_tsc;
980 				} while ((nb_rx > 0) &&
981 					(timer_tsc < timer_period));
982 				timer_tsc = 0;
983 			}
984 		}
985 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
986 	}
987 }
988 
989 static void
990 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
991 {
992 	struct fwd_stream **fsm;
993 	streamid_t nb_fs;
994 	streamid_t sm_id;
995 #ifdef RTE_LIBRTE_BITRATE
996 	uint64_t tics_per_1sec;
997 	uint64_t tics_datum;
998 	uint64_t tics_current;
999 	uint8_t idx_port, cnt_ports;
1000 
1001 	cnt_ports = rte_eth_dev_count();
1002 	tics_datum = rte_rdtsc();
1003 	tics_per_1sec = rte_get_timer_hz();
1004 #endif
1005 	fsm = &fwd_streams[fc->stream_idx];
1006 	nb_fs = fc->stream_nb;
1007 	do {
1008 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1009 			(*pkt_fwd)(fsm[sm_id]);
1010 #ifdef RTE_LIBRTE_BITRATE
1011 		if (bitrate_enabled != 0 &&
1012 				bitrate_lcore_id == rte_lcore_id()) {
1013 			tics_current = rte_rdtsc();
1014 			if (tics_current - tics_datum >= tics_per_1sec) {
1015 				/* Periodic bitrate calculation */
1016 				for (idx_port = 0;
1017 						idx_port < cnt_ports;
1018 						idx_port++)
1019 					rte_stats_bitrate_calc(bitrate_data,
1020 						idx_port);
1021 				tics_datum = tics_current;
1022 			}
1023 		}
1024 #endif
1025 #ifdef RTE_LIBRTE_LATENCY_STATS
1026 		if (latencystats_enabled != 0 &&
1027 				latencystats_lcore_id == rte_lcore_id())
1028 			rte_latencystats_update();
1029 #endif
1030 
1031 	} while (!fc->stopped);
1032 }
1033 
1034 static int
1035 start_pkt_forward_on_core(void *fwd_arg)
1036 {
1037 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1038 			     cur_fwd_config.fwd_eng->packet_fwd);
1039 	return 0;
1040 }
1041 
1042 /*
1043  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1044  * Used to start communication flows in network loopback test configurations.
1045  */
1046 static int
1047 run_one_txonly_burst_on_core(void *fwd_arg)
1048 {
1049 	struct fwd_lcore *fwd_lc;
1050 	struct fwd_lcore tmp_lcore;
1051 
1052 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1053 	tmp_lcore = *fwd_lc;
1054 	tmp_lcore.stopped = 1;
1055 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1056 	return 0;
1057 }
1058 
1059 /*
1060  * Launch packet forwarding:
1061  *     - Set up per-port forwarding context.
1062  *     - Launch logical cores with their forwarding configuration.
1063  */
1064 static void
1065 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1066 {
1067 	port_fwd_begin_t port_fwd_begin;
1068 	unsigned int i;
1069 	unsigned int lc_id;
1070 	int diag;
1071 
1072 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1073 	if (port_fwd_begin != NULL) {
1074 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1075 			(*port_fwd_begin)(fwd_ports_ids[i]);
1076 	}
1077 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1078 		lc_id = fwd_lcores_cpuids[i];
1079 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1080 			fwd_lcores[i]->stopped = 0;
1081 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1082 						     fwd_lcores[i], lc_id);
1083 			if (diag != 0)
1084 				printf("launch lcore %u failed - diag=%d\n",
1085 				       lc_id, diag);
1086 		}
1087 	}
1088 }
1089 
1090 /*
1091  * Launch packet forwarding configuration.
1092  */
1093 void
1094 start_packet_forwarding(int with_tx_first)
1095 {
1096 	port_fwd_begin_t port_fwd_begin;
1097 	port_fwd_end_t  port_fwd_end;
1098 	struct rte_port *port;
1099 	unsigned int i;
1100 	portid_t   pt_id;
1101 	streamid_t sm_id;
1102 
1103 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1104 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1105 
1106 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1107 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1108 
1109 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1110 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1111 		(!nb_rxq || !nb_txq))
1112 		rte_exit(EXIT_FAILURE,
1113 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1114 			cur_fwd_eng->fwd_mode_name);
1115 
1116 	if (all_ports_started() == 0) {
1117 		printf("Not all ports were started\n");
1118 		return;
1119 	}
1120 	if (test_done == 0) {
1121 		printf("Packet forwarding already started\n");
1122 		return;
1123 	}
1124 
1125 	if (init_fwd_streams() < 0) {
1126 		printf("init_fwd_streams() failed\n");
1127 		return;
1128 	}
1129 
1130 	if (dcb_test) {
1131 		for (i = 0; i < nb_fwd_ports; i++) {
1132 			pt_id = fwd_ports_ids[i];
1133 			port = &ports[pt_id];
1134 			if (!port->dcb_flag) {
1135 				printf("In DCB mode, all forwarding ports must "
1136 				       "be configured in this mode.\n");
1137 				return;
1138 			}
1139 		}
1140 		if (nb_fwd_lcores == 1) {
1141 			printf("In DCB mode, the number of forwarding cores "
1142 			       "should be larger than 1.\n");
1143 			return;
1144 		}
1145 	}
1146 	test_done = 0;
1147 
1148 	if (!no_flush_rx)
1149 		flush_fwd_rx_queues();
1150 
1151 	fwd_config_setup();
1152 	pkt_fwd_config_display(&cur_fwd_config);
1153 	rxtx_config_display();
1154 
1155 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1156 		pt_id = fwd_ports_ids[i];
1157 		port = &ports[pt_id];
1158 		rte_eth_stats_get(pt_id, &port->stats);
1159 		port->tx_dropped = 0;
1160 
1161 		map_port_queue_stats_mapping_registers(pt_id, port);
1162 	}
1163 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1164 		fwd_streams[sm_id]->rx_packets = 0;
1165 		fwd_streams[sm_id]->tx_packets = 0;
1166 		fwd_streams[sm_id]->fwd_dropped = 0;
1167 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1168 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1169 
1170 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1171 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1172 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1173 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1174 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1175 #endif
1176 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1177 		fwd_streams[sm_id]->core_cycles = 0;
1178 #endif
1179 	}
1180 	if (with_tx_first) {
1181 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1182 		if (port_fwd_begin != NULL) {
1183 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1184 				(*port_fwd_begin)(fwd_ports_ids[i]);
1185 		}
1186 		while (with_tx_first--) {
1187 			launch_packet_forwarding(
1188 					run_one_txonly_burst_on_core);
1189 			rte_eal_mp_wait_lcore();
1190 		}
1191 		port_fwd_end = tx_only_engine.port_fwd_end;
1192 		if (port_fwd_end != NULL) {
1193 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1194 				(*port_fwd_end)(fwd_ports_ids[i]);
1195 		}
1196 	}
1197 	launch_packet_forwarding(start_pkt_forward_on_core);
1198 }
1199 
1200 void
1201 stop_packet_forwarding(void)
1202 {
1203 	struct rte_eth_stats stats;
1204 	struct rte_port *port;
1205 	port_fwd_end_t  port_fwd_end;
1206 	int i;
1207 	portid_t   pt_id;
1208 	streamid_t sm_id;
1209 	lcoreid_t  lc_id;
1210 	uint64_t total_recv;
1211 	uint64_t total_xmit;
1212 	uint64_t total_rx_dropped;
1213 	uint64_t total_tx_dropped;
1214 	uint64_t total_rx_nombuf;
1215 	uint64_t tx_dropped;
1216 	uint64_t rx_bad_ip_csum;
1217 	uint64_t rx_bad_l4_csum;
1218 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1219 	uint64_t fwd_cycles;
1220 #endif
1221 
1222 	static const char *acc_stats_border = "+++++++++++++++";
1223 
1224 	if (test_done) {
1225 		printf("Packet forwarding not started\n");
1226 		return;
1227 	}
1228 	printf("Telling cores to stop...");
1229 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1230 		fwd_lcores[lc_id]->stopped = 1;
1231 	printf("\nWaiting for lcores to finish...\n");
1232 	rte_eal_mp_wait_lcore();
1233 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1234 	if (port_fwd_end != NULL) {
1235 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1236 			pt_id = fwd_ports_ids[i];
1237 			(*port_fwd_end)(pt_id);
1238 		}
1239 	}
1240 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1241 	fwd_cycles = 0;
1242 #endif
1243 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1244 		if (cur_fwd_config.nb_fwd_streams >
1245 		    cur_fwd_config.nb_fwd_ports) {
1246 			fwd_stream_stats_display(sm_id);
1247 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1248 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1249 		} else {
1250 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1251 				fwd_streams[sm_id];
1252 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1253 				fwd_streams[sm_id];
1254 		}
1255 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1256 		tx_dropped = (uint64_t) (tx_dropped +
1257 					 fwd_streams[sm_id]->fwd_dropped);
1258 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1259 
1260 		rx_bad_ip_csum =
1261 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1262 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1263 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1264 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1265 							rx_bad_ip_csum;
1266 
1267 		rx_bad_l4_csum =
1268 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1269 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1270 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1271 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1272 							rx_bad_l4_csum;
1273 
1274 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1275 		fwd_cycles = (uint64_t) (fwd_cycles +
1276 					 fwd_streams[sm_id]->core_cycles);
1277 #endif
1278 	}
1279 	total_recv = 0;
1280 	total_xmit = 0;
1281 	total_rx_dropped = 0;
1282 	total_tx_dropped = 0;
1283 	total_rx_nombuf  = 0;
1284 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1285 		pt_id = fwd_ports_ids[i];
1286 
1287 		port = &ports[pt_id];
1288 		rte_eth_stats_get(pt_id, &stats);
1289 		stats.ipackets -= port->stats.ipackets;
1290 		port->stats.ipackets = 0;
1291 		stats.opackets -= port->stats.opackets;
1292 		port->stats.opackets = 0;
1293 		stats.ibytes   -= port->stats.ibytes;
1294 		port->stats.ibytes = 0;
1295 		stats.obytes   -= port->stats.obytes;
1296 		port->stats.obytes = 0;
1297 		stats.imissed  -= port->stats.imissed;
1298 		port->stats.imissed = 0;
1299 		stats.oerrors  -= port->stats.oerrors;
1300 		port->stats.oerrors = 0;
1301 		stats.rx_nombuf -= port->stats.rx_nombuf;
1302 		port->stats.rx_nombuf = 0;
1303 
1304 		total_recv += stats.ipackets;
1305 		total_xmit += stats.opackets;
1306 		total_rx_dropped += stats.imissed;
1307 		total_tx_dropped += port->tx_dropped;
1308 		total_rx_nombuf  += stats.rx_nombuf;
1309 
1310 		fwd_port_stats_display(pt_id, &stats);
1311 	}
1312 
1313 	printf("\n  %s Accumulated forward statistics for all ports"
1314 	       "%s\n",
1315 	       acc_stats_border, acc_stats_border);
1316 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1317 	       "%-"PRIu64"\n"
1318 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1319 	       "%-"PRIu64"\n",
1320 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1321 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1322 	if (total_rx_nombuf > 0)
1323 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1324 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1325 	       "%s\n",
1326 	       acc_stats_border, acc_stats_border);
1327 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1328 	if (total_recv > 0)
1329 		printf("\n  CPU cycles/packet=%u (total cycles="
1330 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1331 		       (unsigned int)(fwd_cycles / total_recv),
1332 		       fwd_cycles, total_recv);
1333 #endif
1334 	printf("\nDone.\n");
1335 	test_done = 1;
1336 }
1337 
1338 void
1339 dev_set_link_up(portid_t pid)
1340 {
1341 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1342 		printf("\nSet link up failed.\n");
1343 }
1344 
1345 void
1346 dev_set_link_down(portid_t pid)
1347 {
1348 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1349 		printf("\nSet link down failed.\n");
1350 }
1351 
1352 static int
1353 all_ports_started(void)
1354 {
1355 	portid_t pi;
1356 	struct rte_port *port;
1357 
1358 	RTE_ETH_FOREACH_DEV(pi) {
1359 		port = &ports[pi];
1360 		/* Check if there is a port which is not started */
1361 		if ((port->port_status != RTE_PORT_STARTED) &&
1362 			(port->slave_flag == 0))
1363 			return 0;
1364 	}
1365 
1366 	/* All ports are started. */
1367 	return 1;
1368 }
1369 
1370 int
1371 all_ports_stopped(void)
1372 {
1373 	portid_t pi;
1374 	struct rte_port *port;
1375 
1376 	RTE_ETH_FOREACH_DEV(pi) {
1377 		port = &ports[pi];
1378 		if ((port->port_status != RTE_PORT_STOPPED) &&
1379 			(port->slave_flag == 0))
1380 			return 0;
1381 	}
1382 
1383 	return 1;
1384 }
1385 
1386 int
1387 port_is_started(portid_t port_id)
1388 {
1389 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1390 		return 0;
1391 
1392 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1393 		return 0;
1394 
1395 	return 1;
1396 }
1397 
1398 static int
1399 port_is_closed(portid_t port_id)
1400 {
1401 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1402 		return 0;
1403 
1404 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1405 		return 0;
1406 
1407 	return 1;
1408 }
1409 
1410 int
1411 start_port(portid_t pid)
1412 {
1413 	int diag, need_check_link_status = -1;
1414 	portid_t pi;
1415 	queueid_t qi;
1416 	struct rte_port *port;
1417 	struct ether_addr mac_addr;
1418 	enum rte_eth_event_type event_type;
1419 
1420 	if (port_id_is_invalid(pid, ENABLED_WARN))
1421 		return 0;
1422 
1423 	if (dcb_config)
1424 		dcb_test = 1;
1425 	RTE_ETH_FOREACH_DEV(pi) {
1426 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1427 			continue;
1428 
1429 		need_check_link_status = 0;
1430 		port = &ports[pi];
1431 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1432 						 RTE_PORT_HANDLING) == 0) {
1433 			printf("Port %d is not stopped\n", pi);
1434 			continue;
1435 		}
1436 
1437 		if (port->need_reconfig > 0) {
1438 			port->need_reconfig = 0;
1439 
1440 			if (flow_isolate_all) {
1441 				int ret = port_flow_isolate(pi, 1);
1442 				if (ret) {
1443 					printf("Failed to apply isolated"
1444 					       " mode on port %d\n", pi);
1445 					return -1;
1446 				}
1447 			}
1448 
1449 			printf("Configuring Port %d (socket %u)\n", pi,
1450 					port->socket_id);
1451 			/* configure port */
1452 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1453 						&(port->dev_conf));
1454 			if (diag != 0) {
1455 				if (rte_atomic16_cmpset(&(port->port_status),
1456 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1457 					printf("Port %d cannot be set back "
1458 							"to stopped\n", pi);
1459 				printf("Failed to configure port %d\n", pi);
1460 				/* try to reconfigure port next time */
1461 				port->need_reconfig = 1;
1462 				return -1;
1463 			}
1464 		}
1465 		if (port->need_reconfig_queues > 0) {
1466 			port->need_reconfig_queues = 0;
1467 			/* setup tx queues */
1468 			for (qi = 0; qi < nb_txq; qi++) {
1469 				if ((numa_support) &&
1470 					(txring_numa[pi] != NUMA_NO_CONFIG))
1471 					diag = rte_eth_tx_queue_setup(pi, qi,
1472 						nb_txd, txring_numa[pi],
1473 						&(port->tx_conf));
1474 				else
1475 					diag = rte_eth_tx_queue_setup(pi, qi,
1476 						nb_txd, port->socket_id,
1477 						&(port->tx_conf));
1478 
1479 				if (diag == 0)
1480 					continue;
1481 
1482 				/* Failed to set up a TX queue; return */
1483 				if (rte_atomic16_cmpset(&(port->port_status),
1484 							RTE_PORT_HANDLING,
1485 							RTE_PORT_STOPPED) == 0)
1486 					printf("Port %d cannot be set back "
1487 							"to stopped\n", pi);
1488 				printf("Failed to configure port %d TX queues\n", pi);
1489 				/* try to reconfigure queues next time */
1490 				port->need_reconfig_queues = 1;
1491 				return -1;
1492 			}
1493 			/* setup rx queues */
1494 			for (qi = 0; qi < nb_rxq; qi++) {
1495 				if ((numa_support) &&
1496 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1497 					struct rte_mempool * mp =
1498 						mbuf_pool_find(rxring_numa[pi]);
1499 					if (mp == NULL) {
1500 						printf("Failed to setup RX queue: "
1501 							"No mempool allocation"
1502 							" on socket %d\n",
1503 							rxring_numa[pi]);
1504 						return -1;
1505 					}
1506 
1507 					diag = rte_eth_rx_queue_setup(pi, qi,
1508 					     nb_rxd, rxring_numa[pi],
1509 					     &(port->rx_conf), mp);
1510 				} else {
1511 					struct rte_mempool *mp =
1512 						mbuf_pool_find(port->socket_id);
1513 					if (mp == NULL) {
1514 						printf("Failed to setup RX queue: "
1515 							"No mempool allocation"
1516 							" on socket %d\n",
1517 							port->socket_id);
1518 						return -1;
1519 					}
1520 					diag = rte_eth_rx_queue_setup(pi, qi,
1521 					     nb_rxd, port->socket_id,
1522 					     &(port->rx_conf), mp);
1523 				}
1524 				if (diag == 0)
1525 					continue;
1526 
1527 				/* Failed to set up an RX queue; return */
1528 				if (rte_atomic16_cmpset(&(port->port_status),
1529 							RTE_PORT_HANDLING,
1530 							RTE_PORT_STOPPED) == 0)
1531 					printf("Port %d cannot be set back "
1532 							"to stopped\n", pi);
1533 				printf("Failed to configure port %d RX queues\n", pi);
1534 				/* try to reconfigure queues next time */
1535 				port->need_reconfig_queues = 1;
1536 				return -1;
1537 			}
1538 		}
1539 
1540 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1541 		     event_type < RTE_ETH_EVENT_MAX;
1542 		     event_type++) {
1543 			diag = rte_eth_dev_callback_register(pi,
1544 							event_type,
1545 							eth_event_callback,
1546 							NULL);
1547 			if (diag) {
1548 				printf("Failed to setup event callback for event %d\n",
1549 					event_type);
1550 				return -1;
1551 			}
1552 		}
1553 
1554 		/* start port */
1555 		if (rte_eth_dev_start(pi) < 0) {
1556 			printf("Failed to start port %d\n", pi);
1557 
1558 			/* Failed to start the port; set it back to stopped */
1559 			if (rte_atomic16_cmpset(&(port->port_status),
1560 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1561 				printf("Port %d cannot be set back to "
1562 							"stopped\n", pi);
1563 			continue;
1564 		}
1565 
1566 		if (rte_atomic16_cmpset(&(port->port_status),
1567 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1568 			printf("Port %d cannot be set to started\n", pi);
1569 
1570 		rte_eth_macaddr_get(pi, &mac_addr);
1571 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1572 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1573 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1574 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1575 
1576 		/* at least one port started, need checking link status */
1577 		need_check_link_status = 1;
1578 	}
1579 
1580 	if (need_check_link_status == 1 && !no_link_check)
1581 		check_all_ports_link_status(RTE_PORT_ALL);
1582 	else if (need_check_link_status == 0)
1583 		printf("Please stop the ports first\n");
1584 
1585 	printf("Done\n");
1586 	return 0;
1587 }
1588 
1589 void
1590 stop_port(portid_t pid)
1591 {
1592 	portid_t pi;
1593 	struct rte_port *port;
1594 	int need_check_link_status = 0;
1595 
1596 	if (dcb_test) {
1597 		dcb_test = 0;
1598 		dcb_config = 0;
1599 	}
1600 
1601 	if (port_id_is_invalid(pid, ENABLED_WARN))
1602 		return;
1603 
1604 	printf("Stopping ports...\n");
1605 
1606 	RTE_ETH_FOREACH_DEV(pi) {
1607 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1608 			continue;
1609 
1610 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1611 			printf("Please remove port %d from forwarding configuration.\n", pi);
1612 			continue;
1613 		}
1614 
1615 		if (port_is_bonding_slave(pi)) {
1616 			printf("Please remove port %d from bonded device.\n", pi);
1617 			continue;
1618 		}
1619 
1620 		port = &ports[pi];
1621 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1622 						RTE_PORT_HANDLING) == 0)
1623 			continue;
1624 
1625 		rte_eth_dev_stop(pi);
1626 
1627 		if (rte_atomic16_cmpset(&(port->port_status),
1628 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1629 			printf("Port %d cannot be set to stopped\n", pi);
1630 		need_check_link_status = 1;
1631 	}
1632 	if (need_check_link_status && !no_link_check)
1633 		check_all_ports_link_status(RTE_PORT_ALL);
1634 
1635 	printf("Done\n");
1636 }
1637 
1638 void
1639 close_port(portid_t pid)
1640 {
1641 	portid_t pi;
1642 	struct rte_port *port;
1643 
1644 	if (port_id_is_invalid(pid, ENABLED_WARN))
1645 		return;
1646 
1647 	printf("Closing ports...\n");
1648 
1649 	RTE_ETH_FOREACH_DEV(pi) {
1650 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1651 			continue;
1652 
1653 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1654 			printf("Please remove port %d from forwarding configuration.\n", pi);
1655 			continue;
1656 		}
1657 
1658 		if (port_is_bonding_slave(pi)) {
1659 			printf("Please remove port %d from bonded device.\n", pi);
1660 			continue;
1661 		}
1662 
1663 		port = &ports[pi];
1664 		if (rte_atomic16_cmpset(&(port->port_status),
1665 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1666 			printf("Port %d is already closed\n", pi);
1667 			continue;
1668 		}
1669 
1670 		if (rte_atomic16_cmpset(&(port->port_status),
1671 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1672 			printf("Port %d is not stopped\n", pi);
1673 			continue;
1674 		}
1675 
1676 		if (port->flow_list)
1677 			port_flow_flush(pi);
1678 		rte_eth_dev_close(pi);
1679 
1680 		if (rte_atomic16_cmpset(&(port->port_status),
1681 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1682 			printf("Port %d cannot be set to closed\n", pi);
1683 	}
1684 
1685 	printf("Done\n");
1686 }
1687 
1688 void
1689 reset_port(portid_t pid)
1690 {
1691 	int diag;
1692 	portid_t pi;
1693 	struct rte_port *port;
1694 
1695 	if (port_id_is_invalid(pid, ENABLED_WARN))
1696 		return;
1697 
1698 	printf("Resetting ports...\n");
1699 
1700 	RTE_ETH_FOREACH_DEV(pi) {
1701 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1702 			continue;
1703 
1704 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1705 			printf("Please remove port %d from forwarding "
1706 			       "configuration.\n", pi);
1707 			continue;
1708 		}
1709 
1710 		if (port_is_bonding_slave(pi)) {
1711 			printf("Please remove port %d from bonded device.\n",
1712 			       pi);
1713 			continue;
1714 		}
1715 
1716 		diag = rte_eth_dev_reset(pi);
1717 		if (diag == 0) {
1718 			port = &ports[pi];
1719 			port->need_reconfig = 1;
1720 			port->need_reconfig_queues = 1;
1721 		} else {
1722 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1723 		}
1724 	}
1725 
1726 	printf("Done\n");
1727 }
1728 
1729 void
1730 attach_port(char *identifier)
1731 {
1732 	portid_t pi = 0;
1733 	unsigned int socket_id;
1734 
1735 	printf("Attaching a new port...\n");
1736 
1737 	if (identifier == NULL) {
1738 		printf("Invalid parameter specified\n");
1739 		return;
1740 	}
1741 
1742 	if (rte_eth_dev_attach(identifier, &pi))
1743 		return;
1744 
1745 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1746 	/* if socket_id is invalid, set to 0 */
1747 	if (check_socket_id(socket_id) < 0)
1748 		socket_id = 0;
1749 	reconfig(pi, socket_id);
1750 	rte_eth_promiscuous_enable(pi);
1751 
1752 	nb_ports = rte_eth_dev_count();
1753 
1754 	ports[pi].port_status = RTE_PORT_STOPPED;
1755 
1756 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1757 	printf("Done\n");
1758 }
1759 
1760 void
1761 detach_port(uint8_t port_id)
1762 {
1763 	char name[RTE_ETH_NAME_MAX_LEN];
1764 
1765 	printf("Detaching a port...\n");
1766 
1767 	if (!port_is_closed(port_id)) {
1768 		printf("Please close port first\n");
1769 		return;
1770 	}
1771 
1772 	if (ports[port_id].flow_list)
1773 		port_flow_flush(port_id);
1774 
1775 	if (rte_eth_dev_detach(port_id, name)) {
1776 		RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1777 		return;
1778 	}
1779 
1780 	nb_ports = rte_eth_dev_count();
1781 
1782 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1783 			name, nb_ports);
1784 	printf("Done\n");
1785 	return;
1786 }
1787 
1788 void
1789 pmd_test_exit(void)
1790 {
1791 	portid_t pt_id;
1792 
1793 	if (test_done == 0)
1794 		stop_packet_forwarding();
1795 
1796 	if (ports != NULL) {
1797 		no_link_check = 1;
1798 		RTE_ETH_FOREACH_DEV(pt_id) {
1799 			printf("\nShutting down port %d...\n", pt_id);
1800 			fflush(stdout);
1801 			stop_port(pt_id);
1802 			close_port(pt_id);
1803 		}
1804 	}
1805 	printf("\nBye...\n");
1806 }
1807 
1808 typedef void (*cmd_func_t)(void);
1809 struct pmd_test_command {
1810 	const char *cmd_name;
1811 	cmd_func_t cmd_func;
1812 };
1813 
1814 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1815 
1816 /* Check the link status of all ports for up to 9 s, then print the status of each port */
1817 static void
1818 check_all_ports_link_status(uint32_t port_mask)
1819 {
1820 #define CHECK_INTERVAL 100 /* 100ms */
1821 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1822 	portid_t portid;
1823 	uint8_t count, all_ports_up, print_flag = 0;
1824 	struct rte_eth_link link;
1825 
1826 	printf("Checking link statuses...\n");
1827 	fflush(stdout);
1828 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1829 		all_ports_up = 1;
1830 		RTE_ETH_FOREACH_DEV(portid) {
1831 			if ((port_mask & (1 << portid)) == 0)
1832 				continue;
1833 			memset(&link, 0, sizeof(link));
1834 			rte_eth_link_get_nowait(portid, &link);
1835 			/* print link status if flag set */
1836 			if (print_flag == 1) {
1837 				if (link.link_status)
					printf(
					"Port %d Link Up. speed %u Mbps - %s\n",
					portid, link.link_speed,
					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					"full-duplex" : "half-duplex");
1843 				else
1844 					printf("Port %d Link Down\n", portid);
1845 				continue;
1846 			}
1847 			/* clear all_ports_up flag if any link down */
1848 			if (link.link_status == ETH_LINK_DOWN) {
1849 				all_ports_up = 0;
1850 				break;
1851 			}
1852 		}
1853 		/* after finally printing all link status, get out */
1854 		if (print_flag == 1)
1855 			break;
1856 
1857 		if (all_ports_up == 0) {
1858 			fflush(stdout);
1859 			rte_delay_ms(CHECK_INTERVAL);
1860 		}
1861 
1862 		/* set the print_flag if all ports up or timeout */
1863 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1864 			print_flag = 1;
1865 		}
1866 
1867 		if (lsc_interrupt)
1868 			break;
1869 	}
1870 }
1871 
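/*
 * Worked example (illustration): the loop above polls every
 * CHECK_INTERVAL = 100 ms for at most MAX_CHECK_TIME iterations, i.e.
 * roughly 90 * 100 ms = 9 s. A call such as
 *
 *	check_all_ports_link_status((1u << 0) | (1u << 2));
 *
 * would wait on ports 0 and 2 only, printing the final status once all
 * masked ports are up or the timeout expires.
 */
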
1872 static void
1873 rmv_event_callback(void *arg)
1874 {
1875 	struct rte_eth_dev *dev;
1876 	uint8_t port_id = (intptr_t)arg;
1877 
1878 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1879 	dev = &rte_eth_devices[port_id];
1880 
1881 	stop_port(port_id);
1882 	close_port(port_id);
1883 	printf("removing device %s\n", dev->device->name);
1884 	if (rte_eal_dev_detach(dev->device))
1885 		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1886 			dev->device->name);
1887 }
1888 
1889 /* This function is used by the interrupt thread */
1890 static int
1891 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1892 		  void *ret_param)
1893 {
1894 	static const char * const event_desc[] = {
1895 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1896 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1897 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1898 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1899 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1900 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1901 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1902 		[RTE_ETH_EVENT_MAX] = NULL,
1903 	};
1904 
1905 	RTE_SET_USED(param);
1906 	RTE_SET_USED(ret_param);
1907 
1908 	if (type >= RTE_ETH_EVENT_MAX) {
1909 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1910 			port_id, __func__, type);
1911 		fflush(stderr);
1912 	} else if (event_print_mask & (UINT32_C(1) << type)) {
1913 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1914 			event_desc[type]);
1915 		fflush(stdout);
1916 	}
1917 
1918 	switch (type) {
1919 	case RTE_ETH_EVENT_INTR_RMV:
1920 		if (rte_eal_alarm_set(100000,
1921 				rmv_event_callback, (void *)(intptr_t)port_id))
1922 			fprintf(stderr, "Could not set up deferred device removal\n");
1923 		break;
1924 	default:
1925 		break;
1926 	}
1927 	return 0;
1928 }
1929 
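/*
 * Sketch (assumption: the actual registration happens elsewhere in this
 * file, and the event type shown is just one example): a callback like
 * the one above is hooked into ethdev events roughly as:
 *
 *	rte_eth_dev_callback_register(pid, RTE_ETH_EVENT_INTR_RMV,
 *				      eth_event_callback, NULL);
 */
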
1930 static int
1931 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1932 {
1933 	uint16_t i;
1934 	int diag;
1935 	uint8_t mapping_found = 0;
1936 
1937 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1938 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1940 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1941 					tx_queue_stats_mappings[i].queue_id,
1942 					tx_queue_stats_mappings[i].stats_counter_id);
1943 			if (diag != 0)
1944 				return diag;
1945 			mapping_found = 1;
1946 		}
1947 	}
1948 	if (mapping_found)
1949 		port->tx_queue_stats_mapping_enabled = 1;
1950 	return 0;
1951 }
1952 
1953 static int
1954 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1955 {
1956 	uint16_t i;
1957 	int diag;
1958 	uint8_t mapping_found = 0;
1959 
1960 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1961 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1963 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1964 					rx_queue_stats_mappings[i].queue_id,
1965 					rx_queue_stats_mappings[i].stats_counter_id);
1966 			if (diag != 0)
1967 				return diag;
1968 			mapping_found = 1;
1969 		}
1970 	}
1971 	if (mapping_found)
1972 		port->rx_queue_stats_mapping_enabled = 1;
1973 	return 0;
1974 }
1975 
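/*
 * Example (an assumption about the command-line format): the mapping
 * tables walked above are filled from testpmd options such as
 *
 *	--tx-queue-stats-mapping=(0,0,0),(0,1,1)
 *	--rx-queue-stats-mapping=(0,0,0),(0,1,1)
 *
 * which would map queues 0 and 1 of port 0 onto stats counters 0 and 1.
 */
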
1976 static void
1977 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1978 {
1979 	int diag = 0;
1980 
1981 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1982 	if (diag != 0) {
1983 		if (diag == -ENOTSUP) {
1984 			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported for port id=%d\n", pi);
		} else {
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
		}
1992 	}
1993 
1994 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1995 	if (diag != 0) {
1996 		if (diag == -ENOTSUP) {
1997 			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported for port id=%d\n", pi);
		} else {
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
		}
2005 	}
2006 }
2007 
2008 static void
2009 rxtx_port_config(struct rte_port *port)
2010 {
2011 	port->rx_conf = port->dev_info.default_rxconf;
2012 	port->tx_conf = port->dev_info.default_txconf;
2013 
2014 	/* Check if any RX/TX parameters have been passed */
2015 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2016 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2017 
2018 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2019 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2020 
2021 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2022 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2023 
2024 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2025 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2026 
2027 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2028 		port->rx_conf.rx_drop_en = rx_drop_en;
2029 
2030 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2031 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2032 
2033 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2034 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2035 
2036 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2037 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2038 
2039 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2040 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2041 
2042 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2043 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2044 
2045 	if (txq_flags != RTE_PMD_PARAM_UNSET)
2046 		port->tx_conf.txq_flags = txq_flags;
2047 }
2048 
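/*
 * Example (an assumption about the option names): the RTE_PMD_PARAM_UNSET
 * checks above let explicit command-line values override the PMD
 * defaults, e.g.:
 *
 *	testpmd ... -- --rxpt=8 --rxht=8 --rxwt=4 --txfreet=32
 *
 * would set the RX prefetch/host/write-back thresholds and the TX free
 * threshold while leaving the remaining fields at the driver defaults.
 */
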
2049 void
2050 init_port_config(void)
2051 {
2052 	portid_t pid;
2053 	struct rte_port *port;
2054 
2055 	RTE_ETH_FOREACH_DEV(pid) {
2056 		port = &ports[pid];
2057 		port->dev_conf.rxmode = rx_mode;
2058 		port->dev_conf.fdir_conf = fdir_conf;
		port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				(nb_rxq > 1) ? rss_hf : 0;
2066 
2067 		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2069 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2070 			else
2071 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2072 		}
2073 
2074 		rxtx_port_config(port);
2075 
2076 		rte_eth_macaddr_get(pid, &port->eth_addr);
2077 
2078 		map_port_queue_stats_mapping_registers(pid, port);
2079 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2080 		rte_pmd_ixgbe_bypass_init(pid);
2081 #endif
2082 
2083 		if (lsc_interrupt &&
2084 		    (rte_eth_devices[pid].data->dev_flags &
2085 		     RTE_ETH_DEV_INTR_LSC))
2086 			port->dev_conf.intr_conf.lsc = 1;
2087 		if (rmv_interrupt &&
2088 		    (rte_eth_devices[pid].data->dev_flags &
2089 		     RTE_ETH_DEV_INTR_RMV))
2090 			port->dev_conf.intr_conf.rmv = 1;
2091 	}
2092 }
2093 
2094 void set_port_slave_flag(portid_t slave_pid)
2095 {
2096 	struct rte_port *port;
2097 
2098 	port = &ports[slave_pid];
2099 	port->slave_flag = 1;
2100 }
2101 
2102 void clear_port_slave_flag(portid_t slave_pid)
2103 {
2104 	struct rte_port *port;
2105 
2106 	port = &ports[slave_pid];
2107 	port->slave_flag = 0;
2108 }
2109 
2110 uint8_t port_is_bonding_slave(portid_t slave_pid)
2111 {
2112 	struct rte_port *port;
2113 
2114 	port = &ports[slave_pid];
2115 	return port->slave_flag;
2116 }
2117 
2118 const uint16_t vlan_tags[] = {
2119 		0,  1,  2,  3,  4,  5,  6,  7,
2120 		8,  9, 10, 11,  12, 13, 14, 15,
2121 		16, 17, 18, 19, 20, 21, 22, 23,
2122 		24, 25, 26, 27, 28, 29, 30, 31
2123 };
2124 
static int
2126 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2127 		 enum dcb_mode_enable dcb_mode,
2128 		 enum rte_eth_nb_tcs num_tcs,
2129 		 uint8_t pfc_en)
2130 {
2131 	uint8_t i;
2132 
2133 	/*
	 * Build up the correct configuration for DCB+VT based on the VLAN tags
	 * array given above, and the number of traffic classes available for use.
2136 	 */
2137 	if (dcb_mode == DCB_VT_ENABLED) {
2138 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2139 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2140 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2141 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2142 
2143 		/* VMDQ+DCB RX and TX configurations */
2144 		vmdq_rx_conf->enable_default_pool = 0;
2145 		vmdq_rx_conf->default_pool = 0;
2146 		vmdq_rx_conf->nb_queue_pools =
2147 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2148 		vmdq_tx_conf->nb_queue_pools =
2149 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2150 
2151 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2152 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2153 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2154 			vmdq_rx_conf->pool_map[i].pools =
2155 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2156 		}
2157 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2158 			vmdq_rx_conf->dcb_tc[i] = i;
2159 			vmdq_tx_conf->dcb_tc[i] = i;
2160 		}
2161 
2162 		/* set DCB mode of RX and TX of multiple queues */
2163 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2164 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2165 	} else {
2166 		struct rte_eth_dcb_rx_conf *rx_conf =
2167 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2168 		struct rte_eth_dcb_tx_conf *tx_conf =
2169 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2170 
2171 		rx_conf->nb_tcs = num_tcs;
2172 		tx_conf->nb_tcs = num_tcs;
2173 
2174 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2175 			rx_conf->dcb_tc[i] = i % num_tcs;
2176 			tx_conf->dcb_tc[i] = i % num_tcs;
2177 		}
2178 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2179 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2180 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2181 	}
2182 
2183 	if (pfc_en)
2184 		eth_conf->dcb_capability_en =
2185 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2186 	else
2187 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2188 
2189 	return 0;
2190 }
2191 
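/*
 * Worked example (illustration): with dcb_mode == DCB_VT_ENABLED and
 * num_tcs == ETH_4_TCS the code above selects ETH_32_POOLS, so
 * nb_pool_maps == 32 and, e.g., pool_map[5] maps vlan_tags[5] (VLAN 5)
 * to pool bit 1 << (5 % 32) == 0x20, while each user priority i is
 * assigned to traffic class i.
 */
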
2192 int
2193 init_port_dcb_config(portid_t pid,
2194 		     enum dcb_mode_enable dcb_mode,
2195 		     enum rte_eth_nb_tcs num_tcs,
2196 		     uint8_t pfc_en)
2197 {
2198 	struct rte_eth_conf port_conf;
2199 	struct rte_port *rte_port;
2200 	int retval;
2201 	uint16_t i;
2202 
2203 	rte_port = &ports[pid];
2204 
2205 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2206 	/* Enter DCB configuration status */
2207 	dcb_config = 1;
2208 
	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2210 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2211 	if (retval < 0)
2212 		return retval;
2213 	port_conf.rxmode.hw_vlan_filter = 1;
2214 
	/*
	 * Write the configuration into the device.
	 * Set the number of RX & TX queues to 0, so
	 * the RX & TX queues will not be set up.
	 */
	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
	if (retval < 0)
		return retval;
2221 
2222 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2223 
2224 	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue IDs of the VMDq pools start after the PF queues.
2226 	 */
2227 	if (dcb_mode == DCB_VT_ENABLED &&
2228 	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
2231 		return -1;
2232 	}
2233 
	/* Assume the ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode
2236 	 */
2237 	if (dcb_mode == DCB_VT_ENABLED) {
2238 		if (rte_port->dev_info.max_vfs > 0) {
2239 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2240 			nb_txq = rte_port->dev_info.nb_tx_queues;
2241 		} else {
2242 			nb_rxq = rte_port->dev_info.max_rx_queues;
2243 			nb_txq = rte_port->dev_info.max_tx_queues;
2244 		}
2245 	} else {
		/* If VT is disabled, use all PF queues */
2247 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2248 			nb_rxq = rte_port->dev_info.max_rx_queues;
2249 			nb_txq = rte_port->dev_info.max_tx_queues;
2250 		} else {
2251 			nb_rxq = (queueid_t)num_tcs;
2252 			nb_txq = (queueid_t)num_tcs;
		}
2255 	}
2256 	rx_free_thresh = 64;
2257 
2258 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2259 
2260 	rxtx_port_config(rte_port);
2261 	/* VLAN filter */
2262 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2263 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2264 		rx_vft_set(pid, vlan_tags[i], 1);
2265 
2266 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2267 	map_port_queue_stats_mapping_registers(pid, rte_port);
2268 
2269 	rte_port->dcb_flag = 1;
2270 
2271 	return 0;
2272 }
2273 
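/*
 * Usage example (an assumption about the interactive syntax): this is
 * typically reached via a command along the lines of
 *
 *	testpmd> port config 0 dcb vt on 4 pfc on
 *
 * i.e. pid = 0, dcb_mode = DCB_VT_ENABLED, num_tcs = ETH_4_TCS and
 * pfc_en = 1, with the port stopped beforehand.
 */
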
2274 static void
2275 init_port(void)
2276 {
2277 	/* Configuration of Ethernet ports. */
2278 	ports = rte_zmalloc("testpmd: ports",
2279 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2280 			    RTE_CACHE_LINE_SIZE);
2281 	if (ports == NULL) {
2282 		rte_exit(EXIT_FAILURE,
2283 				"rte_zmalloc(%d struct rte_port) failed\n",
2284 				RTE_MAX_ETHPORTS);
2285 	}
2286 }
2287 
2288 static void
2289 force_quit(void)
2290 {
2291 	pmd_test_exit();
2292 	prompt_exit();
2293 }
2294 
2295 static void
2296 print_stats(void)
2297 {
2298 	uint8_t i;
2299 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2300 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2301 
2302 	/* Clear screen and move to top left */
2303 	printf("%s%s", clr, top_left);
2304 
2305 	printf("\nPort statistics ====================================");
2306 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2307 		nic_stats_display(fwd_ports_ids[i]);
2308 }
2309 
2310 static void
2311 signal_handler(int signum)
2312 {
2313 	if (signum == SIGINT || signum == SIGTERM) {
2314 		printf("\nSignal %d received, preparing to exit...\n",
2315 				signum);
2316 #ifdef RTE_LIBRTE_PDUMP
2317 		/* uninitialize packet capture framework */
2318 		rte_pdump_uninit();
2319 #endif
2320 #ifdef RTE_LIBRTE_LATENCY_STATS
2321 		rte_latencystats_uninit();
2322 #endif
2323 		force_quit();
2324 		/* exit with the expected status */
2325 		signal(signum, SIG_DFL);
2326 		kill(getpid(), signum);
2327 	}
2328 }
2329 
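/*
 * Note (descriptive, added for clarity): restoring SIG_DFL and re-raising
 * the signal makes the process terminate with the conventional
 * killed-by-signal status, so a parent shell sees e.g. 128 + SIGINT = 130
 * rather than a normal exit code.
 */
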
2330 int
2331 main(int argc, char** argv)
2332 {
2333 	int  diag;
2334 	portid_t port_id;
2335 
2336 	signal(SIGINT, signal_handler);
2337 	signal(SIGTERM, signal_handler);
2338 
2339 	diag = rte_eal_init(argc, argv);
2340 	if (diag < 0)
2341 		rte_panic("Cannot init EAL\n");
2342 
2343 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2344 		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2345 			strerror(errno));
2346 	}
2347 
2348 #ifdef RTE_LIBRTE_PDUMP
2349 	/* initialize packet capture framework */
2350 	rte_pdump_init(NULL);
2351 #endif
2352 
2353 	nb_ports = (portid_t) rte_eth_dev_count();
2354 	if (nb_ports == 0)
2355 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2356 
2357 	/* allocate port structures, and init them */
2358 	init_port();
2359 
2360 	set_def_fwd_config();
2361 	if (nb_lcores == 0)
2362 		rte_panic("Empty set of forwarding logical cores - check the "
2363 			  "core mask supplied in the command parameters\n");
2364 
2365 	/* Bitrate/latency stats disabled by default */
2366 #ifdef RTE_LIBRTE_BITRATE
2367 	bitrate_enabled = 0;
2368 #endif
2369 #ifdef RTE_LIBRTE_LATENCY_STATS
2370 	latencystats_enabled = 0;
2371 #endif
2372 
2373 	argc -= diag;
2374 	argv += diag;
2375 	if (argc > 1)
2376 		launch_args_parse(argc, argv);
2377 
2378 	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");
2381 
2382 	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				"using --tx-first. Disabling.\n");
2385 		lsc_interrupt = 0;
2386 	}
2387 
2388 	if (!nb_rxq && !nb_txq)
		printf("Warning: Either RX or TX queue count should be non-zero\n");
2390 
2391 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2392 		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
2394 		       nb_rxq, nb_txq);
2395 
2396 	init_config();
2397 	if (start_port(RTE_PORT_ALL) != 0)
2398 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2399 
2400 	/* set all ports to promiscuous mode by default */
2401 	RTE_ETH_FOREACH_DEV(port_id)
2402 		rte_eth_promiscuous_enable(port_id);
2403 
2404 	/* Init metrics library */
2405 	rte_metrics_init(rte_socket_id());
2406 
2407 #ifdef RTE_LIBRTE_LATENCY_STATS
2408 	if (latencystats_enabled != 0) {
2409 		int ret = rte_latencystats_init(1, NULL);
2410 		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
2413 		printf("Latencystats running on lcore %d\n",
2414 			latencystats_lcore_id);
2415 	}
2416 #endif
2417 
2418 	/* Setup bitrate stats */
2419 #ifdef RTE_LIBRTE_BITRATE
2420 	if (bitrate_enabled != 0) {
2421 		bitrate_data = rte_stats_bitrate_create();
2422 		if (bitrate_data == NULL)
2423 			rte_exit(EXIT_FAILURE,
2424 				"Could not allocate bitrate data.\n");
2425 		rte_stats_bitrate_reg(bitrate_data);
2426 	}
2427 #endif
2428 
2429 #ifdef RTE_LIBRTE_CMDLINE
2430 	if (strlen(cmdline_filename) != 0)
2431 		cmdline_read_from_file(cmdline_filename);
2432 
2433 	if (interactive == 1) {
2434 		if (auto_start) {
2435 			printf("Start automatic packet forwarding\n");
2436 			start_packet_forwarding(0);
2437 		}
2438 		prompt();
2439 		pmd_test_exit();
2440 	} else
2441 #endif
2442 	{
2443 		char c;
2444 		int rc;
2445 
		printf("No interactive command line: starting packet forwarding\n");
2447 		start_packet_forwarding(tx_first);
2448 		if (stats_period != 0) {
2449 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2450 			uint64_t timer_period;
2451 
2452 			/* Convert to number of cycles */
2453 			timer_period = stats_period * rte_get_timer_hz();
2454 
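			/*
			 * Worked example (illustration): with stats_period = 2
			 * and a 2.5 GHz timer (rte_get_timer_hz() ==
			 * 2500000000), timer_period becomes 5000000000
			 * cycles, i.e. stats are printed every ~2 seconds.
			 */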
2455 			while (1) {
2456 				cur_time = rte_get_timer_cycles();
2457 				diff_time += cur_time - prev_time;
2458 
2459 				if (diff_time >= timer_period) {
2460 					print_stats();
2461 					/* Reset the timer */
2462 					diff_time = 0;
2463 				}
				prev_time = cur_time;
				/* Sleep to avoid unnecessary checks */
				sleep(1);
2467 			}
2468 		}
2469 
2470 		printf("Press enter to exit\n");
2471 		rc = read(0, &c, 1);
2472 		pmd_test_exit();
2473 		if (rc < 0)
2474 			return 1;
2475 	}
2476 
2477 	return 0;
2478 }
2479