xref: /dpdk/app/test-pmd/testpmd.c (revision 12a33b67dd814ab1f497e51bf3069d0482b8e069)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44 
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47 
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51 
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_memzone.h>
61 #include <rte_launch.h>
62 #include <rte_eal.h>
63 #include <rte_alarm.h>
64 #include <rte_per_lcore.h>
65 #include <rte_lcore.h>
66 #include <rte_atomic.h>
67 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
70 #include <rte_mbuf.h>
71 #include <rte_interrupts.h>
72 #include <rte_pci.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_dev.h>
76 #include <rte_string_fns.h>
77 #ifdef RTE_LIBRTE_IXGBE_PMD
78 #include <rte_pmd_ixgbe.h>
79 #endif
80 #ifdef RTE_LIBRTE_PDUMP
81 #include <rte_pdump.h>
82 #endif
83 #include <rte_flow.h>
84 #include <rte_metrics.h>
85 #ifdef RTE_LIBRTE_BITRATE
86 #include <rte_bitrate.h>
87 #endif
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
90 #endif
91 
92 #include "testpmd.h"
93 
94 uint16_t verbose_level = 0; /**< Silent by default. */
95 
96 /* use master core for command line? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
99 uint8_t tx_first;
100 char cmdline_filename[PATH_MAX] = {0};
101 
102 /*
103  * NUMA support configuration.
104  * When set, the NUMA support attempts to dispatch the allocation of the
105  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
106  * probed ports among the CPU sockets 0 and 1.
107  * Otherwise, all memory is allocated from CPU socket 0.
108  */
109 uint8_t numa_support = 1; /**< numa enabled by default */
110 
111 /*
112  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
113  * not configured.
114  */
115 uint8_t socket_num = UMA_NO_CONFIG;
116 
117 /*
118  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
119  */
120 uint8_t mp_anon = 0;
121 
122 /*
123  * Record the Ethernet address of peer target ports to which packets are
124  * forwarded.
125  * Must be instantiated with the Ethernet addresses of peer traffic generator
126  * ports.
127  */
128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
129 portid_t nb_peer_eth_addrs = 0;
130 
131 /*
132  * Probed Target Environment.
133  */
134 struct rte_port *ports;	       /**< For all probed ethernet ports. */
135 portid_t nb_ports;             /**< Number of probed ethernet ports. */
136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
137 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
138 
139 /*
140  * Test Forwarding Configuration.
141  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
142  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
143  */
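/*
 * Editor's illustration (not in the original source; exact CLI syntax is
 * assumed, see cmdline.c for the authoritative commands): with 8 enabled
 * lcores, "set corelist 1,2,3,4" would make nb_cfg_lcores = 4, and a
 * subsequent "set nbcore 2" would make nb_fwd_lcores = 2, preserving the
 * invariants above.
 */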
144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
146 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
147 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
148 
149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
151 
152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
153 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
154 
155 /*
156  * Forwarding engines.
157  */
158 struct fwd_engine * fwd_engines[] = {
159 	&io_fwd_engine,
160 	&mac_fwd_engine,
161 	&mac_swap_engine,
162 	&flow_gen_engine,
163 	&rx_only_engine,
164 	&tx_only_engine,
165 	&csum_fwd_engine,
166 	&icmp_echo_engine,
167 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
168 	&softnic_tm_engine,
169 	&softnic_tm_bypass_engine,
170 #endif
171 #ifdef RTE_LIBRTE_IEEE1588
172 	&ieee1588_fwd_engine,
173 #endif
174 	NULL,
175 };
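/*
 * Editor's note: the active engine (cur_fwd_eng below) is selected at run
 * time, e.g. via the "set fwd <mode>" CLI command or the --forward-mode
 * start-up option (names taken from the testpmd documentation).
 */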
176 
177 struct fwd_config cur_fwd_config;
178 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
179 uint32_t retry_enabled;
180 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
181 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
182 
183 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
184 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
185                                       * specified on command-line. */
186 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
187 
188 /*
189  * A process running with the 'stats-period' option in a container cannot be
190  * terminated otherwise. Set a flag to exit the stats loop on SIGINT/SIGTERM.
191  */
192 uint8_t f_quit;
193 
194 /*
195  * Configuration of packet segments used by the "txonly" processing engine.
196  */
197 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
198 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
199 	TXONLY_DEF_PACKET_LEN,
200 };
201 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
202 
203 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
204 /**< Split policy for packets to TX. */
205 
206 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
207 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
208 
209 /* whether the current configuration is in DCB mode; 0 means it is not */
210 uint8_t dcb_config = 0;
211 
212 /* Whether DCB is in testing state */
213 uint8_t dcb_test = 0;
214 
215 /*
216  * Configurable number of RX/TX queues.
217  */
218 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
219 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
220 
221 /*
222  * Configurable number of RX/TX ring descriptors.
223  */
224 #define RTE_TEST_RX_DESC_DEFAULT 128
225 #define RTE_TEST_TX_DESC_DEFAULT 512
226 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
227 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
228 
229 #define RTE_PMD_PARAM_UNSET -1
230 /*
231  * Configurable values of RX and TX ring threshold registers.
232  */
233 
234 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
235 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
236 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
237 
238 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
239 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
240 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
241 
242 /*
243  * Configurable value of RX free threshold.
244  */
245 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
246 
247 /*
248  * Configurable value of RX drop enable.
249  */
250 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
251 
252 /*
253  * Configurable value of TX free threshold.
254  */
255 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
256 
257 /*
258  * Configurable value of TX RS bit threshold.
259  */
260 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
261 
262 /*
263  * Configurable value of TX queue flags.
264  */
265 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
266 
267 /*
268  * Receive Side Scaling (RSS) configuration.
269  */
270 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
271 
272 /*
273  * Port topology configuration
274  */
275 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
276 
277 /*
278  * Avoid flushing all the RX queues before starting forwarding.
279  */
280 uint8_t no_flush_rx = 0; /* flush by default */
281 
282 /*
283  * Flow API isolated mode.
284  */
285 uint8_t flow_isolate_all;
286 
287 /*
288  * Avoid checking link status when starting/stopping a port.
289  */
290 uint8_t no_link_check = 0; /* check by default */
291 
292 /*
293  * Enable link status change notification
294  */
295 uint8_t lsc_interrupt = 1; /* enabled by default */
296 
297 /*
298  * Enable device removal notification.
299  */
300 uint8_t rmv_interrupt = 1; /* enabled by default */
301 
302 /*
303  * Display or mask ether events
304  * Default to all events except VF_MBOX
305  */
306 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
307 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
308 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
309 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
310 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
311 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
312 
313 /*
314  * NIC bypass mode configuration options.
315  */
316 
317 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
318 /* The NIC bypass watchdog timeout. */
319 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
320 #endif
321 
322 
323 #ifdef RTE_LIBRTE_LATENCY_STATS
324 
325 /*
326  * Set when latency stats are enabled on the command line.
327  */
328 uint8_t latencystats_enabled;
329 
330 /*
331  * Lcore ID to serve latency statistics.
332  */
333 lcoreid_t latencystats_lcore_id = -1;
334 
335 #endif
336 
337 /*
338  * Ethernet device configuration.
339  */
340 struct rte_eth_rxmode rx_mode = {
341 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
342 	.split_hdr_size = 0,
343 	.header_split   = 0, /**< Header Split disabled. */
344 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
345 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
346 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
347 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
348 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
349 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
350 	.hw_timestamp   = 0, /**< HW timestamp disabled. */
351 };
352 
353 struct rte_fdir_conf fdir_conf = {
354 	.mode = RTE_FDIR_MODE_NONE,
355 	.pballoc = RTE_FDIR_PBALLOC_64K,
356 	.status = RTE_FDIR_REPORT_STATUS,
357 	.mask = {
358 		.vlan_tci_mask = 0x0,
359 		.ipv4_mask     = {
360 			.src_ip = 0xFFFFFFFF,
361 			.dst_ip = 0xFFFFFFFF,
362 		},
363 		.ipv6_mask     = {
364 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
365 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
366 		},
367 		.src_port_mask = 0xFFFF,
368 		.dst_port_mask = 0xFFFF,
369 		.mac_addr_byte_mask = 0xFF,
370 		.tunnel_type_mask = 1,
371 		.tunnel_id_mask = 0xFFFFFFFF,
372 	},
373 	.drop_queue = 127,
374 };
375 
376 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
377 
378 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
379 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
380 
381 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
382 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
383 
384 uint16_t nb_tx_queue_stats_mappings = 0;
385 uint16_t nb_rx_queue_stats_mappings = 0;
386 
387 /*
388  * Hide zero values in xstats display (zero values are shown by default).
389  */
390 uint8_t xstats_hide_zero;
391 
392 unsigned int num_sockets = 0;
393 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
394 
395 #ifdef RTE_LIBRTE_BITRATE
396 /* Bitrate statistics */
397 struct rte_stats_bitrates *bitrate_data;
398 lcoreid_t bitrate_lcore_id;
399 uint8_t bitrate_enabled;
400 #endif
401 
402 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
403 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
404 
405 /* Forward function declarations */
406 static void map_port_queue_stats_mapping_registers(portid_t pi,
407 						   struct rte_port *port);
408 static void check_all_ports_link_status(uint32_t port_mask);
409 static int eth_event_callback(portid_t port_id,
410 			      enum rte_eth_event_type type,
411 			      void *param, void *ret_param);
412 
413 /*
414  * Check if all the ports are started.
415  * If yes, return positive value. If not, return zero.
416  */
417 static int all_ports_started(void);
418 
419 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
420 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
421 
422 /*
423  * Helper function to check if a socket ID has not yet been discovered.
424  * If it is new, return a positive value. If already known, return zero.
425  */
426 int
427 new_socket_id(unsigned int socket_id)
428 {
429 	unsigned int i;
430 
431 	for (i = 0; i < num_sockets; i++) {
432 		if (socket_ids[i] == socket_id)
433 			return 0;
434 	}
435 	return 1;
436 }
437 
438 /*
439  * Setup default configuration.
440  */
441 static void
442 set_default_fwd_lcores_config(void)
443 {
444 	unsigned int i;
445 	unsigned int nb_lc;
446 	unsigned int sock_num;
447 
448 	nb_lc = 0;
449 	for (i = 0; i < RTE_MAX_LCORE; i++) {
450 		sock_num = rte_lcore_to_socket_id(i);
451 		if (new_socket_id(sock_num)) {
452 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
453 				rte_exit(EXIT_FAILURE,
454 					 "Total sockets greater than %u\n",
455 					 RTE_MAX_NUMA_NODES);
456 			}
457 			socket_ids[num_sockets++] = sock_num;
458 		}
459 		if (!rte_lcore_is_enabled(i))
460 			continue;
461 		if (i == rte_get_master_lcore())
462 			continue;
463 		fwd_lcores_cpuids[nb_lc++] = i;
464 	}
465 	nb_lcores = (lcoreid_t) nb_lc;
466 	nb_cfg_lcores = nb_lcores;
467 	nb_fwd_lcores = 1;
468 }
469 
470 static void
471 set_def_peer_eth_addrs(void)
472 {
473 	portid_t i;
474 
475 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
476 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
477 		peer_eth_addrs[i].addr_bytes[5] = i;
478 	}
479 }
480 
481 static void
482 set_default_fwd_ports_config(void)
483 {
484 	portid_t pt_id;
485 	int i = 0;
486 
487 	RTE_ETH_FOREACH_DEV(pt_id)
488 		fwd_ports_ids[i++] = pt_id;
489 
490 	nb_cfg_ports = nb_ports;
491 	nb_fwd_ports = nb_ports;
492 }
493 
494 void
495 set_def_fwd_config(void)
496 {
497 	set_default_fwd_lcores_config();
498 	set_def_peer_eth_addrs();
499 	set_default_fwd_ports_config();
500 }
501 
502 /*
503  * Per-socket mbuf pool creation, done once at init time.
504  */
505 static void
506 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
507 		 unsigned int socket_id)
508 {
509 	char pool_name[RTE_MEMPOOL_NAMESIZE];
510 	struct rte_mempool *rte_mp = NULL;
511 	uint32_t mb_size;
512 
513 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
514 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
515 
516 	RTE_LOG(INFO, USER1,
517 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
518 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
519 
520 	if (mp_anon != 0) {
521 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
522 			mb_size, (unsigned) mb_mempool_cache,
523 			sizeof(struct rte_pktmbuf_pool_private),
524 			socket_id, 0);
525 		if (rte_mp == NULL)
526 			goto err;
527 
528 		if (rte_mempool_populate_anon(rte_mp) == 0) {
529 			rte_mempool_free(rte_mp);
530 			rte_mp = NULL;
531 			goto err;
532 		}
533 		rte_pktmbuf_pool_init(rte_mp, NULL);
534 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
535 	} else {
536 		/* wrapper to rte_mempool_create() */
537 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
538 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
539 	}
540 
541 err:
542 	if (rte_mp == NULL) {
543 		rte_exit(EXIT_FAILURE,
544 			"Creation of mbuf pool for socket %u failed: %s\n",
545 			socket_id, rte_strerror(rte_errno));
546 	} else if (verbose_level > 0) {
547 		rte_mempool_dump(stdout, rte_mp);
548 	}
549 }
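/*
 * Editor's sketch (not part of testpmd): a minimal standalone equivalent of
 * the non-anonymous path above. The pool name, size and cache size are
 * illustrative assumptions, not testpmd defaults; guarded so it is not built.
 */
#if 0
static struct rte_mempool *
example_mbuf_pool(unsigned int socket_id)
{
	/* 8192 mbufs, 256-entry per-lcore cache, 0 bytes of private data,
	 * default data room size, allocated on the given socket. */
	return rte_pktmbuf_pool_create("example_pool", 8192, 256, 0,
				       RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
}
#endif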
550 
551 /*
552  * Check whether the given socket ID is valid in NUMA mode;
553  * if valid, return 0, else return -1.
554  */
555 static int
556 check_socket_id(const unsigned int socket_id)
557 {
558 	static int warning_once = 0;
559 
560 	if (new_socket_id(socket_id)) {
561 		if (!warning_once && numa_support)
562 			printf("Warning: NUMA should be configured manually by"
563 			       " using --port-numa-config and"
564 			       " --ring-numa-config parameters along with"
565 			       " --numa.\n");
566 		warning_once = 1;
567 		return -1;
568 	}
569 	return 0;
570 }
571 
572 static void
573 init_config(void)
574 {
575 	portid_t pid;
576 	struct rte_port *port;
577 	struct rte_mempool *mbp;
578 	unsigned int nb_mbuf_per_pool;
579 	lcoreid_t  lc_id;
580 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
581 	struct rte_gro_param gro_param;
582 	uint32_t gso_types;
583 
584 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
585 
586 	if (numa_support) {
587 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
588 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
589 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
590 	}
591 
592 	/* Configuration of logical cores. */
593 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
594 				sizeof(struct fwd_lcore *) * nb_lcores,
595 				RTE_CACHE_LINE_SIZE);
596 	if (fwd_lcores == NULL) {
597 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
598 							"failed\n", nb_lcores);
599 	}
600 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
601 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
602 					       sizeof(struct fwd_lcore),
603 					       RTE_CACHE_LINE_SIZE);
604 		if (fwd_lcores[lc_id] == NULL) {
605 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
606 								"failed\n");
607 		}
608 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
609 	}
610 
611 	RTE_ETH_FOREACH_DEV(pid) {
612 		port = &ports[pid];
613 		rte_eth_dev_info_get(pid, &port->dev_info);
614 
615 		if (numa_support) {
616 			if (port_numa[pid] != NUMA_NO_CONFIG)
617 				port_per_socket[port_numa[pid]]++;
618 			else {
619 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
620 
621 				/* if socket_id is invalid, set to 0 */
622 				if (check_socket_id(socket_id) < 0)
623 					socket_id = 0;
624 				port_per_socket[socket_id]++;
625 			}
626 		}
627 
628 		/* set flag to initialize port/queue */
629 		port->need_reconfig = 1;
630 		port->need_reconfig_queues = 1;
631 	}
632 
633 	/*
634 	 * Create mbuf pools.
635 	 * If NUMA support is disabled, create a single mbuf pool in
636 	 * socket 0 memory by default.
637 	 * Otherwise, create an mbuf pool in the memory of each detected socket.
638 	 *
639 	 * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd and
640 	 * nb_txd can be re-configured at run time.
641 	 */
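	/*
	 * Editor's worked example (values assumed from testpmd.h): with
	 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, 4 lcores,
	 * mb_mempool_cache = 250 and MAX_PKT_BURST = 512, each pool gets
	 * (2048 + 4 * 250 + 2048 + 512) * RTE_MAX_ETHPORTS mbufs.
	 */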
642 	if (param_total_num_mbufs)
643 		nb_mbuf_per_pool = param_total_num_mbufs;
644 	else {
645 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
646 			(nb_lcores * mb_mempool_cache) +
647 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
648 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
649 	}
650 
651 	if (numa_support) {
652 		uint8_t i;
653 
654 		for (i = 0; i < num_sockets; i++)
655 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
656 					 socket_ids[i]);
657 	} else {
658 		if (socket_num == UMA_NO_CONFIG)
659 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
660 		else
661 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
662 						 socket_num);
663 	}
664 
665 	init_port_config();
666 
667 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
668 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
669 	/*
670 	 * Record which mbuf pool each logical core should use, if needed.
671 	 */
672 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
673 		mbp = mbuf_pool_find(
674 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
675 
676 		if (mbp == NULL)
677 			mbp = mbuf_pool_find(0);
678 		fwd_lcores[lc_id]->mbp = mbp;
679 		/* initialize GSO context */
680 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
681 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
682 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
683 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
684 			ETHER_CRC_LEN;
685 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
686 	}
687 
688 	/* Configuration of packet forwarding streams. */
689 	if (init_fwd_streams() < 0)
690 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
691 
692 	fwd_config_setup();
693 
694 	/* create a gro context for each lcore */
695 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
696 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
697 	gro_param.max_item_per_flow = MAX_PKT_BURST;
698 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
699 		gro_param.socket_id = rte_lcore_to_socket_id(
700 				fwd_lcores_cpuids[lc_id]);
701 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
702 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
703 			rte_exit(EXIT_FAILURE,
704 					"rte_gro_ctx_create() failed\n");
705 		}
706 	}
707 }
708 
709 
710 void
711 reconfig(portid_t new_port_id, unsigned socket_id)
712 {
713 	struct rte_port *port;
714 
715 	/* Reconfiguration of Ethernet ports. */
716 	port = &ports[new_port_id];
717 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
718 
719 	/* set flag to initialize port/queue */
720 	port->need_reconfig = 1;
721 	port->need_reconfig_queues = 1;
722 	port->socket_id = socket_id;
723 
724 	init_port_config();
725 }
726 
727 
728 int
729 init_fwd_streams(void)
730 {
731 	portid_t pid;
732 	struct rte_port *port;
733 	streamid_t sm_id, nb_fwd_streams_new;
734 	queueid_t q;
735 
736 	/* set socket id according to whether NUMA is enabled */
737 	RTE_ETH_FOREACH_DEV(pid) {
738 		port = &ports[pid];
739 		if (nb_rxq > port->dev_info.max_rx_queues) {
740 			printf("Fail: nb_rxq(%d) is greater than "
741 				"max_rx_queues(%d)\n", nb_rxq,
742 				port->dev_info.max_rx_queues);
743 			return -1;
744 		}
745 		if (nb_txq > port->dev_info.max_tx_queues) {
746 			printf("Fail: nb_txq(%d) is greater than "
747 				"max_tx_queues(%d)\n", nb_txq,
748 				port->dev_info.max_tx_queues);
749 			return -1;
750 		}
751 		if (numa_support) {
752 			if (port_numa[pid] != NUMA_NO_CONFIG)
753 				port->socket_id = port_numa[pid];
754 			else {
755 				port->socket_id = rte_eth_dev_socket_id(pid);
756 
757 				/* if socket_id is invalid, set to 0 */
758 				if (check_socket_id(port->socket_id) < 0)
759 					port->socket_id = 0;
760 			}
761 		}
762 		else {
763 			if (socket_num == UMA_NO_CONFIG)
764 				port->socket_id = 0;
765 			else
766 				port->socket_id = socket_num;
767 		}
768 	}
769 
770 	q = RTE_MAX(nb_rxq, nb_txq);
771 	if (q == 0) {
772 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
773 		return -1;
774 	}
775 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
776 	if (nb_fwd_streams_new == nb_fwd_streams)
777 		return 0;
778 	/* clear the old */
779 	if (fwd_streams != NULL) {
780 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
781 			if (fwd_streams[sm_id] == NULL)
782 				continue;
783 			rte_free(fwd_streams[sm_id]);
784 			fwd_streams[sm_id] = NULL;
785 		}
786 		rte_free(fwd_streams);
787 		fwd_streams = NULL;
788 	}
789 
790 	/* init new */
791 	nb_fwd_streams = nb_fwd_streams_new;
792 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
793 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
794 	if (fwd_streams == NULL)
795 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
796 						"failed\n", nb_fwd_streams);
797 
798 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
799 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
800 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
801 		if (fwd_streams[sm_id] == NULL)
802 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
803 								" failed\n");
804 	}
805 
806 	return 0;
807 }
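/*
 * Editor's example: with 2 ports, nb_rxq = 4 and nb_txq = 2, the function
 * above allocates nb_ports * RTE_MAX(nb_rxq, nb_txq) = 2 * 4 = 8 streams.
 */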
808 
809 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
810 static void
811 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
812 {
813 	unsigned int total_burst;
814 	unsigned int nb_burst;
815 	unsigned int burst_stats[3];
816 	uint16_t pktnb_stats[3];
817 	uint16_t nb_pkt;
818 	int burst_percent[3];
819 
820 	/*
821 	 * First compute the total number of packet bursts and the
822 	 * two highest numbers of bursts of the same number of packets.
823 	 */
824 	total_burst = 0;
825 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
826 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
827 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
828 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
829 		if (nb_burst == 0)
830 			continue;
831 		total_burst += nb_burst;
832 		if (nb_burst > burst_stats[0]) {
833 			burst_stats[1] = burst_stats[0];
834 			pktnb_stats[1] = pktnb_stats[0];
835 			burst_stats[0] = nb_burst;
836 			pktnb_stats[0] = nb_pkt;
837 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
838 	}
839 	if (total_burst == 0)
840 		return;
841 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
842 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
843 	       burst_percent[0], (int) pktnb_stats[0]);
844 	if (burst_stats[0] == total_burst) {
845 		printf("]\n");
846 		return;
847 	}
848 	if (burst_stats[0] + burst_stats[1] == total_burst) {
849 		printf(" + %d%% of %d pkts]\n",
850 		       100 - burst_percent[0], pktnb_stats[1]);
851 		return;
852 	}
853 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
854 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
855 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
856 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
857 		return;
858 	}
859 	printf(" + %d%% of %d pkts + %d%% of others]\n",
860 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
861 }
862 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
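/*
 * Editor's example of the output format above: a spread of
 * {64 pkts: 900 bursts, 32 pkts: 80, 1 pkt: 20} prints
 * "  RX-bursts : 1000 [90% of 64 pkts + 8% of 32 pkts + 2% of others]".
 */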
863 
864 static void
865 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
866 {
867 	struct rte_port *port;
868 	uint8_t i;
869 
870 	static const char *fwd_stats_border = "----------------------";
871 
872 	port = &ports[port_id];
873 	printf("\n  %s Forward statistics for port %-2d %s\n",
874 	       fwd_stats_border, port_id, fwd_stats_border);
875 
876 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
877 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
878 		       "%-"PRIu64"\n",
879 		       stats->ipackets, stats->imissed,
880 		       (uint64_t) (stats->ipackets + stats->imissed));
881 
882 		if (cur_fwd_eng == &csum_fwd_engine)
883 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
884 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
885 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
886 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
887 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
888 		}
889 
890 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
891 		       "%-"PRIu64"\n",
892 		       stats->opackets, port->tx_dropped,
893 		       (uint64_t) (stats->opackets + port->tx_dropped));
894 	}
895 	else {
896 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
897 		       "%14"PRIu64"\n",
898 		       stats->ipackets, stats->imissed,
899 		       (uint64_t) (stats->ipackets + stats->imissed));
900 
901 		if (cur_fwd_eng == &csum_fwd_engine)
902 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
903 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
904 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
905 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
906 			printf("  RX-nombufs:             %14"PRIu64"\n",
907 			       stats->rx_nombuf);
908 		}
909 
910 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
911 		       "%14"PRIu64"\n",
912 		       stats->opackets, port->tx_dropped,
913 		       (uint64_t) (stats->opackets + port->tx_dropped));
914 	}
915 
916 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
917 	if (port->rx_stream)
918 		pkt_burst_stats_display("RX",
919 			&port->rx_stream->rx_burst_stats);
920 	if (port->tx_stream)
921 		pkt_burst_stats_display("TX",
922 			&port->tx_stream->tx_burst_stats);
923 #endif
924 
925 	if (port->rx_queue_stats_mapping_enabled) {
926 		printf("\n");
927 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
928 			printf("  Stats reg %2d RX-packets:%14"PRIu64
929 			       "     RX-errors:%14"PRIu64
930 			       "    RX-bytes:%14"PRIu64"\n",
931 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
932 		}
933 		printf("\n");
934 	}
935 	if (port->tx_queue_stats_mapping_enabled) {
936 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
937 			printf("  Stats reg %2d TX-packets:%14"PRIu64
938 			       "                                 TX-bytes:%14"PRIu64"\n",
939 			       i, stats->q_opackets[i], stats->q_obytes[i]);
940 		}
941 	}
942 
943 	printf("  %s--------------------------------%s\n",
944 	       fwd_stats_border, fwd_stats_border);
945 }
946 
947 static void
948 fwd_stream_stats_display(streamid_t stream_id)
949 {
950 	struct fwd_stream *fs;
951 	static const char *fwd_top_stats_border = "-------";
952 
953 	fs = fwd_streams[stream_id];
954 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
955 	    (fs->fwd_dropped == 0))
956 		return;
957 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
958 	       "TX Port=%2d/Queue=%2d %s\n",
959 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
960 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
961 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
962 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
963 
964 	/* if checksum mode */
965 	if (cur_fwd_eng == &csum_fwd_engine) {
966 	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
967 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
968 	}
969 
970 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
971 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
972 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
973 #endif
974 }
975 
976 static void
977 flush_fwd_rx_queues(void)
978 {
979 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
980 	portid_t  rxp;
981 	portid_t port_id;
982 	queueid_t rxq;
983 	uint16_t  nb_rx;
984 	uint16_t  i;
985 	uint8_t   j;
986 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
987 	uint64_t timer_period;
988 
989 	/* convert to number of cycles */
990 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
991 
992 	for (j = 0; j < 2; j++) {
993 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
994 			for (rxq = 0; rxq < nb_rxq; rxq++) {
995 				port_id = fwd_ports_ids[rxp];
996 				/*
997 				 * testpmd can get stuck in the do/while loop
998 				 * below if rte_eth_rx_burst() always returns
999 				 * a nonzero number of packets, so a timer is
1000 				 * used to exit the loop after 1 second.
1001 				 */
1002 				prev_tsc = rte_rdtsc();
1003 				do {
1004 					nb_rx = rte_eth_rx_burst(port_id, rxq,
1005 						pkts_burst, MAX_PKT_BURST);
1006 					for (i = 0; i < nb_rx; i++)
1007 						rte_pktmbuf_free(pkts_burst[i]);
1008 
1009 					cur_tsc = rte_rdtsc();
1010 					diff_tsc = cur_tsc - prev_tsc;
1011 					timer_tsc += diff_tsc;
1012 				} while ((nb_rx > 0) &&
1013 					(timer_tsc < timer_period));
1014 				timer_tsc = 0;
1015 			}
1016 		}
1017 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
1018 	}
1019 }
1020 
1021 static void
1022 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1023 {
1024 	struct fwd_stream **fsm;
1025 	streamid_t nb_fs;
1026 	streamid_t sm_id;
1027 #ifdef RTE_LIBRTE_BITRATE
1028 	uint64_t tics_per_1sec;
1029 	uint64_t tics_datum;
1030 	uint64_t tics_current;
1031 	uint8_t idx_port, cnt_ports;
1032 
1033 	cnt_ports = rte_eth_dev_count();
1034 	tics_datum = rte_rdtsc();
1035 	tics_per_1sec = rte_get_timer_hz();
1036 #endif
1037 	fsm = &fwd_streams[fc->stream_idx];
1038 	nb_fs = fc->stream_nb;
1039 	do {
1040 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1041 			(*pkt_fwd)(fsm[sm_id]);
1042 #ifdef RTE_LIBRTE_BITRATE
1043 		if (bitrate_enabled != 0 &&
1044 				bitrate_lcore_id == rte_lcore_id()) {
1045 			tics_current = rte_rdtsc();
1046 			if (tics_current - tics_datum >= tics_per_1sec) {
1047 				/* Periodic bitrate calculation */
1048 				for (idx_port = 0;
1049 						idx_port < cnt_ports;
1050 						idx_port++)
1051 					rte_stats_bitrate_calc(bitrate_data,
1052 						idx_port);
1053 				tics_datum = tics_current;
1054 			}
1055 		}
1056 #endif
1057 #ifdef RTE_LIBRTE_LATENCY_STATS
1058 		if (latencystats_enabled != 0 &&
1059 				latencystats_lcore_id == rte_lcore_id())
1060 			rte_latencystats_update();
1061 #endif
1062 
1063 	} while (! fc->stopped);
1064 }
1065 
1066 static int
1067 start_pkt_forward_on_core(void *fwd_arg)
1068 {
1069 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1070 			     cur_fwd_config.fwd_eng->packet_fwd);
1071 	return 0;
1072 }
1073 
1074 /*
1075  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1076  * Used to start communication flows in network loopback test configurations.
1077  */
1078 static int
1079 run_one_txonly_burst_on_core(void *fwd_arg)
1080 {
1081 	struct fwd_lcore *fwd_lc;
1082 	struct fwd_lcore tmp_lcore;
1083 
1084 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1085 	tmp_lcore = *fwd_lc;
1086 	tmp_lcore.stopped = 1;
1087 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1088 	return 0;
1089 }
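/*
 * Editor's note: copying the lcore context and setting stopped = 1 makes the
 * do/while loop in run_pkt_fwd_on_lcore() execute exactly once, i.e. one
 * burst per forwarding stream, without touching the real lcore's state.
 */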
1090 
1091 /*
1092  * Launch packet forwarding:
1093  *     - Setup per-port forwarding context.
1094  *     - launch logical cores with their forwarding configuration.
1095  */
1096 static void
1097 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1098 {
1099 	port_fwd_begin_t port_fwd_begin;
1100 	unsigned int i;
1101 	unsigned int lc_id;
1102 	int diag;
1103 
1104 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1105 	if (port_fwd_begin != NULL) {
1106 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1107 			(*port_fwd_begin)(fwd_ports_ids[i]);
1108 	}
1109 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1110 		lc_id = fwd_lcores_cpuids[i];
1111 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1112 			fwd_lcores[i]->stopped = 0;
1113 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1114 						     fwd_lcores[i], lc_id);
1115 			if (diag != 0)
1116 				printf("launch lcore %u failed - diag=%d\n",
1117 				       lc_id, diag);
1118 		}
1119 	}
1120 }
1121 
1122 /*
1123  * Launch packet forwarding configuration.
1124  */
1125 void
1126 start_packet_forwarding(int with_tx_first)
1127 {
1128 	port_fwd_begin_t port_fwd_begin;
1129 	port_fwd_end_t  port_fwd_end;
1130 	struct rte_port *port;
1131 	unsigned int i;
1132 	portid_t   pt_id;
1133 	streamid_t sm_id;
1134 
1135 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1136 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1137 
1138 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1139 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1140 
1141 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1142 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1143 		(!nb_rxq || !nb_txq))
1144 		rte_exit(EXIT_FAILURE,
1145 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
1146 			cur_fwd_eng->fwd_mode_name);
1147 
1148 	if (all_ports_started() == 0) {
1149 		printf("Not all ports were started\n");
1150 		return;
1151 	}
1152 	if (test_done == 0) {
1153 		printf("Packet forwarding already started\n");
1154 		return;
1155 	}
1156 
1157 	if (init_fwd_streams() < 0) {
1158 		printf("Fail from init_fwd_streams()\n");
1159 		return;
1160 	}
1161 
1162 	if (dcb_test) {
1163 		for (i = 0; i < nb_fwd_ports; i++) {
1164 			pt_id = fwd_ports_ids[i];
1165 			port = &ports[pt_id];
1166 			if (!port->dcb_flag) {
1167 				printf("In DCB mode, all forwarding ports must "
1168                                        "be configured in this mode.\n");
1169 				return;
1170 			}
1171 		}
1172 		if (nb_fwd_lcores == 1) {
1173 			printf("In DCB mode,the nb forwarding cores "
1174                                "should be larger than 1.\n");
1175 			return;
1176 		}
1177 	}
1178 	test_done = 0;
1179 
1180 	if (!no_flush_rx)
1181 		flush_fwd_rx_queues();
1182 
1183 	fwd_config_setup();
1184 	pkt_fwd_config_display(&cur_fwd_config);
1185 	rxtx_config_display();
1186 
1187 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1188 		pt_id = fwd_ports_ids[i];
1189 		port = &ports[pt_id];
1190 		rte_eth_stats_get(pt_id, &port->stats);
1191 		port->tx_dropped = 0;
1192 
1193 		map_port_queue_stats_mapping_registers(pt_id, port);
1194 	}
1195 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1196 		fwd_streams[sm_id]->rx_packets = 0;
1197 		fwd_streams[sm_id]->tx_packets = 0;
1198 		fwd_streams[sm_id]->fwd_dropped = 0;
1199 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1200 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1201 
1202 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1203 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1204 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1205 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1206 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1207 #endif
1208 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1209 		fwd_streams[sm_id]->core_cycles = 0;
1210 #endif
1211 	}
1212 	if (with_tx_first) {
1213 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1214 		if (port_fwd_begin != NULL) {
1215 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1216 				(*port_fwd_begin)(fwd_ports_ids[i]);
1217 		}
1218 		while (with_tx_first--) {
1219 			launch_packet_forwarding(
1220 					run_one_txonly_burst_on_core);
1221 			rte_eal_mp_wait_lcore();
1222 		}
1223 		port_fwd_end = tx_only_engine.port_fwd_end;
1224 		if (port_fwd_end != NULL) {
1225 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1226 				(*port_fwd_end)(fwd_ports_ids[i]);
1227 		}
1228 	}
1229 	launch_packet_forwarding(start_pkt_forward_on_core);
1230 }
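/*
 * Editor's usage example (CLI syntax assumed): "start tx_first 2" maps to
 * start_packet_forwarding(2): two txonly bursts are sent on every forwarding
 * port, then the configured engine is launched on all forwarding lcores.
 */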
1231 
1232 void
1233 stop_packet_forwarding(void)
1234 {
1235 	struct rte_eth_stats stats;
1236 	struct rte_port *port;
1237 	port_fwd_end_t  port_fwd_end;
1238 	int i;
1239 	portid_t   pt_id;
1240 	streamid_t sm_id;
1241 	lcoreid_t  lc_id;
1242 	uint64_t total_recv;
1243 	uint64_t total_xmit;
1244 	uint64_t total_rx_dropped;
1245 	uint64_t total_tx_dropped;
1246 	uint64_t total_rx_nombuf;
1247 	uint64_t tx_dropped;
1248 	uint64_t rx_bad_ip_csum;
1249 	uint64_t rx_bad_l4_csum;
1250 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1251 	uint64_t fwd_cycles;
1252 #endif
1253 
1254 	static const char *acc_stats_border = "+++++++++++++++";
1255 
1256 	if (test_done) {
1257 		printf("Packet forwarding not started\n");
1258 		return;
1259 	}
1260 	printf("Telling cores to stop...");
1261 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1262 		fwd_lcores[lc_id]->stopped = 1;
1263 	printf("\nWaiting for lcores to finish...\n");
1264 	rte_eal_mp_wait_lcore();
1265 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1266 	if (port_fwd_end != NULL) {
1267 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1268 			pt_id = fwd_ports_ids[i];
1269 			(*port_fwd_end)(pt_id);
1270 		}
1271 	}
1272 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1273 	fwd_cycles = 0;
1274 #endif
1275 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1276 		if (cur_fwd_config.nb_fwd_streams >
1277 		    cur_fwd_config.nb_fwd_ports) {
1278 			fwd_stream_stats_display(sm_id);
1279 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1280 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1281 		} else {
1282 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1283 				fwd_streams[sm_id];
1284 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1285 				fwd_streams[sm_id];
1286 		}
1287 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1288 		tx_dropped = (uint64_t) (tx_dropped +
1289 					 fwd_streams[sm_id]->fwd_dropped);
1290 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1291 
1292 		rx_bad_ip_csum =
1293 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1294 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1295 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1296 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1297 							rx_bad_ip_csum;
1298 
1299 		rx_bad_l4_csum =
1300 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1301 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1302 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1303 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1304 							rx_bad_l4_csum;
1305 
1306 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1307 		fwd_cycles = (uint64_t) (fwd_cycles +
1308 					 fwd_streams[sm_id]->core_cycles);
1309 #endif
1310 	}
1311 	total_recv = 0;
1312 	total_xmit = 0;
1313 	total_rx_dropped = 0;
1314 	total_tx_dropped = 0;
1315 	total_rx_nombuf  = 0;
1316 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1317 		pt_id = fwd_ports_ids[i];
1318 
1319 		port = &ports[pt_id];
1320 		rte_eth_stats_get(pt_id, &stats);
1321 		stats.ipackets -= port->stats.ipackets;
1322 		port->stats.ipackets = 0;
1323 		stats.opackets -= port->stats.opackets;
1324 		port->stats.opackets = 0;
1325 		stats.ibytes   -= port->stats.ibytes;
1326 		port->stats.ibytes = 0;
1327 		stats.obytes   -= port->stats.obytes;
1328 		port->stats.obytes = 0;
1329 		stats.imissed  -= port->stats.imissed;
1330 		port->stats.imissed = 0;
1331 		stats.oerrors  -= port->stats.oerrors;
1332 		port->stats.oerrors = 0;
1333 		stats.rx_nombuf -= port->stats.rx_nombuf;
1334 		port->stats.rx_nombuf = 0;
1335 
1336 		total_recv += stats.ipackets;
1337 		total_xmit += stats.opackets;
1338 		total_rx_dropped += stats.imissed;
1339 		total_tx_dropped += port->tx_dropped;
1340 		total_rx_nombuf  += stats.rx_nombuf;
1341 
1342 		fwd_port_stats_display(pt_id, &stats);
1343 	}
1344 
1345 	printf("\n  %s Accumulated forward statistics for all ports"
1346 	       "%s\n",
1347 	       acc_stats_border, acc_stats_border);
1348 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1349 	       "%-"PRIu64"\n"
1350 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1351 	       "%-"PRIu64"\n",
1352 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1353 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1354 	if (total_rx_nombuf > 0)
1355 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1356 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1357 	       "%s\n",
1358 	       acc_stats_border, acc_stats_border);
1359 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1360 	if (total_recv > 0)
1361 		printf("\n  CPU cycles/packet=%u (total cycles="
1362 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1363 		       (unsigned int)(fwd_cycles / total_recv),
1364 		       fwd_cycles, total_recv);
1365 #endif
1366 	printf("\nDone.\n");
1367 	test_done = 1;
1368 }
1369 
1370 void
1371 dev_set_link_up(portid_t pid)
1372 {
1373 	if (rte_eth_dev_set_link_up(pid) < 0)
1374 		printf("\nSet link up fail.\n");
1375 }
1376 
1377 void
1378 dev_set_link_down(portid_t pid)
1379 {
1380 	if (rte_eth_dev_set_link_down(pid) < 0)
1381 		printf("\nSet link down fail.\n");
1382 }
1383 
1384 static int
1385 all_ports_started(void)
1386 {
1387 	portid_t pi;
1388 	struct rte_port *port;
1389 
1390 	RTE_ETH_FOREACH_DEV(pi) {
1391 		port = &ports[pi];
1392 		/* Check if there is a port which is not started */
1393 		if ((port->port_status != RTE_PORT_STARTED) &&
1394 			(port->slave_flag == 0))
1395 			return 0;
1396 	}
1397 
1398 	/* All ports are started */
1399 	return 1;
1400 }
1401 
1402 int
1403 all_ports_stopped(void)
1404 {
1405 	portid_t pi;
1406 	struct rte_port *port;
1407 
1408 	RTE_ETH_FOREACH_DEV(pi) {
1409 		port = &ports[pi];
1410 		if ((port->port_status != RTE_PORT_STOPPED) &&
1411 			(port->slave_flag == 0))
1412 			return 0;
1413 	}
1414 
1415 	return 1;
1416 }
1417 
1418 int
1419 port_is_started(portid_t port_id)
1420 {
1421 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1422 		return 0;
1423 
1424 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1425 		return 0;
1426 
1427 	return 1;
1428 }
1429 
1430 static int
1431 port_is_closed(portid_t port_id)
1432 {
1433 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1434 		return 0;
1435 
1436 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1437 		return 0;
1438 
1439 	return 1;
1440 }
1441 
1442 int
1443 start_port(portid_t pid)
1444 {
1445 	int diag, need_check_link_status = -1;
1446 	portid_t pi;
1447 	queueid_t qi;
1448 	struct rte_port *port;
1449 	struct ether_addr mac_addr;
1450 	enum rte_eth_event_type event_type;
1451 
1452 	if (port_id_is_invalid(pid, ENABLED_WARN))
1453 		return 0;
1454 
1455 	if (dcb_config)
1456 		dcb_test = 1;
1457 	RTE_ETH_FOREACH_DEV(pi) {
1458 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1459 			continue;
1460 
1461 		need_check_link_status = 0;
1462 		port = &ports[pi];
1463 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1464 						 RTE_PORT_HANDLING) == 0) {
1465 			printf("Port %d is now not stopped\n", pi);
1466 			continue;
1467 		}
1468 
1469 		if (port->need_reconfig > 0) {
1470 			port->need_reconfig = 0;
1471 
1472 			if (flow_isolate_all) {
1473 				int ret = port_flow_isolate(pi, 1);
1474 				if (ret) {
1475 					printf("Failed to apply isolated"
1476 					       " mode on port %d\n", pi);
1477 					return -1;
1478 				}
1479 			}
1480 
1481 			printf("Configuring Port %d (socket %u)\n", pi,
1482 					port->socket_id);
1483 			/* configure port */
1484 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1485 						&(port->dev_conf));
1486 			if (diag != 0) {
1487 				if (rte_atomic16_cmpset(&(port->port_status),
1488 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1489 					printf("Port %d can not be set back "
1490 							"to stopped\n", pi);
1491 				printf("Fail to configure port %d\n", pi);
1492 				/* try to reconfigure port next time */
1493 				port->need_reconfig = 1;
1494 				return -1;
1495 			}
1496 		}
1497 		if (port->need_reconfig_queues > 0) {
1498 			port->need_reconfig_queues = 0;
1499 			/* setup tx queues */
1500 			for (qi = 0; qi < nb_txq; qi++) {
1501 				if ((numa_support) &&
1502 					(txring_numa[pi] != NUMA_NO_CONFIG))
1503 					diag = rte_eth_tx_queue_setup(pi, qi,
1504 						nb_txd, txring_numa[pi],
1505 						&(port->tx_conf));
1506 				else
1507 					diag = rte_eth_tx_queue_setup(pi, qi,
1508 						nb_txd, port->socket_id,
1509 						&(port->tx_conf));
1510 
1511 				if (diag == 0)
1512 					continue;
1513 
1514 				/* Fail to setup tx queue, return */
1515 				if (rte_atomic16_cmpset(&(port->port_status),
1516 							RTE_PORT_HANDLING,
1517 							RTE_PORT_STOPPED) == 0)
1518 					printf("Port %d can not be set back "
1519 							"to stopped\n", pi);
1520 				printf("Fail to configure port %d tx queues\n", pi);
1521 				/* try to reconfigure queues next time */
1522 				port->need_reconfig_queues = 1;
1523 				return -1;
1524 			}
1525 			/* setup rx queues */
1526 			for (qi = 0; qi < nb_rxq; qi++) {
1527 				if ((numa_support) &&
1528 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1529 					struct rte_mempool * mp =
1530 						mbuf_pool_find(rxring_numa[pi]);
1531 					if (mp == NULL) {
1532 						printf("Failed to setup RX queue:"
1533 							"No mempool allocation"
1534 							" on the socket %d\n",
1535 							rxring_numa[pi]);
1536 						return -1;
1537 					}
1538 
1539 					diag = rte_eth_rx_queue_setup(pi, qi,
1540 					     nb_rxd, rxring_numa[pi],
1541 					     &(port->rx_conf), mp);
1542 				} else {
1543 					struct rte_mempool *mp =
1544 						mbuf_pool_find(port->socket_id);
1545 					if (mp == NULL) {
1546 						printf("Failed to setup RX queue:"
1547 							"No mempool allocation"
1548 							" on the socket %d\n",
1549 							port->socket_id);
1550 						return -1;
1551 					}
1552 					diag = rte_eth_rx_queue_setup(pi, qi,
1553 					     nb_rxd, port->socket_id,
1554 					     &(port->rx_conf), mp);
1555 				}
1556 				if (diag == 0)
1557 					continue;
1558 
1559 				/* Fail to setup rx queue, return */
1560 				if (rte_atomic16_cmpset(&(port->port_status),
1561 							RTE_PORT_HANDLING,
1562 							RTE_PORT_STOPPED) == 0)
1563 					printf("Port %d can not be set back "
1564 							"to stopped\n", pi);
1565 				printf("Fail to configure port %d rx queues\n", pi);
1566 				/* try to reconfigure queues next time */
1567 				port->need_reconfig_queues = 1;
1568 				return -1;
1569 			}
1570 		}
1571 
1572 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1573 		     event_type < RTE_ETH_EVENT_MAX;
1574 		     event_type++) {
1575 			diag = rte_eth_dev_callback_register(pi,
1576 							event_type,
1577 							eth_event_callback,
1578 							NULL);
1579 			if (diag) {
1580 				printf("Failed to setup even callback for event %d\n",
1581 					event_type);
1582 				return -1;
1583 			}
1584 		}
1585 
1586 		/* start port */
1587 		if (rte_eth_dev_start(pi) < 0) {
1588 			printf("Fail to start port %d\n", pi);
1589 
1590 			/* Fail to setup rx queue, return */
1591 			if (rte_atomic16_cmpset(&(port->port_status),
1592 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1593 				printf("Port %d can not be set back to "
1594 							"stopped\n", pi);
1595 			continue;
1596 		}
1597 
1598 		if (rte_atomic16_cmpset(&(port->port_status),
1599 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1600 			printf("Port %d can not be set into started\n", pi);
1601 
1602 		rte_eth_macaddr_get(pi, &mac_addr);
1603 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1604 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1605 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1606 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1607 
1608 		/* at least one port started, need to check link status */
1609 		need_check_link_status = 1;
1610 	}
1611 
1612 	if (need_check_link_status == 1 && !no_link_check)
1613 		check_all_ports_link_status(RTE_PORT_ALL);
1614 	else if (need_check_link_status == 0)
1615 		printf("Please stop the ports first\n");
1616 
1617 	printf("Done\n");
1618 	return 0;
1619 }
1620 
1621 void
1622 stop_port(portid_t pid)
1623 {
1624 	portid_t pi;
1625 	struct rte_port *port;
1626 	int need_check_link_status = 0;
1627 
1628 	if (dcb_test) {
1629 		dcb_test = 0;
1630 		dcb_config = 0;
1631 	}
1632 
1633 	if (port_id_is_invalid(pid, ENABLED_WARN))
1634 		return;
1635 
1636 	printf("Stopping ports...\n");
1637 
1638 	RTE_ETH_FOREACH_DEV(pi) {
1639 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1640 			continue;
1641 
1642 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1643 			printf("Please remove port %d from forwarding configuration.\n", pi);
1644 			continue;
1645 		}
1646 
1647 		if (port_is_bonding_slave(pi)) {
1648 			printf("Please remove port %d from bonded device.\n", pi);
1649 			continue;
1650 		}
1651 
1652 		port = &ports[pi];
1653 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1654 						RTE_PORT_HANDLING) == 0)
1655 			continue;
1656 
1657 		rte_eth_dev_stop(pi);
1658 
1659 		if (rte_atomic16_cmpset(&(port->port_status),
1660 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1661 			printf("Port %d can not be set into stopped\n", pi);
1662 		need_check_link_status = 1;
1663 	}
1664 	if (need_check_link_status && !no_link_check)
1665 		check_all_ports_link_status(RTE_PORT_ALL);
1666 
1667 	printf("Done\n");
1668 }
1669 
1670 void
1671 close_port(portid_t pid)
1672 {
1673 	portid_t pi;
1674 	struct rte_port *port;
1675 
1676 	if (port_id_is_invalid(pid, ENABLED_WARN))
1677 		return;
1678 
1679 	printf("Closing ports...\n");
1680 
1681 	RTE_ETH_FOREACH_DEV(pi) {
1682 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1683 			continue;
1684 
1685 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1686 			printf("Please remove port %d from forwarding configuration.\n", pi);
1687 			continue;
1688 		}
1689 
1690 		if (port_is_bonding_slave(pi)) {
1691 			printf("Please remove port %d from bonded device.\n", pi);
1692 			continue;
1693 		}
1694 
1695 		port = &ports[pi];
1696 		if (rte_atomic16_cmpset(&(port->port_status),
1697 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1698 			printf("Port %d is already closed\n", pi);
1699 			continue;
1700 		}
1701 
1702 		if (rte_atomic16_cmpset(&(port->port_status),
1703 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1704 			printf("Port %d is now not stopped\n", pi);
1705 			continue;
1706 		}
1707 
1708 		if (port->flow_list)
1709 			port_flow_flush(pi);
1710 		rte_eth_dev_close(pi);
1711 
1712 		if (rte_atomic16_cmpset(&(port->port_status),
1713 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1714 			printf("Port %d cannot be set to closed\n", pi);
1715 	}
1716 
1717 	printf("Done\n");
1718 }
1719 
1720 void
1721 reset_port(portid_t pid)
1722 {
1723 	int diag;
1724 	portid_t pi;
1725 	struct rte_port *port;
1726 
1727 	if (port_id_is_invalid(pid, ENABLED_WARN))
1728 		return;
1729 
1730 	printf("Resetting ports...\n");
1731 
1732 	RTE_ETH_FOREACH_DEV(pi) {
1733 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1734 			continue;
1735 
1736 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1737 			printf("Please remove port %d from forwarding "
1738 			       "configuration.\n", pi);
1739 			continue;
1740 		}
1741 
1742 		if (port_is_bonding_slave(pi)) {
1743 			printf("Please remove port %d from bonded device.\n",
1744 			       pi);
1745 			continue;
1746 		}
1747 
1748 		diag = rte_eth_dev_reset(pi);
1749 		if (diag == 0) {
1750 			port = &ports[pi];
1751 			port->need_reconfig = 1;
1752 			port->need_reconfig_queues = 1;
1753 		} else {
1754 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1755 		}
1756 	}
1757 
1758 	printf("Done\n");
1759 }
1760 
1761 void
1762 attach_port(char *identifier)
1763 {
1764 	portid_t pi = 0;
1765 	unsigned int socket_id;
1766 
1767 	printf("Attaching a new port...\n");
1768 
1769 	if (identifier == NULL) {
1770 		printf("Invalid parameters are specified\n");
1771 		return;
1772 	}
1773 
1774 	if (rte_eth_dev_attach(identifier, &pi))
1775 		return;
1776 
1777 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1778 	/* if socket_id is invalid, set to 0 */
1779 	if (check_socket_id(socket_id) < 0)
1780 		socket_id = 0;
1781 	reconfig(pi, socket_id);
1782 	rte_eth_promiscuous_enable(pi);
1783 
1784 	nb_ports = rte_eth_dev_count();
1785 
1786 	ports[pi].port_status = RTE_PORT_STOPPED;
1787 
1788 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1789 	printf("Done\n");
1790 }
1791 
1792 void
1793 detach_port(portid_t port_id)
1794 {
1795 	char name[RTE_ETH_NAME_MAX_LEN];
1796 
1797 	printf("Detaching a port...\n");
1798 
1799 	if (!port_is_closed(port_id)) {
1800 		printf("Please close port first\n");
1801 		return;
1802 	}
1803 
1804 	if (ports[port_id].flow_list)
1805 		port_flow_flush(port_id);
1806 
1807 	if (rte_eth_dev_detach(port_id, name)) {
1808 		RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1809 		return;
1810 	}
1811 
1812 	nb_ports = rte_eth_dev_count();
1813 
1814 	printf("Port '%s' is detached. Now total ports is %d\n",
1815 			name, nb_ports);
1816 	printf("Done\n");
1817 	return;
1818 }
1819 
1820 void
1821 pmd_test_exit(void)
1822 {
1823 	portid_t pt_id;
1824 
1825 	if (test_done == 0)
1826 		stop_packet_forwarding();
1827 
1828 	if (ports != NULL) {
1829 		no_link_check = 1;
1830 		RTE_ETH_FOREACH_DEV(pt_id) {
1831 			printf("\nShutting down port %d...\n", pt_id);
1832 			fflush(stdout);
1833 			stop_port(pt_id);
1834 			close_port(pt_id);
1835 		}
1836 	}
1837 	printf("\nBye...\n");
1838 }
1839 
1840 typedef void (*cmd_func_t)(void);
1841 struct pmd_test_command {
1842 	const char *cmd_name;
1843 	cmd_func_t cmd_func;
1844 };
1845 
1846 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1847 
1848 /* Check the link status of all ports for up to 9 s, then print the final status */
1849 static void
1850 check_all_ports_link_status(uint32_t port_mask)
1851 {
1852 #define CHECK_INTERVAL 100 /* 100ms */
1853 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1854 	portid_t portid;
1855 	uint8_t count, all_ports_up, print_flag = 0;
1856 	struct rte_eth_link link;
1857 
1858 	printf("Checking link statuses...\n");
1859 	fflush(stdout);
1860 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1861 		all_ports_up = 1;
1862 		RTE_ETH_FOREACH_DEV(portid) {
1863 			if ((port_mask & (1 << portid)) == 0)
1864 				continue;
1865 			memset(&link, 0, sizeof(link));
1866 			rte_eth_link_get_nowait(portid, &link);
1867 			/* print link status if flag set */
1868 			if (print_flag == 1) {
1869 				if (link.link_status)
1870 					printf(
1871 					"Port %d Link Up. speed %u Mbps - %s\n",
1872 					portid, link.link_speed,
1873 					(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1874 					("full-duplex") : ("half-duplex"));
1875 				else
1876 					printf("Port %d Link Down\n", portid);
1877 				continue;
1878 			}
1879 			/* clear all_ports_up flag if any link down */
1880 			if (link.link_status == ETH_LINK_DOWN) {
1881 				all_ports_up = 0;
1882 				break;
1883 			}
1884 		}
1885 		/* after finally printing all link status, get out */
1886 		if (print_flag == 1)
1887 			break;
1888 
1889 		if (all_ports_up == 0) {
1890 			fflush(stdout);
1891 			rte_delay_ms(CHECK_INTERVAL);
1892 		}
1893 
1894 		/* set the print_flag if all ports up or timeout */
1895 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1896 			print_flag = 1;
1897 		}
1898 
1899 		if (lsc_interrupt)
1900 			break;
1901 	}
1902 }
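
/*
 * Minimal sketch (not part of testpmd itself) of the non-blocking per-port
 * link query performed inside the polling loop above.
 */
static inline int
port_link_is_up(portid_t pid)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_link_get_nowait(pid, &link);	/* does not wait for the PHY */
	return link.link_status == ETH_LINK_UP;
}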
1903 
1904 static void
1905 rmv_event_callback(void *arg)
1906 {
1907 	struct rte_eth_dev *dev;
1908 	portid_t port_id = (intptr_t)arg;
1909 
1910 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1911 	dev = &rte_eth_devices[port_id];
1912 
1913 	stop_port(port_id);
1914 	close_port(port_id);
1915 	printf("removing device %s\n", dev->device->name);
1916 	if (rte_eal_dev_detach(dev->device))
1917 		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1918 			dev->device->name);
1919 }
1920 
1921 /* This function is used by the interrupt thread */
1922 static int
1923 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1924 		  void *ret_param)
1925 {
1926 	static const char * const event_desc[] = {
1927 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1928 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1929 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1930 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1931 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1932 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1933 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1934 		[RTE_ETH_EVENT_MAX] = NULL,
1935 	};
1936 
1937 	RTE_SET_USED(param);
1938 	RTE_SET_USED(ret_param);
1939 
1940 	if (type >= RTE_ETH_EVENT_MAX) {
1941 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1942 			port_id, __func__, type);
1943 		fflush(stderr);
1944 	} else if (event_print_mask & (UINT32_C(1) << type)) {
1945 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1946 			event_desc[type]);
1947 		fflush(stdout);
1948 	}
1949 
1950 	switch (type) {
1951 	case RTE_ETH_EVENT_INTR_RMV:
1952 		if (rte_eal_alarm_set(100000,
1953 				rmv_event_callback, (void *)(intptr_t)port_id))
1954 			fprintf(stderr, "Could not set up deferred device removal\n");
1955 		break;
1956 	default:
1957 		break;
1958 	}
1959 	return 0;
1960 }
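
/*
 * Hedged sketch (not from the original sources) of how a handler such as
 * eth_event_callback() is registered for a single event type; testpmd
 * itself registers it for all event types elsewhere in this file.
 */
static inline void
register_rmv_callback_example(portid_t pid)
{
	/* The callback runs in the interrupt thread on device removal. */
	rte_eth_dev_callback_register(pid, RTE_ETH_EVENT_INTR_RMV,
				      eth_event_callback, NULL);
}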
1961 
1962 static int
1963 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1964 {
1965 	uint16_t i;
1966 	int diag;
1967 	uint8_t mapping_found = 0;
1968 
1969 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1970 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1971 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1972 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1973 					tx_queue_stats_mappings[i].queue_id,
1974 					tx_queue_stats_mappings[i].stats_counter_id);
1975 			if (diag != 0)
1976 				return diag;
1977 			mapping_found = 1;
1978 		}
1979 	}
1980 	if (mapping_found)
1981 		port->tx_queue_stats_mapping_enabled = 1;
1982 	return 0;
1983 }
1984 
1985 static int
1986 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
1987 {
1988 	uint16_t i;
1989 	int diag;
1990 	uint8_t mapping_found = 0;
1991 
1992 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1993 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1994 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1995 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1996 					rx_queue_stats_mappings[i].queue_id,
1997 					rx_queue_stats_mappings[i].stats_counter_id);
1998 			if (diag != 0)
1999 				return diag;
2000 			mapping_found = 1;
2001 		}
2002 	}
2003 	if (mapping_found)
2004 		port->rx_queue_stats_mapping_enabled = 1;
2005 	return 0;
2006 }
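
/*
 * Hedged sketch (not part of testpmd itself) of the underlying ethdev call
 * used by the two helpers above. Counter ids index a small bank of
 * per-queue stats registers (RTE_ETHDEV_QUEUE_STAT_CNTRS, 16 by default);
 * PMDs without such registers return -ENOTSUP.
 */
static inline int
map_txq_to_counter_example(portid_t pid)
{
	/* Map TX queue 0 of this port onto stats counter 0. */
	return rte_eth_dev_set_tx_queue_stats_mapping(pid, 0, 0);
}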
2007 
2008 static void
2009 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2010 {
2011 	int diag = 0;
2012 
2013 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2014 	if (diag != 0) {
2015 		if (diag == -ENOTSUP) {
2016 			port->tx_queue_stats_mapping_enabled = 0;
2017 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
2018 		} else
2020 			rte_exit(EXIT_FAILURE,
2021 					"set_tx_queue_stats_mapping_registers "
2022 					"failed for port id=%d diag=%d\n",
2023 					pi, diag);
2024 	}
2025 
2026 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2027 	if (diag != 0) {
2028 		if (diag == -ENOTSUP) {
2029 			port->rx_queue_stats_mapping_enabled = 0;
2030 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
2031 		} else
2033 			rte_exit(EXIT_FAILURE,
2034 					"set_rx_queue_stats_mapping_registers "
2035 					"failed for port id=%d diag=%d\n",
2036 					pi, diag);
2037 	}
2038 }
2039 
2040 static void
2041 rxtx_port_config(struct rte_port *port)
2042 {
2043 	port->rx_conf = port->dev_info.default_rxconf;
2044 	port->tx_conf = port->dev_info.default_txconf;
2045 
2046 	/* Check if any RX/TX parameters have been passed */
2047 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2048 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2049 
2050 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2051 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2052 
2053 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2054 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2055 
2056 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2057 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2058 
2059 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2060 		port->rx_conf.rx_drop_en = rx_drop_en;
2061 
2062 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2063 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2064 
2065 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2066 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2067 
2068 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2069 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2070 
2071 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2072 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2073 
2074 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2075 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2076 
2077 	if (txq_flags != RTE_PMD_PARAM_UNSET)
2078 		port->tx_conf.txq_flags = txq_flags;
2079 }
2080 
2081 void
2082 init_port_config(void)
2083 {
2084 	portid_t pid;
2085 	struct rte_port *port;
2086 
2087 	RTE_ETH_FOREACH_DEV(pid) {
2088 		port = &ports[pid];
2089 		port->dev_conf.rxmode = rx_mode;
2090 		port->dev_conf.fdir_conf = fdir_conf;
2091 		if (nb_rxq > 1) {
2092 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2093 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2094 		} else {
2095 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2096 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2097 		}
2098 
2099 		if (port->dcb_flag == 0) {
2100 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2101 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2102 			else
2103 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2104 		}
2105 
2106 		rxtx_port_config(port);
2107 
2108 		rte_eth_macaddr_get(pid, &port->eth_addr);
2109 
2110 		map_port_queue_stats_mapping_registers(pid, port);
2111 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2112 		rte_pmd_ixgbe_bypass_init(pid);
2113 #endif
2114 
2115 		if (lsc_interrupt &&
2116 		    (rte_eth_devices[pid].data->dev_flags &
2117 		     RTE_ETH_DEV_INTR_LSC))
2118 			port->dev_conf.intr_conf.lsc = 1;
2119 		if (rmv_interrupt &&
2120 		    (rte_eth_devices[pid].data->dev_flags &
2121 		     RTE_ETH_DEV_INTR_RMV))
2122 			port->dev_conf.intr_conf.rmv = 1;
2123 
2124 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2125 		/* Detect softnic port */
2126 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2127 			port->softnic_enable = 1;
2128 			memset(&port->softport, 0, sizeof(struct softnic_port));
2129 
2130 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2131 				port->softport.tm_flag = 1;
2132 		}
2133 #endif
2134 	}
2135 }
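
/*
 * The mq_mode decision in init_port_config() in brief (a sketch, not part
 * of testpmd itself): RSS is only meaningful with more than one RX queue,
 * so rss_hf is non-zero, and ETH_MQ_RX_RSS selected, exactly when
 * nb_rxq > 1 on a non-DCB port.
 */
static inline enum rte_eth_rx_mq_mode
pick_rx_mq_mode_example(queueid_t n_rxq)
{
	/* Mirrors the nb_rxq > 1 test used above. */
	return (n_rxq > 1) ? ETH_MQ_RX_RSS : ETH_MQ_RX_NONE;
}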
2136 
2137 void set_port_slave_flag(portid_t slave_pid)
2138 {
2139 	struct rte_port *port;
2140 
2141 	port = &ports[slave_pid];
2142 	port->slave_flag = 1;
2143 }
2144 
2145 void clear_port_slave_flag(portid_t slave_pid)
2146 {
2147 	struct rte_port *port;
2148 
2149 	port = &ports[slave_pid];
2150 	port->slave_flag = 0;
2151 }
2152 
2153 uint8_t port_is_bonding_slave(portid_t slave_pid)
2154 {
2155 	struct rte_port *port;
2156 
2157 	port = &ports[slave_pid];
2158 	return port->slave_flag;
2159 }
2160 
2161 const uint16_t vlan_tags[] = {
2162 		0,  1,  2,  3,  4,  5,  6,  7,
2163 		8,  9, 10, 11, 12, 13, 14, 15,
2164 		16, 17, 18, 19, 20, 21, 22, 23,
2165 		24, 25, 26, 27, 28, 29, 30, 31
2166 };
2167 
2168 static int
2169 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2170 		 enum dcb_mode_enable dcb_mode,
2171 		 enum rte_eth_nb_tcs num_tcs,
2172 		 uint8_t pfc_en)
2173 {
2174 	uint8_t i;
2175 
2176 	/*
2177 	 * Build the correct configuration for DCB+VT based on the vlan_tags
2178 	 * array given above and the number of traffic classes available for use.
2179 	 */
2180 	if (dcb_mode == DCB_VT_ENABLED) {
2181 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2182 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2183 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2184 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2185 
2186 		/* VMDQ+DCB RX and TX configurations */
2187 		vmdq_rx_conf->enable_default_pool = 0;
2188 		vmdq_rx_conf->default_pool = 0;
2189 		vmdq_rx_conf->nb_queue_pools =
2190 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2191 		vmdq_tx_conf->nb_queue_pools =
2192 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2193 
2194 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2195 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2196 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2197 			vmdq_rx_conf->pool_map[i].pools =
2198 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2199 		}
2200 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2201 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2202 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2203 		}
2204 
2205 		/* set DCB mode of RX and TX of multiple queues */
2206 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2207 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2208 	} else {
2209 		struct rte_eth_dcb_rx_conf *rx_conf =
2210 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2211 		struct rte_eth_dcb_tx_conf *tx_conf =
2212 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2213 
2214 		rx_conf->nb_tcs = num_tcs;
2215 		tx_conf->nb_tcs = num_tcs;
2216 
2217 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2218 			rx_conf->dcb_tc[i] = i % num_tcs;
2219 			tx_conf->dcb_tc[i] = i % num_tcs;
2220 		}
2221 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2222 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2223 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2224 	}
2225 
2226 	if (pfc_en)
2227 		eth_conf->dcb_capability_en =
2228 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2229 	else
2230 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2231 
2232 	return 0;
2233 }
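
/*
 * Worked example (not from the original sources) of the VMDq+DCB pool
 * mapping built above: with num_tcs == ETH_4_TCS there are ETH_32_POOLS
 * queue pools, so vlan_tags[i] gets the one-hot pool mask below and each
 * of the 32 VLAN tags lands in its own pool.
 */
static inline uint64_t
vmdq_pool_mask_example(uint8_t i, uint8_t nb_pools)
{
	/* Same arithmetic as the pool_map loop in get_eth_dcb_conf(). */
	return (uint64_t)1 << (i % nb_pools);
}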
2234 
2235 int
2236 init_port_dcb_config(portid_t pid,
2237 		     enum dcb_mode_enable dcb_mode,
2238 		     enum rte_eth_nb_tcs num_tcs,
2239 		     uint8_t pfc_en)
2240 {
2241 	struct rte_eth_conf port_conf;
2242 	struct rte_port *rte_port;
2243 	int retval;
2244 	uint16_t i;
2245 
2246 	rte_port = &ports[pid];
2247 
2248 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2249 	/* Enter DCB configuration status */
2250 	dcb_config = 1;
2251 
2252 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2253 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2254 	if (retval < 0)
2255 		return retval;
2256 	port_conf.rxmode.hw_vlan_filter = 1;
2257 
2258 	/*
2259 	 * Write the configuration into the device.
2260 	 * Set the numbers of RX & TX queues to 0, so
2261 	 * the RX & TX queues will not be set up.
2262 	 */
2263 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2264 
2265 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2266 
2267 	/* If dev_info.vmdq_pool_base is greater than 0,
2268 	 * the queue ids of the VMDq pools start after the PF queues.
2269 	 */
2270 	if (dcb_mode == DCB_VT_ENABLED &&
2271 	    rte_port->dev_info.vmdq_pool_base > 0) {
2272 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2273 			" for port %d.\n", pid);
2274 		return -1;
2275 	}
2276 
2277 	/* Assume the ports in testpmd have the same DCB capability
2278 	 * and the same number of rxq and txq in DCB mode.
2279 	 */
2280 	if (dcb_mode == DCB_VT_ENABLED) {
2281 		if (rte_port->dev_info.max_vfs > 0) {
2282 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2283 			nb_txq = rte_port->dev_info.nb_tx_queues;
2284 		} else {
2285 			nb_rxq = rte_port->dev_info.max_rx_queues;
2286 			nb_txq = rte_port->dev_info.max_tx_queues;
2287 		}
2288 	} else {
2289 		/* If VT is disabled, use all PF queues */
2290 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2291 			nb_rxq = rte_port->dev_info.max_rx_queues;
2292 			nb_txq = rte_port->dev_info.max_tx_queues;
2293 		} else {
2294 			nb_rxq = (queueid_t)num_tcs;
2295 			nb_txq = (queueid_t)num_tcs;
2297 		}
2298 	}
2299 	rx_free_thresh = 64;
2300 
2301 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2302 
2303 	rxtx_port_config(rte_port);
2304 	/* VLAN filter */
2305 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2306 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2307 		rx_vft_set(pid, vlan_tags[i], 1);
2308 
2309 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2310 	map_port_queue_stats_mapping_registers(pid, rte_port);
2311 
2312 	rte_port->dcb_flag = 1;
2313 
2314 	return 0;
2315 }
2316 
2317 static void
2318 init_port(void)
2319 {
2320 	/* Configuration of Ethernet ports. */
2321 	ports = rte_zmalloc("testpmd: ports",
2322 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2323 			    RTE_CACHE_LINE_SIZE);
2324 	if (ports == NULL) {
2325 		rte_exit(EXIT_FAILURE,
2326 				"rte_zmalloc(%d struct rte_port) failed\n",
2327 				RTE_MAX_ETHPORTS);
2328 	}
2329 }
2330 
2331 static void
2332 force_quit(void)
2333 {
2334 	pmd_test_exit();
2335 	prompt_exit();
2336 }
2337 
2338 static void
2339 print_stats(void)
2340 {
2341 	uint8_t i;
2342 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2343 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2344 
2345 	/* Clear screen and move to top left */
2346 	printf("%s%s", clr, top_left);
2347 
2348 	printf("\nPort statistics ====================================");
2349 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2350 		nic_stats_display(fwd_ports_ids[i]);
2351 }
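
/*
 * The escape arrays above are the VT100 sequences "\033[2J" (clear screen)
 * and "\033[1;1H" (cursor to row 1, column 1); the string-literal form
 * below is equivalent (a sketch, not part of testpmd itself).
 */
static inline void
clear_screen_example(void)
{
	printf("\033[2J\033[1;1H");	/* clear, then home the cursor */
}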
2352 
2353 static void
2354 signal_handler(int signum)
2355 {
2356 	if (signum == SIGINT || signum == SIGTERM) {
2357 		printf("\nSignal %d received, preparing to exit...\n",
2358 				signum);
2359 #ifdef RTE_LIBRTE_PDUMP
2360 		/* uninitialize packet capture framework */
2361 		rte_pdump_uninit();
2362 #endif
2363 #ifdef RTE_LIBRTE_LATENCY_STATS
2364 		rte_latencystats_uninit();
2365 #endif
2366 		force_quit();
2367 		/* Set flag to indicate the force termination. */
2368 		/* Set flag to indicate forced termination. */
2369 		/* exit with the expected status */
2370 		signal(signum, SIG_DFL);
2371 		kill(getpid(), signum);
2372 	}
2373 }
2374 
2375 int
2376 main(int argc, char** argv)
2377 {
2378 	int  diag;
2379 	portid_t port_id;
2380 
2381 	signal(SIGINT, signal_handler);
2382 	signal(SIGTERM, signal_handler);
2383 
2384 	diag = rte_eal_init(argc, argv);
2385 	if (diag < 0)
2386 		rte_panic("Cannot init EAL\n");
2387 
2388 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2389 		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2390 			strerror(errno));
2391 	}
2392 
2393 #ifdef RTE_LIBRTE_PDUMP
2394 	/* initialize packet capture framework */
2395 	rte_pdump_init(NULL);
2396 #endif
2397 
2398 	nb_ports = (portid_t) rte_eth_dev_count();
2399 	if (nb_ports == 0)
2400 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2401 
2402 	/* allocate port structures, and init them */
2403 	init_port();
2404 
2405 	set_def_fwd_config();
2406 	if (nb_lcores == 0)
2407 		rte_panic("Empty set of forwarding logical cores - check the "
2408 			  "core mask supplied in the command parameters\n");
2409 
2410 	/* Bitrate/latency stats disabled by default */
2411 #ifdef RTE_LIBRTE_BITRATE
2412 	bitrate_enabled = 0;
2413 #endif
2414 #ifdef RTE_LIBRTE_LATENCY_STATS
2415 	latencystats_enabled = 0;
2416 #endif
2417 
2418 	argc -= diag;
2419 	argv += diag;
2420 	if (argc > 1)
2421 		launch_args_parse(argc, argv);
2422 
2423 	if (tx_first && interactive)
2424 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2425 				"interactive mode.\n");
2426 
2427 	if (tx_first && lsc_interrupt) {
2428 		printf("Warning: lsc_interrupt needs to be off when "
2429 				"using tx_first. Disabling.\n");
2430 		lsc_interrupt = 0;
2431 	}
2432 
2433 	if (!nb_rxq && !nb_txq)
2434 		printf("Warning: Either rx or tx queues should be non-zero\n");
2435 
2436 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2437 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2438 		       "but nb_txq=%d will prevent fully testing it.\n",
2439 		       nb_rxq, nb_txq);
2440 
2441 	init_config();
2442 	if (start_port(RTE_PORT_ALL) != 0)
2443 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2444 
2445 	/* set all ports to promiscuous mode by default */
2446 	RTE_ETH_FOREACH_DEV(port_id)
2447 		rte_eth_promiscuous_enable(port_id);
2448 
2449 	/* Init metrics library */
2450 	rte_metrics_init(rte_socket_id());
2451 
2452 #ifdef RTE_LIBRTE_LATENCY_STATS
2453 	if (latencystats_enabled != 0) {
2454 		int ret = rte_latencystats_init(1, NULL);
2455 		if (ret)
2456 			printf("Warning: latencystats init()"
2457 				" returned error %d\n", ret);
2458 		printf("Latencystats running on lcore %d\n",
2459 			latencystats_lcore_id);
2460 	}
2461 #endif
2462 
2463 	/* Set up bitrate stats */
2464 #ifdef RTE_LIBRTE_BITRATE
2465 	if (bitrate_enabled != 0) {
2466 		bitrate_data = rte_stats_bitrate_create();
2467 		if (bitrate_data == NULL)
2468 			rte_exit(EXIT_FAILURE,
2469 				"Could not allocate bitrate data.\n");
2470 		rte_stats_bitrate_reg(bitrate_data);
2471 	}
2472 #endif
2473 
2474 #ifdef RTE_LIBRTE_CMDLINE
2475 	if (strlen(cmdline_filename) != 0)
2476 		cmdline_read_from_file(cmdline_filename);
2477 
2478 	if (interactive == 1) {
2479 		if (auto_start) {
2480 			printf("Start automatic packet forwarding\n");
2481 			start_packet_forwarding(0);
2482 		}
2483 		prompt();
2484 		pmd_test_exit();
2485 	} else
2486 #endif
2487 	{
2488 		char c;
2489 		int rc;
2490 
2491 		f_quit = 0;
2492 
2493 		printf("No interactive command line; starting packet forwarding\n");
2494 		start_packet_forwarding(tx_first);
2495 		if (stats_period != 0) {
2496 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2497 			uint64_t timer_period;
2498 
2499 			/* Convert to number of cycles */
2500 			timer_period = stats_period * rte_get_timer_hz();
2501 
2502 			while (f_quit == 0) {
2503 				cur_time = rte_get_timer_cycles();
2504 				diff_time += cur_time - prev_time;
2505 
2506 				if (diff_time >= timer_period) {
2507 					print_stats();
2508 					/* Reset the timer */
2509 					diff_time = 0;
2510 				}
2511 				/* Sleep to avoid unnecessary checks */
2512 				prev_time = cur_time;
2513 				sleep(1);
2514 			}
2515 		}
2516 
2517 		printf("Press enter to exit\n");
2518 		rc = read(0, &c, 1);
2519 		pmd_test_exit();
2520 		if (rc < 0)
2521 			return 1;
2522 	}
2523 
2524 	return 0;
2525 }
2526