xref: /dpdk/app/test-pmd/testpmd.c (revision 5b590fbe09b6163a55f279bc5d4b85dce39f1d49)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44 
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47 
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51 
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_memzone.h>
61 #include <rte_launch.h>
62 #include <rte_eal.h>
63 #include <rte_alarm.h>
64 #include <rte_per_lcore.h>
65 #include <rte_lcore.h>
66 #include <rte_atomic.h>
67 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
70 #include <rte_mbuf.h>
71 #include <rte_interrupts.h>
72 #include <rte_pci.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_dev.h>
76 #include <rte_string_fns.h>
77 #ifdef RTE_LIBRTE_IXGBE_PMD
78 #include <rte_pmd_ixgbe.h>
79 #endif
80 #ifdef RTE_LIBRTE_PDUMP
81 #include <rte_pdump.h>
82 #endif
83 #include <rte_flow.h>
84 #include <rte_metrics.h>
85 #ifdef RTE_LIBRTE_BITRATE
86 #include <rte_bitrate.h>
87 #endif
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
90 #endif
91 
92 #include "testpmd.h"
93 
94 uint16_t verbose_level = 0; /**< Silent by default. */
95 
96 /* use master core for command line? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
99 uint8_t tx_first;
100 char cmdline_filename[PATH_MAX] = {0};
101 
102 /*
103  * NUMA support configuration.
104  * When set, the NUMA support attempts to dispatch the allocation of the
105  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
106  * probed ports among the CPU sockets 0 and 1.
107  * Otherwise, all memory is allocated from CPU socket 0.
108  */
109 uint8_t numa_support = 1; /**< numa enabled by default */
110 
111 /*
112  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
113  * not configured.
114  */
115 uint8_t socket_num = UMA_NO_CONFIG;
116 
117 /*
118  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
119  */
120 uint8_t mp_anon = 0;
121 
122 /*
123  * Record the Ethernet address of peer target ports to which packets are
124  * forwarded.
125  * Must be instantiated with the Ethernet addresses of peer traffic generator
126  * ports.
127  */
128 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
129 portid_t nb_peer_eth_addrs = 0;
130 
131 /*
132  * Probed Target Environment.
133  */
134 struct rte_port *ports;	       /**< For all probed ethernet ports. */
135 portid_t nb_ports;             /**< Number of probed ethernet ports. */
136 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
137 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
138 
139 /*
140  * Test Forwarding Configuration.
141  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
142  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
143  */
144 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
145 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
146 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
147 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
148 
149 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
150 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
151 
152 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
153 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
154 
155 /*
156  * Forwarding engines.
157  */
158 struct fwd_engine * fwd_engines[] = {
159 	&io_fwd_engine,
160 	&mac_fwd_engine,
161 	&mac_swap_engine,
162 	&flow_gen_engine,
163 	&rx_only_engine,
164 	&tx_only_engine,
165 	&csum_fwd_engine,
166 	&icmp_echo_engine,
167 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
168 	&softnic_tm_engine,
169 	&softnic_tm_bypass_engine,
170 #endif
171 #ifdef RTE_LIBRTE_IEEE1588
172 	&ieee1588_fwd_engine,
173 #endif
174 	NULL,
175 };
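
/*
 * Note: the forwarding engine is selected at run time, e.g. with the
 * interactive "set fwd <mode>" command or the --forward-mode command-line
 * option (names as documented for testpmd); the default is the "io"
 * engine, see cur_fwd_eng below.
 */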
176 
177 struct fwd_config cur_fwd_config;
178 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
179 uint32_t retry_enabled;
180 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
181 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
182 
183 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
184 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
185                                       * specified on command-line. */
186 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
187 
188 /*
189  * In a container, a process running with the 'stats-period' option cannot be
190  * terminated. Set this flag to exit the stats-period loop on SIGINT/SIGTERM.
191  */
192 uint8_t f_quit;
193 
194 /*
195  * Configuration of packet segments used by the "txonly" processing engine.
196  */
197 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
198 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
199 	TXONLY_DEF_PACKET_LEN,
200 };
201 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
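
/*
 * Illustrative example: the interactive command "set txpkts 64,64" would
 * set tx_pkt_nb_segs to 2 with two 64-byte segment lengths, so each TXONLY
 * packet is built from two chained mbufs.
 */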
202 
203 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
204 /**< Split policy for packets to TX. */
205 
206 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
207 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
208 
209 /* whether the current configuration is in DCB mode; 0 means it is not */
210 uint8_t dcb_config = 0;
211 
212 /* Whether the DCB configuration is being tested */
213 uint8_t dcb_test = 0;
214 
215 /*
216  * Configurable number of RX/TX queues.
217  */
218 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
219 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
220 
221 /*
222  * Configurable number of RX/TX ring descriptors.
223  */
224 #define RTE_TEST_RX_DESC_DEFAULT 128
225 #define RTE_TEST_TX_DESC_DEFAULT 512
226 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
227 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
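
/*
 * Both defaults can be overridden at run time, e.g. with the --rxd/--txd
 * command-line options or the interactive "port config all rxd|txd <n>"
 * command (option names as documented for testpmd).
 */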
228 
229 #define RTE_PMD_PARAM_UNSET -1
230 /*
231  * Configurable values of RX and TX ring threshold registers.
232  */
233 
234 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
235 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
236 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
237 
238 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
239 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
240 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
241 
242 /*
243  * Configurable value of RX free threshold.
244  */
245 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
246 
247 /*
248  * Configurable value of RX drop enable.
249  */
250 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
251 
252 /*
253  * Configurable value of TX free threshold.
254  */
255 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
256 
257 /*
258  * Configurable value of TX RS bit threshold.
259  */
260 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
261 
262 /*
263  * Configurable value of TX queue flags.
264  */
265 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
266 
267 /*
268  * Receive Side Scaling (RSS) configuration.
269  */
270 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
271 
272 /*
273  * Port topology configuration
274  */
275 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
276 
277 /*
278  * Avoid flushing all the RX streams before starting to forward.
279  */
280 uint8_t no_flush_rx = 0; /* flush by default */
281 
282 /*
283  * Flow API isolated mode.
284  */
285 uint8_t flow_isolate_all;
286 
287 /*
288  * Avoid checking the link status when starting/stopping a port.
289  */
290 uint8_t no_link_check = 0; /* check by default */
291 
292 /*
293  * Enable link status change notification
294  */
295 uint8_t lsc_interrupt = 1; /* enabled by default */
296 
297 /*
298  * Enable device removal notification.
299  */
300 uint8_t rmv_interrupt = 1; /* enabled by default */
301 
302 /*
303  * Display or mask Ethernet device events.
304  * Defaults to all events except VF_MBOX.
305  */
306 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
307 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
308 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
309 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
310 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
311 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
312 
313 /*
314  * NIC bypass mode configuration options.
315  */
316 
317 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
318 /* The NIC bypass watchdog timeout. */
319 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
320 #endif
321 
322 
323 #ifdef RTE_LIBRTE_LATENCY_STATS
324 
325 /*
326  * Set when latency stats is enabled in the commandline
327  */
328 uint8_t latencystats_enabled;
329 
330 /*
331  * Lcore ID to service latency statistics.
332  */
333 lcoreid_t latencystats_lcore_id = -1;
334 
335 #endif
336 
337 /*
338  * Ethernet device configuration.
339  */
340 struct rte_eth_rxmode rx_mode = {
341 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
342 	.split_hdr_size = 0,
343 	.header_split   = 0, /**< Header Split disabled. */
344 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
345 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
346 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
347 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
348 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
349 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
350 };
351 
352 struct rte_fdir_conf fdir_conf = {
353 	.mode = RTE_FDIR_MODE_NONE,
354 	.pballoc = RTE_FDIR_PBALLOC_64K,
355 	.status = RTE_FDIR_REPORT_STATUS,
356 	.mask = {
357 		.vlan_tci_mask = 0x0,
358 		.ipv4_mask     = {
359 			.src_ip = 0xFFFFFFFF,
360 			.dst_ip = 0xFFFFFFFF,
361 		},
362 		.ipv6_mask     = {
363 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
364 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
365 		},
366 		.src_port_mask = 0xFFFF,
367 		.dst_port_mask = 0xFFFF,
368 		.mac_addr_byte_mask = 0xFF,
369 		.tunnel_type_mask = 1,
370 		.tunnel_id_mask = 0xFFFFFFFF,
371 	},
372 	.drop_queue = 127,
373 };
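
/*
 * The all-ones masks above make the corresponding header fields fully
 * significant when flow director matching is enabled, while
 * vlan_tci_mask == 0 ignores the VLAN TCI (assumed rte_fdir mask
 * semantics: a set mask bit means the bit takes part in the match).
 */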
374 
375 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
376 
377 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
378 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
379 
380 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
381 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
382 
383 uint16_t nb_tx_queue_stats_mappings = 0;
384 uint16_t nb_rx_queue_stats_mappings = 0;
385 
386 unsigned int num_sockets = 0;
387 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
388 
389 #ifdef RTE_LIBRTE_BITRATE
390 /* Bitrate statistics */
391 struct rte_stats_bitrates *bitrate_data;
392 lcoreid_t bitrate_lcore_id;
393 uint8_t bitrate_enabled;
394 #endif
395 
396 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
397 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
398 
399 /* Forward function declarations */
400 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
401 static void check_all_ports_link_status(uint32_t port_mask);
402 static int eth_event_callback(portid_t port_id,
403 			      enum rte_eth_event_type type,
404 			      void *param, void *ret_param);
405 
406 /*
407  * Check if all the ports are started.
408  * If yes, return positive value. If not, return zero.
409  */
410 static int all_ports_started(void);
411 
412 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
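/* Default GSO segment size: maximum Ethernet frame minus 4-byte CRC (1514 bytes). */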
413 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
414 
415 /*
416  * Helper function to check whether a socket ID is new (not yet discovered).
417  * If new, return a positive value; if already discovered, return zero.
418  */
419 int
420 new_socket_id(unsigned int socket_id)
421 {
422 	unsigned int i;
423 
424 	for (i = 0; i < num_sockets; i++) {
425 		if (socket_ids[i] == socket_id)
426 			return 0;
427 	}
428 	return 1;
429 }
430 
431 /*
432  * Setup default configuration.
433  */
434 static void
435 set_default_fwd_lcores_config(void)
436 {
437 	unsigned int i;
438 	unsigned int nb_lc;
439 	unsigned int sock_num;
440 
441 	nb_lc = 0;
442 	for (i = 0; i < RTE_MAX_LCORE; i++) {
443 		sock_num = rte_lcore_to_socket_id(i);
444 		if (new_socket_id(sock_num)) {
445 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
446 				rte_exit(EXIT_FAILURE,
447 					 "Total sockets greater than %u\n",
448 					 RTE_MAX_NUMA_NODES);
449 			}
450 			socket_ids[num_sockets++] = sock_num;
451 		}
452 		if (!rte_lcore_is_enabled(i))
453 			continue;
454 		if (i == rte_get_master_lcore())
455 			continue;
456 		fwd_lcores_cpuids[nb_lc++] = i;
457 	}
458 	nb_lcores = (lcoreid_t) nb_lc;
459 	nb_cfg_lcores = nb_lcores;
460 	nb_fwd_lcores = 1;
461 }
462 
463 static void
464 set_def_peer_eth_addrs(void)
465 {
466 	portid_t i;
467 
468 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
469 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
470 		peer_eth_addrs[i].addr_bytes[5] = i;
471 	}
472 }
473 
474 static void
475 set_default_fwd_ports_config(void)
476 {
477 	portid_t pt_id;
478 	int i = 0;
479 
480 	RTE_ETH_FOREACH_DEV(pt_id)
481 		fwd_ports_ids[i++] = pt_id;
482 
483 	nb_cfg_ports = nb_ports;
484 	nb_fwd_ports = nb_ports;
485 }
486 
487 void
488 set_def_fwd_config(void)
489 {
490 	set_default_fwd_lcores_config();
491 	set_def_peer_eth_addrs();
492 	set_default_fwd_ports_config();
493 }
494 
495 /*
496  * Configuration initialisation done once at init time.
497  */
498 static void
499 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
500 		 unsigned int socket_id)
501 {
502 	char pool_name[RTE_MEMPOOL_NAMESIZE];
503 	struct rte_mempool *rte_mp = NULL;
504 	uint32_t mb_size;
505 
506 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
507 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
508 
509 	RTE_LOG(INFO, USER1,
510 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
511 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
512 
513 	if (mp_anon != 0) {
514 		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
515 			mb_size, (unsigned) mb_mempool_cache,
516 			sizeof(struct rte_pktmbuf_pool_private),
517 			socket_id, 0);
518 		if (rte_mp == NULL)
519 			goto err;
520 
521 		if (rte_mempool_populate_anon(rte_mp) == 0) {
522 			rte_mempool_free(rte_mp);
523 			rte_mp = NULL;
524 			goto err;
525 		}
526 		rte_pktmbuf_pool_init(rte_mp, NULL);
527 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
528 	} else {
529 		/* wrapper to rte_mempool_create() */
530 		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
531 			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
532 	}
533 
534 err:
535 	if (rte_mp == NULL) {
536 		rte_exit(EXIT_FAILURE,
537 			"Creation of mbuf pool for socket %u failed: %s\n",
538 			socket_id, rte_strerror(rte_errno));
539 	} else if (verbose_level > 0) {
540 		rte_mempool_dump(stdout, rte_mp);
541 	}
542 }
543 
544 /*
545  * Check whether the given socket ID is valid in NUMA mode.
546  * Return 0 if valid, -1 otherwise.
547  */
548 static int
549 check_socket_id(const unsigned int socket_id)
550 {
551 	static int warning_once = 0;
552 
553 	if (new_socket_id(socket_id)) {
554 		if (!warning_once && numa_support)
555 			printf("Warning: NUMA should be configured manually by"
556 			       " using --port-numa-config and"
557 			       " --ring-numa-config parameters along with"
558 			       " --numa.\n");
559 		warning_once = 1;
560 		return -1;
561 	}
562 	return 0;
563 }
564 
565 static void
566 init_config(void)
567 {
568 	portid_t pid;
569 	struct rte_port *port;
570 	struct rte_mempool *mbp;
571 	unsigned int nb_mbuf_per_pool;
572 	lcoreid_t  lc_id;
573 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
574 	struct rte_gro_param gro_param;
575 	uint32_t gso_types;
576 
577 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
578 
579 	if (numa_support) {
580 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
581 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
582 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
583 	}
584 
585 	/* Configuration of logical cores. */
586 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
587 				sizeof(struct fwd_lcore *) * nb_lcores,
588 				RTE_CACHE_LINE_SIZE);
589 	if (fwd_lcores == NULL) {
590 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
591 							"failed\n", nb_lcores);
592 	}
593 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
594 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
595 					       sizeof(struct fwd_lcore),
596 					       RTE_CACHE_LINE_SIZE);
597 		if (fwd_lcores[lc_id] == NULL) {
598 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
599 								"failed\n");
600 		}
601 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
602 	}
603 
604 	RTE_ETH_FOREACH_DEV(pid) {
605 		port = &ports[pid];
606 		rte_eth_dev_info_get(pid, &port->dev_info);
607 
608 		if (numa_support) {
609 			if (port_numa[pid] != NUMA_NO_CONFIG)
610 				port_per_socket[port_numa[pid]]++;
611 			else {
612 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
613 
614 				/* if socket_id is invalid, set to 0 */
615 				if (check_socket_id(socket_id) < 0)
616 					socket_id = 0;
617 				port_per_socket[socket_id]++;
618 			}
619 		}
620 
621 		/* set flag to initialize port/queue */
622 		port->need_reconfig = 1;
623 		port->need_reconfig_queues = 1;
624 	}
625 
626 	/*
627 	 * Create pools of mbuf.
628 	 * If NUMA support is disabled, create a single pool of mbuf in
629 	 * socket 0 memory by default.
630 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
631 	 *
632 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
633 	 * nb_txd can be configured at run time.
634 	 */
635 	if (param_total_num_mbufs)
636 		nb_mbuf_per_pool = param_total_num_mbufs;
637 	else {
638 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
639 			(nb_lcores * mb_mempool_cache) +
640 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
641 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
642 	}
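
	/*
	 * Illustration of the default sizing above (the real constants come
	 * from testpmd.h; the numbers here are only an example): assuming
	 * 2048-entry maximum RX and TX rings, 4 lcores, a 250-mbuf per-lcore
	 * cache and a 512-packet burst, each pool holds
	 * (2048 + 4 * 250 + 2048 + 512) * RTE_MAX_ETHPORTS mbufs, i.e. full
	 * rings on every port plus the lcore caches and one burst in flight.
	 */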
643 
644 	if (numa_support) {
645 		uint8_t i;
646 
647 		for (i = 0; i < num_sockets; i++)
648 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
649 					 socket_ids[i]);
650 	} else {
651 		if (socket_num == UMA_NO_CONFIG)
652 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
653 		else
654 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
655 						 socket_num);
656 	}
657 
658 	init_port_config();
659 
660 	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
661 		DEV_TX_OFFLOAD_GRE_TNL_TSO;
662 	/*
663 	 * Record which mbuf pool is used by each logical core, if needed.
664 	 */
665 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
666 		mbp = mbuf_pool_find(
667 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
668 
669 		if (mbp == NULL)
670 			mbp = mbuf_pool_find(0);
671 		fwd_lcores[lc_id]->mbp = mbp;
672 		/* initialize GSO context */
673 		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
674 		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
675 		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
676 		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
677 			ETHER_CRC_LEN;
678 		fwd_lcores[lc_id]->gso_ctx.flag = 0;
679 	}
680 
681 	/* Configuration of packet forwarding streams. */
682 	if (init_fwd_streams() < 0)
683 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
684 
685 	fwd_config_setup();
686 
687 	/* create a gro context for each lcore */
688 	gro_param.gro_types = RTE_GRO_TCP_IPV4;
689 	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
690 	gro_param.max_item_per_flow = MAX_PKT_BURST;
691 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
692 		gro_param.socket_id = rte_lcore_to_socket_id(
693 				fwd_lcores_cpuids[lc_id]);
694 		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
695 		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
696 			rte_exit(EXIT_FAILURE,
697 					"rte_gro_ctx_create() failed\n");
698 		}
699 	}
700 }
701 
702 
703 void
704 reconfig(portid_t new_port_id, unsigned socket_id)
705 {
706 	struct rte_port *port;
707 
708 	/* Reconfiguration of Ethernet ports. */
709 	port = &ports[new_port_id];
710 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
711 
712 	/* set flag to initialize port/queue */
713 	port->need_reconfig = 1;
714 	port->need_reconfig_queues = 1;
715 	port->socket_id = socket_id;
716 
717 	init_port_config();
718 }
719 
720 
721 int
722 init_fwd_streams(void)
723 {
724 	portid_t pid;
725 	struct rte_port *port;
726 	streamid_t sm_id, nb_fwd_streams_new;
727 	queueid_t q;
728 
729 	/* set socket id according to numa or not */
730 	RTE_ETH_FOREACH_DEV(pid) {
731 		port = &ports[pid];
732 		if (nb_rxq > port->dev_info.max_rx_queues) {
733 			printf("Fail: nb_rxq(%d) is greater than "
734 				"max_rx_queues(%d)\n", nb_rxq,
735 				port->dev_info.max_rx_queues);
736 			return -1;
737 		}
738 		if (nb_txq > port->dev_info.max_tx_queues) {
739 			printf("Fail: nb_txq(%d) is greater than "
740 				"max_tx_queues(%d)\n", nb_txq,
741 				port->dev_info.max_tx_queues);
742 			return -1;
743 		}
744 		if (numa_support) {
745 			if (port_numa[pid] != NUMA_NO_CONFIG)
746 				port->socket_id = port_numa[pid];
747 			else {
748 				port->socket_id = rte_eth_dev_socket_id(pid);
749 
750 				/* if socket_id is invalid, set to 0 */
751 				if (check_socket_id(port->socket_id) < 0)
752 					port->socket_id = 0;
753 			}
754 		}
755 		else {
756 			if (socket_num == UMA_NO_CONFIG)
757 				port->socket_id = 0;
758 			else
759 				port->socket_id = socket_num;
760 		}
761 	}
762 
763 	q = RTE_MAX(nb_rxq, nb_txq);
764 	if (q == 0) {
765 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
766 		return -1;
767 	}
768 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
769 	if (nb_fwd_streams_new == nb_fwd_streams)
770 		return 0;
771 	/* clear the old */
772 	if (fwd_streams != NULL) {
773 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
774 			if (fwd_streams[sm_id] == NULL)
775 				continue;
776 			rte_free(fwd_streams[sm_id]);
777 			fwd_streams[sm_id] = NULL;
778 		}
779 		rte_free(fwd_streams);
780 		fwd_streams = NULL;
781 	}
782 
783 	/* init new */
784 	nb_fwd_streams = nb_fwd_streams_new;
785 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
786 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
787 	if (fwd_streams == NULL)
788 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
789 						"failed\n", nb_fwd_streams);
790 
791 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
792 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
793 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
794 		if (fwd_streams[sm_id] == NULL)
795 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
796 								" failed\n");
797 	}
798 
799 	return 0;
800 }
801 
802 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
803 static void
804 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
805 {
806 	unsigned int total_burst;
807 	unsigned int nb_burst;
808 	unsigned int burst_stats[3];
809 	uint16_t pktnb_stats[3];
810 	uint16_t nb_pkt;
811 	int burst_percent[3];
812 
813 	/*
814 	 * First compute the total number of packet bursts and the
815 	 * two highest numbers of bursts of the same number of packets.
816 	 */
817 	total_burst = 0;
818 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
819 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
820 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
821 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
822 		if (nb_burst == 0)
823 			continue;
824 		total_burst += nb_burst;
825 		if (nb_burst > burst_stats[0]) {
826 			burst_stats[1] = burst_stats[0];
827 			pktnb_stats[1] = pktnb_stats[0];
828 			burst_stats[0] = nb_burst;
829 			pktnb_stats[0] = nb_pkt;
830 		} else if (nb_burst > burst_stats[1]) {
			/* also track the second-highest burst count */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
831 	}
832 	if (total_burst == 0)
833 		return;
834 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
835 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
836 	       burst_percent[0], (int) pktnb_stats[0]);
837 	if (burst_stats[0] == total_burst) {
838 		printf("]\n");
839 		return;
840 	}
841 	if (burst_stats[0] + burst_stats[1] == total_burst) {
842 		printf(" + %d%% of %d pkts]\n",
843 		       100 - burst_percent[0], pktnb_stats[1]);
844 		return;
845 	}
846 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
847 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
848 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
849 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
850 		return;
851 	}
852 	printf(" + %d%% of %d pkts + %d%% of others]\n",
853 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
854 }
855 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
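
/*
 * Example of the burst-stats output produced above (illustrative numbers):
 *   RX-bursts : 1024 [75% of 32 pkts + 20% of 16 pkts + 5% of others]
 */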
856 
857 static void
858 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
859 {
860 	struct rte_port *port;
861 	uint8_t i;
862 
863 	static const char *fwd_stats_border = "----------------------";
864 
865 	port = &ports[port_id];
866 	printf("\n  %s Forward statistics for port %-2d %s\n",
867 	       fwd_stats_border, port_id, fwd_stats_border);
868 
869 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
870 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
871 		       "%-"PRIu64"\n",
872 		       stats->ipackets, stats->imissed,
873 		       (uint64_t) (stats->ipackets + stats->imissed));
874 
875 		if (cur_fwd_eng == &csum_fwd_engine)
876 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
877 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
878 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
879 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
880 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
881 		}
882 
883 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
884 		       "%-"PRIu64"\n",
885 		       stats->opackets, port->tx_dropped,
886 		       (uint64_t) (stats->opackets + port->tx_dropped));
887 	}
888 	else {
889 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
890 		       "%14"PRIu64"\n",
891 		       stats->ipackets, stats->imissed,
892 		       (uint64_t) (stats->ipackets + stats->imissed));
893 
894 		if (cur_fwd_eng == &csum_fwd_engine)
895 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
896 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
897 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
898 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
899 			printf("  RX-nombufs:             %14"PRIu64"\n",
900 			       stats->rx_nombuf);
901 		}
902 
903 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
904 		       "%14"PRIu64"\n",
905 		       stats->opackets, port->tx_dropped,
906 		       (uint64_t) (stats->opackets + port->tx_dropped));
907 	}
908 
909 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
910 	if (port->rx_stream)
911 		pkt_burst_stats_display("RX",
912 			&port->rx_stream->rx_burst_stats);
913 	if (port->tx_stream)
914 		pkt_burst_stats_display("TX",
915 			&port->tx_stream->tx_burst_stats);
916 #endif
917 
918 	if (port->rx_queue_stats_mapping_enabled) {
919 		printf("\n");
920 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
921 			printf("  Stats reg %2d RX-packets:%14"PRIu64
922 			       "     RX-errors:%14"PRIu64
923 			       "    RX-bytes:%14"PRIu64"\n",
924 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
925 		}
926 		printf("\n");
927 	}
928 	if (port->tx_queue_stats_mapping_enabled) {
929 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
930 			printf("  Stats reg %2d TX-packets:%14"PRIu64
931 			       "                                 TX-bytes:%14"PRIu64"\n",
932 			       i, stats->q_opackets[i], stats->q_obytes[i]);
933 		}
934 	}
935 
936 	printf("  %s--------------------------------%s\n",
937 	       fwd_stats_border, fwd_stats_border);
938 }
939 
940 static void
941 fwd_stream_stats_display(streamid_t stream_id)
942 {
943 	struct fwd_stream *fs;
944 	static const char *fwd_top_stats_border = "-------";
945 
946 	fs = fwd_streams[stream_id];
947 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
948 	    (fs->fwd_dropped == 0))
949 		return;
950 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
951 	       "TX Port=%2d/Queue=%2d %s\n",
952 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
953 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
954 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
955 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
956 
957 	/* if checksum mode */
958 	if (cur_fwd_eng == &csum_fwd_engine) {
959 		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
960 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
961 	}
962 
963 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
964 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
965 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
966 #endif
967 }
968 
969 static void
970 flush_fwd_rx_queues(void)
971 {
972 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
973 	portid_t  rxp;
974 	portid_t port_id;
975 	queueid_t rxq;
976 	uint16_t  nb_rx;
977 	uint16_t  i;
978 	uint8_t   j;
979 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
980 	uint64_t timer_period;
981 
982 	/* convert to number of cycles */
983 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
984 
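	/* Flush twice, 10 ms apart, to also drain packets still in flight. */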
985 	for (j = 0; j < 2; j++) {
986 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
987 			for (rxq = 0; rxq < nb_rxq; rxq++) {
988 				port_id = fwd_ports_ids[rxp];
989 				/*
990 				 * testpmd can get stuck in the do/while loop
991 				 * below if rte_eth_rx_burst() keeps returning
992 				 * packets, so a timer is used to exit the loop
993 				 * after the 1-second timeout expires.
994 				 */
995 				prev_tsc = rte_rdtsc();
996 				do {
997 					nb_rx = rte_eth_rx_burst(port_id, rxq,
998 						pkts_burst, MAX_PKT_BURST);
999 					for (i = 0; i < nb_rx; i++)
1000 						rte_pktmbuf_free(pkts_burst[i]);
1001 
1002 					cur_tsc = rte_rdtsc();
1003 					diff_tsc = cur_tsc - prev_tsc;
1004 					timer_tsc += diff_tsc;
1005 				} while ((nb_rx > 0) &&
1006 					(timer_tsc < timer_period));
1007 				timer_tsc = 0;
1008 			}
1009 		}
1010 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1011 	}
1012 }
1013 
1014 static void
1015 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1016 {
1017 	struct fwd_stream **fsm;
1018 	streamid_t nb_fs;
1019 	streamid_t sm_id;
1020 #ifdef RTE_LIBRTE_BITRATE
1021 	uint64_t tics_per_1sec;
1022 	uint64_t tics_datum;
1023 	uint64_t tics_current;
1024 	uint8_t idx_port, cnt_ports;
1025 
1026 	cnt_ports = rte_eth_dev_count();
1027 	tics_datum = rte_rdtsc();
1028 	tics_per_1sec = rte_get_timer_hz();
1029 #endif
1030 	fsm = &fwd_streams[fc->stream_idx];
1031 	nb_fs = fc->stream_nb;
1032 	do {
1033 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
1034 			(*pkt_fwd)(fsm[sm_id]);
1035 #ifdef RTE_LIBRTE_BITRATE
1036 		if (bitrate_enabled != 0 &&
1037 				bitrate_lcore_id == rte_lcore_id()) {
1038 			tics_current = rte_rdtsc();
1039 			if (tics_current - tics_datum >= tics_per_1sec) {
1040 				/* Periodic bitrate calculation */
1041 				for (idx_port = 0;
1042 						idx_port < cnt_ports;
1043 						idx_port++)
1044 					rte_stats_bitrate_calc(bitrate_data,
1045 						idx_port);
1046 				tics_datum = tics_current;
1047 			}
1048 		}
1049 #endif
1050 #ifdef RTE_LIBRTE_LATENCY_STATS
1051 		if (latencystats_enabled != 0 &&
1052 				latencystats_lcore_id == rte_lcore_id())
1053 			rte_latencystats_update();
1054 #endif
1055 
1056 	} while (! fc->stopped);
1057 }
1058 
1059 static int
1060 start_pkt_forward_on_core(void *fwd_arg)
1061 {
1062 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1063 			     cur_fwd_config.fwd_eng->packet_fwd);
1064 	return 0;
1065 }
1066 
1067 /*
1068  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1069  * Used to start communication flows in network loopback test configurations.
1070  */
1071 static int
1072 run_one_txonly_burst_on_core(void *fwd_arg)
1073 {
1074 	struct fwd_lcore *fwd_lc;
1075 	struct fwd_lcore tmp_lcore;
1076 
1077 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1078 	tmp_lcore = *fwd_lc;
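	/* stopped == 1 makes the do/while in run_pkt_fwd_on_lcore run exactly once */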
1079 	tmp_lcore.stopped = 1;
1080 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1081 	return 0;
1082 }
1083 
1084 /*
1085  * Launch packet forwarding:
1086  *     - Setup per-port forwarding context.
1087  *     - launch logical cores with their forwarding configuration.
1088  */
1089 static void
1090 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1091 {
1092 	port_fwd_begin_t port_fwd_begin;
1093 	unsigned int i;
1094 	unsigned int lc_id;
1095 	int diag;
1096 
1097 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1098 	if (port_fwd_begin != NULL) {
1099 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1100 			(*port_fwd_begin)(fwd_ports_ids[i]);
1101 	}
1102 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1103 		lc_id = fwd_lcores_cpuids[i];
1104 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1105 			fwd_lcores[i]->stopped = 0;
1106 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1107 						     fwd_lcores[i], lc_id);
1108 			if (diag != 0)
1109 				printf("launch lcore %u failed - diag=%d\n",
1110 				       lc_id, diag);
1111 		}
1112 	}
1113 }
1114 
1115 /*
1116  * Launch packet forwarding configuration.
1117  */
1118 void
1119 start_packet_forwarding(int with_tx_first)
1120 {
1121 	port_fwd_begin_t port_fwd_begin;
1122 	port_fwd_end_t  port_fwd_end;
1123 	struct rte_port *port;
1124 	unsigned int i;
1125 	portid_t   pt_id;
1126 	streamid_t sm_id;
1127 
1128 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1129 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1130 
1131 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1132 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1133 
1134 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1135 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1136 		(!nb_rxq || !nb_txq))
1137 		rte_exit(EXIT_FAILURE,
1138 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1139 			cur_fwd_eng->fwd_mode_name);
1140 
1141 	if (all_ports_started() == 0) {
1142 		printf("Not all ports were started\n");
1143 		return;
1144 	}
1145 	if (test_done == 0) {
1146 		printf("Packet forwarding already started\n");
1147 		return;
1148 	}
1149 
1150 	if (init_fwd_streams() < 0) {
1151 		printf("Fail from init_fwd_streams()\n");
1152 		return;
1153 	}
1154 
1155 	if (dcb_test) {
1156 		for (i = 0; i < nb_fwd_ports; i++) {
1157 			pt_id = fwd_ports_ids[i];
1158 			port = &ports[pt_id];
1159 			if (!port->dcb_flag) {
1160 				printf("In DCB mode, all forwarding ports must "
1161 				       "be configured in this mode.\n");
1162 				return;
1163 			}
1164 		}
1165 		if (nb_fwd_lcores == 1) {
1166 			printf("In DCB mode, the number of forwarding cores "
1167 			       "should be larger than 1.\n");
1168 			return;
1169 		}
1170 	}
1171 	test_done = 0;
1172 
1173 	if (!no_flush_rx)
1174 		flush_fwd_rx_queues();
1175 
1176 	fwd_config_setup();
1177 	pkt_fwd_config_display(&cur_fwd_config);
1178 	rxtx_config_display();
1179 
1180 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1181 		pt_id = fwd_ports_ids[i];
1182 		port = &ports[pt_id];
1183 		rte_eth_stats_get(pt_id, &port->stats);
1184 		port->tx_dropped = 0;
1185 
1186 		map_port_queue_stats_mapping_registers(pt_id, port);
1187 	}
1188 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1189 		fwd_streams[sm_id]->rx_packets = 0;
1190 		fwd_streams[sm_id]->tx_packets = 0;
1191 		fwd_streams[sm_id]->fwd_dropped = 0;
1192 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1193 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1194 
1195 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1196 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1197 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1198 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1199 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1200 #endif
1201 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1202 		fwd_streams[sm_id]->core_cycles = 0;
1203 #endif
1204 	}
1205 	if (with_tx_first) {
1206 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1207 		if (port_fwd_begin != NULL) {
1208 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1209 				(*port_fwd_begin)(fwd_ports_ids[i]);
1210 		}
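		/*
		 * Each loop iteration sends one TX-only burst on every stream;
		 * with_tx_first holds the requested number of initial bursts.
		 */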
1211 		while (with_tx_first--) {
1212 			launch_packet_forwarding(
1213 					run_one_txonly_burst_on_core);
1214 			rte_eal_mp_wait_lcore();
1215 		}
1216 		port_fwd_end = tx_only_engine.port_fwd_end;
1217 		if (port_fwd_end != NULL) {
1218 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1219 				(*port_fwd_end)(fwd_ports_ids[i]);
1220 		}
1221 	}
1222 	launch_packet_forwarding(start_pkt_forward_on_core);
1223 }
1224 
1225 void
1226 stop_packet_forwarding(void)
1227 {
1228 	struct rte_eth_stats stats;
1229 	struct rte_port *port;
1230 	port_fwd_end_t  port_fwd_end;
1231 	int i;
1232 	portid_t   pt_id;
1233 	streamid_t sm_id;
1234 	lcoreid_t  lc_id;
1235 	uint64_t total_recv;
1236 	uint64_t total_xmit;
1237 	uint64_t total_rx_dropped;
1238 	uint64_t total_tx_dropped;
1239 	uint64_t total_rx_nombuf;
1240 	uint64_t tx_dropped;
1241 	uint64_t rx_bad_ip_csum;
1242 	uint64_t rx_bad_l4_csum;
1243 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1244 	uint64_t fwd_cycles;
1245 #endif
1246 
1247 	static const char *acc_stats_border = "+++++++++++++++";
1248 
1249 	if (test_done) {
1250 		printf("Packet forwarding not started\n");
1251 		return;
1252 	}
1253 	printf("Telling cores to stop...");
1254 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1255 		fwd_lcores[lc_id]->stopped = 1;
1256 	printf("\nWaiting for lcores to finish...\n");
1257 	rte_eal_mp_wait_lcore();
1258 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1259 	if (port_fwd_end != NULL) {
1260 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1261 			pt_id = fwd_ports_ids[i];
1262 			(*port_fwd_end)(pt_id);
1263 		}
1264 	}
1265 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1266 	fwd_cycles = 0;
1267 #endif
1268 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1269 		if (cur_fwd_config.nb_fwd_streams >
1270 		    cur_fwd_config.nb_fwd_ports) {
1271 			fwd_stream_stats_display(sm_id);
1272 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1273 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1274 		} else {
1275 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1276 				fwd_streams[sm_id];
1277 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1278 				fwd_streams[sm_id];
1279 		}
1280 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1281 		tx_dropped = (uint64_t) (tx_dropped +
1282 					 fwd_streams[sm_id]->fwd_dropped);
1283 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1284 
1285 		rx_bad_ip_csum =
1286 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1287 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1288 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1289 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1290 							rx_bad_ip_csum;
1291 
1292 		rx_bad_l4_csum =
1293 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1294 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1295 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1296 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1297 							rx_bad_l4_csum;
1298 
1299 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1300 		fwd_cycles = (uint64_t) (fwd_cycles +
1301 					 fwd_streams[sm_id]->core_cycles);
1302 #endif
1303 	}
1304 	total_recv = 0;
1305 	total_xmit = 0;
1306 	total_rx_dropped = 0;
1307 	total_tx_dropped = 0;
1308 	total_rx_nombuf  = 0;
1309 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1310 		pt_id = fwd_ports_ids[i];
1311 
1312 		port = &ports[pt_id];
1313 		rte_eth_stats_get(pt_id, &stats);
1314 		stats.ipackets -= port->stats.ipackets;
1315 		port->stats.ipackets = 0;
1316 		stats.opackets -= port->stats.opackets;
1317 		port->stats.opackets = 0;
1318 		stats.ibytes   -= port->stats.ibytes;
1319 		port->stats.ibytes = 0;
1320 		stats.obytes   -= port->stats.obytes;
1321 		port->stats.obytes = 0;
1322 		stats.imissed  -= port->stats.imissed;
1323 		port->stats.imissed = 0;
1324 		stats.oerrors  -= port->stats.oerrors;
1325 		port->stats.oerrors = 0;
1326 		stats.rx_nombuf -= port->stats.rx_nombuf;
1327 		port->stats.rx_nombuf = 0;
1328 
1329 		total_recv += stats.ipackets;
1330 		total_xmit += stats.opackets;
1331 		total_rx_dropped += stats.imissed;
1332 		total_tx_dropped += port->tx_dropped;
1333 		total_rx_nombuf  += stats.rx_nombuf;
1334 
1335 		fwd_port_stats_display(pt_id, &stats);
1336 	}
1337 
1338 	printf("\n  %s Accumulated forward statistics for all ports"
1339 	       "%s\n",
1340 	       acc_stats_border, acc_stats_border);
1341 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1342 	       "%-"PRIu64"\n"
1343 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1344 	       "%-"PRIu64"\n",
1345 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1346 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1347 	if (total_rx_nombuf > 0)
1348 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1349 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1350 	       "%s\n",
1351 	       acc_stats_border, acc_stats_border);
1352 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1353 	if (total_recv > 0)
1354 		printf("\n  CPU cycles/packet=%u (total cycles="
1355 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1356 		       (unsigned int)(fwd_cycles / total_recv),
1357 		       fwd_cycles, total_recv);
1358 #endif
1359 	printf("\nDone.\n");
1360 	test_done = 1;
1361 }
1362 
1363 void
1364 dev_set_link_up(portid_t pid)
1365 {
1366 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1367 		printf("\nSet link up fail.\n");
1368 }
1369 
1370 void
1371 dev_set_link_down(portid_t pid)
1372 {
1373 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1374 		printf("\nSet link down fail.\n");
1375 }
1376 
1377 static int
1378 all_ports_started(void)
1379 {
1380 	portid_t pi;
1381 	struct rte_port *port;
1382 
1383 	RTE_ETH_FOREACH_DEV(pi) {
1384 		port = &ports[pi];
1385 		/* Check if there is a port which is not started */
1386 		if ((port->port_status != RTE_PORT_STARTED) &&
1387 			(port->slave_flag == 0))
1388 			return 0;
1389 	}
1390 
1391 	/* All ports are started */
1392 	return 1;
1393 }
1394 
1395 int
1396 all_ports_stopped(void)
1397 {
1398 	portid_t pi;
1399 	struct rte_port *port;
1400 
1401 	RTE_ETH_FOREACH_DEV(pi) {
1402 		port = &ports[pi];
1403 		if ((port->port_status != RTE_PORT_STOPPED) &&
1404 			(port->slave_flag == 0))
1405 			return 0;
1406 	}
1407 
1408 	return 1;
1409 }
1410 
1411 int
1412 port_is_started(portid_t port_id)
1413 {
1414 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1415 		return 0;
1416 
1417 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1418 		return 0;
1419 
1420 	return 1;
1421 }
1422 
1423 static int
1424 port_is_closed(portid_t port_id)
1425 {
1426 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1427 		return 0;
1428 
1429 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1430 		return 0;
1431 
1432 	return 1;
1433 }
1434 
1435 int
1436 start_port(portid_t pid)
1437 {
1438 	int diag, need_check_link_status = -1;
1439 	portid_t pi;
1440 	queueid_t qi;
1441 	struct rte_port *port;
1442 	struct ether_addr mac_addr;
1443 	enum rte_eth_event_type event_type;
1444 
1445 	if (port_id_is_invalid(pid, ENABLED_WARN))
1446 		return 0;
1447 
1448 	if (dcb_config)
1449 		dcb_test = 1;
1450 	RTE_ETH_FOREACH_DEV(pi) {
1451 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1452 			continue;
1453 
1454 		need_check_link_status = 0;
1455 		port = &ports[pi];
1456 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1457 						 RTE_PORT_HANDLING) == 0) {
1458 			printf("Port %d is not stopped\n", pi);
1459 			continue;
1460 		}
1461 
1462 		if (port->need_reconfig > 0) {
1463 			port->need_reconfig = 0;
1464 
1465 			if (flow_isolate_all) {
1466 				int ret = port_flow_isolate(pi, 1);
1467 				if (ret) {
1468 					printf("Failed to apply isolated"
1469 					       " mode on port %d\n", pi);
1470 					return -1;
1471 				}
1472 			}
1473 
1474 			printf("Configuring Port %d (socket %u)\n", pi,
1475 					port->socket_id);
1476 			/* configure port */
1477 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1478 						&(port->dev_conf));
1479 			if (diag != 0) {
1480 				if (rte_atomic16_cmpset(&(port->port_status),
1481 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1482 					printf("Port %d can not be set back "
1483 							"to stopped\n", pi);
1484 				printf("Failed to configure port %d\n", pi);
1485 				/* try to reconfigure port next time */
1486 				port->need_reconfig = 1;
1487 				return -1;
1488 			}
1489 		}
1490 		if (port->need_reconfig_queues > 0) {
1491 			port->need_reconfig_queues = 0;
1492 			/* setup tx queues */
1493 			for (qi = 0; qi < nb_txq; qi++) {
1494 				if ((numa_support) &&
1495 					(txring_numa[pi] != NUMA_NO_CONFIG))
1496 					diag = rte_eth_tx_queue_setup(pi, qi,
1497 						nb_txd,txring_numa[pi],
1498 						&(port->tx_conf));
1499 				else
1500 					diag = rte_eth_tx_queue_setup(pi, qi,
1501 						nb_txd,port->socket_id,
1502 						&(port->tx_conf));
1503 
1504 				if (diag == 0)
1505 					continue;
1506 
1507 				/* Fail to setup tx queue, return */
1508 				if (rte_atomic16_cmpset(&(port->port_status),
1509 							RTE_PORT_HANDLING,
1510 							RTE_PORT_STOPPED) == 0)
1511 					printf("Port %d can not be set back "
1512 							"to stopped\n", pi);
1513 				printf("Failed to configure port %d TX queues\n", pi);
1514 				/* try to reconfigure queues next time */
1515 				port->need_reconfig_queues = 1;
1516 				return -1;
1517 			}
1518 			/* setup rx queues */
1519 			for (qi = 0; qi < nb_rxq; qi++) {
1520 				if ((numa_support) &&
1521 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1522 					struct rte_mempool * mp =
1523 						mbuf_pool_find(rxring_numa[pi]);
1524 					if (mp == NULL) {
1525 						printf("Failed to setup RX queue: "
1526 							"no mempool allocation"
1527 							" on socket %d\n",
1528 							rxring_numa[pi]);
1529 						return -1;
1530 					}
1531 
1532 					diag = rte_eth_rx_queue_setup(pi, qi,
1533 					     nb_rxd,rxring_numa[pi],
1534 					     &(port->rx_conf),mp);
1535 				} else {
1536 					struct rte_mempool *mp =
1537 						mbuf_pool_find(port->socket_id);
1538 					if (mp == NULL) {
1539 						printf("Failed to setup RX queue: "
1540 							"no mempool allocation"
1541 							" on socket %d\n",
1542 							port->socket_id);
1543 						return -1;
1544 					}
1545 					diag = rte_eth_rx_queue_setup(pi, qi,
1546 					     nb_rxd,port->socket_id,
1547 					     &(port->rx_conf), mp);
1548 				}
1549 				if (diag == 0)
1550 					continue;
1551 
1552 				/* Fail to setup rx queue, return */
1553 				if (rte_atomic16_cmpset(&(port->port_status),
1554 							RTE_PORT_HANDLING,
1555 							RTE_PORT_STOPPED) == 0)
1556 					printf("Port %d can not be set back "
1557 							"to stopped\n", pi);
1558 				printf("Failed to configure port %d RX queues\n", pi);
1559 				/* try to reconfigure queues next time */
1560 				port->need_reconfig_queues = 1;
1561 				return -1;
1562 			}
1563 		}
1564 
1565 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1566 		     event_type < RTE_ETH_EVENT_MAX;
1567 		     event_type++) {
1568 			diag = rte_eth_dev_callback_register(pi,
1569 							event_type,
1570 							eth_event_callback,
1571 							NULL);
1572 			if (diag) {
1573 				printf("Failed to setup event callback for event %d\n",
1574 					event_type);
1575 				return -1;
1576 			}
1577 		}
1578 
1579 		/* start port */
1580 		if (rte_eth_dev_start(pi) < 0) {
1581 			printf("Failed to start port %d\n", pi);
1582 
1583 			/* Fail to setup rx queue, return */
1584 			if (rte_atomic16_cmpset(&(port->port_status),
1585 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1586 				printf("Port %d can not be set back to "
1587 							"stopped\n", pi);
1588 			continue;
1589 		}
1590 
1591 		if (rte_atomic16_cmpset(&(port->port_status),
1592 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1593 			printf("Port %d can not be set into started\n", pi);
1594 
1595 		rte_eth_macaddr_get(pi, &mac_addr);
1596 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1597 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1598 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1599 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1600 
1601 		/* at least one port started, need to check link status */
1602 		need_check_link_status = 1;
1603 	}
1604 
1605 	if (need_check_link_status == 1 && !no_link_check)
1606 		check_all_ports_link_status(RTE_PORT_ALL);
1607 	else if (need_check_link_status == 0)
1608 		printf("Please stop the ports first\n");
1609 
1610 	printf("Done\n");
1611 	return 0;
1612 }
1613 
1614 void
1615 stop_port(portid_t pid)
1616 {
1617 	portid_t pi;
1618 	struct rte_port *port;
1619 	int need_check_link_status = 0;
1620 
1621 	if (dcb_test) {
1622 		dcb_test = 0;
1623 		dcb_config = 0;
1624 	}
1625 
1626 	if (port_id_is_invalid(pid, ENABLED_WARN))
1627 		return;
1628 
1629 	printf("Stopping ports...\n");
1630 
1631 	RTE_ETH_FOREACH_DEV(pi) {
1632 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1633 			continue;
1634 
1635 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1636 			printf("Please remove port %d from forwarding configuration.\n", pi);
1637 			continue;
1638 		}
1639 
1640 		if (port_is_bonding_slave(pi)) {
1641 			printf("Please remove port %d from bonded device.\n", pi);
1642 			continue;
1643 		}
1644 
1645 		port = &ports[pi];
1646 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1647 						RTE_PORT_HANDLING) == 0)
1648 			continue;
1649 
1650 		rte_eth_dev_stop(pi);
1651 
1652 		if (rte_atomic16_cmpset(&(port->port_status),
1653 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1654 			printf("Port %d can not be set into stopped\n", pi);
1655 		need_check_link_status = 1;
1656 	}
1657 	if (need_check_link_status && !no_link_check)
1658 		check_all_ports_link_status(RTE_PORT_ALL);
1659 
1660 	printf("Done\n");
1661 }
1662 
1663 void
1664 close_port(portid_t pid)
1665 {
1666 	portid_t pi;
1667 	struct rte_port *port;
1668 
1669 	if (port_id_is_invalid(pid, ENABLED_WARN))
1670 		return;
1671 
1672 	printf("Closing ports...\n");
1673 
1674 	RTE_ETH_FOREACH_DEV(pi) {
1675 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1676 			continue;
1677 
1678 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1679 			printf("Please remove port %d from forwarding configuration.\n", pi);
1680 			continue;
1681 		}
1682 
1683 		if (port_is_bonding_slave(pi)) {
1684 			printf("Please remove port %d from bonded device.\n", pi);
1685 			continue;
1686 		}
1687 
1688 		port = &ports[pi];
1689 		if (rte_atomic16_cmpset(&(port->port_status),
1690 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1691 			printf("Port %d is already closed\n", pi);
1692 			continue;
1693 		}
1694 
1695 		if (rte_atomic16_cmpset(&(port->port_status),
1696 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1697 			printf("Port %d is not stopped\n", pi);
1698 			continue;
1699 		}
1700 
1701 		if (port->flow_list)
1702 			port_flow_flush(pi);
1703 		rte_eth_dev_close(pi);
1704 
1705 		if (rte_atomic16_cmpset(&(port->port_status),
1706 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1707 			printf("Port %d cannot be set to closed\n", pi);
1708 	}
1709 
1710 	printf("Done\n");
1711 }
1712 
1713 void
1714 reset_port(portid_t pid)
1715 {
1716 	int diag;
1717 	portid_t pi;
1718 	struct rte_port *port;
1719 
1720 	if (port_id_is_invalid(pid, ENABLED_WARN))
1721 		return;
1722 
1723 	printf("Resetting ports...\n");
1724 
1725 	RTE_ETH_FOREACH_DEV(pi) {
1726 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1727 			continue;
1728 
1729 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1730 			printf("Please remove port %d from forwarding "
1731 			       "configuration.\n", pi);
1732 			continue;
1733 		}
1734 
1735 		if (port_is_bonding_slave(pi)) {
1736 			printf("Please remove port %d from bonded device.\n",
1737 			       pi);
1738 			continue;
1739 		}
1740 
1741 		diag = rte_eth_dev_reset(pi);
1742 		if (diag == 0) {
1743 			port = &ports[pi];
1744 			port->need_reconfig = 1;
1745 			port->need_reconfig_queues = 1;
1746 		} else {
1747 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1748 		}
1749 	}
1750 
1751 	printf("Done\n");
1752 }
1753 
1754 void
1755 attach_port(char *identifier)
1756 {
1757 	portid_t pi = 0;
1758 	unsigned int socket_id;
1759 
1760 	printf("Attaching a new port...\n");
1761 
1762 	if (identifier == NULL) {
1763 		printf("Invalid parameters are specified\n");
1764 		return;
1765 	}
1766 
1767 	if (rte_eth_dev_attach(identifier, &pi))
1768 		return;
1769 
1770 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1771 	/* if socket_id is invalid, set to 0 */
1772 	if (check_socket_id(socket_id) < 0)
1773 		socket_id = 0;
1774 	reconfig(pi, socket_id);
1775 	rte_eth_promiscuous_enable(pi);
1776 
1777 	nb_ports = rte_eth_dev_count();
1778 
1779 	ports[pi].port_status = RTE_PORT_STOPPED;
1780 
1781 	printf("Port %d is attached. Now the total number of ports is %d\n", pi, nb_ports);
1782 	printf("Done\n");
1783 }
1784 
1785 void
1786 detach_port(uint8_t port_id)
1787 {
1788 	char name[RTE_ETH_NAME_MAX_LEN];
1789 
1790 	printf("Detaching a port...\n");
1791 
1792 	if (!port_is_closed(port_id)) {
1793 		printf("Please close port first\n");
1794 		return;
1795 	}
1796 
1797 	if (ports[port_id].flow_list)
1798 		port_flow_flush(port_id);
1799 
1800 	if (rte_eth_dev_detach(port_id, name)) {
1801 		RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1802 		return;
1803 	}
1804 
1805 	nb_ports = rte_eth_dev_count();
1806 
1807 	printf("Port '%s' is detached. Now the total number of ports is %d\n",
1808 			name, nb_ports);
1809 	printf("Done\n");
1810 	return;
1811 }
1812 
1813 void
1814 pmd_test_exit(void)
1815 {
1816 	portid_t pt_id;
1817 
1818 	if (test_done == 0)
1819 		stop_packet_forwarding();
1820 
1821 	if (ports != NULL) {
1822 		no_link_check = 1;
1823 		RTE_ETH_FOREACH_DEV(pt_id) {
1824 			printf("\nShutting down port %d...\n", pt_id);
1825 			fflush(stdout);
1826 			stop_port(pt_id);
1827 			close_port(pt_id);
1828 		}
1829 	}
1830 	printf("\nBye...\n");
1831 }
1832 
1833 typedef void (*cmd_func_t)(void);
1834 struct pmd_test_command {
1835 	const char *cmd_name;
1836 	cmd_func_t cmd_func;
1837 };
1838 
1839 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1840 
1841 /* Check the link status of all ports in up to 9s, and print the final status */
1842 static void
1843 check_all_ports_link_status(uint32_t port_mask)
1844 {
1845 #define CHECK_INTERVAL 100 /* 100ms */
1846 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1847 	portid_t portid;
1848 	uint8_t count, all_ports_up, print_flag = 0;
1849 	struct rte_eth_link link;
1850 
1851 	printf("Checking link statuses...\n");
1852 	fflush(stdout);
1853 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1854 		all_ports_up = 1;
1855 		RTE_ETH_FOREACH_DEV(portid) {
1856 			if ((port_mask & (1 << portid)) == 0)
1857 				continue;
1858 			memset(&link, 0, sizeof(link));
1859 			rte_eth_link_get_nowait(portid, &link);
1860 			/* print link status if flag set */
1861 			if (print_flag == 1) {
1862 				if (link.link_status)
1863 					printf(
1864 					"Port %d Link Up. speed %u Mbps - %s\n",
1865 					portid, link.link_speed,
1866 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1867 					("full-duplex") : ("half-duplex"));
1868 				else
1869 					printf("Port %d Link Down\n", portid);
1870 				continue;
1871 			}
1872 			/* clear all_ports_up flag if any link down */
1873 			if (link.link_status == ETH_LINK_DOWN) {
1874 				all_ports_up = 0;
1875 				break;
1876 			}
1877 		}
1878 		/* after the final link status is printed, get out */
1879 		if (print_flag == 1)
1880 			break;
1881 
1882 		if (all_ports_up == 0) {
1883 			fflush(stdout);
1884 			rte_delay_ms(CHECK_INTERVAL);
1885 		}
1886 
1887 		/* set the print_flag if all ports up or timeout */
1888 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1889 			print_flag = 1;
1890 		}
1891 
1892 		if (lsc_interrupt)
1893 			break;
1894 	}
1895 }
1896 
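/*
 * Deferred handler for device-removal (RMV) interrupts. It is scheduled
 * via rte_eal_alarm_set() from eth_event_callback() below, so the detach
 * does not run from inside the event callback itself: it stops and closes
 * the port, then detaches the underlying rte_device.
 */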
1897 static void
1898 rmv_event_callback(void *arg)
1899 {
1900 	struct rte_eth_dev *dev;
1901 	uint8_t port_id = (intptr_t)arg;
1902 
1903 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1904 	dev = &rte_eth_devices[port_id];
1905 
1906 	stop_port(port_id);
1907 	close_port(port_id);
1908 	printf("removing device %s\n", dev->device->name);
1909 	if (rte_eal_dev_detach(dev->device))
1910 		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
1911 			dev->device->name);
1912 }
1913 
1914 /* This function is used by the interrupt thread */
1915 static int
1916 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
1917 		  void *ret_param)
1918 {
1919 	static const char * const event_desc[] = {
1920 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1921 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1922 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1923 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1924 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1925 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1926 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1927 		[RTE_ETH_EVENT_MAX] = NULL,
1928 	};
1929 
1930 	RTE_SET_USED(param);
1931 	RTE_SET_USED(ret_param);
1932 
1933 	if (type >= RTE_ETH_EVENT_MAX) {
1934 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1935 			port_id, __func__, type);
1936 		fflush(stderr);
1937 	} else if (event_print_mask & (UINT32_C(1) << type)) {
1938 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1939 			event_desc[type]);
1940 		fflush(stdout);
1941 	}
1942 
1943 	switch (type) {
1944 	case RTE_ETH_EVENT_INTR_RMV:
1945 		if (rte_eal_alarm_set(100000,
1946 				rmv_event_callback, (void *)(intptr_t)port_id))
1947 			fprintf(stderr, "Could not set up deferred device removal\n");
1948 		break;
1949 	default:
1950 		break;
1951 	}
1952 	return 0;
1953 }
1954 
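/*
 * Program the TX queue -> statistics counter mappings requested on the
 * command line. NICs usually expose far fewer per-queue stats counters
 * than queues (RTE_ETHDEV_QUEUE_STAT_CNTRS, 16 by default), hence the
 * explicit mapping; illustrative option syntax:
 *   --tx-queue-stats-mapping=(0,0,0),(0,1,1)
 */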
1955 static int
1956 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1957 {
1958 	uint16_t i;
1959 	int diag;
1960 	uint8_t mapping_found = 0;
1961 
1962 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1963 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1964 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1965 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1966 					tx_queue_stats_mappings[i].queue_id,
1967 					tx_queue_stats_mappings[i].stats_counter_id);
1968 			if (diag != 0)
1969 				return diag;
1970 			mapping_found = 1;
1971 		}
1972 	}
1973 	if (mapping_found)
1974 		port->tx_queue_stats_mapping_enabled = 1;
1975 	return 0;
1976 }
1977 
1978 static int
1979 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1980 {
1981 	uint16_t i;
1982 	int diag;
1983 	uint8_t mapping_found = 0;
1984 
1985 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1986 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1987 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1988 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1989 					rx_queue_stats_mappings[i].queue_id,
1990 					rx_queue_stats_mappings[i].stats_counter_id);
1991 			if (diag != 0)
1992 				return diag;
1993 			mapping_found = 1;
1994 		}
1995 	}
1996 	if (mapping_found)
1997 		port->rx_queue_stats_mapping_enabled = 1;
1998 	return 0;
1999 }
2000 
2001 static void
2002 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
2003 {
2004 	int diag = 0;
2005 
2006 	diag = set_tx_queue_stats_mapping_registers(pi, port);
2007 	if (diag != 0) {
2008 		if (diag == -ENOTSUP) {
2009 			port->tx_queue_stats_mapping_enabled = 0;
2010 			printf("TX queue stats mapping not supported on port id=%d\n", pi);
2011 		}
2012 		else
2013 			rte_exit(EXIT_FAILURE,
2014 					"set_tx_queue_stats_mapping_registers "
2015 					"failed for port id=%d diag=%d\n",
2016 					pi, diag);
2017 	}
2018 
2019 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2020 	if (diag != 0) {
2021 		if (diag == -ENOTSUP) {
2022 			port->rx_queue_stats_mapping_enabled = 0;
2023 			printf("RX queue stats mapping not supported on port id=%d\n", pi);
2024 		}
2025 		else
2026 			rte_exit(EXIT_FAILURE,
2027 					"set_rx_queue_stats_mapping_registers "
2028 					"failed for port id=%d diag=%d\n",
2029 					pi, diag);
2030 	}
2031 }
2032 
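/*
 * Seed the per-port RX/TX queue configuration from the PMD defaults
 * reported in dev_info, then apply any threshold overridden on the
 * command line (RTE_PMD_PARAM_UNSET marks "not given"); options such as
 * --rxfreet=32 or --txrst=32 end up here (illustrative values).
 */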
2033 static void
2034 rxtx_port_config(struct rte_port *port)
2035 {
2036 	port->rx_conf = port->dev_info.default_rxconf;
2037 	port->tx_conf = port->dev_info.default_txconf;
2038 
2039 	/* Check if any RX/TX parameters have been passed */
2040 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2041 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2042 
2043 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2044 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2045 
2046 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2047 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2048 
2049 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2050 		port->rx_conf.rx_free_thresh = rx_free_thresh;
2051 
2052 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2053 		port->rx_conf.rx_drop_en = rx_drop_en;
2054 
2055 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2056 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2057 
2058 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2059 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2060 
2061 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2062 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2063 
2064 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2065 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2066 
2067 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2068 		port->tx_conf.tx_free_thresh = tx_free_thresh;
2069 
2070 	if (txq_flags != RTE_PMD_PARAM_UNSET)
2071 		port->tx_conf.txq_flags = txq_flags;
2072 }
2073 
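/*
 * Build the default configuration of every probed port: RSS hash
 * functions are enabled only when more than one RX queue is requested,
 * and the multi-queue RX mode follows from that unless DCB has been
 * configured separately.
 */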
2074 void
2075 init_port_config(void)
2076 {
2077 	portid_t pid;
2078 	struct rte_port *port;
2079 
2080 	RTE_ETH_FOREACH_DEV(pid) {
2081 		port = &ports[pid];
2082 		port->dev_conf.rxmode = rx_mode;
2083 		port->dev_conf.fdir_conf = fdir_conf;
2084 		if (nb_rxq > 1) {
2085 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2086 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2087 		} else {
2088 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2089 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2090 		}
2091 
2092 		if (port->dcb_flag == 0) {
2093 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2094 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2095 			else
2096 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2097 		}
2098 
2099 		rxtx_port_config(port);
2100 
2101 		rte_eth_macaddr_get(pid, &port->eth_addr);
2102 
2103 		map_port_queue_stats_mapping_registers(pid, port);
2104 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2105 		rte_pmd_ixgbe_bypass_init(pid);
2106 #endif
2107 
2108 		if (lsc_interrupt &&
2109 		    (rte_eth_devices[pid].data->dev_flags &
2110 		     RTE_ETH_DEV_INTR_LSC))
2111 			port->dev_conf.intr_conf.lsc = 1;
2112 		if (rmv_interrupt &&
2113 		    (rte_eth_devices[pid].data->dev_flags &
2114 		     RTE_ETH_DEV_INTR_RMV))
2115 			port->dev_conf.intr_conf.rmv = 1;
2116 
2117 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2118 		/* Detect softnic port */
2119 		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2120 			port->softnic_enable = 1;
2121 			memset(&port->softport, 0, sizeof(struct softnic_port));
2122 
2123 			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2124 				port->softport.tm_flag = 1;
2125 		}
2126 #endif
2127 	}
2128 }
2129 
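/*
 * Bonding-slave bookkeeping: ports flagged as slaves are controlled by
 * their bonded device, so operations such as reset_port() above refuse
 * to touch them directly.
 */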
2130 void set_port_slave_flag(portid_t slave_pid)
2131 {
2132 	struct rte_port *port;
2133 
2134 	port = &ports[slave_pid];
2135 	port->slave_flag = 1;
2136 }
2137 
2138 void clear_port_slave_flag(portid_t slave_pid)
2139 {
2140 	struct rte_port *port;
2141 
2142 	port = &ports[slave_pid];
2143 	port->slave_flag = 0;
2144 }
2145 
2146 uint8_t port_is_bonding_slave(portid_t slave_pid)
2147 {
2148 	struct rte_port *port;
2149 
2150 	port = &ports[slave_pid];
2151 	return port->slave_flag;
2152 }
2153 
2154 const uint16_t vlan_tags[] = {
2155 		0,  1,  2,  3,  4,  5,  6,  7,
2156 		8,  9, 10, 11,  12, 13, 14, 15,
2157 		16, 17, 18, 19, 20, 21, 22, 23,
2158 		24, 25, 26, 27, 28, 29, 30, 31
2159 };
2160 
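/*
 * Worked example of the VMDq+DCB mapping built below: with num_tcs ==
 * ETH_4_TCS there are 32 pools and pool_map[i] steers VLAN vlan_tags[i]
 * to pool i; with ETH_8_TCS there are 16 pools, so only the first 16
 * VLAN tags are mapped (pool bit = 1 << (i % nb_queue_pools)).
 */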
2161 static int
2162 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2163 		 enum dcb_mode_enable dcb_mode,
2164 		 enum rte_eth_nb_tcs num_tcs,
2165 		 uint8_t pfc_en)
2166 {
2167 	uint8_t i;
2168 
2169 	/*
2170 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2171 	 * given above, and the number of traffic classes available for use.
2172 	 */
2173 	if (dcb_mode == DCB_VT_ENABLED) {
2174 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2175 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2176 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2177 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2178 
2179 		/* VMDQ+DCB RX and TX configurations */
2180 		vmdq_rx_conf->enable_default_pool = 0;
2181 		vmdq_rx_conf->default_pool = 0;
2182 		vmdq_rx_conf->nb_queue_pools =
2183 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2184 		vmdq_tx_conf->nb_queue_pools =
2185 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2186 
2187 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2188 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2189 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2190 			vmdq_rx_conf->pool_map[i].pools =
2191 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2192 		}
2193 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2194 			vmdq_rx_conf->dcb_tc[i] = i;
2195 			vmdq_tx_conf->dcb_tc[i] = i;
2196 		}
2197 
2198 		/* set DCB mode of RX and TX of multiple queues */
2199 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2200 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2201 	} else {
2202 		struct rte_eth_dcb_rx_conf *rx_conf =
2203 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2204 		struct rte_eth_dcb_tx_conf *tx_conf =
2205 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2206 
2207 		rx_conf->nb_tcs = num_tcs;
2208 		tx_conf->nb_tcs = num_tcs;
2209 
2210 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2211 			rx_conf->dcb_tc[i] = i % num_tcs;
2212 			tx_conf->dcb_tc[i] = i % num_tcs;
2213 		}
2214 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2215 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2216 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2217 	}
2218 
2219 	if (pfc_en)
2220 		eth_conf->dcb_capability_en =
2221 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2222 	else
2223 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2224 
2225 	return 0;
2226 }
2227 
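/*
 * Switch a port to DCB. The device is configured with 0 RX/TX queues so
 * queue setup is deferred until the port is started; nb_rxq/nb_txq are
 * then derived from the chosen mode, hardware VLAN filtering is enabled
 * and every entry of vlan_tags[] is added to the filter.
 */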
2228 int
2229 init_port_dcb_config(portid_t pid,
2230 		     enum dcb_mode_enable dcb_mode,
2231 		     enum rte_eth_nb_tcs num_tcs,
2232 		     uint8_t pfc_en)
2233 {
2234 	struct rte_eth_conf port_conf;
2235 	struct rte_port *rte_port;
2236 	int retval;
2237 	uint16_t i;
2238 
2239 	rte_port = &ports[pid];
2240 
2241 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2242 	/* Enter DCB configuration status */
2243 	dcb_config = 1;
2244 
2245 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
2246 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2247 	if (retval < 0)
2248 		return retval;
2249 	port_conf.rxmode.hw_vlan_filter = 1;
2250 
2251 	/**
2252 	 * Write the configuration into the device.
2253 	 * Set the number of RX & TX queues to 0, so
2254 	 * the RX & TX queues are not set up here.
2255 	 */
2256 	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
	if (retval < 0)
		return retval;
2257 
2258 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2259 
2260 	/* If dev_info.vmdq_pool_base is greater than 0,
2261 	 * the queue IDs of the VMDq pools start after the PF queues.
2262 	 */
2263 	if (dcb_mode == DCB_VT_ENABLED &&
2264 	    rte_port->dev_info.vmdq_pool_base > 0) {
2265 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2266 			" for port %d.", pid);
2267 		return -1;
2268 	}
2269 
2270 	/* Assume the ports in testpmd have the same DCB capability
2271 	 * and the same number of rxq and txq in DCB mode
2272 	 */
2273 	if (dcb_mode == DCB_VT_ENABLED) {
2274 		if (rte_port->dev_info.max_vfs > 0) {
2275 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2276 			nb_txq = rte_port->dev_info.nb_tx_queues;
2277 		} else {
2278 			nb_rxq = rte_port->dev_info.max_rx_queues;
2279 			nb_txq = rte_port->dev_info.max_tx_queues;
2280 		}
2281 	} else {
2282 		/*if vt is disabled, use all pf queues */
2283 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2284 			nb_rxq = rte_port->dev_info.max_rx_queues;
2285 			nb_txq = rte_port->dev_info.max_tx_queues;
2286 		} else {
2287 			nb_rxq = (queueid_t)num_tcs;
2288 			nb_txq = (queueid_t)num_tcs;
2290 		}
2291 	}
2292 	rx_free_thresh = 64;
2293 
2294 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2295 
2296 	rxtx_port_config(rte_port);
2297 	/* VLAN filter */
2298 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2299 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2300 		rx_vft_set(pid, vlan_tags[i], 1);
2301 
2302 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2303 	map_port_queue_stats_mapping_registers(pid, rte_port);
2304 
2305 	rte_port->dcb_flag = 1;
2306 
2307 	return 0;
2308 }
2309 
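/*
 * Allocate the ports[] array for the maximum number of ports up front;
 * rte_zmalloc() returns zeroed, cache-line aligned memory, so every port
 * starts out with a clean state.
 */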
2310 static void
2311 init_port(void)
2312 {
2313 	/* Configuration of Ethernet ports. */
2314 	ports = rte_zmalloc("testpmd: ports",
2315 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2316 			    RTE_CACHE_LINE_SIZE);
2317 	if (ports == NULL) {
2318 		rte_exit(EXIT_FAILURE,
2319 				"rte_zmalloc(%d struct rte_port) failed\n",
2320 				RTE_MAX_ETHPORTS);
2321 	}
2322 }
2323 
2324 static void
2325 force_quit(void)
2326 {
2327 	pmd_test_exit();
2328 	prompt_exit();
2329 }
2330 
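/*
 * Periodic statistics display for non-interactive mode. clr and top_left
 * below are the ANSI escape sequences ESC[2J (clear screen) and ESC[1;1H
 * (move the cursor to the top-left corner); 27 is ESC.
 */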
2331 static void
2332 print_stats(void)
2333 {
2334 	uint8_t i;
2335 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2336 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2337 
2338 	/* Clear screen and move to top left */
2339 	printf("%s%s", clr, top_left);
2340 
2341 	printf("\nPort statistics ====================================");
2342 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2343 		nic_stats_display(fwd_ports_ids[i]);
2344 }
2345 
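/*
 * SIGINT/SIGTERM handler: clean up, then restore the default disposition
 * and re-raise the signal so the process exit status still reports
 * "killed by signal" to the parent shell.
 */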
2346 static void
2347 signal_handler(int signum)
2348 {
2349 	if (signum == SIGINT || signum == SIGTERM) {
2350 		printf("\nSignal %d received, preparing to exit...\n",
2351 				signum);
2352 #ifdef RTE_LIBRTE_PDUMP
2353 		/* uninitialize packet capture framework */
2354 		rte_pdump_uninit();
2355 #endif
2356 #ifdef RTE_LIBRTE_LATENCY_STATS
2357 		rte_latencystats_uninit();
2358 #endif
2359 		force_quit();
2360 		/* Set flag to indicate forced termination. */
2361 		f_quit = 1;
2362 		/* exit with the expected status */
2363 		signal(signum, SIG_DFL);
2364 		kill(getpid(), signum);
2365 	}
2366 }
2367 
2368 int
2369 main(int argc, char** argv)
2370 {
2371 	int  diag;
2372 	portid_t port_id;
2373 
2374 	signal(SIGINT, signal_handler);
2375 	signal(SIGTERM, signal_handler);
2376 
2377 	diag = rte_eal_init(argc, argv);
2378 	if (diag < 0)
2379 		rte_panic("Cannot init EAL\n");
2380 
2381 	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2382 		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2383 			strerror(errno));
2384 	}
2385 
2386 #ifdef RTE_LIBRTE_PDUMP
2387 	/* initialize packet capture framework */
2388 	rte_pdump_init(NULL);
2389 #endif
2390 
2391 	nb_ports = (portid_t) rte_eth_dev_count();
2392 	if (nb_ports == 0)
2393 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2394 
2395 	/* allocate port structures, and init them */
2396 	init_port();
2397 
2398 	set_def_fwd_config();
2399 	if (nb_lcores == 0)
2400 		rte_panic("Empty set of forwarding logical cores - check the "
2401 			  "core mask supplied in the command parameters\n");
2402 
2403 	/* Bitrate/latency stats disabled by default */
2404 #ifdef RTE_LIBRTE_BITRATE
2405 	bitrate_enabled = 0;
2406 #endif
2407 #ifdef RTE_LIBRTE_LATENCY_STATS
2408 	latencystats_enabled = 0;
2409 #endif
2410 
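	/*
	 * rte_eal_init() returns the number of arguments it consumed, so
	 * skip past the EAL options to reach the testpmd ones.
	 */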
2411 	argc -= diag;
2412 	argv += diag;
2413 	if (argc > 1)
2414 		launch_args_parse(argc, argv);
2415 
2416 	if (tx_first && interactive)
2417 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2418 				"interactive mode.\n");
2419 
2420 	if (tx_first && lsc_interrupt) {
2421 		printf("Warning: lsc_interrupt needs to be off when "
2422 				"using tx_first. Disabling.\n");
2423 		lsc_interrupt = 0;
2424 	}
2425 
2426 	if (!nb_rxq && !nb_txq)
2427 		printf("Warning: Either rx or tx queues should be non-zero\n");
2428 
2429 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2430 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2431 		       "but nb_txq=%d will prevent to fully test it.\n",
2432 		       nb_rxq, nb_txq);
2433 
2434 	init_config();
2435 	if (start_port(RTE_PORT_ALL) != 0)
2436 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2437 
2438 	/* set all ports to promiscuous mode by default */
2439 	RTE_ETH_FOREACH_DEV(port_id)
2440 		rte_eth_promiscuous_enable(port_id);
2441 
2442 	/* Init metrics library */
2443 	rte_metrics_init(rte_socket_id());
2444 
2445 #ifdef RTE_LIBRTE_LATENCY_STATS
2446 	if (latencystats_enabled != 0) {
2447 		int ret = rte_latencystats_init(1, NULL);
2448 		if (ret)
2449 			printf("Warning: latencystats init()"
2450 				" returned error %d\n",	ret);
2451 		printf("Latencystats running on lcore %d\n",
2452 			latencystats_lcore_id);
2453 	}
2454 #endif
2455 
2456 	/* Setup bitrate stats */
2457 #ifdef RTE_LIBRTE_BITRATE
2458 	if (bitrate_enabled != 0) {
2459 		bitrate_data = rte_stats_bitrate_create();
2460 		if (bitrate_data == NULL)
2461 			rte_exit(EXIT_FAILURE,
2462 				"Could not allocate bitrate data.\n");
2463 		rte_stats_bitrate_reg(bitrate_data);
2464 	}
2465 #endif
2466 
2467 #ifdef RTE_LIBRTE_CMDLINE
2468 	if (strlen(cmdline_filename) != 0)
2469 		cmdline_read_from_file(cmdline_filename);
2470 
2471 	if (interactive == 1) {
2472 		if (auto_start) {
2473 			printf("Start automatic packet forwarding\n");
2474 			start_packet_forwarding(0);
2475 		}
2476 		prompt();
2477 		pmd_test_exit();
2478 	} else
2479 #endif
2480 	{
2481 		char c;
2482 		int rc;
2483 
2484 		f_quit = 0;
2485 
2486 		printf("No commandline core given, start packet forwarding\n");
2487 		start_packet_forwarding(tx_first);
2488 		if (stats_period != 0) {
2489 			uint64_t prev_time = 0, cur_time, diff_time = 0;
2490 			uint64_t timer_period;
2491 
2492 			/* Convert to number of cycles */
2493 			timer_period = stats_period * rte_get_timer_hz();
2494 
2495 			while (f_quit == 0) {
2496 				cur_time = rte_get_timer_cycles();
2497 				diff_time += cur_time - prev_time;
2498 
2499 				if (diff_time >= timer_period) {
2500 					print_stats();
2501 					/* Reset the timer */
2502 					diff_time = 0;
2503 				}
2504 				/* Sleep to avoid unnecessary checks */
2505 				prev_time = cur_time;
2506 				sleep(1);
2507 			}
2508 		}
2509 
2510 		printf("Press enter to exit\n");
2511 		rc = read(0, &c, 1);
2512 		pmd_test_exit();
2513 		if (rc < 0)
2514 			return 1;
2515 	}
2516 
2517 	return 0;
2518 }
2519