xref: /dpdk/app/test-pmd/testpmd.c (revision 65a7360cc3378f5b3edd59f230141462dd98cfe9)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44 
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47 
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51 
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_memzone.h>
61 #include <rte_launch.h>
62 #include <rte_eal.h>
63 #include <rte_alarm.h>
64 #include <rte_per_lcore.h>
65 #include <rte_lcore.h>
66 #include <rte_atomic.h>
67 #include <rte_branch_prediction.h>
68 #include <rte_mempool.h>
69 #include <rte_malloc.h>
70 #include <rte_mbuf.h>
71 #include <rte_interrupts.h>
72 #include <rte_pci.h>
73 #include <rte_ether.h>
74 #include <rte_ethdev.h>
75 #include <rte_dev.h>
76 #include <rte_string_fns.h>
77 #ifdef RTE_LIBRTE_IXGBE_PMD
78 #include <rte_pmd_ixgbe.h>
79 #endif
80 #ifdef RTE_LIBRTE_PDUMP
81 #include <rte_pdump.h>
82 #endif
83 #include <rte_flow.h>
84 #include <rte_metrics.h>
85 #ifdef RTE_LIBRTE_BITRATE
86 #include <rte_bitrate.h>
87 #endif
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
90 #endif
91 
92 #include "testpmd.h"
93 
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might be not physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
130 
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines. NULL-terminated table selectable at runtime
 * with the "set fwd <mode>" command.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* current configuration is in DCB or not, 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;
203 
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 * RTE_PMD_PARAM_UNSET lets the PMD pick its own default.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
301 
/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif


#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to service latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
};

/* Flow director default configuration: disabled, match-all masks. */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Discovered NUMA sockets, filled by set_default_fwd_lcores_config(). */
unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
403 
404 /*
405  * Helper function to check if socket is already discovered.
406  * If yes, return positive value. If not, return zero.
407  */
408 int
409 new_socket_id(unsigned int socket_id)
410 {
411 	unsigned int i;
412 
413 	for (i = 0; i < num_sockets; i++) {
414 		if (socket_ids[i] == socket_id)
415 			return 0;
416 	}
417 	return 1;
418 }
419 
420 /*
421  * Setup default configuration.
422  */
423 static void
424 set_default_fwd_lcores_config(void)
425 {
426 	unsigned int i;
427 	unsigned int nb_lc;
428 	unsigned int sock_num;
429 
430 	nb_lc = 0;
431 	for (i = 0; i < RTE_MAX_LCORE; i++) {
432 		sock_num = rte_lcore_to_socket_id(i);
433 		if (new_socket_id(sock_num)) {
434 			if (num_sockets >= RTE_MAX_NUMA_NODES) {
435 				rte_exit(EXIT_FAILURE,
436 					 "Total sockets greater than %u\n",
437 					 RTE_MAX_NUMA_NODES);
438 			}
439 			socket_ids[num_sockets++] = sock_num;
440 		}
441 		if (!rte_lcore_is_enabled(i))
442 			continue;
443 		if (i == rte_get_master_lcore())
444 			continue;
445 		fwd_lcores_cpuids[nb_lc++] = i;
446 	}
447 	nb_lcores = (lcoreid_t) nb_lc;
448 	nb_cfg_lcores = nb_lcores;
449 	nb_fwd_lcores = 1;
450 }
451 
452 static void
453 set_def_peer_eth_addrs(void)
454 {
455 	portid_t i;
456 
457 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
458 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
459 		peer_eth_addrs[i].addr_bytes[5] = i;
460 	}
461 }
462 
463 static void
464 set_default_fwd_ports_config(void)
465 {
466 	portid_t pt_id;
467 	int i = 0;
468 
469 	RTE_ETH_FOREACH_DEV(pt_id)
470 		fwd_ports_ids[i++] = pt_id;
471 
472 	nb_cfg_ports = nb_ports;
473 	nb_fwd_ports = nb_ports;
474 }
475 
/*
 * Apply the full default forwarding configuration:
 * lcores first, then peer MAC addresses, then ports.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
483 
/*
 * Create the mbuf pool for one NUMA socket; done once at init time.
 * Exits the application on failure. The pool name is derived from
 * socket_id, so at most one pool exists per socket.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* Each mbuf object = metadata header + data segment. */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		/*
		 * --mp-anon: back the pool with anonymous mmap'd pages
		 * instead of hugepage memzones.
		 */
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		/* populate_anon() returns the object count; 0 means failure */
		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

	/* NOTE: the success path intentionally falls through into err. */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
532 
533 /*
534  * Check given socket id is valid or not with NUMA mode,
535  * if valid, return 0, else return -1
536  */
537 static int
538 check_socket_id(const unsigned int socket_id)
539 {
540 	static int warning_once = 0;
541 
542 	if (new_socket_id(socket_id)) {
543 		if (!warning_once && numa_support)
544 			printf("Warning: NUMA should be configured manually by"
545 			       " using --port-numa-config and"
546 			       " --ring-numa-config parameters along with"
547 			       " --numa.\n");
548 		warning_once = 1;
549 		return -1;
550 	}
551 	return 0;
552 }
553 
/*
 * One-time configuration at startup: allocate per-lcore contexts,
 * query port capabilities, create mbuf pools (per socket when NUMA
 * support is on), then set up forwarding streams and GRO/GSO contexts.
 * Exits the application on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Start from a clean per-port/per-ring NUMA configuration. */
	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			/* Count ports per socket (written but only used
			 * here for the tally; kept for future sizing). */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst case: max descriptors + per-lcore caches + a burst,
		 * scaled by the maximum number of ports. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* One pool per discovered NUMA socket. */
		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when the lcore's socket
		 * has no pool of its own. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
690 
691 
692 void
693 reconfig(portid_t new_port_id, unsigned socket_id)
694 {
695 	struct rte_port *port;
696 
697 	/* Reconfiguration of Ethernet ports. */
698 	port = &ports[new_port_id];
699 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
700 
701 	/* set flag to initialize port/queue */
702 	port->need_reconfig = 1;
703 	port->need_reconfig_queues = 1;
704 	port->socket_id = socket_id;
705 
706 	init_port_config();
707 }
708 
709 
/*
 * (Re)allocate the forwarding stream array: one stream per (port,
 * queue) pair. Validates the configured queue counts against each
 * port's limits and assigns each port its NUMA socket.
 * Returns 0 on success, -1 on invalid configuration; exits the
 * application on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* One stream per queue of every port; zero queues is an error. */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
790 
791 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
792 static void
793 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
794 {
795 	unsigned int total_burst;
796 	unsigned int nb_burst;
797 	unsigned int burst_stats[3];
798 	uint16_t pktnb_stats[3];
799 	uint16_t nb_pkt;
800 	int burst_percent[3];
801 
802 	/*
803 	 * First compute the total number of packet bursts and the
804 	 * two highest numbers of bursts of the same number of packets.
805 	 */
806 	total_burst = 0;
807 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
808 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
809 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
810 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
811 		if (nb_burst == 0)
812 			continue;
813 		total_burst += nb_burst;
814 		if (nb_burst > burst_stats[0]) {
815 			burst_stats[1] = burst_stats[0];
816 			pktnb_stats[1] = pktnb_stats[0];
817 			burst_stats[0] = nb_burst;
818 			pktnb_stats[0] = nb_pkt;
819 		}
820 	}
821 	if (total_burst == 0)
822 		return;
823 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
824 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
825 	       burst_percent[0], (int) pktnb_stats[0]);
826 	if (burst_stats[0] == total_burst) {
827 		printf("]\n");
828 		return;
829 	}
830 	if (burst_stats[0] + burst_stats[1] == total_burst) {
831 		printf(" + %d%% of %d pkts]\n",
832 		       100 - burst_percent[0], pktnb_stats[1]);
833 		return;
834 	}
835 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
836 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
837 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
838 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
839 		return;
840 	}
841 	printf(" + %d%% of %d pkts + %d%% of others]\n",
842 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
843 }
844 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
845 
/*
 * Print accumulated forwarding statistics for one port. Two layouts:
 * a compact one when no queue-stats mapping is active, and a wide one
 * (plus per-queue registers) when RX/TX queue stats mapping is enabled.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum counters are only meaningful in csum mode. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst size distribution, when burst recording is compiled in. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-queue counters from the mapped statistics registers. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
928 
/*
 * Print per-stream forwarding statistics. Streams that saw no traffic
 * at all are skipped to keep the output compact.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
957 
/*
 * Drain stale packets from every forwarding RX queue before a test
 * starts. Two passes, 10 ms apart, so in-flight packets are caught.
 * Each queue drain is bounded by a 1-second TSC timer to avoid
 * spinning forever on a queue that never empties.
 */
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				* testpmd can stuck in the below do while loop
				* if rte_eth_rx_burst() always returns nonzero
				* packets. So timer is added to exit this loop
				* after 1sec timer expiry.
				*/
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					/* Drop everything that was drained. */
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
1002 
/*
 * Forwarding loop executed on one lcore: repeatedly invoke the engine's
 * per-stream forwarding function on every stream owned by this lcore
 * until another thread sets fc->stopped.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* Streams assigned to this lcore form a contiguous slice. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Bitrate stats are sampled once per second by one lcore. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Latency stats are likewise updated by a single lcore. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1047 
1048 static int
1049 start_pkt_forward_on_core(void *fwd_arg)
1050 {
1051 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1052 			     cur_fwd_config.fwd_eng->packet_fwd);
1053 	return 0;
1054 }
1055 
1056 /*
1057  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1058  * Used to start communication flows in network loopback test configurations.
1059  */
1060 static int
1061 run_one_txonly_burst_on_core(void *fwd_arg)
1062 {
1063 	struct fwd_lcore *fwd_lc;
1064 	struct fwd_lcore tmp_lcore;
1065 
1066 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1067 	tmp_lcore = *fwd_lc;
1068 	tmp_lcore.stopped = 1;
1069 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1070 	return 0;
1071 }
1072 
1073 /*
1074  * Launch packet forwarding:
1075  *     - Setup per-port forwarding context.
1076  *     - launch logical cores with their forwarding configuration.
1077  */
1078 static void
1079 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1080 {
1081 	port_fwd_begin_t port_fwd_begin;
1082 	unsigned int i;
1083 	unsigned int lc_id;
1084 	int diag;
1085 
1086 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1087 	if (port_fwd_begin != NULL) {
1088 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1089 			(*port_fwd_begin)(fwd_ports_ids[i]);
1090 	}
1091 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1092 		lc_id = fwd_lcores_cpuids[i];
1093 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1094 			fwd_lcores[i]->stopped = 0;
1095 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1096 						     fwd_lcores[i], lc_id);
1097 			if (diag != 0)
1098 				printf("launch lcore %u failed - diag=%d\n",
1099 				       lc_id, diag);
1100 		}
1101 	}
1102 }
1103 
1104 /*
1105  * Launch packet forwarding configuration.
1106  */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* The chosen engine must have the queues it forwards from/to. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	/* In DCB mode every forwarding port must be DCB-configured and
	 * more than one forwarding core is required. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	/* Drain stale packets left in the RX queues by a previous run. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot hardware stats so stop_packet_forwarding() can report
	 * only the traffic of this run. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Clear the software per-stream counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* Optionally seed loopback topologies with tx_only bursts first
	 * (one launch/wait cycle per requested burst). */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1213 
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	/* Signal every forwarding lcore, then wait for all to exit. */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	/* Let the engine release its per-port resources, if any. */
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/* Fold per-stream counters into their owning ports' counters;
	 * per-stream stats are only displayed when there is more than
	 * one stream per port. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	/* Per port: subtract the snapshot taken when forwarding started,
	 * so only this run's traffic is displayed and accumulated. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	/* test_done == 1 re-enables start_packet_forwarding(). */
	test_done = 1;
}
1351 
1352 void
1353 dev_set_link_up(portid_t pid)
1354 {
1355 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1356 		printf("\nSet link up fail.\n");
1357 }
1358 
1359 void
1360 dev_set_link_down(portid_t pid)
1361 {
1362 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1363 		printf("\nSet link down fail.\n");
1364 }
1365 
1366 static int
1367 all_ports_started(void)
1368 {
1369 	portid_t pi;
1370 	struct rte_port *port;
1371 
1372 	RTE_ETH_FOREACH_DEV(pi) {
1373 		port = &ports[pi];
1374 		/* Check if there is a port which is not started */
1375 		if ((port->port_status != RTE_PORT_STARTED) &&
1376 			(port->slave_flag == 0))
1377 			return 0;
1378 	}
1379 
1380 	/* No port is not started */
1381 	return 1;
1382 }
1383 
1384 int
1385 all_ports_stopped(void)
1386 {
1387 	portid_t pi;
1388 	struct rte_port *port;
1389 
1390 	RTE_ETH_FOREACH_DEV(pi) {
1391 		port = &ports[pi];
1392 		if ((port->port_status != RTE_PORT_STOPPED) &&
1393 			(port->slave_flag == 0))
1394 			return 0;
1395 	}
1396 
1397 	return 1;
1398 }
1399 
1400 int
1401 port_is_started(portid_t port_id)
1402 {
1403 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1404 		return 0;
1405 
1406 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1407 		return 0;
1408 
1409 	return 1;
1410 }
1411 
1412 static int
1413 port_is_closed(portid_t port_id)
1414 {
1415 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1416 		return 0;
1417 
1418 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1419 		return 0;
1420 
1421 	return 1;
1422 }
1423 
1424 int
1425 start_port(portid_t pid)
1426 {
1427 	int diag, need_check_link_status = -1;
1428 	portid_t pi;
1429 	queueid_t qi;
1430 	struct rte_port *port;
1431 	struct ether_addr mac_addr;
1432 	enum rte_eth_event_type event_type;
1433 
1434 	if (port_id_is_invalid(pid, ENABLED_WARN))
1435 		return 0;
1436 
1437 	if(dcb_config)
1438 		dcb_test = 1;
1439 	RTE_ETH_FOREACH_DEV(pi) {
1440 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1441 			continue;
1442 
1443 		need_check_link_status = 0;
1444 		port = &ports[pi];
1445 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1446 						 RTE_PORT_HANDLING) == 0) {
1447 			printf("Port %d is now not stopped\n", pi);
1448 			continue;
1449 		}
1450 
1451 		if (port->need_reconfig > 0) {
1452 			port->need_reconfig = 0;
1453 
1454 			if (flow_isolate_all) {
1455 				int ret = port_flow_isolate(pi, 1);
1456 				if (ret) {
1457 					printf("Failed to apply isolated"
1458 					       " mode on port %d\n", pi);
1459 					return -1;
1460 				}
1461 			}
1462 
1463 			printf("Configuring Port %d (socket %u)\n", pi,
1464 					port->socket_id);
1465 			/* configure port */
1466 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1467 						&(port->dev_conf));
1468 			if (diag != 0) {
1469 				if (rte_atomic16_cmpset(&(port->port_status),
1470 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1471 					printf("Port %d can not be set back "
1472 							"to stopped\n", pi);
1473 				printf("Fail to configure port %d\n", pi);
1474 				/* try to reconfigure port next time */
1475 				port->need_reconfig = 1;
1476 				return -1;
1477 			}
1478 		}
1479 		if (port->need_reconfig_queues > 0) {
1480 			port->need_reconfig_queues = 0;
1481 			/* setup tx queues */
1482 			for (qi = 0; qi < nb_txq; qi++) {
1483 				if ((numa_support) &&
1484 					(txring_numa[pi] != NUMA_NO_CONFIG))
1485 					diag = rte_eth_tx_queue_setup(pi, qi,
1486 						nb_txd,txring_numa[pi],
1487 						&(port->tx_conf));
1488 				else
1489 					diag = rte_eth_tx_queue_setup(pi, qi,
1490 						nb_txd,port->socket_id,
1491 						&(port->tx_conf));
1492 
1493 				if (diag == 0)
1494 					continue;
1495 
1496 				/* Fail to setup tx queue, return */
1497 				if (rte_atomic16_cmpset(&(port->port_status),
1498 							RTE_PORT_HANDLING,
1499 							RTE_PORT_STOPPED) == 0)
1500 					printf("Port %d can not be set back "
1501 							"to stopped\n", pi);
1502 				printf("Fail to configure port %d tx queues\n", pi);
1503 				/* try to reconfigure queues next time */
1504 				port->need_reconfig_queues = 1;
1505 				return -1;
1506 			}
1507 			/* setup rx queues */
1508 			for (qi = 0; qi < nb_rxq; qi++) {
1509 				if ((numa_support) &&
1510 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1511 					struct rte_mempool * mp =
1512 						mbuf_pool_find(rxring_numa[pi]);
1513 					if (mp == NULL) {
1514 						printf("Failed to setup RX queue:"
1515 							"No mempool allocation"
1516 							" on the socket %d\n",
1517 							rxring_numa[pi]);
1518 						return -1;
1519 					}
1520 
1521 					diag = rte_eth_rx_queue_setup(pi, qi,
1522 					     nb_rxd,rxring_numa[pi],
1523 					     &(port->rx_conf),mp);
1524 				} else {
1525 					struct rte_mempool *mp =
1526 						mbuf_pool_find(port->socket_id);
1527 					if (mp == NULL) {
1528 						printf("Failed to setup RX queue:"
1529 							"No mempool allocation"
1530 							" on the socket %d\n",
1531 							port->socket_id);
1532 						return -1;
1533 					}
1534 					diag = rte_eth_rx_queue_setup(pi, qi,
1535 					     nb_rxd,port->socket_id,
1536 					     &(port->rx_conf), mp);
1537 				}
1538 				if (diag == 0)
1539 					continue;
1540 
1541 				/* Fail to setup rx queue, return */
1542 				if (rte_atomic16_cmpset(&(port->port_status),
1543 							RTE_PORT_HANDLING,
1544 							RTE_PORT_STOPPED) == 0)
1545 					printf("Port %d can not be set back "
1546 							"to stopped\n", pi);
1547 				printf("Fail to configure port %d rx queues\n", pi);
1548 				/* try to reconfigure queues next time */
1549 				port->need_reconfig_queues = 1;
1550 				return -1;
1551 			}
1552 		}
1553 
1554 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1555 		     event_type < RTE_ETH_EVENT_MAX;
1556 		     event_type++) {
1557 			diag = rte_eth_dev_callback_register(pi,
1558 							event_type,
1559 							eth_event_callback,
1560 							NULL);
1561 			if (diag) {
1562 				printf("Failed to setup even callback for event %d\n",
1563 					event_type);
1564 				return -1;
1565 			}
1566 		}
1567 
1568 		/* start port */
1569 		if (rte_eth_dev_start(pi) < 0) {
1570 			printf("Fail to start port %d\n", pi);
1571 
1572 			/* Fail to setup rx queue, return */
1573 			if (rte_atomic16_cmpset(&(port->port_status),
1574 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1575 				printf("Port %d can not be set back to "
1576 							"stopped\n", pi);
1577 			continue;
1578 		}
1579 
1580 		if (rte_atomic16_cmpset(&(port->port_status),
1581 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1582 			printf("Port %d can not be set into started\n", pi);
1583 
1584 		rte_eth_macaddr_get(pi, &mac_addr);
1585 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1586 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1587 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1588 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1589 
1590 		/* at least one port started, need checking link status */
1591 		need_check_link_status = 1;
1592 	}
1593 
1594 	if (need_check_link_status == 1 && !no_link_check)
1595 		check_all_ports_link_status(RTE_PORT_ALL);
1596 	else if (need_check_link_status == 0)
1597 		printf("Please stop the ports first\n");
1598 
1599 	printf("Done\n");
1600 	return 0;
1601 }
1602 
/*
 * Stop one port (or all, with pid == RTE_PORT_ALL).  Ports still used
 * by an active forwarding run or enslaved to a bonded device are
 * skipped with a message.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* Stopping ports leaves DCB test mode. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Atomically claim the port; only STARTED ports may stop. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1651 
/*
 * Close one port (or all, with pid == RTE_PORT_ALL): flush its flow
 * rules and release the device.  Ports in use by forwarding, enslaved
 * to a bonded device, already closed or not stopped are skipped.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* CLOSED -> CLOSED cmpset succeeding means already closed. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		/* Only a STOPPED port may be closed; claim it atomically. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		/* Flow rules must be flushed before the device is closed. */
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
1701 
1702 void
1703 reset_port(portid_t pid)
1704 {
1705 	int diag;
1706 	portid_t pi;
1707 	struct rte_port *port;
1708 
1709 	if (port_id_is_invalid(pid, ENABLED_WARN))
1710 		return;
1711 
1712 	printf("Resetting ports...\n");
1713 
1714 	RTE_ETH_FOREACH_DEV(pi) {
1715 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1716 			continue;
1717 
1718 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1719 			printf("Please remove port %d from forwarding "
1720 			       "configuration.\n", pi);
1721 			continue;
1722 		}
1723 
1724 		if (port_is_bonding_slave(pi)) {
1725 			printf("Please remove port %d from bonded device.\n",
1726 			       pi);
1727 			continue;
1728 		}
1729 
1730 		diag = rte_eth_dev_reset(pi);
1731 		if (diag == 0) {
1732 			port = &ports[pi];
1733 			port->need_reconfig = 1;
1734 			port->need_reconfig_queues = 1;
1735 		} else {
1736 			printf("Failed to reset port %d. diag=%d\n", pi, diag);
1737 		}
1738 	}
1739 
1740 	printf("Done\n");
1741 }
1742 
1743 void
1744 attach_port(char *identifier)
1745 {
1746 	portid_t pi = 0;
1747 	unsigned int socket_id;
1748 
1749 	printf("Attaching a new port...\n");
1750 
1751 	if (identifier == NULL) {
1752 		printf("Invalid parameters are specified\n");
1753 		return;
1754 	}
1755 
1756 	if (rte_eth_dev_attach(identifier, &pi))
1757 		return;
1758 
1759 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1760 	/* if socket_id is invalid, set to 0 */
1761 	if (check_socket_id(socket_id) < 0)
1762 		socket_id = 0;
1763 	reconfig(pi, socket_id);
1764 	rte_eth_promiscuous_enable(pi);
1765 
1766 	nb_ports = rte_eth_dev_count();
1767 
1768 	ports[pi].port_status = RTE_PORT_STOPPED;
1769 
1770 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1771 	printf("Done\n");
1772 }
1773 
1774 void
1775 detach_port(uint8_t port_id)
1776 {
1777 	char name[RTE_ETH_NAME_MAX_LEN];
1778 
1779 	printf("Detaching a port...\n");
1780 
1781 	if (!port_is_closed(port_id)) {
1782 		printf("Please close port first\n");
1783 		return;
1784 	}
1785 
1786 	if (ports[port_id].flow_list)
1787 		port_flow_flush(port_id);
1788 
1789 	if (rte_eth_dev_detach(port_id, name)) {
1790 		RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1791 		return;
1792 	}
1793 
1794 	nb_ports = rte_eth_dev_count();
1795 
1796 	printf("Port '%s' is detached. Now total ports is %d\n",
1797 			name, nb_ports);
1798 	printf("Done\n");
1799 	return;
1800 }
1801 
1802 void
1803 pmd_test_exit(void)
1804 {
1805 	portid_t pt_id;
1806 
1807 	if (test_done == 0)
1808 		stop_packet_forwarding();
1809 
1810 	if (ports != NULL) {
1811 		no_link_check = 1;
1812 		RTE_ETH_FOREACH_DEV(pt_id) {
1813 			printf("\nShutting down port %d...\n", pt_id);
1814 			fflush(stdout);
1815 			stop_port(pt_id);
1816 			close_port(pt_id);
1817 		}
1818 	}
1819 	printf("\nBye...\n");
1820 }
1821 
/* Handler signature for an interactive PMD test menu command. */
typedef void (*cmd_func_t)(void);
/* One menu entry: the command name and the function that runs it. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in the pmd_test_menu table. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1829 
1830 /* Check the link status of all ports in up to 9s, and print them finally */
1831 static void
1832 check_all_ports_link_status(uint32_t port_mask)
1833 {
1834 #define CHECK_INTERVAL 100 /* 100ms */
1835 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1836 	portid_t portid;
1837 	uint8_t count, all_ports_up, print_flag = 0;
1838 	struct rte_eth_link link;
1839 
1840 	printf("Checking link statuses...\n");
1841 	fflush(stdout);
1842 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1843 		all_ports_up = 1;
1844 		RTE_ETH_FOREACH_DEV(portid) {
1845 			if ((port_mask & (1 << portid)) == 0)
1846 				continue;
1847 			memset(&link, 0, sizeof(link));
1848 			rte_eth_link_get_nowait(portid, &link);
1849 			/* print link status if flag set */
1850 			if (print_flag == 1) {
1851 				if (link.link_status)
1852 					printf(
1853 					"Port%d Link Up. speed %u Mbps- %s\n",
1854 					portid, link.link_speed,
1855 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1856 					("full-duplex") : ("half-duplex\n"));
1857 				else
1858 					printf("Port %d Link Down\n", portid);
1859 				continue;
1860 			}
1861 			/* clear all_ports_up flag if any link down */
1862 			if (link.link_status == ETH_LINK_DOWN) {
1863 				all_ports_up = 0;
1864 				break;
1865 			}
1866 		}
1867 		/* after finally printing all link status, get out */
1868 		if (print_flag == 1)
1869 			break;
1870 
1871 		if (all_ports_up == 0) {
1872 			fflush(stdout);
1873 			rte_delay_ms(CHECK_INTERVAL);
1874 		}
1875 
1876 		/* set the print_flag if all ports up or timeout */
1877 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1878 			print_flag = 1;
1879 		}
1880 
1881 		if (lsc_interrupt)
1882 			break;
1883 	}
1884 }
1885 
/*
 * Deferred handler for a device-removal (RMV) interrupt: stop and close
 * the affected port, then detach the underlying device.  Scheduled via
 * rte_eal_alarm_set() from eth_event_callback().
 */
static void
rmv_event_callback(void *arg)
{
	struct rte_eth_dev *dev;
	/* the port id is smuggled through the callback pointer argument */
	uint8_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	stop_port(port_id);
	close_port(port_id);
	printf("removing device %s\n", dev->device->name);
	if (rte_eal_dev_detach(dev->device))
		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
			dev->device->name);
}
1902 
1903 /* This function is used by the interrupt thread */
/* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback registered for every event type by
 * start_port().  Optionally logs the event (per event_print_mask) and
 * schedules deferred device removal on an RMV interrupt.  Returns 0.
 */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		  void *ret_param)
{
	/* human-readable names, indexed by event type */
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu8 ": %s event\n", port_id,
			event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		/* defer the removal by 100 ms; it must not run in the
		 * interrupt thread context */
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
1943 
1944 static int
1945 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1946 {
1947 	uint16_t i;
1948 	int diag;
1949 	uint8_t mapping_found = 0;
1950 
1951 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1952 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1953 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1954 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1955 					tx_queue_stats_mappings[i].queue_id,
1956 					tx_queue_stats_mappings[i].stats_counter_id);
1957 			if (diag != 0)
1958 				return diag;
1959 			mapping_found = 1;
1960 		}
1961 	}
1962 	if (mapping_found)
1963 		port->tx_queue_stats_mapping_enabled = 1;
1964 	return 0;
1965 }
1966 
1967 static int
1968 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1969 {
1970 	uint16_t i;
1971 	int diag;
1972 	uint8_t mapping_found = 0;
1973 
1974 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1975 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1976 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1977 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1978 					rx_queue_stats_mappings[i].queue_id,
1979 					rx_queue_stats_mappings[i].stats_counter_id);
1980 			if (diag != 0)
1981 				return diag;
1982 			mapping_found = 1;
1983 		}
1984 	}
1985 	if (mapping_found)
1986 		port->rx_queue_stats_mapping_enabled = 1;
1987 	return 0;
1988 }
1989 
1990 static void
1991 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1992 {
1993 	int diag = 0;
1994 
1995 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1996 	if (diag != 0) {
1997 		if (diag == -ENOTSUP) {
1998 			port->tx_queue_stats_mapping_enabled = 0;
1999 			printf("TX queue stats mapping not supported port id=%d\n", pi);
2000 		}
2001 		else
2002 			rte_exit(EXIT_FAILURE,
2003 					"set_tx_queue_stats_mapping_registers "
2004 					"failed for port id=%d diag=%d\n",
2005 					pi, diag);
2006 	}
2007 
2008 	diag = set_rx_queue_stats_mapping_registers(pi, port);
2009 	if (diag != 0) {
2010 		if (diag == -ENOTSUP) {
2011 			port->rx_queue_stats_mapping_enabled = 0;
2012 			printf("RX queue stats mapping not supported port id=%d\n", pi);
2013 		}
2014 		else
2015 			rte_exit(EXIT_FAILURE,
2016 					"set_rx_queue_stats_mapping_registers "
2017 					"failed for port id=%d diag=%d\n",
2018 					pi, diag);
2019 	}
2020 }
2021 
/*
 * Initialize a port's RX/TX queue configuration from the PMD defaults,
 * then override each field the user set on the command line (a value of
 * RTE_PMD_PARAM_UNSET means "keep the PMD default").
 */
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
2062 
2063 void
2064 init_port_config(void)
2065 {
2066 	portid_t pid;
2067 	struct rte_port *port;
2068 
2069 	RTE_ETH_FOREACH_DEV(pid) {
2070 		port = &ports[pid];
2071 		port->dev_conf.rxmode = rx_mode;
2072 		port->dev_conf.fdir_conf = fdir_conf;
2073 		if (nb_rxq > 1) {
2074 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2075 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2076 		} else {
2077 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2078 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2079 		}
2080 
2081 		if (port->dcb_flag == 0) {
2082 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2083 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2084 			else
2085 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2086 		}
2087 
2088 		rxtx_port_config(port);
2089 
2090 		rte_eth_macaddr_get(pid, &port->eth_addr);
2091 
2092 		map_port_queue_stats_mapping_registers(pid, port);
2093 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2094 		rte_pmd_ixgbe_bypass_init(pid);
2095 #endif
2096 
2097 		if (lsc_interrupt &&
2098 		    (rte_eth_devices[pid].data->dev_flags &
2099 		     RTE_ETH_DEV_INTR_LSC))
2100 			port->dev_conf.intr_conf.lsc = 1;
2101 		if (rmv_interrupt &&
2102 		    (rte_eth_devices[pid].data->dev_flags &
2103 		     RTE_ETH_DEV_INTR_RMV))
2104 			port->dev_conf.intr_conf.rmv = 1;
2105 	}
2106 }
2107 
2108 void set_port_slave_flag(portid_t slave_pid)
2109 {
2110 	struct rte_port *port;
2111 
2112 	port = &ports[slave_pid];
2113 	port->slave_flag = 1;
2114 }
2115 
2116 void clear_port_slave_flag(portid_t slave_pid)
2117 {
2118 	struct rte_port *port;
2119 
2120 	port = &ports[slave_pid];
2121 	port->slave_flag = 0;
2122 }
2123 
2124 uint8_t port_is_bonding_slave(portid_t slave_pid)
2125 {
2126 	struct rte_port *port;
2127 
2128 	port = &ports[slave_pid];
2129 	return port->slave_flag;
2130 }
2131 
/*
 * VLAN IDs used to populate the VMDQ+DCB pool map in get_eth_dcb_conf()
 * and installed in the port VLAN filter by init_port_dcb_config().
 */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2138 
2139 static  int
2140 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2141 		 enum dcb_mode_enable dcb_mode,
2142 		 enum rte_eth_nb_tcs num_tcs,
2143 		 uint8_t pfc_en)
2144 {
2145 	uint8_t i;
2146 
2147 	/*
2148 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2149 	 * given above, and the number of traffic classes available for use.
2150 	 */
2151 	if (dcb_mode == DCB_VT_ENABLED) {
2152 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2153 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2154 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2155 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2156 
2157 		/* VMDQ+DCB RX and TX configurations */
2158 		vmdq_rx_conf->enable_default_pool = 0;
2159 		vmdq_rx_conf->default_pool = 0;
2160 		vmdq_rx_conf->nb_queue_pools =
2161 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2162 		vmdq_tx_conf->nb_queue_pools =
2163 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2164 
2165 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2166 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2167 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2168 			vmdq_rx_conf->pool_map[i].pools =
2169 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2170 		}
2171 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2172 			vmdq_rx_conf->dcb_tc[i] = i;
2173 			vmdq_tx_conf->dcb_tc[i] = i;
2174 		}
2175 
2176 		/* set DCB mode of RX and TX of multiple queues */
2177 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2178 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2179 	} else {
2180 		struct rte_eth_dcb_rx_conf *rx_conf =
2181 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2182 		struct rte_eth_dcb_tx_conf *tx_conf =
2183 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2184 
2185 		rx_conf->nb_tcs = num_tcs;
2186 		tx_conf->nb_tcs = num_tcs;
2187 
2188 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2189 			rx_conf->dcb_tc[i] = i % num_tcs;
2190 			tx_conf->dcb_tc[i] = i % num_tcs;
2191 		}
2192 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2193 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2194 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2195 	}
2196 
2197 	if (pfc_en)
2198 		eth_conf->dcb_capability_en =
2199 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2200 	else
2201 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2202 
2203 	return 0;
2204 }
2205 
2206 int
2207 init_port_dcb_config(portid_t pid,
2208 		     enum dcb_mode_enable dcb_mode,
2209 		     enum rte_eth_nb_tcs num_tcs,
2210 		     uint8_t pfc_en)
2211 {
2212 	struct rte_eth_conf port_conf;
2213 	struct rte_port *rte_port;
2214 	int retval;
2215 	uint16_t i;
2216 
2217 	rte_port = &ports[pid];
2218 
2219 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2220 	/* Enter DCB configuration status */
2221 	dcb_config = 1;
2222 
2223 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
2224 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2225 	if (retval < 0)
2226 		return retval;
2227 	port_conf.rxmode.hw_vlan_filter = 1;
2228 
2229 	/**
2230 	 * Write the configuration into the device.
2231 	 * Set the numbers of RX & TX queues to 0, so
2232 	 * the RX & TX queues will not be setup.
2233 	 */
2234 	rte_eth_dev_configure(pid, 0, 0, &port_conf);
2235 
2236 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2237 
2238 	/* If dev_info.vmdq_pool_base is greater than 0,
2239 	 * the queue id of vmdq pools is started after pf queues.
2240 	 */
2241 	if (dcb_mode == DCB_VT_ENABLED &&
2242 	    rte_port->dev_info.vmdq_pool_base > 0) {
2243 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2244 			" for port %d.", pid);
2245 		return -1;
2246 	}
2247 
2248 	/* Assume the ports in testpmd have the same dcb capability
2249 	 * and has the same number of rxq and txq in dcb mode
2250 	 */
2251 	if (dcb_mode == DCB_VT_ENABLED) {
2252 		if (rte_port->dev_info.max_vfs > 0) {
2253 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2254 			nb_txq = rte_port->dev_info.nb_tx_queues;
2255 		} else {
2256 			nb_rxq = rte_port->dev_info.max_rx_queues;
2257 			nb_txq = rte_port->dev_info.max_tx_queues;
2258 		}
2259 	} else {
2260 		/*if vt is disabled, use all pf queues */
2261 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2262 			nb_rxq = rte_port->dev_info.max_rx_queues;
2263 			nb_txq = rte_port->dev_info.max_tx_queues;
2264 		} else {
2265 			nb_rxq = (queueid_t)num_tcs;
2266 			nb_txq = (queueid_t)num_tcs;
2267 
2268 		}
2269 	}
2270 	rx_free_thresh = 64;
2271 
2272 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2273 
2274 	rxtx_port_config(rte_port);
2275 	/* VLAN filter */
2276 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2277 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2278 		rx_vft_set(pid, vlan_tags[i], 1);
2279 
2280 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2281 	map_port_queue_stats_mapping_registers(pid, rte_port);
2282 
2283 	rte_port->dcb_flag = 1;
2284 
2285 	return 0;
2286 }
2287 
2288 static void
2289 init_port(void)
2290 {
2291 	/* Configuration of Ethernet ports. */
2292 	ports = rte_zmalloc("testpmd: ports",
2293 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2294 			    RTE_CACHE_LINE_SIZE);
2295 	if (ports == NULL) {
2296 		rte_exit(EXIT_FAILURE,
2297 				"rte_zmalloc(%d struct rte_port) failed\n",
2298 				RTE_MAX_ETHPORTS);
2299 	}
2300 }
2301 
/* Tear down all ports, then terminate the interactive prompt. */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2308 
2309 static void
2310 print_stats(void)
2311 {
2312 	uint8_t i;
2313 	const char clr[] = { 27, '[', '2', 'J', '\0' };
2314 	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2315 
2316 	/* Clear screen and move to top left */
2317 	printf("%s%s", clr, top_left);
2318 
2319 	printf("\nPort statistics ====================================");
2320 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2321 		nic_stats_display(fwd_ports_ids[i]);
2322 }
2323 
/*
 * SIGINT/SIGTERM handler: uninitialize the optional capture/latency
 * frameworks, stop all ports, then re-raise the signal with the default
 * disposition so the process exits with the conventional signal status.
 *
 * NOTE(review): printf(), the uninit calls and force_quit() are not
 * async-signal-safe; this relies on the handler firing in a quiescent
 * state — TODO confirm this is acceptable for testpmd's use case.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
2343 
/*
 * testpmd entry point: initializes the EAL, parses the application's own
 * arguments, configures and starts every probed port, then either enters
 * the interactive command line or runs non-interactive forwarding until
 * the user (or a signal) stops it.
 */
int
main(int argc, char** argv)
{
	int  diag;
	portid_t port_id;

	/* Install handlers before EAL init so early Ctrl-C is caught. */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* diag is the number of argv entries consumed by the EAL. */
	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	/* Best effort: lock memory to avoid page faults in the fast path;
	 * failure is only logged, not fatal. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	/* Zero ports is only a warning: devices may be attached later. */
	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* Skip past the EAL arguments and parse testpmd's own options. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n",	ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	/* Optionally replay a command file before going interactive. */
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			/* Loops forever; only a signal terminates this mode. */
			while (1) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		/* Without --stats-period, block on stdin until the user
		 * presses enter, then shut everything down. */
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}
2493