xref: /dpdk/app/test-pmd/testpmd.c (revision 81ef862b51dc416217ca4ac1d0bf894a0baec9cc)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90 
91 #include "testpmd.h"
92 
93 uint16_t verbose_level = 0; /**< Silent by default. */
94 
95 /* Use the master core for the command line? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 char cmdline_filename[PATH_MAX] = {0};
99 
100 /*
101  * NUMA support configuration.
102  * When set, the NUMA support attempts to dispatch the allocation of the
103  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
104  * probed ports among the CPU sockets 0 and 1.
105  * Otherwise, all memory is allocated from CPU socket 0.
106  */
107 uint8_t numa_support = 1; /**< numa enabled by default */
108 
109 /*
110  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
111  * not configured.
112  */
113 uint8_t socket_num = UMA_NO_CONFIG;
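/*
 * Editorial illustration (not in the original file): a minimal sketch of
 * the socket-selection rule described above. The helper name
 * example_pick_socket is hypothetical; init_fwd_streams() applies the same
 * logic per port further down.
 */
#if 0	/* example sketch, not compiled */
static int
example_pick_socket(portid_t pid)
{
	if (!numa_support)
		return (socket_num == UMA_NO_CONFIG) ? 0 : socket_num;
	/* May return an invalid ID; callers fall back to socket 0. */
	return rte_eth_dev_socket_id(pid);
}
#endif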
114 
115 /*
116  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
117  */
118 uint8_t mp_anon = 0;
119 
120 /*
121  * Record the Ethernet address of peer target ports to which packets are
122  * forwarded.
123  * Must be instantiated with the Ethernet addresses of peer traffic generator
124  * ports.
125  */
126 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
127 portid_t nb_peer_eth_addrs = 0;
128 
129 /*
130  * Probed Target Environment.
131  */
132 struct rte_port *ports;	       /**< For all probed ethernet ports. */
133 portid_t nb_ports;             /**< Number of probed ethernet ports. */
134 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
135 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
136 
137 /*
138  * Test Forwarding Configuration.
139  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
140  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
141  */
142 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
143 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
144 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
145 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
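/*
 * Worked example (editorial illustration): with 8 enabled lcores, one of
 * them being the master, the defaults give nb_lcores = nb_cfg_lcores = 7;
 * "set nb-cores 4" at the prompt then lowers nb_fwd_lcores to 4, keeping
 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores.
 */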
146 
147 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
148 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
149 
150 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
151 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
152 
153 /*
154  * Forwarding engines.
155  */
156 struct fwd_engine *fwd_engines[] = {
157 	&io_fwd_engine,
158 	&mac_fwd_engine,
159 	&mac_swap_engine,
160 	&flow_gen_engine,
161 	&rx_only_engine,
162 	&tx_only_engine,
163 	&csum_fwd_engine,
164 	&icmp_echo_engine,
165 #ifdef RTE_LIBRTE_IEEE1588
166 	&ieee1588_fwd_engine,
167 #endif
168 	NULL,
169 };
170 
171 struct fwd_config cur_fwd_config;
172 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
173 uint32_t retry_enabled;
174 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
175 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
176 
177 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
178 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
179                                       * specified on command-line. */
180 
181 /*
182  * Configuration of packet segments used by the "txonly" processing engine.
183  */
184 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
185 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
186 	TXONLY_DEF_PACKET_LEN,
187 };
188 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
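/*
 * Editorial illustration: the "set txpkts" command fills the fields above;
 * e.g. "set txpkts 64,64" yields tx_pkt_nb_segs = 2,
 * tx_pkt_seg_lengths = {64, 64} and tx_pkt_length = 128.
 */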
189 
190 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
191 /**< Split policy for packets to TX. */
192 
193 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
194 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
195 
196 /* Whether the current configuration is in DCB mode; 0 means it is not. */
197 uint8_t dcb_config = 0;
198 
199 /* Whether DCB is in testing state */
200 uint8_t dcb_test = 0;
201 
202 /*
203  * Configurable number of RX/TX queues.
204  */
205 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
206 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
207 
208 /*
209  * Configurable number of RX/TX ring descriptors.
210  */
211 #define RTE_TEST_RX_DESC_DEFAULT 128
212 #define RTE_TEST_TX_DESC_DEFAULT 512
213 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
214 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
215 
216 #define RTE_PMD_PARAM_UNSET -1
217 /*
218  * Configurable values of RX and TX ring threshold registers.
219  */
220 
221 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
223 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
224 
225 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
228 
229 /*
230  * Configurable value of RX free threshold.
231  */
232 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
233 
234 /*
235  * Configurable value of RX drop enable.
236  */
237 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
238 
239 /*
240  * Configurable value of TX free threshold.
241  */
242 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
243 
244 /*
245  * Configurable value of TX RS bit threshold.
246  */
247 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
248 
249 /*
250  * Configurable value of TX queue flags.
251  */
252 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
253 
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 
264 /*
265  * Avoid flushing all the RX streams before forwarding starts.
266  */
267 uint8_t no_flush_rx = 0; /* flush by default */
268 
269 /*
270  * Avoid checking the link status when starting/stopping a port.
271  */
272 uint8_t no_link_check = 0; /* check by default */
273 
274 /*
275  * Enable link status change notification
276  */
277 uint8_t lsc_interrupt = 1; /* enabled by default */
278 
279 /*
280  * Enable device removal notification.
281  */
282 uint8_t rmv_interrupt = 1; /* enabled by default */
283 
284 /*
285  * NIC bypass mode configuration options.
286  */
287 #ifdef RTE_NIC_BYPASS
288 
289 /* The NIC bypass watchdog timeout. */
290 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
291 
292 #endif
293 
294 #ifdef RTE_LIBRTE_LATENCY_STATS
295 
296 /*
297  * Set when latency stats are enabled on the command line.
298  */
299 uint8_t latencystats_enabled;
300 
301 /*
302  * Lcore ID that serves latency statistics.
303  */
304 lcoreid_t latencystats_lcore_id = -1;
305 
306 #endif
307 
308 /*
309  * Ethernet device configuration.
310  */
311 struct rte_eth_rxmode rx_mode = {
312 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
313 	.split_hdr_size = 0,
314 	.header_split   = 0, /**< Header Split disabled. */
315 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
316 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
317 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
318 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
319 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
320 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
321 };
322 
323 struct rte_fdir_conf fdir_conf = {
324 	.mode = RTE_FDIR_MODE_NONE,
325 	.pballoc = RTE_FDIR_PBALLOC_64K,
326 	.status = RTE_FDIR_REPORT_STATUS,
327 	.mask = {
328 		.vlan_tci_mask = 0x0,
329 		.ipv4_mask     = {
330 			.src_ip = 0xFFFFFFFF,
331 			.dst_ip = 0xFFFFFFFF,
332 		},
333 		.ipv6_mask     = {
334 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
335 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
336 		},
337 		.src_port_mask = 0xFFFF,
338 		.dst_port_mask = 0xFFFF,
339 		.mac_addr_byte_mask = 0xFF,
340 		.tunnel_type_mask = 1,
341 		.tunnel_id_mask = 0xFFFFFFFF,
342 	},
343 	.drop_queue = 127,
344 };
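/*
 * Editorial illustration: once a flow director mode other than
 * RTE_FDIR_MODE_NONE is enabled, the default masks above request an exact
 * match on the full IPv4/IPv6 source and destination addresses and on the
 * complete L4 port numbers, while vlan_tci_mask = 0x0 ignores the VLAN
 * tag; packets of dropped flows are steered to queue 127.
 */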
345 
346 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
347 
348 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
349 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
350 
351 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
352 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
353 
354 uint16_t nb_tx_queue_stats_mappings = 0;
355 uint16_t nb_rx_queue_stats_mappings = 0;
356 
357 unsigned max_socket = 0;
358 
359 #ifdef RTE_LIBRTE_BITRATE
360 /* Bitrate statistics */
361 struct rte_stats_bitrates *bitrate_data;
362 lcoreid_t bitrate_lcore_id;
363 uint8_t bitrate_enabled;
364 #endif
365 
366 /* Forward function declarations */
367 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
368 static void check_all_ports_link_status(uint32_t port_mask);
369 static void eth_event_callback(uint8_t port_id,
370 			       enum rte_eth_event_type type,
371 			       void *param);
372 
373 /*
374  * Check if all the ports are started.
375  * If yes, return positive value. If not, return zero.
376  */
377 static int all_ports_started(void);
378 
379 /*
380  * Setup default configuration.
381  */
382 static void
383 set_default_fwd_lcores_config(void)
384 {
385 	unsigned int i;
386 	unsigned int nb_lc;
387 	unsigned int sock_num;
388 
389 	nb_lc = 0;
390 	for (i = 0; i < RTE_MAX_LCORE; i++) {
391 		sock_num = rte_lcore_to_socket_id(i) + 1;
392 		if (sock_num > max_socket) {
393 			if (sock_num > RTE_MAX_NUMA_NODES)
394 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
395 			max_socket = sock_num;
396 		}
397 		if (!rte_lcore_is_enabled(i))
398 			continue;
399 		if (i == rte_get_master_lcore())
400 			continue;
401 		fwd_lcores_cpuids[nb_lc++] = i;
402 	}
403 	nb_lcores = (lcoreid_t) nb_lc;
404 	nb_cfg_lcores = nb_lcores;
405 	nb_fwd_lcores = 1;
406 }
407 
408 static void
409 set_def_peer_eth_addrs(void)
410 {
411 	portid_t i;
412 
413 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
414 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
415 		peer_eth_addrs[i].addr_bytes[5] = i;
416 	}
417 }
418 
419 static void
420 set_default_fwd_ports_config(void)
421 {
422 	portid_t pt_id;
423 
424 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
425 		fwd_ports_ids[pt_id] = pt_id;
426 
427 	nb_cfg_ports = nb_ports;
428 	nb_fwd_ports = nb_ports;
429 }
430 
431 void
432 set_def_fwd_config(void)
433 {
434 	set_default_fwd_lcores_config();
435 	set_def_peer_eth_addrs();
436 	set_default_fwd_ports_config();
437 }
438 
439 /*
440  * Configuration initialisation done once at init time.
441  */
442 static void
443 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
444 		 unsigned int socket_id)
445 {
446 	char pool_name[RTE_MEMPOOL_NAMESIZE];
447 	struct rte_mempool *rte_mp = NULL;
448 	uint32_t mb_size;
449 
450 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
451 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
452 
453 	RTE_LOG(INFO, USER1,
454 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
455 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
456 
457 #ifdef RTE_LIBRTE_PMD_XENVIRT
458 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
459 		(unsigned) mb_mempool_cache,
460 		sizeof(struct rte_pktmbuf_pool_private),
461 		rte_pktmbuf_pool_init, NULL,
462 		rte_pktmbuf_init, NULL,
463 		socket_id, 0);
464 #endif
465 
466 	/* if the former XEN allocation failed, fall back to normal allocation */
467 	if (rte_mp == NULL) {
468 		if (mp_anon != 0) {
469 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
470 				mb_size, (unsigned) mb_mempool_cache,
471 				sizeof(struct rte_pktmbuf_pool_private),
472 				socket_id, 0);
473 			if (rte_mp == NULL)
474 				goto err;
475 
476 			if (rte_mempool_populate_anon(rte_mp) == 0) {
477 				rte_mempool_free(rte_mp);
478 				rte_mp = NULL;
479 				goto err;
480 			}
481 			rte_pktmbuf_pool_init(rte_mp, NULL);
482 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
483 		} else {
484 			/* wrapper to rte_mempool_create() */
485 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
486 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
487 		}
488 	}
489 
490 err:
491 	if (rte_mp == NULL) {
492 		rte_exit(EXIT_FAILURE,
493 			"Creation of mbuf pool for socket %u failed: %s\n",
494 			socket_id, rte_strerror(rte_errno));
495 	} else if (verbose_level > 0) {
496 		rte_mempool_dump(stdout, rte_mp);
497 	}
498 }
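/*
 * Editorial illustration: init_config() below invokes this helper once per
 * detected socket when NUMA support is on, and once for socket 0 (or for
 * --socket-num) otherwise, e.g.:
 *
 *	mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
 */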
499 
500 /*
501  * Check whether a given socket ID is valid in NUMA mode.
502  * Return 0 if valid, -1 otherwise.
503  */
504 static int
505 check_socket_id(const unsigned int socket_id)
506 {
507 	static int warning_once = 0;
508 
509 	if (socket_id >= max_socket) {
510 		if (!warning_once && numa_support)
511 			printf("Warning: NUMA should be configured manually by"
512 			       " using --port-numa-config and"
513 			       " --ring-numa-config parameters along with"
514 			       " --numa.\n");
515 		warning_once = 1;
516 		return -1;
517 	}
518 	return 0;
519 }
520 
521 static void
522 init_config(void)
523 {
524 	portid_t pid;
525 	struct rte_port *port;
526 	struct rte_mempool *mbp;
527 	unsigned int nb_mbuf_per_pool;
528 	lcoreid_t  lc_id;
529 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
530 
531 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
532 	/* Configuration of logical cores. */
533 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
534 				sizeof(struct fwd_lcore *) * nb_lcores,
535 				RTE_CACHE_LINE_SIZE);
536 	if (fwd_lcores == NULL) {
537 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
538 							"failed\n", nb_lcores);
539 	}
540 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
541 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
542 					       sizeof(struct fwd_lcore),
543 					       RTE_CACHE_LINE_SIZE);
544 		if (fwd_lcores[lc_id] == NULL) {
545 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
546 								"failed\n");
547 		}
548 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
549 	}
550 
551 	RTE_ETH_FOREACH_DEV(pid) {
552 		port = &ports[pid];
553 		rte_eth_dev_info_get(pid, &port->dev_info);
554 
555 		if (numa_support) {
556 			if (port_numa[pid] != NUMA_NO_CONFIG)
557 				port_per_socket[port_numa[pid]]++;
558 			else {
559 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
560 
561 				/* if socket_id is invalid, set to 0 */
562 				if (check_socket_id(socket_id) < 0)
563 					socket_id = 0;
564 				port_per_socket[socket_id]++;
565 			}
566 		}
567 
568 		/* set flag to initialize port/queue */
569 		port->need_reconfig = 1;
570 		port->need_reconfig_queues = 1;
571 	}
572 
573 	/*
574 	 * Create pools of mbuf.
575 	 * If NUMA support is disabled, create a single pool of mbuf in
576 	 * socket 0 memory by default.
577 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
578 	 *
579 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
580 	 * nb_txd can be configured at run time.
581 	 */
582 	if (param_total_num_mbufs)
583 		nb_mbuf_per_pool = param_total_num_mbufs;
584 	else {
585 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
586 			(nb_lcores * mb_mempool_cache) +
587 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
588 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
589 	}
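	/*
	 * Worked example (editorial illustration, not in the original file;
	 * assumes the usual testpmd.h defaults RTE_TEST_RX_DESC_MAX = 2048,
	 * RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST = 512 and a 250-mbuf
	 * cache): with 4 lcores, each pool is sized
	 * (2048 + 4 * 250 + 2048 + 512) * RTE_MAX_ETHPORTS
	 * = 5608 * RTE_MAX_ETHPORTS mbufs.
	 */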
590 
591 	if (numa_support) {
592 		uint8_t i;
593 
594 		for (i = 0; i < max_socket; i++)
595 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
596 	} else {
597 		if (socket_num == UMA_NO_CONFIG)
598 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
599 		else
600 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
601 						 socket_num);
602 	}
603 
604 	init_port_config();
605 
606 	/*
607 	 * Records which Mbuf pool to use by each logical core, if needed.
608 	 */
609 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
610 		mbp = mbuf_pool_find(
611 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
612 
613 		if (mbp == NULL)
614 			mbp = mbuf_pool_find(0);
615 		fwd_lcores[lc_id]->mbp = mbp;
616 	}
617 
618 	/* Configuration of packet forwarding streams. */
619 	if (init_fwd_streams() < 0)
620 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
621 
622 	fwd_config_setup();
623 }
624 
625 
626 void
627 reconfig(portid_t new_port_id, unsigned socket_id)
628 {
629 	struct rte_port *port;
630 
631 	/* Reconfiguration of Ethernet ports. */
632 	port = &ports[new_port_id];
633 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
634 
635 	/* set flag to initialize port/queue */
636 	port->need_reconfig = 1;
637 	port->need_reconfig_queues = 1;
638 	port->socket_id = socket_id;
639 
640 	init_port_config();
641 }
642 
643 
644 int
645 init_fwd_streams(void)
646 {
647 	portid_t pid;
648 	struct rte_port *port;
649 	streamid_t sm_id, nb_fwd_streams_new;
650 	queueid_t q;
651 
652 	/* Set the socket ID according to whether NUMA is enabled. */
653 	RTE_ETH_FOREACH_DEV(pid) {
654 		port = &ports[pid];
655 		if (nb_rxq > port->dev_info.max_rx_queues) {
656 			printf("Fail: nb_rxq(%d) is greater than "
657 				"max_rx_queues(%d)\n", nb_rxq,
658 				port->dev_info.max_rx_queues);
659 			return -1;
660 		}
661 		if (nb_txq > port->dev_info.max_tx_queues) {
662 			printf("Fail: nb_txq(%d) is greater than "
663 				"max_tx_queues(%d)\n", nb_txq,
664 				port->dev_info.max_tx_queues);
665 			return -1;
666 		}
667 		if (numa_support) {
668 			if (port_numa[pid] != NUMA_NO_CONFIG)
669 				port->socket_id = port_numa[pid];
670 			else {
671 				port->socket_id = rte_eth_dev_socket_id(pid);
672 
673 				/* if socket_id is invalid, set to 0 */
674 				if (check_socket_id(port->socket_id) < 0)
675 					port->socket_id = 0;
676 			}
677 		}
678 		else {
679 			if (socket_num == UMA_NO_CONFIG)
680 				port->socket_id = 0;
681 			else
682 				port->socket_id = socket_num;
683 		}
684 	}
685 
686 	q = RTE_MAX(nb_rxq, nb_txq);
687 	if (q == 0) {
688 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
689 		return -1;
690 	}
691 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
692 	if (nb_fwd_streams_new == nb_fwd_streams)
693 		return 0;
694 	/* clear the old */
695 	if (fwd_streams != NULL) {
696 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
697 			if (fwd_streams[sm_id] == NULL)
698 				continue;
699 			rte_free(fwd_streams[sm_id]);
700 			fwd_streams[sm_id] = NULL;
701 		}
702 		rte_free(fwd_streams);
703 		fwd_streams = NULL;
704 	}
705 
706 	/* init new */
707 	nb_fwd_streams = nb_fwd_streams_new;
708 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
709 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
710 	if (fwd_streams == NULL)
711 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
712 						"failed\n", nb_fwd_streams);
713 
714 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
715 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
716 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
717 		if (fwd_streams[sm_id] == NULL)
718 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
719 								" failed\n");
720 	}
721 
722 	return 0;
723 }
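/*
 * Worked example (editorial illustration): with 2 ports, nb_rxq = 4 and
 * nb_txq = 2, q = RTE_MAX(4, 2) = 4, so 2 * 4 = 8 forwarding streams are
 * allocated.
 */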
724 
725 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
726 static void
727 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
728 {
729 	unsigned int total_burst;
730 	unsigned int nb_burst;
731 	unsigned int burst_stats[3];
732 	uint16_t pktnb_stats[3];
733 	uint16_t nb_pkt;
734 	int burst_percent[3];
735 
736 	/*
737 	 * First compute the total number of packet bursts and the
738 	 * two highest numbers of bursts of the same number of packets.
739 	 */
740 	total_burst = 0;
741 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
742 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
743 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
744 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
745 		if (nb_burst == 0)
746 			continue;
747 		total_burst += nb_burst;
748 		if (nb_burst > burst_stats[0]) {
749 			burst_stats[1] = burst_stats[0];
750 			pktnb_stats[1] = pktnb_stats[0];
751 			burst_stats[0] = nb_burst;
752 			pktnb_stats[0] = nb_pkt;
753 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
754 	}
755 	if (total_burst == 0)
756 		return;
757 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
758 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
759 	       burst_percent[0], (int) pktnb_stats[0]);
760 	if (burst_stats[0] == total_burst) {
761 		printf("]\n");
762 		return;
763 	}
764 	if (burst_stats[0] + burst_stats[1] == total_burst) {
765 		printf(" + %d%% of %d pkts]\n",
766 		       100 - burst_percent[0], pktnb_stats[1]);
767 		return;
768 	}
769 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
770 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
771 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
772 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
773 		return;
774 	}
775 	printf(" + %d%% of %d pkts + %d%% of others]\n",
776 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
777 }
778 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
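/*
 * Editorial illustration: the function above prints summaries such as
 *   RX-bursts : 100 [90% of 32 pkts + 10% of 16 pkts]
 * i.e. the total number of bursts followed by the share of the one or two
 * dominant burst sizes.
 */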
779 
780 static void
781 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
782 {
783 	struct rte_port *port;
784 	uint8_t i;
785 
786 	static const char *fwd_stats_border = "----------------------";
787 
788 	port = &ports[port_id];
789 	printf("\n  %s Forward statistics for port %-2d %s\n",
790 	       fwd_stats_border, port_id, fwd_stats_border);
791 
792 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
793 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
794 		       "%-"PRIu64"\n",
795 		       stats->ipackets, stats->imissed,
796 		       (uint64_t) (stats->ipackets + stats->imissed));
797 
798 		if (cur_fwd_eng == &csum_fwd_engine)
799 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
800 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
801 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
802 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
803 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
804 		}
805 
806 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
807 		       "%-"PRIu64"\n",
808 		       stats->opackets, port->tx_dropped,
809 		       (uint64_t) (stats->opackets + port->tx_dropped));
810 	}
811 	else {
812 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
813 		       "%14"PRIu64"\n",
814 		       stats->ipackets, stats->imissed,
815 		       (uint64_t) (stats->ipackets + stats->imissed));
816 
817 		if (cur_fwd_eng == &csum_fwd_engine)
818 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
819 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
820 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
821 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
822 			printf("  RX-nombufs:             %14"PRIu64"\n",
823 			       stats->rx_nombuf);
824 		}
825 
826 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
827 		       "%14"PRIu64"\n",
828 		       stats->opackets, port->tx_dropped,
829 		       (uint64_t) (stats->opackets + port->tx_dropped));
830 	}
831 
832 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
833 	if (port->rx_stream)
834 		pkt_burst_stats_display("RX",
835 			&port->rx_stream->rx_burst_stats);
836 	if (port->tx_stream)
837 		pkt_burst_stats_display("TX",
838 			&port->tx_stream->tx_burst_stats);
839 #endif
840 
841 	if (port->rx_queue_stats_mapping_enabled) {
842 		printf("\n");
843 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
844 			printf("  Stats reg %2d RX-packets:%14"PRIu64
845 			       "     RX-errors:%14"PRIu64
846 			       "    RX-bytes:%14"PRIu64"\n",
847 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
848 		}
849 		printf("\n");
850 	}
851 	if (port->tx_queue_stats_mapping_enabled) {
852 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
853 			printf("  Stats reg %2d TX-packets:%14"PRIu64
854 			       "                                 TX-bytes:%14"PRIu64"\n",
855 			       i, stats->q_opackets[i], stats->q_obytes[i]);
856 		}
857 	}
858 
859 	printf("  %s--------------------------------%s\n",
860 	       fwd_stats_border, fwd_stats_border);
861 }
862 
863 static void
864 fwd_stream_stats_display(streamid_t stream_id)
865 {
866 	struct fwd_stream *fs;
867 	static const char *fwd_top_stats_border = "-------";
868 
869 	fs = fwd_streams[stream_id];
870 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
871 	    (fs->fwd_dropped == 0))
872 		return;
873 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
874 	       "TX Port=%2d/Queue=%2d %s\n",
875 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
876 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
877 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
878 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
879 
880 	/* if checksum mode */
881 	if (cur_fwd_eng == &csum_fwd_engine) {
882 	       printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
883 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
884 	}
885 
886 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
887 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
888 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
889 #endif
890 }
891 
892 static void
893 flush_fwd_rx_queues(void)
894 {
895 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
896 	portid_t  rxp;
897 	portid_t port_id;
898 	queueid_t rxq;
899 	uint16_t  nb_rx;
900 	uint16_t  i;
901 	uint8_t   j;
902 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
903 	uint64_t timer_period;
904 
905 	/* convert to number of cycles */
906 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
907 
908 	for (j = 0; j < 2; j++) {
909 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
910 			for (rxq = 0; rxq < nb_rxq; rxq++) {
911 				port_id = fwd_ports_ids[rxp];
912 				/*
913 				 * testpmd can get stuck in the do-while loop
914 				 * below if rte_eth_rx_burst() always returns
915 				 * a nonzero number of packets, so a timer is
916 				 * used to exit after a 1 second timeout.
917 				 */
918 				prev_tsc = rte_rdtsc();
919 				do {
920 					nb_rx = rte_eth_rx_burst(port_id, rxq,
921 						pkts_burst, MAX_PKT_BURST);
922 					for (i = 0; i < nb_rx; i++)
923 						rte_pktmbuf_free(pkts_burst[i]);
924 
925 					cur_tsc = rte_rdtsc();
926 					diff_tsc = cur_tsc - prev_tsc;
927 					timer_tsc += diff_tsc;
928 				} while ((nb_rx > 0) &&
929 					(timer_tsc < timer_period));
930 				timer_tsc = 0;
931 			}
932 		}
933 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
934 	}
935 }
936 
937 static void
938 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
939 {
940 	struct fwd_stream **fsm;
941 	streamid_t nb_fs;
942 	streamid_t sm_id;
943 #ifdef RTE_LIBRTE_BITRATE
944 	uint64_t tics_per_1sec;
945 	uint64_t tics_datum;
946 	uint64_t tics_current;
947 	uint8_t idx_port, cnt_ports;
948 
949 	cnt_ports = rte_eth_dev_count();
950 	tics_datum = rte_rdtsc();
951 	tics_per_1sec = rte_get_timer_hz();
952 #endif
953 	fsm = &fwd_streams[fc->stream_idx];
954 	nb_fs = fc->stream_nb;
955 	do {
956 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
957 			(*pkt_fwd)(fsm[sm_id]);
958 #ifdef RTE_LIBRTE_BITRATE
959 		if (bitrate_enabled != 0 &&
960 				bitrate_lcore_id == rte_lcore_id()) {
961 			tics_current = rte_rdtsc();
962 			if (tics_current - tics_datum >= tics_per_1sec) {
963 				/* Periodic bitrate calculation */
964 				for (idx_port = 0;
965 						idx_port < cnt_ports;
966 						idx_port++)
967 					rte_stats_bitrate_calc(bitrate_data,
968 						idx_port);
969 				tics_datum = tics_current;
970 			}
971 		}
972 #endif
973 #ifdef RTE_LIBRTE_LATENCY_STATS
974 		if (latencystats_lcore_id == rte_lcore_id())
975 			rte_latencystats_update();
976 #endif
977 
978 	} while (! fc->stopped);
979 }
980 
981 static int
982 start_pkt_forward_on_core(void *fwd_arg)
983 {
984 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
985 			     cur_fwd_config.fwd_eng->packet_fwd);
986 	return 0;
987 }
988 
989 /*
990  * Run the TXONLY packet forwarding engine to send a single burst of packets.
991  * Used to start communication flows in network loopback test configurations.
992  */
993 static int
994 run_one_txonly_burst_on_core(void *fwd_arg)
995 {
996 	struct fwd_lcore *fwd_lc;
997 	struct fwd_lcore tmp_lcore;
998 
999 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1000 	tmp_lcore = *fwd_lc;
1001 	tmp_lcore.stopped = 1;
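	/*
	 * With "stopped" already set on the local copy, the do-while loop in
	 * run_pkt_fwd_on_lcore() executes its body exactly once, i.e. each
	 * stream sends a single burst.
	 */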
1002 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1003 	return 0;
1004 }
1005 
1006 /*
1007  * Launch packet forwarding:
1008  *     - Setup per-port forwarding context.
1009  *     - launch logical cores with their forwarding configuration.
1010  */
1011 static void
1012 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1013 {
1014 	port_fwd_begin_t port_fwd_begin;
1015 	unsigned int i;
1016 	unsigned int lc_id;
1017 	int diag;
1018 
1019 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1020 	if (port_fwd_begin != NULL) {
1021 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1022 			(*port_fwd_begin)(fwd_ports_ids[i]);
1023 	}
1024 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1025 		lc_id = fwd_lcores_cpuids[i];
1026 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1027 			fwd_lcores[i]->stopped = 0;
1028 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1029 						     fwd_lcores[i], lc_id);
1030 			if (diag != 0)
1031 				printf("launch lcore %u failed - diag=%d\n",
1032 				       lc_id, diag);
1033 		}
1034 	}
1035 }
1036 
1037 /*
1038  * Launch packet forwarding configuration.
1039  */
1040 void
1041 start_packet_forwarding(int with_tx_first)
1042 {
1043 	port_fwd_begin_t port_fwd_begin;
1044 	port_fwd_end_t  port_fwd_end;
1045 	struct rte_port *port;
1046 	unsigned int i;
1047 	portid_t   pt_id;
1048 	streamid_t sm_id;
1049 
1050 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1051 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1052 
1053 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1054 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1055 
1056 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1057 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1058 		(!nb_rxq || !nb_txq))
1059 		rte_exit(EXIT_FAILURE,
1060 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1061 			cur_fwd_eng->fwd_mode_name);
1062 
1063 	if (all_ports_started() == 0) {
1064 		printf("Not all ports were started\n");
1065 		return;
1066 	}
1067 	if (test_done == 0) {
1068 		printf("Packet forwarding already started\n");
1069 		return;
1070 	}
1071 
1072 	if (init_fwd_streams() < 0) {
1073 		printf("Fail from init_fwd_streams()\n");
1074 		return;
1075 	}
1076 
1077 	if (dcb_test) {
1078 		for (i = 0; i < nb_fwd_ports; i++) {
1079 			pt_id = fwd_ports_ids[i];
1080 			port = &ports[pt_id];
1081 			if (!port->dcb_flag) {
1082 				printf("In DCB mode, all forwarding ports must "
1083 				       "be configured in this mode.\n");
1084 				return;
1085 			}
1086 		}
1087 		if (nb_fwd_lcores == 1) {
1088 			printf("In DCB mode, the number of forwarding cores "
1089 			       "should be larger than 1.\n");
1090 			return;
1091 		}
1092 	}
1093 	test_done = 0;
1094 
1095 	if (!no_flush_rx)
1096 		flush_fwd_rx_queues();
1097 
1098 	fwd_config_setup();
1099 	pkt_fwd_config_display(&cur_fwd_config);
1100 	rxtx_config_display();
1101 
1102 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1103 		pt_id = fwd_ports_ids[i];
1104 		port = &ports[pt_id];
1105 		rte_eth_stats_get(pt_id, &port->stats);
1106 		port->tx_dropped = 0;
1107 
1108 		map_port_queue_stats_mapping_registers(pt_id, port);
1109 	}
1110 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1111 		fwd_streams[sm_id]->rx_packets = 0;
1112 		fwd_streams[sm_id]->tx_packets = 0;
1113 		fwd_streams[sm_id]->fwd_dropped = 0;
1114 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1115 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1116 
1117 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1118 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1119 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1120 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1121 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1122 #endif
1123 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1124 		fwd_streams[sm_id]->core_cycles = 0;
1125 #endif
1126 	}
1127 	if (with_tx_first) {
1128 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1129 		if (port_fwd_begin != NULL) {
1130 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1131 				(*port_fwd_begin)(fwd_ports_ids[i]);
1132 		}
1133 		while (with_tx_first--) {
1134 			launch_packet_forwarding(
1135 					run_one_txonly_burst_on_core);
1136 			rte_eal_mp_wait_lcore();
1137 		}
1138 		port_fwd_end = tx_only_engine.port_fwd_end;
1139 		if (port_fwd_end != NULL) {
1140 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1141 				(*port_fwd_end)(fwd_ports_ids[i]);
1142 		}
1143 	}
1144 	launch_packet_forwarding(start_pkt_forward_on_core);
1145 }
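/*
 * Editorial illustration: "start tx_first 3" at the testpmd prompt reaches
 * this function with with_tx_first = 3, so three TXONLY bursts are sent by
 * the while loop above before normal forwarding is launched.
 */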
1146 
1147 void
1148 stop_packet_forwarding(void)
1149 {
1150 	struct rte_eth_stats stats;
1151 	struct rte_port *port;
1152 	port_fwd_end_t  port_fwd_end;
1153 	int i;
1154 	portid_t   pt_id;
1155 	streamid_t sm_id;
1156 	lcoreid_t  lc_id;
1157 	uint64_t total_recv;
1158 	uint64_t total_xmit;
1159 	uint64_t total_rx_dropped;
1160 	uint64_t total_tx_dropped;
1161 	uint64_t total_rx_nombuf;
1162 	uint64_t tx_dropped;
1163 	uint64_t rx_bad_ip_csum;
1164 	uint64_t rx_bad_l4_csum;
1165 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1166 	uint64_t fwd_cycles;
1167 #endif
1168 	static const char *acc_stats_border = "+++++++++++++++";
1169 
1170 	if (test_done) {
1171 		printf("Packet forwarding not started\n");
1172 		return;
1173 	}
1174 	printf("Telling cores to stop...");
1175 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1176 		fwd_lcores[lc_id]->stopped = 1;
1177 	printf("\nWaiting for lcores to finish...\n");
1178 	rte_eal_mp_wait_lcore();
1179 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1180 	if (port_fwd_end != NULL) {
1181 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1182 			pt_id = fwd_ports_ids[i];
1183 			(*port_fwd_end)(pt_id);
1184 		}
1185 	}
1186 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1187 	fwd_cycles = 0;
1188 #endif
1189 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1190 		if (cur_fwd_config.nb_fwd_streams >
1191 		    cur_fwd_config.nb_fwd_ports) {
1192 			fwd_stream_stats_display(sm_id);
1193 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1194 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1195 		} else {
1196 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1197 				fwd_streams[sm_id];
1198 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1199 				fwd_streams[sm_id];
1200 		}
1201 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1202 		tx_dropped = (uint64_t) (tx_dropped +
1203 					 fwd_streams[sm_id]->fwd_dropped);
1204 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1205 
1206 		rx_bad_ip_csum =
1207 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1208 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1209 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1210 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1211 							rx_bad_ip_csum;
1212 
1213 		rx_bad_l4_csum =
1214 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1215 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1216 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1217 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1218 							rx_bad_l4_csum;
1219 
1220 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1221 		fwd_cycles = (uint64_t) (fwd_cycles +
1222 					 fwd_streams[sm_id]->core_cycles);
1223 #endif
1224 	}
1225 	total_recv = 0;
1226 	total_xmit = 0;
1227 	total_rx_dropped = 0;
1228 	total_tx_dropped = 0;
1229 	total_rx_nombuf  = 0;
1230 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1231 		pt_id = fwd_ports_ids[i];
1232 
1233 		port = &ports[pt_id];
1234 		rte_eth_stats_get(pt_id, &stats);
1235 		stats.ipackets -= port->stats.ipackets;
1236 		port->stats.ipackets = 0;
1237 		stats.opackets -= port->stats.opackets;
1238 		port->stats.opackets = 0;
1239 		stats.ibytes   -= port->stats.ibytes;
1240 		port->stats.ibytes = 0;
1241 		stats.obytes   -= port->stats.obytes;
1242 		port->stats.obytes = 0;
1243 		stats.imissed  -= port->stats.imissed;
1244 		port->stats.imissed = 0;
1245 		stats.oerrors  -= port->stats.oerrors;
1246 		port->stats.oerrors = 0;
1247 		stats.rx_nombuf -= port->stats.rx_nombuf;
1248 		port->stats.rx_nombuf = 0;
1249 
1250 		total_recv += stats.ipackets;
1251 		total_xmit += stats.opackets;
1252 		total_rx_dropped += stats.imissed;
1253 		total_tx_dropped += port->tx_dropped;
1254 		total_rx_nombuf  += stats.rx_nombuf;
1255 
1256 		fwd_port_stats_display(pt_id, &stats);
1257 	}
1258 	printf("\n  %s Accumulated forward statistics for all ports"
1259 	       "%s\n",
1260 	       acc_stats_border, acc_stats_border);
1261 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1262 	       "%-"PRIu64"\n"
1263 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1264 	       "%-"PRIu64"\n",
1265 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1266 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1267 	if (total_rx_nombuf > 0)
1268 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1269 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1270 	       "%s\n",
1271 	       acc_stats_border, acc_stats_border);
1272 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1273 	if (total_recv > 0)
1274 		printf("\n  CPU cycles/packet=%u (total cycles="
1275 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1276 		       (unsigned int)(fwd_cycles / total_recv),
1277 		       fwd_cycles, total_recv);
1278 #endif
1279 	printf("\nDone.\n");
1280 	test_done = 1;
1281 }
1282 
1283 void
1284 dev_set_link_up(portid_t pid)
1285 {
1286 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1287 		printf("\nFailed to set link up.\n");
1288 }
1289 
1290 void
1291 dev_set_link_down(portid_t pid)
1292 {
1293 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1294 		printf("\nFailed to set link down.\n");
1295 }
1296 
1297 static int
1298 all_ports_started(void)
1299 {
1300 	portid_t pi;
1301 	struct rte_port *port;
1302 
1303 	RTE_ETH_FOREACH_DEV(pi) {
1304 		port = &ports[pi];
1305 		/* Check if there is a port which is not started */
1306 		if ((port->port_status != RTE_PORT_STARTED) &&
1307 			(port->slave_flag == 0))
1308 			return 0;
1309 	}
1310 
1311 	/* All ports are started */
1312 	return 1;
1313 }
1314 
1315 int
1316 all_ports_stopped(void)
1317 {
1318 	portid_t pi;
1319 	struct rte_port *port;
1320 
1321 	RTE_ETH_FOREACH_DEV(pi) {
1322 		port = &ports[pi];
1323 		if ((port->port_status != RTE_PORT_STOPPED) &&
1324 			(port->slave_flag == 0))
1325 			return 0;
1326 	}
1327 
1328 	return 1;
1329 }
1330 
1331 int
1332 port_is_started(portid_t port_id)
1333 {
1334 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1335 		return 0;
1336 
1337 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1338 		return 0;
1339 
1340 	return 1;
1341 }
1342 
1343 static int
1344 port_is_closed(portid_t port_id)
1345 {
1346 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1347 		return 0;
1348 
1349 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1350 		return 0;
1351 
1352 	return 1;
1353 }
1354 
1355 int
1356 start_port(portid_t pid)
1357 {
1358 	int diag, need_check_link_status = -1;
1359 	portid_t pi;
1360 	queueid_t qi;
1361 	struct rte_port *port;
1362 	struct ether_addr mac_addr;
1363 	enum rte_eth_event_type event_type;
1364 
1365 	if (port_id_is_invalid(pid, ENABLED_WARN))
1366 		return 0;
1367 
1368 	if (dcb_config)
1369 		dcb_test = 1;
1370 	RTE_ETH_FOREACH_DEV(pi) {
1371 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1372 			continue;
1373 
1374 		need_check_link_status = 0;
1375 		port = &ports[pi];
1376 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1377 						 RTE_PORT_HANDLING) == 0) {
1378 			printf("Port %d is not stopped\n", pi);
1379 			continue;
1380 		}
1381 
1382 		if (port->need_reconfig > 0) {
1383 			port->need_reconfig = 0;
1384 
1385 			printf("Configuring Port %d (socket %u)\n", pi,
1386 					port->socket_id);
1387 			/* configure port */
1388 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1389 						&(port->dev_conf));
1390 			if (diag != 0) {
1391 				if (rte_atomic16_cmpset(&(port->port_status),
1392 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1393 					printf("Port %d can not be set back "
1394 							"to stopped\n", pi);
1395 				printf("Fail to configure port %d\n", pi);
1396 				/* try to reconfigure port next time */
1397 				port->need_reconfig = 1;
1398 				return -1;
1399 			}
1400 		}
1401 		if (port->need_reconfig_queues > 0) {
1402 			port->need_reconfig_queues = 0;
1403 			/* setup tx queues */
1404 			for (qi = 0; qi < nb_txq; qi++) {
1405 				if ((numa_support) &&
1406 					(txring_numa[pi] != NUMA_NO_CONFIG))
1407 					diag = rte_eth_tx_queue_setup(pi, qi,
1408 						nb_txd, txring_numa[pi],
1409 						&(port->tx_conf));
1410 				else
1411 					diag = rte_eth_tx_queue_setup(pi, qi,
1412 						nb_txd, port->socket_id,
1413 						&(port->tx_conf));
1414 
1415 				if (diag == 0)
1416 					continue;
1417 
1418 				/* Failed to set up a TX queue; return */
1419 				if (rte_atomic16_cmpset(&(port->port_status),
1420 							RTE_PORT_HANDLING,
1421 							RTE_PORT_STOPPED) == 0)
1422 					printf("Port %d can not be set back "
1423 							"to stopped\n", pi);
1424 				printf("Fail to configure port %d tx queues\n", pi);
1425 				/* try to reconfigure queues next time */
1426 				port->need_reconfig_queues = 1;
1427 				return -1;
1428 			}
1429 			/* setup rx queues */
1430 			for (qi = 0; qi < nb_rxq; qi++) {
1431 				if ((numa_support) &&
1432 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1433 					struct rte_mempool *mp =
1434 						mbuf_pool_find(rxring_numa[pi]);
1435 					if (mp == NULL) {
1436 						printf("Failed to setup RX queue: "
1437 							"no mempool allocated "
1438 							"on socket %d\n",
1439 							rxring_numa[pi]);
1440 						return -1;
1441 					}
1442 
1443 					diag = rte_eth_rx_queue_setup(pi, qi,
1444 					     nb_rxd, rxring_numa[pi],
1445 					     &(port->rx_conf), mp);
1446 				} else {
1447 					struct rte_mempool *mp =
1448 						mbuf_pool_find(port->socket_id);
1449 					if (mp == NULL) {
1450 						printf("Failed to setup RX queue: "
1451 							"no mempool allocated "
1452 							"on socket %d\n",
1453 							port->socket_id);
1454 						return -1;
1455 					}
1456 					diag = rte_eth_rx_queue_setup(pi, qi,
1457 					     nb_rxd, port->socket_id,
1458 					     &(port->rx_conf), mp);
1459 				}
1460 				if (diag == 0)
1461 					continue;
1462 
1463 				/* Failed to set up an RX queue; return */
1464 				if (rte_atomic16_cmpset(&(port->port_status),
1465 							RTE_PORT_HANDLING,
1466 							RTE_PORT_STOPPED) == 0)
1467 					printf("Port %d can not be set back "
1468 							"to stopped\n", pi);
1469 				printf("Fail to configure port %d rx queues\n", pi);
1470 				/* try to reconfigure queues next time */
1471 				port->need_reconfig_queues = 1;
1472 				return -1;
1473 			}
1474 		}
1475 
1476 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1477 		     event_type < RTE_ETH_EVENT_MAX;
1478 		     event_type++) {
1479 			diag = rte_eth_dev_callback_register(pi,
1480 							event_type,
1481 							eth_event_callback,
1482 							NULL);
1483 			if (diag) {
1484 				printf("Failed to setup event callback for event %d\n",
1485 					event_type);
1486 				return -1;
1487 			}
1488 		}
1489 
1490 		/* start port */
1491 		if (rte_eth_dev_start(pi) < 0) {
1492 			printf("Fail to start port %d\n", pi);
1493 
1494 			/* Failed to start the port; roll the status back */
1495 			if (rte_atomic16_cmpset(&(port->port_status),
1496 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1497 				printf("Port %d can not be set back to "
1498 							"stopped\n", pi);
1499 			continue;
1500 		}
1501 
1502 		if (rte_atomic16_cmpset(&(port->port_status),
1503 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1504 			printf("Port %d can not be set into started\n", pi);
1505 
1506 		rte_eth_macaddr_get(pi, &mac_addr);
1507 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1508 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1509 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1510 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1511 
1512 		/* at least one port started, need to check link status */
1513 		need_check_link_status = 1;
1514 	}
1515 
1516 	if (need_check_link_status == 1 && !no_link_check)
1517 		check_all_ports_link_status(RTE_PORT_ALL);
1518 	else if (need_check_link_status == 0)
1519 		printf("Please stop the ports first\n");
1520 
1521 	printf("Done\n");
1522 	return 0;
1523 }
1524 
1525 void
1526 stop_port(portid_t pid)
1527 {
1528 	portid_t pi;
1529 	struct rte_port *port;
1530 	int need_check_link_status = 0;
1531 
1532 	if (dcb_test) {
1533 		dcb_test = 0;
1534 		dcb_config = 0;
1535 	}
1536 
1537 	if (port_id_is_invalid(pid, ENABLED_WARN))
1538 		return;
1539 
1540 	printf("Stopping ports...\n");
1541 
1542 	RTE_ETH_FOREACH_DEV(pi) {
1543 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1544 			continue;
1545 
1546 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1547 			printf("Please remove port %d from forwarding configuration.\n", pi);
1548 			continue;
1549 		}
1550 
1551 		if (port_is_bonding_slave(pi)) {
1552 			printf("Please remove port %d from bonded device.\n", pi);
1553 			continue;
1554 		}
1555 
1556 		port = &ports[pi];
1557 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1558 						RTE_PORT_HANDLING) == 0)
1559 			continue;
1560 
1561 		rte_eth_dev_stop(pi);
1562 
1563 		if (rte_atomic16_cmpset(&(port->port_status),
1564 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1565 			printf("Port %d can not be set into stopped\n", pi);
1566 		need_check_link_status = 1;
1567 	}
1568 	if (need_check_link_status && !no_link_check)
1569 		check_all_ports_link_status(RTE_PORT_ALL);
1570 
1571 	printf("Done\n");
1572 }
1573 
1574 void
1575 close_port(portid_t pid)
1576 {
1577 	portid_t pi;
1578 	struct rte_port *port;
1579 
1580 	if (port_id_is_invalid(pid, ENABLED_WARN))
1581 		return;
1582 
1583 	printf("Closing ports...\n");
1584 
1585 	RTE_ETH_FOREACH_DEV(pi) {
1586 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1587 			continue;
1588 
1589 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1590 			printf("Please remove port %d from forwarding configuration.\n", pi);
1591 			continue;
1592 		}
1593 
1594 		if (port_is_bonding_slave(pi)) {
1595 			printf("Please remove port %d from bonded device.\n", pi);
1596 			continue;
1597 		}
1598 
1599 		port = &ports[pi];
1600 		if (rte_atomic16_cmpset(&(port->port_status),
1601 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1602 			printf("Port %d is already closed\n", pi);
1603 			continue;
1604 		}
1605 
1606 		if (rte_atomic16_cmpset(&(port->port_status),
1607 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1608 			printf("Port %d is not stopped\n", pi);
1609 			continue;
1610 		}
1611 
1612 		if (port->flow_list)
1613 			port_flow_flush(pi);
1614 		rte_eth_dev_close(pi);
1615 
1616 		if (rte_atomic16_cmpset(&(port->port_status),
1617 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1618 			printf("Port %d cannot be set to closed\n", pi);
1619 	}
1620 
1621 	printf("Done\n");
1622 }
1623 
1624 void
1625 attach_port(char *identifier)
1626 {
1627 	portid_t pi = 0;
1628 	unsigned int socket_id;
1629 
1630 	printf("Attaching a new port...\n");
1631 
1632 	if (identifier == NULL) {
1633 		printf("Invalid parameters are specified\n");
1634 		return;
1635 	}
1636 
1637 	if (rte_eth_dev_attach(identifier, &pi))
1638 		return;
1639 
1640 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1641 	/* if socket_id is invalid, set to 0 */
1642 	if (check_socket_id(socket_id) < 0)
1643 		socket_id = 0;
1644 	reconfig(pi, socket_id);
1645 	rte_eth_promiscuous_enable(pi);
1646 
1647 	nb_ports = rte_eth_dev_count();
1648 
1649 	ports[pi].port_status = RTE_PORT_STOPPED;
1650 
1651 	printf("Port %d is attached. The total number of ports is now %d\n", pi, nb_ports);
1652 	printf("Done\n");
1653 }
1654 
1655 void
1656 detach_port(uint8_t port_id)
1657 {
1658 	char name[RTE_ETH_NAME_MAX_LEN];
1659 
1660 	printf("Detaching a port...\n");
1661 
1662 	if (!port_is_closed(port_id)) {
1663 		printf("Please close port first\n");
1664 		return;
1665 	}
1666 
1667 	if (ports[port_id].flow_list)
1668 		port_flow_flush(port_id);
1669 
1670 	if (rte_eth_dev_detach(port_id, name))
1671 		return;
1672 
1673 	nb_ports = rte_eth_dev_count();
1674 
1675 	printf("Port '%s' is detached. The total number of ports is now %d\n",
1676 			name, nb_ports);
1677 	printf("Done\n");
1678 	return;
1679 }
1680 
1681 void
1682 pmd_test_exit(void)
1683 {
1684 	portid_t pt_id;
1685 
1686 	if (test_done == 0)
1687 		stop_packet_forwarding();
1688 
1689 	if (ports != NULL) {
1690 		no_link_check = 1;
1691 		RTE_ETH_FOREACH_DEV(pt_id) {
1692 			printf("\nShutting down port %d...\n", pt_id);
1693 			fflush(stdout);
1694 			stop_port(pt_id);
1695 			close_port(pt_id);
1696 		}
1697 	}
1698 	printf("\nBye...\n");
1699 }
1700 
1701 typedef void (*cmd_func_t)(void);
1702 struct pmd_test_command {
1703 	const char *cmd_name;
1704 	cmd_func_t cmd_func;
1705 };
1706 
1707 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1708 
1709 /* Check the link status of all ports for up to 9s, and print it at the end */
1710 static void
1711 check_all_ports_link_status(uint32_t port_mask)
1712 {
1713 #define CHECK_INTERVAL 100 /* 100ms */
1714 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1715 	uint8_t portid, count, all_ports_up, print_flag = 0;
1716 	struct rte_eth_link link;
1717 
1718 	printf("Checking link statuses...\n");
1719 	fflush(stdout);
1720 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1721 		all_ports_up = 1;
1722 		RTE_ETH_FOREACH_DEV(portid) {
1723 			if ((port_mask & (1 << portid)) == 0)
1724 				continue;
1725 			memset(&link, 0, sizeof(link));
1726 			rte_eth_link_get_nowait(portid, &link);
1727 			/* print link status if flag set */
1728 			if (print_flag == 1) {
1729 				if (link.link_status)
1730 					printf("Port %d Link Up - speed %u "
1731 						"Mbps - %s\n", (uint8_t)portid,
1732 						(unsigned)link.link_speed,
1733 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1734 					("full-duplex") : ("half-duplex"));
1735 				else
1736 					printf("Port %d Link Down\n",
1737 						(uint8_t)portid);
1738 				continue;
1739 			}
1740 			/* clear all_ports_up flag if any link down */
1741 			if (link.link_status == ETH_LINK_DOWN) {
1742 				all_ports_up = 0;
1743 				break;
1744 			}
1745 		}
1746 		/* after finally printing all link status, get out */
1747 		if (print_flag == 1)
1748 			break;
1749 
1750 		if (all_ports_up == 0) {
1751 			fflush(stdout);
1752 			rte_delay_ms(CHECK_INTERVAL);
1753 		}
1754 
1755 		/* set the print_flag if all ports up or timeout */
1756 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1757 			print_flag = 1;
1758 		}
1759 
1760 		if (lsc_interrupt)
1761 			break;
1762 	}
1763 }
1764 
1765 static void
1766 rmv_event_callback(void *arg)
1767 {
1768 	struct rte_eth_dev *dev;
1769 	struct rte_devargs *da;
1770 	char name[32] = "";
1771 	uint8_t port_id = (intptr_t)arg;
1772 
1773 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1774 	dev = &rte_eth_devices[port_id];
1775 	da = dev->device->devargs;
1776 
1777 	stop_port(port_id);
1778 	close_port(port_id);
1779 	if (da->type == RTE_DEVTYPE_VIRTUAL)
1780 		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1781 	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1782 		rte_eal_pci_device_name(&da->pci.addr, name, sizeof(name));
1783 	printf("removing device %s\n", name);
1784 	rte_eal_dev_detach(name);
1785 	dev->state = RTE_ETH_DEV_UNUSED;
1786 }
1787 
1788 /* This function is used by the interrupt thread */
1789 static void
1790 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1791 {
1792 	static const char * const event_desc[] = {
1793 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1794 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1795 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1796 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1797 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1798 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1799 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1800 		[RTE_ETH_EVENT_MAX] = NULL,
1801 	};
1802 
1803 	RTE_SET_USED(param);
1804 
1805 	if (type >= RTE_ETH_EVENT_MAX) {
1806 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1807 			port_id, __func__, type);
1808 		fflush(stderr);
1809 	} else {
1810 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1811 			event_desc[type]);
1812 		fflush(stdout);
1813 	}
1814 
1815 	switch (type) {
1816 	case RTE_ETH_EVENT_INTR_RMV:
1817 		if (rte_eal_alarm_set(100000,
1818 				rmv_event_callback, (void *)(intptr_t)port_id))
1819 			fprintf(stderr, "Could not set up deferred device removal\n");
1820 		break;
1821 	default:
1822 		break;
1823 	}
1824 }
1825 
1826 static int
1827 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1828 {
1829 	uint16_t i;
1830 	int diag;
1831 	uint8_t mapping_found = 0;
1832 
1833 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1834 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1835 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1836 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1837 					tx_queue_stats_mappings[i].queue_id,
1838 					tx_queue_stats_mappings[i].stats_counter_id);
1839 			if (diag != 0)
1840 				return diag;
1841 			mapping_found = 1;
1842 		}
1843 	}
1844 	if (mapping_found)
1845 		port->tx_queue_stats_mapping_enabled = 1;
1846 	return 0;
1847 }
1848 
1849 static int
1850 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1851 {
1852 	uint16_t i;
1853 	int diag;
1854 	uint8_t mapping_found = 0;
1855 
1856 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1857 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1858 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1859 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1860 					rx_queue_stats_mappings[i].queue_id,
1861 					rx_queue_stats_mappings[i].stats_counter_id);
1862 			if (diag != 0)
1863 				return diag;
1864 			mapping_found = 1;
1865 		}
1866 	}
1867 	if (mapping_found)
1868 		port->rx_queue_stats_mapping_enabled = 1;
1869 	return 0;
1870 }
1871 
1872 static void
1873 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1874 {
1875 	int diag = 0;
1876 
1877 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1878 	if (diag != 0) {
1879 		if (diag == -ENOTSUP) {
1880 			port->tx_queue_stats_mapping_enabled = 0;
1881 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
1882 		}
1883 		else
1884 			rte_exit(EXIT_FAILURE,
1885 					"set_tx_queue_stats_mapping_registers "
1886 					"failed for port id=%d diag=%d\n",
1887 					pi, diag);
1888 	}
1889 
1890 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1891 	if (diag != 0) {
1892 		if (diag == -ENOTSUP) {
1893 			port->rx_queue_stats_mapping_enabled = 0;
1894 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
1895 		}
1896 		else
1897 			rte_exit(EXIT_FAILURE,
1898 					"set_rx_queue_stats_mapping_registers "
1899 					"failed for port id=%d diag=%d\n",
1900 					pi, diag);
1901 	}
1902 }
1903 
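/*
 * Start from the PMD's default Rx/Tx ring configuration and override only
 * the values that were explicitly set on the command line; the
 * RTE_PMD_PARAM_UNSET sentinel marks parameters the user did not supply.
 */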
1904 static void
1905 rxtx_port_config(struct rte_port *port)
1906 {
1907 	port->rx_conf = port->dev_info.default_rxconf;
1908 	port->tx_conf = port->dev_info.default_txconf;
1909 
1910 	/* Check if any RX/TX parameters have been passed */
1911 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1912 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1913 
1914 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1915 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1916 
1917 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1918 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1919 
1920 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1921 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1922 
1923 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1924 		port->rx_conf.rx_drop_en = rx_drop_en;
1925 
1926 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1927 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1928 
1929 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1930 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1931 
1932 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1933 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1934 
1935 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1936 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1937 
1938 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1939 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1940 
1941 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1942 		port->tx_conf.txq_flags = txq_flags;
1943 }
1944 
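/*
 * Apply the global testpmd configuration to every probed port: RSS is
 * enabled only when more than one Rx queue is requested, and LSC/RMV
 * interrupts are enabled only when both requested by the user and
 * advertised by the device through its dev_flags.
 */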
1945 void
1946 init_port_config(void)
1947 {
1948 	portid_t pid;
1949 	struct rte_port *port;
1950 
1951 	RTE_ETH_FOREACH_DEV(pid) {
1952 		port = &ports[pid];
1953 		port->dev_conf.rxmode = rx_mode;
1954 		port->dev_conf.fdir_conf = fdir_conf;
1955 		if (nb_rxq > 1) {
1956 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1957 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1958 		} else {
1959 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1960 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1961 		}
1962 
1963 		if (port->dcb_flag == 0) {
1964 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1965 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1966 			else
1967 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1968 		}
1969 
1970 		rxtx_port_config(port);
1971 
1972 		rte_eth_macaddr_get(pid, &port->eth_addr);
1973 
1974 		map_port_queue_stats_mapping_registers(pid, port);
1975 #ifdef RTE_NIC_BYPASS
1976 		rte_eth_dev_bypass_init(pid);
1977 #endif
1978 
1979 		if (lsc_interrupt &&
1980 		    (rte_eth_devices[pid].data->dev_flags &
1981 		     RTE_ETH_DEV_INTR_LSC))
1982 			port->dev_conf.intr_conf.lsc = 1;
1983 		if (rmv_interrupt &&
1984 		    (rte_eth_devices[pid].data->dev_flags &
1985 		     RTE_ETH_DEV_INTR_RMV))
1986 			port->dev_conf.intr_conf.rmv = 1;
1987 	}
1988 }
1989 
1990 void set_port_slave_flag(portid_t slave_pid)
1991 {
1992 	struct rte_port *port;
1993 
1994 	port = &ports[slave_pid];
1995 	port->slave_flag = 1;
1996 }
1997 
1998 void clear_port_slave_flag(portid_t slave_pid)
1999 {
2000 	struct rte_port *port;
2001 
2002 	port = &ports[slave_pid];
2003 	port->slave_flag = 0;
2004 }
2005 
2006 uint8_t port_is_bonding_slave(portid_t slave_pid)
2007 {
2008 	struct rte_port *port;
2009 
2010 	port = &ports[slave_pid];
2011 	return port->slave_flag;
2012 }
2013 
2014 const uint16_t vlan_tags[] = {
2015 		0,  1,  2,  3,  4,  5,  6,  7,
2016 		8,  9, 10, 11,  12, 13, 14, 15,
2017 		16, 17, 18, 19, 20, 21, 22, 23,
2018 		24, 25, 26, 27, 28, 29, 30, 31
2019 };
2020 
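/*
 * Pool/TC arithmetic for VMDq+DCB on a 128-queue device (e.g. ixgbe):
 * 4 TCs => 32 pools (32 * 4 = 128 queues), 8 TCs => 16 pools
 * (16 * 8 = 128 queues). One pool map entry is created per pool, so with
 * 16 pools only the first 16 entries of vlan_tags[] above are used.
 */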
2021 static int
2022 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2023 		 enum dcb_mode_enable dcb_mode,
2024 		 enum rte_eth_nb_tcs num_tcs,
2025 		 uint8_t pfc_en)
2026 {
2027 	uint8_t i;
2028 
2029 	/*
2030 	 * Build the DCB+VT configuration from the vlan_tags array given
2031 	 * above and the number of traffic classes available for use.
2032 	 */
2033 	if (dcb_mode == DCB_VT_ENABLED) {
2034 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2035 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2036 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2037 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2038 
2039 		/* VMDQ+DCB RX and TX configurations */
2040 		vmdq_rx_conf->enable_default_pool = 0;
2041 		vmdq_rx_conf->default_pool = 0;
2042 		vmdq_rx_conf->nb_queue_pools =
2043 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2044 		vmdq_tx_conf->nb_queue_pools =
2045 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2046 
2047 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2048 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2049 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2050 			vmdq_rx_conf->pool_map[i].pools =
2051 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2052 		}
2053 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2054 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2055 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2056 		}
2057 
2058 		/* set DCB mode of RX and TX of multiple queues */
2059 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2060 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2061 	} else {
2062 		struct rte_eth_dcb_rx_conf *rx_conf =
2063 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2064 		struct rte_eth_dcb_tx_conf *tx_conf =
2065 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2066 
2067 		rx_conf->nb_tcs = num_tcs;
2068 		tx_conf->nb_tcs = num_tcs;
2069 
2070 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2071 			rx_conf->dcb_tc[i] = i % num_tcs;
2072 			tx_conf->dcb_tc[i] = i % num_tcs;
2073 		}
2074 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2075 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2076 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2077 	}
2078 
2079 	if (pfc_en)
2080 		eth_conf->dcb_capability_en =
2081 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2082 	else
2083 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2084 
2085 	return 0;
2086 }
2087 
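/*
 * Reconfigure a port for DCB testing: build the DCB configuration, probe
 * the device with zero queues to refresh dev_info, derive the Rx/Tx queue
 * counts from the chosen mode, and program the VLAN filter with the
 * vlan_tags array.
 */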
2088 int
2089 init_port_dcb_config(portid_t pid,
2090 		     enum dcb_mode_enable dcb_mode,
2091 		     enum rte_eth_nb_tcs num_tcs,
2092 		     uint8_t pfc_en)
2093 {
2094 	struct rte_eth_conf port_conf;
2095 	struct rte_port *rte_port;
2096 	int retval;
2097 	uint16_t i;
2098 
2099 	rte_port = &ports[pid];
2100 
2101 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2102 	/* Enter DCB configuration status */
2103 	dcb_config = 1;
2104 
2105 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
2106 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2107 	if (retval < 0)
2108 		return retval;
2109 	port_conf.rxmode.hw_vlan_filter = 1;
2110 
2111 	/*
2112 	 * Write the configuration into the device.
2113 	 * Set the numbers of RX & TX queues to 0, so
2114 	 * the RX & TX queues will not be setup.
2115 	 */
2116 	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
	if (retval < 0)
		return retval;
2117 
2118 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2119 
2120 	/* If dev_info.vmdq_pool_base is greater than 0,
2121 	 * the queue IDs of the VMDq pools start after the PF queues.
2122 	 */
2123 	if (dcb_mode == DCB_VT_ENABLED &&
2124 	    rte_port->dev_info.vmdq_pool_base > 0) {
2125 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2126 			" for port %d.\n", pid);
2127 		return -1;
2128 	}
2129 
2130 	/* Assume all ports in testpmd have the same DCB capability
2131 	 * and the same number of Rx/Tx queues in DCB mode.
2132 	 */
2133 	if (dcb_mode == DCB_VT_ENABLED) {
2134 		if (rte_port->dev_info.max_vfs > 0) {
2135 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2136 			nb_txq = rte_port->dev_info.nb_tx_queues;
2137 		} else {
2138 			nb_rxq = rte_port->dev_info.max_rx_queues;
2139 			nb_txq = rte_port->dev_info.max_tx_queues;
2140 		}
2141 	} else {
2142 		/* if VT is disabled, use all PF queues */
2143 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2144 			nb_rxq = rte_port->dev_info.max_rx_queues;
2145 			nb_txq = rte_port->dev_info.max_tx_queues;
2146 		} else {
2147 			nb_rxq = (queueid_t)num_tcs;
2148 			nb_txq = (queueid_t)num_tcs;
2150 		}
2151 	}
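	/*
	 * Force a fixed Rx free threshold for DCB runs; rxtx_port_config()
	 * below copies it into the per-port Rx configuration.
	 */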
2152 	rx_free_thresh = 64;
2153 
2154 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2155 
2156 	rxtx_port_config(rte_port);
2157 	/* VLAN filter */
2158 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2159 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2160 		rx_vft_set(pid, vlan_tags[i], 1);
2161 
2162 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2163 	map_port_queue_stats_mapping_registers(pid, rte_port);
2164 
2165 	rte_port->dcb_flag = 1;
2166 
2167 	return 0;
2168 }
2169 
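/*
 * Allocate the ports[] array for RTE_MAX_ETHPORTS entries so that ports
 * attached at runtime also have a slot; rte_zmalloc() returns zeroed,
 * cache-line-aligned memory.
 */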
2170 static void
2171 init_port(void)
2172 {
2173 	/* Configuration of Ethernet ports. */
2174 	ports = rte_zmalloc("testpmd: ports",
2175 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2176 			    RTE_CACHE_LINE_SIZE);
2177 	if (ports == NULL) {
2178 		rte_exit(EXIT_FAILURE,
2179 				"rte_zmalloc(%d struct rte_port) failed\n",
2180 				RTE_MAX_ETHPORTS);
2181 	}
2182 }
2183 
2184 static void
2185 force_quit(void)
2186 {
2187 	pmd_test_exit();
2188 	prompt_exit();
2189 }
2190 
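/*
 * Note: the cleanup performed here (printf, pdump/latencystats teardown,
 * port stop) is not async-signal-safe; testpmd accepts this trade-off for
 * a test application. The handler re-raises the signal with the default
 * disposition so the process exits with the conventional status.
 */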
2191 static void
2192 signal_handler(int signum)
2193 {
2194 	if (signum == SIGINT || signum == SIGTERM) {
2195 		printf("\nSignal %d received, preparing to exit...\n",
2196 				signum);
2197 #ifdef RTE_LIBRTE_PDUMP
2198 		/* uninitialize packet capture framework */
2199 		rte_pdump_uninit();
2200 #endif
2201 #ifdef RTE_LIBRTE_LATENCY_STATS
2202 		rte_latencystats_uninit();
2203 #endif
2204 		force_quit();
2205 		/* exit with the expected status */
2206 		signal(signum, SIG_DFL);
2207 		kill(getpid(), signum);
2208 	}
2209 }
2210 
2211 int
2212 main(int argc, char** argv)
2213 {
2214 	int  diag;
2215 	uint8_t port_id;
2216 
2217 	signal(SIGINT, signal_handler);
2218 	signal(SIGTERM, signal_handler);
2219 
2220 	diag = rte_eal_init(argc, argv);
2221 	if (diag < 0)
2222 		rte_panic("Cannot init EAL\n");
2223 
2224 #ifdef RTE_LIBRTE_PDUMP
2225 	/* initialize packet capture framework */
2226 	rte_pdump_init(NULL);
2227 #endif
2228 
2229 	nb_ports = (portid_t) rte_eth_dev_count();
2230 	if (nb_ports == 0)
2231 		RTE_LOG(WARNING, EAL, "No probed Ethernet devices\n");
2232 
2233 	/* allocate port structures, and init them */
2234 	init_port();
2235 
2236 	set_def_fwd_config();
2237 	if (nb_lcores == 0)
2238 		rte_panic("Empty set of forwarding logical cores - check the "
2239 			  "core mask supplied in the command parameters\n");
2240 
2241 	/* Bitrate stats disabled by default */
2242 	bitrate_enabled = 0;
2243 
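	/*
	 * rte_eal_init() returned the number of arguments it consumed;
	 * skip past them so that launch_args_parse() only sees testpmd's
	 * own options.
	 */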
2244 	argc -= diag;
2245 	argv += diag;
2246 	if (argc > 1)
2247 		launch_args_parse(argc, argv);
2248 
2249 	if (!nb_rxq && !nb_txq)
2250 		printf("Warning: either Rx or Tx queue count should be non-zero\n");
2251 
2252 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2253 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2254 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2255 		       nb_rxq, nb_txq);
2256 
2257 	init_config();
2258 	if (start_port(RTE_PORT_ALL) != 0)
2259 		rte_exit(EXIT_FAILURE, "Failed to start ports\n");
2260 
2261 	/* set all ports to promiscuous mode by default */
2262 	RTE_ETH_FOREACH_DEV(port_id)
2263 		rte_eth_promiscuous_enable(port_id);
2264 
2265 	/* Init metrics library */
2266 	rte_metrics_init(rte_socket_id());
2267 
2268 #ifdef RTE_LIBRTE_LATENCY_STATS
2269 	if (latencystats_enabled != 0) {
2270 		int ret = rte_latencystats_init(1, NULL);
2271 		if (ret)
2272 			printf("Warning: rte_latencystats_init() "
2273 				"returned error %d\n", ret);
2274 		printf("Latencystats running on lcore %d\n",
2275 			latencystats_lcore_id);
2276 	}
2277 #endif
2278 
2279 	/* Setup bitrate stats */
2280 #ifdef RTE_LIBRTE_BITRATE
2281 	if (bitrate_enabled != 0) {
2282 		bitrate_data = rte_stats_bitrate_create();
2283 		if (bitrate_data == NULL)
2284 			rte_exit(EXIT_FAILURE,
2285 				"Could not allocate bitrate data.\n");
2286 		rte_stats_bitrate_reg(bitrate_data);
2287 	}
2288 #endif
2289 
2290 #ifdef RTE_LIBRTE_CMDLINE
2291 	if (strlen(cmdline_filename) != 0)
2292 		cmdline_read_from_file(cmdline_filename);
2293 
2294 	if (interactive == 1) {
2295 		if (auto_start) {
2296 			printf("Start automatic packet forwarding\n");
2297 			start_packet_forwarding(0);
2298 		}
2299 		prompt();
2300 		pmd_test_exit();
2301 	} else
2302 #endif
2303 	{
2304 		char c;
2305 		int rc;
2306 
2307 		printf("No command-line core given, starting packet forwarding\n");
2308 		start_packet_forwarding(0);
2309 		printf("Press enter to exit\n");
2310 		rc = read(0, &c, 1);
2311 		pmd_test_exit();
2312 		if (rc < 0)
2313 			return 1;
2314 	}
2315 
2316 	return 0;
2317 }
2318