/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"

uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< NUMA enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed Ethernet ports. */
portid_t nb_ports;             /**< Number of probed Ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools, if
                                      * specified on the command line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking the link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats are enabled on the command line.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned max_socket = 0;

/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static void eth_event_callback(uint8_t port_id,
			       enum rte_eth_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}

static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}

/*
 * Per-socket mbuf pool creation, done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* If the former Xen allocation failed, fall back to normal allocation. */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}

/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}

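/*
 * Global configuration done once at startup: allocate the per-lcore
 * forwarding contexts, create the mbuf pools, and set up the default
 * port configuration and forwarding streams.
 */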
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbufs.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < max_socket; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	/*
	 * Record which mbuf pool each logical core should use, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}


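/*
 * Reconfigure a port after it has been attached: refresh its device info,
 * mark the port and its queues for reconfiguration, record its socket id
 * and rebuild the port configuration.
 */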
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}


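/*
 * (Re)allocate the forwarding streams: nb_ports * max(nb_rxq, nb_txq)
 * streams in total. Also assign a socket id to each port according to
 * the NUMA configuration. Return 0 on success, -1 on error.
 */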
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* Set the socket id of each port according to the NUMA configuration. */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* Clear the old streams. */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* Initialize the new streams. */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */

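/*
 * Display the per-port forwarding statistics accumulated since forwarding
 * was started, including checksum error and per-queue counters when they
 * are enabled.
 */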
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}

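/*
 * Display the statistics of a single forwarding stream; streams with no
 * activity are skipped.
 */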
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* If in checksum forwarding mode, display checksum error counters. */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}

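/*
 * Drain any packets still pending in the RX queues of the forwarding
 * ports before a new forwarding run starts, so that old traffic does not
 * pollute the new statistics.
 */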
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * packets, so a timer is used to exit the
				 * loop after a 1 second timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
	}
}

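/*
 * Main forwarding loop of a logical core: apply the packet forwarding
 * function to each of the core's streams until the core is told to stop,
 * updating the bitrate and latency statistics when they are compiled in.
 */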
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		tics_current = rte_rdtsc();
		if (tics_current - tics_datum >= tics_per_1sec) {
			/* Periodic bitrate calculation */
			for (idx_port = 0; idx_port < cnt_ports; idx_port++)
				rte_stats_bitrate_calc(bitrate_data, idx_port);
			tics_datum = tics_current;
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (!fc->stopped);
}

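/*
 * Entry point launched on each forwarding lcore: run the packet
 * forwarding function of the current forwarding engine.
 */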
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}

/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}

/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - Launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}

/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq is 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}

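/*
 * Stop packet forwarding: tell all forwarding lcores to stop, wait for
 * them to finish, then collect and display the per-stream, per-port and
 * accumulated statistics.
 */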
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}

void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up failed.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down failed.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
			(port->slave_flag == 0))
			return 0;
	}

	/* All non-slave ports are started. */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
			(port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}

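/*
 * Start the given port (or all ports when pid is RTE_PORT_ALL):
 * reconfigure the device and its queues if needed, register the event
 * callbacks, start the device and check the link status.
 * Return 0 on success, -1 on failure.
 */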
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocation"
							" on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue: "
							"no mempool allocation"
							" on socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, port->socket_id,
					     &(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
		     event_type++) {
			diag = rte_eth_dev_callback_register(pi,
							event_type,
							eth_event_callback,
							NULL);
			if (diag) {
				printf("Failed to set up event callback for event %d\n",
					event_type);
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Failed to start the port; set it back to stopped. */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* At least one port was started; the link status needs checking. */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}

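/*
 * Stop the given port (or all ports when pid is RTE_PORT_ALL), skipping
 * ports that are still forwarding or are bonding slaves, and check the
 * link status afterwards.
 */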
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

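/*
 * Close the given port (or all ports when pid is RTE_PORT_ALL): flush its
 * flow rules and release the device, skipping ports that are still
 * forwarding or are bonding slaves.
 */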
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}

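/*
 * Attach a new port identified by a PCI address or virtual device name,
 * configure it and enable promiscuous mode on it.
 */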
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. The total number of ports is now %d\n",
			pi, nb_ports);
	printf("Done\n");
}

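/*
 * Detach a closed port from the application and release its device.
 */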
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name))
		return;

	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. The total number of ports is now %d\n",
			name, nb_ports);
	printf("Done\n");
}

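/*
 * Called at application exit: stop forwarding if it is still running,
 * then stop and close all ports.
 */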
void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))

/* Check the link status of all ports, waiting up to 9 s, and print the final status. */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}

		if (lsc_interrupt)
			break;
	}
}

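/*
 * Deferred handler for a device removal interrupt: stop and close the
 * removed port, then detach its underlying device.
 */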
static void
rmv_event_callback(void *arg)
{
	struct rte_eth_dev *dev;
	struct rte_devargs *da;
	char name[32] = "";
	uint8_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	da = dev->device->devargs;

	stop_port(port_id);
	close_port(port_id);
	if (da->type == RTE_DEVTYPE_VIRTUAL)
		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
		rte_eal_pci_device_name(&da->pci.addr, name, sizeof(name));
	printf("removing device %s\n", name);
	rte_eal_dev_detach(name);
	dev->state = RTE_ETH_DEV_UNUSED;
}

/* This function is used by the interrupt thread */
static void
eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else {
		printf("\nPort %" PRIu8 ": %s event\n", port_id,
			event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
}

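/*
 * Apply the TX queue to statistics-register mappings configured on the
 * command line to the given port.
 */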
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

1838 static int
1839 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1840 {
1841 	uint16_t i;
1842 	int diag;
1843 	uint8_t mapping_found = 0;
1844 
1845 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1846 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1847 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1848 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1849 					rx_queue_stats_mappings[i].queue_id,
1850 					rx_queue_stats_mappings[i].stats_counter_id);
1851 			if (diag != 0)
1852 				return diag;
1853 			mapping_found = 1;
1854 		}
1855 	}
1856 	if (mapping_found)
1857 		port->rx_queue_stats_mapping_enabled = 1;
1858 	return 0;
1859 }
1860 
1861 static void
1862 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1863 {
1864 	int diag = 0;
1865 
1866 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1867 	if (diag != 0) {
1868 		if (diag == -ENOTSUP) {
1869 			port->tx_queue_stats_mapping_enabled = 0;
1870 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1871 		} else
1873 			rte_exit(EXIT_FAILURE,
1874 					"set_tx_queue_stats_mapping_registers "
1875 					"failed for port id=%d diag=%d\n",
1876 					pi, diag);
1877 	}
1878 
1879 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1880 	if (diag != 0) {
1881 		if (diag == -ENOTSUP) {
1882 			port->rx_queue_stats_mapping_enabled = 0;
1883 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1884 		} else
1886 			rte_exit(EXIT_FAILURE,
1887 					"set_rx_queue_stats_mapping_registers "
1888 					"failed for port id=%d diag=%d\n",
1889 					pi, diag);
1890 	}
1891 }
1892 
1893 static void
1894 rxtx_port_config(struct rte_port *port)
1895 {
1896 	port->rx_conf = port->dev_info.default_rxconf;
1897 	port->tx_conf = port->dev_info.default_txconf;
1898 
1899 	/* Check if any RX/TX parameters have been passed */
1900 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1901 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1902 
1903 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1904 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1905 
1906 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1907 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1908 
1909 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1910 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1911 
1912 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1913 		port->rx_conf.rx_drop_en = rx_drop_en;
1914 
1915 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1916 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1917 
1918 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1919 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1920 
1921 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1922 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1923 
1924 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1925 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1926 
1927 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1928 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1929 
1930 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1931 		port->tx_conf.txq_flags = txq_flags;
1932 }
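/*
 * The overrides above are fed by testpmd command-line options (e.g.
 * --rxpt/--rxht/--rxwt and --txpt/--txht/--txwt for the prefetch, host
 * and write-back thresholds, --rxfreet/--txfreet/--txrst for the free
 * and RS thresholds, --txqflags for the TX queue flags); anything left
 * at RTE_PMD_PARAM_UNSET keeps the PMD default reported in dev_info.
 */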
1933 
1934 void
1935 init_port_config(void)
1936 {
1937 	portid_t pid;
1938 	struct rte_port *port;
1939 
1940 	RTE_ETH_FOREACH_DEV(pid) {
1941 		port = &ports[pid];
1942 		port->dev_conf.rxmode = rx_mode;
1943 		port->dev_conf.fdir_conf = fdir_conf;
1944 		if (nb_rxq > 1) {
1945 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1946 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1947 		} else {
1948 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1949 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1950 		}
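		/*
		 * RSS is only meaningful with more than one RX queue: with a
		 * single queue the hash functions are cleared, so the mq_mode
		 * selection below falls back to ETH_MQ_RX_NONE.
		 */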
1951 
1952 		if (port->dcb_flag == 0) {
1953 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1954 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1955 			else
1956 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1957 		}
1958 
1959 		rxtx_port_config(port);
1960 
1961 		rte_eth_macaddr_get(pid, &port->eth_addr);
1962 
1963 		map_port_queue_stats_mapping_registers(pid, port);
1964 #ifdef RTE_NIC_BYPASS
1965 		rte_eth_dev_bypass_init(pid);
1966 #endif
1967 
1968 		if (lsc_interrupt &&
1969 		    (rte_eth_devices[pid].data->dev_flags &
1970 		     RTE_ETH_DEV_INTR_LSC))
1971 			port->dev_conf.intr_conf.lsc = 1;
1972 		if (rmv_interrupt &&
1973 		    (rte_eth_devices[pid].data->dev_flags &
1974 		     RTE_ETH_DEV_INTR_RMV))
1975 			port->dev_conf.intr_conf.rmv = 1;
1976 	}
1977 }
1978 
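/*
 * Helpers to mark ports that have been enslaved to a bonding device;
 * testpmd uses the flag to skip such ports when operating on ports
 * directly, since they are managed through the bonding master.
 */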
1979 void set_port_slave_flag(portid_t slave_pid)
1980 {
1981 	struct rte_port *port;
1982 
1983 	port = &ports[slave_pid];
1984 	port->slave_flag = 1;
1985 }
1986 
1987 void clear_port_slave_flag(portid_t slave_pid)
1988 {
1989 	struct rte_port *port;
1990 
1991 	port = &ports[slave_pid];
1992 	port->slave_flag = 0;
1993 }
1994 
1995 uint8_t port_is_bonding_slave(portid_t slave_pid)
1996 {
1997 	struct rte_port *port;
1998 
1999 	port = &ports[slave_pid];
2000 	return port->slave_flag;
2001 }
2002 
2003 const uint16_t vlan_tags[] = {
2004 		0,  1,  2,  3,  4,  5,  6,  7,
2005 		8,  9, 10, 11,  12, 13, 14, 15,
2006 		16, 17, 18, 19, 20, 21, 22, 23,
2007 		24, 25, 26, 27, 28, 29, 30, 31
2008 };
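/*
 * One VLAN tag per VMDq pool: get_eth_dcb_conf() below maps each tag to
 * a pool, so the array covers the largest supported pool count
 * (ETH_32_POOLS).
 */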
2009 
2010 static int
2011 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2012 		 enum dcb_mode_enable dcb_mode,
2013 		 enum rte_eth_nb_tcs num_tcs,
2014 		 uint8_t pfc_en)
2015 {
2016 	uint8_t i;
2017 
2018 	/*
2019 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2020 	 * given above, and the number of traffic classes available for use.
2021 	 */
2022 	if (dcb_mode == DCB_VT_ENABLED) {
2023 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2024 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2025 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2026 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2027 
2028 		/* VMDQ+DCB RX and TX configurations */
2029 		vmdq_rx_conf->enable_default_pool = 0;
2030 		vmdq_rx_conf->default_pool = 0;
2031 		vmdq_rx_conf->nb_queue_pools =
2032 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2033 		vmdq_tx_conf->nb_queue_pools =
2034 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2035 
2036 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2037 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2038 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2039 			vmdq_rx_conf->pool_map[i].pools =
2040 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2041 		}
2042 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2043 			vmdq_rx_conf->dcb_tc[i] = i;
2044 			vmdq_tx_conf->dcb_tc[i] = i;
2045 		}
2046 
2047 		/* set DCB mode of RX and TX of multiple queues */
2048 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2049 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2050 	} else {
2051 		struct rte_eth_dcb_rx_conf *rx_conf =
2052 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2053 		struct rte_eth_dcb_tx_conf *tx_conf =
2054 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2055 
2056 		rx_conf->nb_tcs = num_tcs;
2057 		tx_conf->nb_tcs = num_tcs;
2058 
2059 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2060 			rx_conf->dcb_tc[i] = i % num_tcs;
2061 			tx_conf->dcb_tc[i] = i % num_tcs;
2062 		}
2063 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2064 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2065 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2066 	}
2067 
2068 	if (pfc_en)
2069 		eth_conf->dcb_capability_en =
2070 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2071 	else
2072 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2073 
2074 	return 0;
2075 }
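/*
 * Example use (a sketch; the values are illustrative): a DCB-only
 * configuration with 4 traffic classes and priority flow control
 * enabled would be built with
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	get_eth_dcb_conf(&conf, DCB_ENABLED, ETH_4_TCS, 1);
 */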
2076 
2077 int
2078 init_port_dcb_config(portid_t pid,
2079 		     enum dcb_mode_enable dcb_mode,
2080 		     enum rte_eth_nb_tcs num_tcs,
2081 		     uint8_t pfc_en)
2082 {
2083 	struct rte_eth_conf port_conf;
2084 	struct rte_port *rte_port;
2085 	int retval;
2086 	uint16_t i;
2087 
2088 	rte_port = &ports[pid];
2089 
2090 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2091 	/* Enter DCB configuration status */
2092 	dcb_config = 1;
2093 
2094 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
2095 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2096 	if (retval < 0)
2097 		return retval;
2098 	port_conf.rxmode.hw_vlan_filter = 1;
2099 
2100 	/**
2101 	 * Write the configuration into the device.
2102 	 * Set the number of RX & TX queues to 0, so
2103 	 * the RX & TX queues will not be set up.
2104 	 */
2105 	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2106 
2107 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2108 
2109 	/* If dev_info.vmdq_pool_base is greater than 0,
2110 	 * the queue ids of the VMDq pools start after the PF queues.
2111 	 */
2112 	if (dcb_mode == DCB_VT_ENABLED &&
2113 	    rte_port->dev_info.vmdq_pool_base > 0) {
2114 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2115 			" for port %d.\n", pid);
2116 		return -1;
2117 	}
2118 
2119 	/* Assume all ports in testpmd have the same DCB capability
2120 	 * and the same number of rxq and txq in DCB mode.
2121 	 */
2122 	if (dcb_mode == DCB_VT_ENABLED) {
2123 		if (rte_port->dev_info.max_vfs > 0) {
2124 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2125 			nb_txq = rte_port->dev_info.nb_tx_queues;
2126 		} else {
2127 			nb_rxq = rte_port->dev_info.max_rx_queues;
2128 			nb_txq = rte_port->dev_info.max_tx_queues;
2129 		}
2130 	} else {
2131 		/* if VT is disabled, use all PF queues */
2132 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2133 			nb_rxq = rte_port->dev_info.max_rx_queues;
2134 			nb_txq = rte_port->dev_info.max_tx_queues;
2135 		} else {
2136 			nb_rxq = (queueid_t)num_tcs;
2137 			nb_txq = (queueid_t)num_tcs;
2139 		}
2140 	}
2141 	rx_free_thresh = 64;
2142 
2143 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2144 
2145 	rxtx_port_config(rte_port);
2146 	/* VLAN filter */
2147 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2148 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2149 		rx_vft_set(pid, vlan_tags[i], 1);
2150 
2151 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2152 	map_port_queue_stats_mapping_registers(pid, rte_port);
2153 
2154 	rte_port->dcb_flag = 1;
2155 
2156 	return 0;
2157 }
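/*
 * init_port_dcb_config() is driven from the interactive "port config
 * <port> dcb vt (on|off) <num_tcs> pfc (on|off)" command (see
 * cmdline.c); the port must be stopped before being reconfigured here.
 */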
2158 
2159 static void
2160 init_port(void)
2161 {
2162 	/* Configuration of Ethernet ports. */
2163 	ports = rte_zmalloc("testpmd: ports",
2164 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2165 			    RTE_CACHE_LINE_SIZE);
2166 	if (ports == NULL) {
2167 		rte_exit(EXIT_FAILURE,
2168 				"rte_zmalloc(%d struct rte_port) failed\n",
2169 				RTE_MAX_ETHPORTS);
2170 	}
2171 }
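/*
 * rte_zmalloc() returns zeroed memory, so every per-port field starts
 * out cleared and only needs to be set where a non-zero default is
 * wanted.
 */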
2172 
2173 static void
2174 force_quit(void)
2175 {
2176 	pmd_test_exit();
2177 	prompt_exit();
2178 }
2179 
2180 static void
2181 signal_handler(int signum)
2182 {
2183 	if (signum == SIGINT || signum == SIGTERM) {
2184 		printf("\nSignal %d received, preparing to exit...\n",
2185 				signum);
2186 #ifdef RTE_LIBRTE_PDUMP
2187 		/* uninitialize packet capture framework */
2188 		rte_pdump_uninit();
2189 #endif
2190 #ifdef RTE_LIBRTE_LATENCY_STATS
2191 		rte_latencystats_uninit();
2192 #endif
2193 		force_quit();
2194 		/* exit with the expected status */
2195 		signal(signum, SIG_DFL);
2196 		kill(getpid(), signum);
2197 	}
2198 }
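/*
 * Restoring SIG_DFL and re-raising the signal (above) makes the process
 * terminate with the conventional killed-by-signal status rather than a
 * normal exit code, which shells and test harnesses can distinguish.
 */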
2199 
2200 int
2201 main(int argc, char** argv)
2202 {
2203 	int diag;
2204 	uint8_t port_id;
2205 
2206 	signal(SIGINT, signal_handler);
2207 	signal(SIGTERM, signal_handler);
2208 
2209 	diag = rte_eal_init(argc, argv);
2210 	if (diag < 0)
2211 		rte_panic("Cannot init EAL\n");
2212 
2213 #ifdef RTE_LIBRTE_PDUMP
2214 	/* initialize packet capture framework */
2215 	rte_pdump_init(NULL);
2216 #endif
2217 
2218 	nb_ports = (portid_t) rte_eth_dev_count();
2219 	if (nb_ports == 0)
2220 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2221 
2222 	/* allocate port structures, and init them */
2223 	init_port();
2224 
2225 	set_def_fwd_config();
2226 	if (nb_lcores == 0)
2227 		rte_panic("Empty set of forwarding logical cores - check the "
2228 			  "core mask supplied in the command parameters\n");
2229 
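	/*
	 * rte_eal_init() returned the number of arguments it consumed, so
	 * skip past the EAL options and hand only the application's own
	 * options to launch_args_parse().
	 */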
2230 	argc -= diag;
2231 	argv += diag;
2232 	if (argc > 1)
2233 		launch_args_parse(argc, argv);
2234 
2235 	if (!nb_rxq && !nb_txq)
2236 		printf("Warning: either the rx or the tx queue count should be non-zero\n");
2237 
2238 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2239 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2240 		       "but nb_txq=%d will prevent RSS from being fully tested.\n",
2241 		       nb_rxq, nb_txq);
2242 
2243 	init_config();
2244 	if (start_port(RTE_PORT_ALL) != 0)
2245 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2246 
2247 	/* set all ports to promiscuous mode by default */
2248 	RTE_ETH_FOREACH_DEV(port_id)
2249 		rte_eth_promiscuous_enable(port_id);
2250 
2251 	/* Init metrics library */
2252 	rte_metrics_init(rte_socket_id());
2253 
2254 #ifdef RTE_LIBRTE_LATENCY_STATS
2255 	if (latencystats_enabled != 0) {
2256 		int ret = rte_latencystats_init(1, NULL);
2257 		if (ret)
2258 			printf("Warning: latencystats init()"
2259 				" returned error %d\n", ret);
2260 		printf("Latencystats running on lcore %d\n",
2261 			latencystats_lcore_id);
2262 	}
2263 #endif
2264 
2265 	/* Setup bitrate stats */
2266 #ifdef RTE_LIBRTE_BITRATE
2267 	bitrate_data = rte_stats_bitrate_create();
2268 	if (bitrate_data == NULL)
2269 		rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
2270 	rte_stats_bitrate_reg(bitrate_data);
2271 #endif
2272 
2274 #ifdef RTE_LIBRTE_CMDLINE
2275 	if (interactive == 1) {
2276 		if (auto_start) {
2277 			printf("Start automatic packet forwarding\n");
2278 			start_packet_forwarding(0);
2279 		}
2280 		prompt();
2281 	} else
2282 #endif
2283 	{
2284 		char c;
2285 		int rc;
2286 
2287 		printf("No command-line core given, starting packet forwarding\n");
2288 		start_packet_forwarding(0);
2289 		printf("Press enter to exit\n");
2290 		rc = read(0, &c, 1);
2291 		pmd_test_exit();
2292 		if (rc < 0)
2293 			return 1;
2294 	}
2295 
2296 	return 0;
2297 }
2298