xref: /dpdk/app/test-pmd/testpmd.c (revision 6d3fa481ee6644a23ad65b8141f7eaa163e865c5)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90 
91 #include "testpmd.h"
92 
93 uint16_t verbose_level = 0; /**< Silent by default. */
94 
95 /* Use the master core for the command line? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 char cmdline_filename[PATH_MAX] = {0};
99 
100 /*
101  * NUMA support configuration.
102  * When set, NUMA support attempts to dispatch the allocation of the
103  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
104  * probed ports, among the detected CPU sockets.
105  * Otherwise, all memory is allocated from CPU socket 0.
106  */
107 uint8_t numa_support = 1; /**< numa enabled by default */
108 
109 /*
110  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
111  * not configured.
112  */
113 uint8_t socket_num = UMA_NO_CONFIG;
114 
115 /*
116  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
117  */
118 uint8_t mp_anon = 0;
119 
120 /*
121  * Record the Ethernet address of peer target ports to which packets are
122  * forwarded.
123  * Must be instantiated with the ethernet addresses of peer traffic generator
124  * ports.
125  */
126 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
127 portid_t nb_peer_eth_addrs = 0;
128 
129 /*
130  * Probed Target Environment.
131  */
132 struct rte_port *ports;	       /**< For all probed ethernet ports. */
133 portid_t nb_ports;             /**< Number of probed ethernet ports. */
134 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
135 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
136 
137 /*
138  * Test Forwarding Configuration.
139  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
140  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
141  */
142 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
143 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
144 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
145 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
146 
147 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
148 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
149 
150 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
151 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * max(nb_rxq, nb_txq)). */
152 
153 /*
154  * Forwarding engines.
155  */
156 struct fwd_engine * fwd_engines[] = {
157 	&io_fwd_engine,
158 	&mac_fwd_engine,
159 	&mac_swap_engine,
160 	&flow_gen_engine,
161 	&rx_only_engine,
162 	&tx_only_engine,
163 	&csum_fwd_engine,
164 	&icmp_echo_engine,
165 #ifdef RTE_LIBRTE_IEEE1588
166 	&ieee1588_fwd_engine,
167 #endif
168 	NULL,
169 };
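/*
 * The engine list is NULL-terminated so it can be scanned without a
 * separate length. A minimal lookup sketch, illustrative only ("name"
 * stands for the mode name string; the real selection is done by the
 * "set fwd <mode>" command handler):
 *
 *	struct fwd_engine **e;
 *	for (e = fwd_engines; *e != NULL; e++)
 *		if (strcmp((*e)->fwd_mode_name, name) == 0)
 *			return *e;
 */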
170 
171 struct fwd_config cur_fwd_config;
172 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
173 uint32_t retry_enabled;
174 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
175 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
176 
177 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
178 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
179                                       * specified on command-line. */
180 
181 /*
182  * Configuration of packet segments used by the "txonly" processing engine.
183  */
184 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
185 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
186 	TXONLY_DEF_PACKET_LEN,
187 };
188 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
189 
190 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
191 /**< Split policy for packets to TX. */
192 
193 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
194 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
195 
196 /* Whether the current configuration is in DCB mode; 0 means it is not */
197 uint8_t dcb_config = 0;
198 
199 /* Whether DCB test mode is active */
200 uint8_t dcb_test = 0;
201 
202 /*
203  * Configurable number of RX/TX queues.
204  */
205 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
206 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
207 
208 /*
209  * Configurable number of RX/TX ring descriptors.
210  */
211 #define RTE_TEST_RX_DESC_DEFAULT 128
212 #define RTE_TEST_TX_DESC_DEFAULT 512
213 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
214 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
215 
216 #define RTE_PMD_PARAM_UNSET -1
217 /*
218  * Configurable values of RX and TX ring threshold registers.
219  */
220 
221 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
223 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
224 
225 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
228 
229 /*
230  * Configurable value of RX free threshold.
231  */
232 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
233 
234 /*
235  * Configurable value of RX drop enable.
236  */
237 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
238 
239 /*
240  * Configurable value of TX free threshold.
241  */
242 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
243 
244 /*
245  * Configurable value of TX RS bit threshold.
246  */
247 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
248 
249 /*
250  * Configurable value of TX queue flags.
251  */
252 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
253 
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 
264 /*
265  * Avoid flushing all the RX streams before forwarding starts.
266  */
267 uint8_t no_flush_rx = 0; /* flush by default */
268 
269 /*
270  * Avoid checking link status when starting/stopping a port.
271  */
272 uint8_t no_link_check = 0; /* check by default */
273 
274 /*
275  * Enable link status change notification
276  */
277 uint8_t lsc_interrupt = 1; /* enabled by default */
278 
279 /*
280  * Enable device removal notification.
281  */
282 uint8_t rmv_interrupt = 1; /* enabled by default */
283 
284 /*
285  * Display or mask ether events
286  * Default to all events except VF_MBOX
287  */
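/*
 * Each bit of the mask below is indexed by an rte_eth_event_type value;
 * the RTE_ETH_EVENT_VF_MBOX bit is deliberately left unset. Whether an
 * event is printed is a plain bit test, as done in eth_event_callback():
 *
 *	if (event_print_mask & (UINT32_C(1) << type))
 *		printf(...);
 */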
288 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
289 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
290 			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
291 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
292 			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
293 			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
294 
295 /*
296  * NIC bypass mode configuration options.
297  */
298 #ifdef RTE_NIC_BYPASS
299 
300 /* The NIC bypass watchdog timeout. */
301 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
302 
303 #endif
304 
305 #ifdef RTE_LIBRTE_LATENCY_STATS
306 
307 /*
308  * Set when latency stats are enabled on the command line.
309  */
310 uint8_t latencystats_enabled;
311 
312 /*
313  * Lcore ID to serve latency statistics.
314  */
315 lcoreid_t latencystats_lcore_id = -1;
316 
317 #endif
318 
319 /*
320  * Ethernet device configuration.
321  */
322 struct rte_eth_rxmode rx_mode = {
323 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
324 	.split_hdr_size = 0,
325 	.header_split   = 0, /**< Header Split disabled. */
326 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
327 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
328 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
329 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
330 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
331 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
332 };
333 
334 struct rte_fdir_conf fdir_conf = {
335 	.mode = RTE_FDIR_MODE_NONE,
336 	.pballoc = RTE_FDIR_PBALLOC_64K,
337 	.status = RTE_FDIR_REPORT_STATUS,
338 	.mask = {
339 		.vlan_tci_mask = 0x0,
340 		.ipv4_mask     = {
341 			.src_ip = 0xFFFFFFFF,
342 			.dst_ip = 0xFFFFFFFF,
343 		},
344 		.ipv6_mask     = {
345 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
346 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
347 		},
348 		.src_port_mask = 0xFFFF,
349 		.dst_port_mask = 0xFFFF,
350 		.mac_addr_byte_mask = 0xFF,
351 		.tunnel_type_mask = 1,
352 		.tunnel_id_mask = 0xFFFFFFFF,
353 	},
354 	.drop_queue = 127,
355 };
356 
357 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
358 
359 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
360 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
361 
362 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
363 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
364 
365 uint16_t nb_tx_queue_stats_mappings = 0;
366 uint16_t nb_rx_queue_stats_mappings = 0;
367 
368 unsigned max_socket = 0;
369 
370 #ifdef RTE_LIBRTE_BITRATE
371 /* Bitrate statistics */
372 struct rte_stats_bitrates *bitrate_data;
373 lcoreid_t bitrate_lcore_id;
374 uint8_t bitrate_enabled;
375 #endif
376 
377 /* Forward function declarations */
378 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
379 static void check_all_ports_link_status(uint32_t port_mask);
380 static void eth_event_callback(uint8_t port_id,
381 			       enum rte_eth_event_type type,
382 			       void *param);
383 
384 /*
385  * Check if all the ports are started.
386  * If yes, return positive value. If not, return zero.
387  */
388 static int all_ports_started(void);
389 
390 /*
391  * Setup default configuration.
392  */
393 static void
394 set_default_fwd_lcores_config(void)
395 {
396 	unsigned int i;
397 	unsigned int nb_lc;
398 	unsigned int sock_num;
399 
400 	nb_lc = 0;
401 	for (i = 0; i < RTE_MAX_LCORE; i++) {
402 		sock_num = rte_lcore_to_socket_id(i) + 1;
403 		if (sock_num > max_socket) {
404 			if (sock_num > RTE_MAX_NUMA_NODES)
405 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
406 			max_socket = sock_num;
407 		}
408 		if (!rte_lcore_is_enabled(i))
409 			continue;
410 		if (i == rte_get_master_lcore())
411 			continue;
412 		fwd_lcores_cpuids[nb_lc++] = i;
413 	}
414 	nb_lcores = (lcoreid_t) nb_lc;
415 	nb_cfg_lcores = nb_lcores;
416 	nb_fwd_lcores = 1;
417 }
418 
419 static void
420 set_def_peer_eth_addrs(void)
421 {
422 	portid_t i;
423 
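	/*
	 * Default to locally administered unicast addresses: assuming
	 * ETHER_LOCAL_ADMIN_ADDR is 0x02, port i gets the peer address
	 * 02:00:00:00:00:<i>.
	 */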
424 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
425 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
426 		peer_eth_addrs[i].addr_bytes[5] = i;
427 	}
428 }
429 
430 static void
431 set_default_fwd_ports_config(void)
432 {
433 	portid_t pt_id;
434 
435 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
436 		fwd_ports_ids[pt_id] = pt_id;
437 
438 	nb_cfg_ports = nb_ports;
439 	nb_fwd_ports = nb_ports;
440 }
441 
442 void
443 set_def_fwd_config(void)
444 {
445 	set_default_fwd_lcores_config();
446 	set_def_peer_eth_addrs();
447 	set_default_fwd_ports_config();
448 }
449 
450 /*
451  * Per-socket mbuf pool creation. Done once per socket at init time.
452  */
453 static void
454 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
455 		 unsigned int socket_id)
456 {
457 	char pool_name[RTE_MEMPOOL_NAMESIZE];
458 	struct rte_mempool *rte_mp = NULL;
459 	uint32_t mb_size;
460 
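	/* Each mbuf occupies its metadata (struct rte_mbuf) plus the data room. */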
461 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
462 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
463 
464 	RTE_LOG(INFO, USER1,
465 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
466 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
467 
468 #ifdef RTE_LIBRTE_PMD_XENVIRT
469 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
470 		(unsigned) mb_mempool_cache,
471 		sizeof(struct rte_pktmbuf_pool_private),
472 		rte_pktmbuf_pool_init, NULL,
473 		rte_pktmbuf_init, NULL,
474 		socket_id, 0);
475 #endif
476 
477 	/* if the XEN allocation above failed, fall back to normal allocation */
478 	if (rte_mp == NULL) {
479 		if (mp_anon != 0) {
480 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
481 				mb_size, (unsigned) mb_mempool_cache,
482 				sizeof(struct rte_pktmbuf_pool_private),
483 				socket_id, 0);
484 			if (rte_mp == NULL)
485 				goto err;
486 
487 			if (rte_mempool_populate_anon(rte_mp) == 0) {
488 				rte_mempool_free(rte_mp);
489 				rte_mp = NULL;
490 				goto err;
491 			}
492 			rte_pktmbuf_pool_init(rte_mp, NULL);
493 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
494 		} else {
495 			/* wrapper to rte_mempool_create() */
496 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
497 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
498 		}
499 	}
500 
501 err:
502 	if (rte_mp == NULL) {
503 		rte_exit(EXIT_FAILURE,
504 			"Creation of mbuf pool for socket %u failed: %s\n",
505 			socket_id, rte_strerror(rte_errno));
506 	} else if (verbose_level > 0) {
507 		rte_mempool_dump(stdout, rte_mp);
508 	}
509 }
510 
511 /*
512  * Check whether the given socket id is valid in NUMA mode;
513  * return 0 if valid, -1 otherwise.
514  */
515 static int
516 check_socket_id(const unsigned int socket_id)
517 {
518 	static int warning_once = 0;
519 
520 	if (socket_id >= max_socket) {
521 		if (!warning_once && numa_support)
522 			printf("Warning: NUMA should be configured manually by"
523 			       " using --port-numa-config and"
524 			       " --ring-numa-config parameters along with"
525 			       " --numa.\n");
526 		warning_once = 1;
527 		return -1;
528 	}
529 	return 0;
530 }
531 
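/*
 * Initialize the global test configuration: one context per forwarding
 * lcore, per-port device information, one mbuf pool per NUMA socket (or
 * a single pool in UMA mode) and the forwarding streams.
 */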
532 static void
533 init_config(void)
534 {
535 	portid_t pid;
536 	struct rte_port *port;
537 	struct rte_mempool *mbp;
538 	unsigned int nb_mbuf_per_pool;
539 	lcoreid_t  lc_id;
540 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
541 
542 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
543 
544 	if (numa_support) {
545 		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
546 		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
547 		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
548 	}
549 
550 	/* Configuration of logical cores. */
551 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
552 				sizeof(struct fwd_lcore *) * nb_lcores,
553 				RTE_CACHE_LINE_SIZE);
554 	if (fwd_lcores == NULL) {
555 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
556 							"failed\n", nb_lcores);
557 	}
558 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
559 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
560 					       sizeof(struct fwd_lcore),
561 					       RTE_CACHE_LINE_SIZE);
562 		if (fwd_lcores[lc_id] == NULL) {
563 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
564 								"failed\n");
565 		}
566 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
567 	}
568 
569 	RTE_ETH_FOREACH_DEV(pid) {
570 		port = &ports[pid];
571 		rte_eth_dev_info_get(pid, &port->dev_info);
572 
573 		if (numa_support) {
574 			if (port_numa[pid] != NUMA_NO_CONFIG)
575 				port_per_socket[port_numa[pid]]++;
576 			else {
577 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
578 
579 				/* if socket_id is invalid, set to 0 */
580 				if (check_socket_id(socket_id) < 0)
581 					socket_id = 0;
582 				port_per_socket[socket_id]++;
583 			}
584 		}
585 
586 		/* set flag to initialize port/queue */
587 		port->need_reconfig = 1;
588 		port->need_reconfig_queues = 1;
589 	}
590 
591 	/*
592 	 * Create mbuf pools.
593 	 * If NUMA support is disabled, create a single mbuf pool in
594 	 * socket 0 memory by default.
595 	 * Otherwise, create one mbuf pool in the memory of each detected socket.
596 	 *
597 	 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
598 	 * nb_txd can be configured at run time.
599 	 */
600 	if (param_total_num_mbufs)
601 		nb_mbuf_per_pool = param_total_num_mbufs;
602 	else {
603 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
604 			(nb_lcores * mb_mempool_cache) +
605 			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
606 		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
607 	}
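	/*
	 * Worked example of the default sizing, assuming the usual testpmd.h
	 * values (RTE_TEST_RX_DESC_MAX = 2048, RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512) and mb_mempool_cache = 250: with 4 lcores,
	 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs, scaled by
	 * RTE_MAX_ETHPORTS to cover the worst case.
	 */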
608 
609 	if (numa_support) {
610 		uint8_t i;
611 
612 		for (i = 0; i < max_socket; i++)
613 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
614 	} else {
615 		if (socket_num == UMA_NO_CONFIG)
616 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
617 		else
618 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
619 						 socket_num);
620 	}
621 
622 	init_port_config();
623 
624 	/*
625 	 * Record which mbuf pool each logical core uses, if needed.
626 	 */
627 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
628 		mbp = mbuf_pool_find(
629 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
630 
631 		if (mbp == NULL)
632 			mbp = mbuf_pool_find(0);
633 		fwd_lcores[lc_id]->mbp = mbp;
634 	}
635 
636 	/* Configuration of packet forwarding streams. */
637 	if (init_fwd_streams() < 0)
638 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
639 
640 	fwd_config_setup();
641 }
642 
643 
644 void
645 reconfig(portid_t new_port_id, unsigned socket_id)
646 {
647 	struct rte_port *port;
648 
649 	/* Reconfiguration of Ethernet ports. */
650 	port = &ports[new_port_id];
651 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
652 
653 	/* set flag to initialize port/queue */
654 	port->need_reconfig = 1;
655 	port->need_reconfig_queues = 1;
656 	port->socket_id = socket_id;
657 
658 	init_port_config();
659 }
660 
661 
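/*
 * (Re)build the forwarding stream array: one stream per port and per
 * queue, i.e. nb_ports * max(nb_rxq, nb_txq) entries in total.
 * Return 0 on success, -1 when the queue counts are invalid.
 */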
662 int
663 init_fwd_streams(void)
664 {
665 	portid_t pid;
666 	struct rte_port *port;
667 	streamid_t sm_id, nb_fwd_streams_new;
668 	queueid_t q;
669 
670 	/* set socket id according to numa or not */
671 	RTE_ETH_FOREACH_DEV(pid) {
672 		port = &ports[pid];
673 		if (nb_rxq > port->dev_info.max_rx_queues) {
674 			printf("Fail: nb_rxq(%d) is greater than "
675 				"max_rx_queues(%d)\n", nb_rxq,
676 				port->dev_info.max_rx_queues);
677 			return -1;
678 		}
679 		if (nb_txq > port->dev_info.max_tx_queues) {
680 			printf("Fail: nb_txq(%d) is greater than "
681 				"max_tx_queues(%d)\n", nb_txq,
682 				port->dev_info.max_tx_queues);
683 			return -1;
684 		}
685 		if (numa_support) {
686 			if (port_numa[pid] != NUMA_NO_CONFIG)
687 				port->socket_id = port_numa[pid];
688 			else {
689 				port->socket_id = rte_eth_dev_socket_id(pid);
690 
691 				/* if socket_id is invalid, set to 0 */
692 				if (check_socket_id(port->socket_id) < 0)
693 					port->socket_id = 0;
694 			}
695 		}
696 		else {
697 			if (socket_num == UMA_NO_CONFIG)
698 				port->socket_id = 0;
699 			else
700 				port->socket_id = socket_num;
701 		}
702 	}
703 
704 	q = RTE_MAX(nb_rxq, nb_txq);
705 	if (q == 0) {
706 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
707 		return -1;
708 	}
709 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
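	/*
	 * Example: with 2 ports, nb_rxq = 4 and nb_txq = 2, q is 4 and
	 * 2 * 4 = 8 forwarding streams are allocated.
	 */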
710 	if (nb_fwd_streams_new == nb_fwd_streams)
711 		return 0;
712 	/* clear the old */
713 	if (fwd_streams != NULL) {
714 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
715 			if (fwd_streams[sm_id] == NULL)
716 				continue;
717 			rte_free(fwd_streams[sm_id]);
718 			fwd_streams[sm_id] = NULL;
719 		}
720 		rte_free(fwd_streams);
721 		fwd_streams = NULL;
722 	}
723 
724 	/* init new */
725 	nb_fwd_streams = nb_fwd_streams_new;
726 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
727 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
728 	if (fwd_streams == NULL)
729 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
730 						"failed\n", nb_fwd_streams);
731 
732 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
733 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
734 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
735 		if (fwd_streams[sm_id] == NULL)
736 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
737 								" failed\n");
738 	}
739 
740 	return 0;
741 }
742 
743 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
744 static void
745 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
746 {
747 	unsigned int total_burst;
748 	unsigned int nb_burst;
749 	unsigned int burst_stats[3];
750 	uint16_t pktnb_stats[3];
751 	uint16_t nb_pkt;
752 	int burst_percent[3];
753 
754 	/*
755 	 * First compute the total number of packet bursts and the
756 	 * two highest numbers of bursts of the same number of packets.
757 	 */
758 	total_burst = 0;
759 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
760 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
761 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
762 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
763 		if (nb_burst == 0)
764 			continue;
765 		total_burst += nb_burst;
766 		if (nb_burst > burst_stats[0]) {
767 			burst_stats[1] = burst_stats[0];
768 			pktnb_stats[1] = pktnb_stats[0];
769 			burst_stats[0] = nb_burst;
770 			pktnb_stats[0] = nb_pkt;
771 		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
772 	}
773 	if (total_burst == 0)
774 		return;
775 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
776 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
777 	       burst_percent[0], (int) pktnb_stats[0]);
778 	if (burst_stats[0] == total_burst) {
779 		printf("]\n");
780 		return;
781 	}
782 	if (burst_stats[0] + burst_stats[1] == total_burst) {
783 		printf(" + %d%% of %d pkts]\n",
784 		       100 - burst_percent[0], pktnb_stats[1]);
785 		return;
786 	}
787 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
788 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
789 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
790 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
791 		return;
792 	}
793 	printf(" + %d%% of %d pkts + %d%% of others]\n",
794 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
795 }
796 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
797 
798 static void
799 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
800 {
801 	struct rte_port *port;
802 	uint8_t i;
803 
804 	static const char *fwd_stats_border = "----------------------";
805 
806 	port = &ports[port_id];
807 	printf("\n  %s Forward statistics for port %-2d %s\n",
808 	       fwd_stats_border, port_id, fwd_stats_border);
809 
810 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
811 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
812 		       "%-"PRIu64"\n",
813 		       stats->ipackets, stats->imissed,
814 		       (uint64_t) (stats->ipackets + stats->imissed));
815 
816 		if (cur_fwd_eng == &csum_fwd_engine)
817 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
818 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
819 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
820 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
821 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
822 		}
823 
824 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
825 		       "%-"PRIu64"\n",
826 		       stats->opackets, port->tx_dropped,
827 		       (uint64_t) (stats->opackets + port->tx_dropped));
828 	}
829 	else {
830 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
831 		       "%14"PRIu64"\n",
832 		       stats->ipackets, stats->imissed,
833 		       (uint64_t) (stats->ipackets + stats->imissed));
834 
835 		if (cur_fwd_eng == &csum_fwd_engine)
836 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
837 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
838 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
839 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
840 			printf("  RX-nombufs:             %14"PRIu64"\n",
841 			       stats->rx_nombuf);
842 		}
843 
844 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
845 		       "%14"PRIu64"\n",
846 		       stats->opackets, port->tx_dropped,
847 		       (uint64_t) (stats->opackets + port->tx_dropped));
848 	}
849 
850 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
851 	if (port->rx_stream)
852 		pkt_burst_stats_display("RX",
853 			&port->rx_stream->rx_burst_stats);
854 	if (port->tx_stream)
855 		pkt_burst_stats_display("TX",
856 			&port->tx_stream->tx_burst_stats);
857 #endif
858 
859 	if (port->rx_queue_stats_mapping_enabled) {
860 		printf("\n");
861 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
862 			printf("  Stats reg %2d RX-packets:%14"PRIu64
863 			       "     RX-errors:%14"PRIu64
864 			       "    RX-bytes:%14"PRIu64"\n",
865 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
866 		}
867 		printf("\n");
868 	}
869 	if (port->tx_queue_stats_mapping_enabled) {
870 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
871 			printf("  Stats reg %2d TX-packets:%14"PRIu64
872 			       "                                 TX-bytes:%14"PRIu64"\n",
873 			       i, stats->q_opackets[i], stats->q_obytes[i]);
874 		}
875 	}
876 
877 	printf("  %s--------------------------------%s\n",
878 	       fwd_stats_border, fwd_stats_border);
879 }
880 
881 static void
882 fwd_stream_stats_display(streamid_t stream_id)
883 {
884 	struct fwd_stream *fs;
885 	static const char *fwd_top_stats_border = "-------";
886 
887 	fs = fwd_streams[stream_id];
888 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
889 	    (fs->fwd_dropped == 0))
890 		return;
891 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
892 	       "TX Port=%2d/Queue=%2d %s\n",
893 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
894 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
895 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
896 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
897 
898 	/* if checksum mode */
899 	if (cur_fwd_eng == &csum_fwd_engine) {
900 	       printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
901 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
902 	}
903 
904 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
905 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
906 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
907 #endif
908 }
909 
910 static void
911 flush_fwd_rx_queues(void)
912 {
913 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
914 	portid_t  rxp;
915 	portid_t port_id;
916 	queueid_t rxq;
917 	uint16_t  nb_rx;
918 	uint16_t  i;
919 	uint8_t   j;
920 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
921 	uint64_t timer_period;
922 
923 	/* convert to number of cycles */
924 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
925 
926 	for (j = 0; j < 2; j++) {
927 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
928 			for (rxq = 0; rxq < nb_rxq; rxq++) {
929 				port_id = fwd_ports_ids[rxp];
930 				/*
931 				 * testpmd can get stuck in the do-while loop
932 				 * below if rte_eth_rx_burst() always returns
933 				 * nonzero packets, so a timer is used to exit
934 				 * the loop once 1 second has elapsed.
935 				 */
936 				prev_tsc = rte_rdtsc();
937 				do {
938 					nb_rx = rte_eth_rx_burst(port_id, rxq,
939 						pkts_burst, MAX_PKT_BURST);
940 					for (i = 0; i < nb_rx; i++)
941 						rte_pktmbuf_free(pkts_burst[i]);
942 
943 					cur_tsc = rte_rdtsc();
944 					diff_tsc = cur_tsc - prev_tsc;
945 					timer_tsc += diff_tsc;
946 				} while ((nb_rx > 0) &&
947 					(timer_tsc < timer_period));
948 				timer_tsc = 0;
949 			}
950 		}
951 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
952 	}
953 }
954 
955 static void
956 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
957 {
958 	struct fwd_stream **fsm;
959 	streamid_t nb_fs;
960 	streamid_t sm_id;
961 #ifdef RTE_LIBRTE_BITRATE
962 	uint64_t tics_per_1sec;
963 	uint64_t tics_datum;
964 	uint64_t tics_current;
965 	uint8_t idx_port, cnt_ports;
966 
967 	cnt_ports = rte_eth_dev_count();
968 	tics_datum = rte_rdtsc();
969 	tics_per_1sec = rte_get_timer_hz();
970 #endif
971 	fsm = &fwd_streams[fc->stream_idx];
972 	nb_fs = fc->stream_nb;
973 	do {
974 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
975 			(*pkt_fwd)(fsm[sm_id]);
976 #ifdef RTE_LIBRTE_BITRATE
977 		if (bitrate_enabled != 0 &&
978 				bitrate_lcore_id == rte_lcore_id()) {
979 			tics_current = rte_rdtsc();
980 			if (tics_current - tics_datum >= tics_per_1sec) {
981 				/* Periodic bitrate calculation */
982 				for (idx_port = 0;
983 						idx_port < cnt_ports;
984 						idx_port++)
985 					rte_stats_bitrate_calc(bitrate_data,
986 						idx_port);
987 				tics_datum = tics_current;
988 			}
989 		}
990 #endif
991 #ifdef RTE_LIBRTE_LATENCY_STATS
992 		if (latencystats_enabled != 0 &&
993 				latencystats_lcore_id == rte_lcore_id())
994 			rte_latencystats_update();
995 #endif
996 
997 	} while (! fc->stopped);
998 }
999 
1000 static int
1001 start_pkt_forward_on_core(void *fwd_arg)
1002 {
1003 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1004 			     cur_fwd_config.fwd_eng->packet_fwd);
1005 	return 0;
1006 }
1007 
1008 /*
1009  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1010  * Used to start communication flows in network loopback test configurations.
1011  */
1012 static int
1013 run_one_txonly_burst_on_core(void *fwd_arg)
1014 {
1015 	struct fwd_lcore *fwd_lc;
1016 	struct fwd_lcore tmp_lcore;
1017 
1018 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1019 	tmp_lcore = *fwd_lc;
1020 	tmp_lcore.stopped = 1;
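	/*
	 * The copied context is marked stopped up front, so the do-while
	 * loop in run_pkt_fwd_on_lcore() exits after a single pass: exactly
	 * one burst is sent on each stream handled by this lcore.
	 */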
1021 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1022 	return 0;
1023 }
1024 
1025 /*
1026  * Launch packet forwarding:
1027  *     - Setup per-port forwarding context.
1028  *     - Launch logical cores with their forwarding configuration.
1029  */
1030 static void
1031 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1032 {
1033 	port_fwd_begin_t port_fwd_begin;
1034 	unsigned int i;
1035 	unsigned int lc_id;
1036 	int diag;
1037 
1038 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1039 	if (port_fwd_begin != NULL) {
1040 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1041 			(*port_fwd_begin)(fwd_ports_ids[i]);
1042 	}
1043 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1044 		lc_id = fwd_lcores_cpuids[i];
1045 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1046 			fwd_lcores[i]->stopped = 0;
1047 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1048 						     fwd_lcores[i], lc_id);
1049 			if (diag != 0)
1050 				printf("launch lcore %u failed - diag=%d\n",
1051 				       lc_id, diag);
1052 		}
1053 	}
1054 }
1055 
1056 /*
1057  * Launch packet forwarding configuration.
1058  */
1059 void
1060 start_packet_forwarding(int with_tx_first)
1061 {
1062 	port_fwd_begin_t port_fwd_begin;
1063 	port_fwd_end_t  port_fwd_end;
1064 	struct rte_port *port;
1065 	unsigned int i;
1066 	portid_t   pt_id;
1067 	streamid_t sm_id;
1068 
1069 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1070 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1071 
1072 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1073 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1074 
1075 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1076 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1077 		(!nb_rxq || !nb_txq))
1078 		rte_exit(EXIT_FAILURE,
1079 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1080 			cur_fwd_eng->fwd_mode_name);
1081 
1082 	if (all_ports_started() == 0) {
1083 		printf("Not all ports were started\n");
1084 		return;
1085 	}
1086 	if (test_done == 0) {
1087 		printf("Packet forwarding already started\n");
1088 		return;
1089 	}
1090 
1091 	if (init_fwd_streams() < 0) {
1092 		printf("Fail from init_fwd_streams()\n");
1093 		return;
1094 	}
1095 
1096 	if (dcb_test) {
1097 		for (i = 0; i < nb_fwd_ports; i++) {
1098 			pt_id = fwd_ports_ids[i];
1099 			port = &ports[pt_id];
1100 			if (!port->dcb_flag) {
1101 				printf("In DCB mode, all forwarding ports must "
1102                                        "be configured in this mode.\n");
1103 				return;
1104 			}
1105 		}
1106 		if (nb_fwd_lcores == 1) {
1107 			printf("In DCB mode, the number of forwarding cores "
1108                                "must be larger than 1.\n");
1109 			return;
1110 		}
1111 	}
1112 	test_done = 0;
1113 
1114 	if (!no_flush_rx)
1115 		flush_fwd_rx_queues();
1116 
1117 	fwd_config_setup();
1118 	pkt_fwd_config_display(&cur_fwd_config);
1119 	rxtx_config_display();
1120 
1121 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1122 		pt_id = fwd_ports_ids[i];
1123 		port = &ports[pt_id];
1124 		rte_eth_stats_get(pt_id, &port->stats);
1125 		port->tx_dropped = 0;
1126 
1127 		map_port_queue_stats_mapping_registers(pt_id, port);
1128 	}
1129 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1130 		fwd_streams[sm_id]->rx_packets = 0;
1131 		fwd_streams[sm_id]->tx_packets = 0;
1132 		fwd_streams[sm_id]->fwd_dropped = 0;
1133 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1134 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1135 
1136 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1137 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1138 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1139 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1140 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1141 #endif
1142 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1143 		fwd_streams[sm_id]->core_cycles = 0;
1144 #endif
1145 	}
1146 	if (with_tx_first) {
1147 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1148 		if (port_fwd_begin != NULL) {
1149 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1150 				(*port_fwd_begin)(fwd_ports_ids[i]);
1151 		}
1152 		while (with_tx_first--) {
1153 			launch_packet_forwarding(
1154 					run_one_txonly_burst_on_core);
1155 			rte_eal_mp_wait_lcore();
1156 		}
1157 		port_fwd_end = tx_only_engine.port_fwd_end;
1158 		if (port_fwd_end != NULL) {
1159 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1160 				(*port_fwd_end)(fwd_ports_ids[i]);
1161 		}
1162 	}
1163 	launch_packet_forwarding(start_pkt_forward_on_core);
1164 }
1165 
1166 void
1167 stop_packet_forwarding(void)
1168 {
1169 	struct rte_eth_stats stats;
1170 	struct rte_port *port;
1171 	port_fwd_end_t  port_fwd_end;
1172 	int i;
1173 	portid_t   pt_id;
1174 	streamid_t sm_id;
1175 	lcoreid_t  lc_id;
1176 	uint64_t total_recv;
1177 	uint64_t total_xmit;
1178 	uint64_t total_rx_dropped;
1179 	uint64_t total_tx_dropped;
1180 	uint64_t total_rx_nombuf;
1181 	uint64_t tx_dropped;
1182 	uint64_t rx_bad_ip_csum;
1183 	uint64_t rx_bad_l4_csum;
1184 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1185 	uint64_t fwd_cycles;
1186 #endif
1187 	static const char *acc_stats_border = "+++++++++++++++";
1188 
1189 	if (test_done) {
1190 		printf("Packet forwarding not started\n");
1191 		return;
1192 	}
1193 	printf("Telling cores to stop...");
1194 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1195 		fwd_lcores[lc_id]->stopped = 1;
1196 	printf("\nWaiting for lcores to finish...\n");
1197 	rte_eal_mp_wait_lcore();
1198 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1199 	if (port_fwd_end != NULL) {
1200 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1201 			pt_id = fwd_ports_ids[i];
1202 			(*port_fwd_end)(pt_id);
1203 		}
1204 	}
1205 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1206 	fwd_cycles = 0;
1207 #endif
1208 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1209 		if (cur_fwd_config.nb_fwd_streams >
1210 		    cur_fwd_config.nb_fwd_ports) {
1211 			fwd_stream_stats_display(sm_id);
1212 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1213 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1214 		} else {
1215 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1216 				fwd_streams[sm_id];
1217 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1218 				fwd_streams[sm_id];
1219 		}
1220 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1221 		tx_dropped = (uint64_t) (tx_dropped +
1222 					 fwd_streams[sm_id]->fwd_dropped);
1223 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1224 
1225 		rx_bad_ip_csum =
1226 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1227 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1228 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1229 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1230 							rx_bad_ip_csum;
1231 
1232 		rx_bad_l4_csum =
1233 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1234 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1235 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1236 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1237 							rx_bad_l4_csum;
1238 
1239 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1240 		fwd_cycles = (uint64_t) (fwd_cycles +
1241 					 fwd_streams[sm_id]->core_cycles);
1242 #endif
1243 	}
1244 	total_recv = 0;
1245 	total_xmit = 0;
1246 	total_rx_dropped = 0;
1247 	total_tx_dropped = 0;
1248 	total_rx_nombuf  = 0;
1249 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1250 		pt_id = fwd_ports_ids[i];
1251 
1252 		port = &ports[pt_id];
1253 		rte_eth_stats_get(pt_id, &stats);
1254 		stats.ipackets -= port->stats.ipackets;
1255 		port->stats.ipackets = 0;
1256 		stats.opackets -= port->stats.opackets;
1257 		port->stats.opackets = 0;
1258 		stats.ibytes   -= port->stats.ibytes;
1259 		port->stats.ibytes = 0;
1260 		stats.obytes   -= port->stats.obytes;
1261 		port->stats.obytes = 0;
1262 		stats.imissed  -= port->stats.imissed;
1263 		port->stats.imissed = 0;
1264 		stats.oerrors  -= port->stats.oerrors;
1265 		port->stats.oerrors = 0;
1266 		stats.rx_nombuf -= port->stats.rx_nombuf;
1267 		port->stats.rx_nombuf = 0;
1268 
1269 		total_recv += stats.ipackets;
1270 		total_xmit += stats.opackets;
1271 		total_rx_dropped += stats.imissed;
1272 		total_tx_dropped += port->tx_dropped;
1273 		total_rx_nombuf  += stats.rx_nombuf;
1274 
1275 		fwd_port_stats_display(pt_id, &stats);
1276 	}
1277 	printf("\n  %s Accumulated forward statistics for all ports"
1278 	       "%s\n",
1279 	       acc_stats_border, acc_stats_border);
1280 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1281 	       "%-"PRIu64"\n"
1282 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1283 	       "%-"PRIu64"\n",
1284 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1285 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1286 	if (total_rx_nombuf > 0)
1287 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1288 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1289 	       "%s\n",
1290 	       acc_stats_border, acc_stats_border);
1291 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1292 	if (total_recv > 0)
1293 		printf("\n  CPU cycles/packet=%u (total cycles="
1294 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1295 		       (unsigned int)(fwd_cycles / total_recv),
1296 		       fwd_cycles, total_recv);
1297 #endif
1298 	printf("\nDone.\n");
1299 	test_done = 1;
1300 }
1301 
1302 void
1303 dev_set_link_up(portid_t pid)
1304 {
1305 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1306 		printf("\nFailed to set link up.\n");
1307 }
1308 
1309 void
1310 dev_set_link_down(portid_t pid)
1311 {
1312 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1313 		printf("\nFailed to set link down.\n");
1314 }
1315 
1316 static int
1317 all_ports_started(void)
1318 {
1319 	portid_t pi;
1320 	struct rte_port *port;
1321 
1322 	RTE_ETH_FOREACH_DEV(pi) {
1323 		port = &ports[pi];
1324 		/* Check if there is a port which is not started */
1325 		if ((port->port_status != RTE_PORT_STARTED) &&
1326 			(port->slave_flag == 0))
1327 			return 0;
1328 	}
1329 
1330 	/* All (non-slave) ports are started */
1331 	return 1;
1332 }
1333 
1334 int
1335 all_ports_stopped(void)
1336 {
1337 	portid_t pi;
1338 	struct rte_port *port;
1339 
1340 	RTE_ETH_FOREACH_DEV(pi) {
1341 		port = &ports[pi];
1342 		if ((port->port_status != RTE_PORT_STOPPED) &&
1343 			(port->slave_flag == 0))
1344 			return 0;
1345 	}
1346 
1347 	return 1;
1348 }
1349 
1350 int
1351 port_is_started(portid_t port_id)
1352 {
1353 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1354 		return 0;
1355 
1356 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1357 		return 0;
1358 
1359 	return 1;
1360 }
1361 
1362 static int
1363 port_is_closed(portid_t port_id)
1364 {
1365 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1366 		return 0;
1367 
1368 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1369 		return 0;
1370 
1371 	return 1;
1372 }
1373 
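/*
 * Configure and start the given port, or all ports when pid is
 * RTE_PORT_ALL: (re)apply the port and queue configuration if needed,
 * register the event callbacks, start the device, then check the link
 * status. Return 0 on success, -1 on a configuration failure.
 */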
1374 int
1375 start_port(portid_t pid)
1376 {
1377 	int diag, need_check_link_status = -1;
1378 	portid_t pi;
1379 	queueid_t qi;
1380 	struct rte_port *port;
1381 	struct ether_addr mac_addr;
1382 	enum rte_eth_event_type event_type;
1383 
1384 	if (port_id_is_invalid(pid, ENABLED_WARN))
1385 		return 0;
1386 
1387 	if (dcb_config)
1388 		dcb_test = 1;
1389 	RTE_ETH_FOREACH_DEV(pi) {
1390 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1391 			continue;
1392 
1393 		need_check_link_status = 0;
1394 		port = &ports[pi];
1395 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1396 						 RTE_PORT_HANDLING) == 0) {
1397 			printf("Port %d is not stopped\n", pi);
1398 			continue;
1399 		}
1400 
1401 		if (port->need_reconfig > 0) {
1402 			port->need_reconfig = 0;
1403 
1404 			printf("Configuring Port %d (socket %u)\n", pi,
1405 					port->socket_id);
1406 			/* configure port */
1407 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1408 						&(port->dev_conf));
1409 			if (diag != 0) {
1410 				if (rte_atomic16_cmpset(&(port->port_status),
1411 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1412 					printf("Port %d can not be set back "
1413 							"to stopped\n", pi);
1414 				printf("Fail to configure port %d\n", pi);
1415 				/* try to reconfigure port next time */
1416 				port->need_reconfig = 1;
1417 				return -1;
1418 			}
1419 		}
1420 		if (port->need_reconfig_queues > 0) {
1421 			port->need_reconfig_queues = 0;
1422 			/* setup tx queues */
1423 			for (qi = 0; qi < nb_txq; qi++) {
1424 				if ((numa_support) &&
1425 					(txring_numa[pi] != NUMA_NO_CONFIG))
1426 					diag = rte_eth_tx_queue_setup(pi, qi,
1427 						nb_txd,txring_numa[pi],
1428 						&(port->tx_conf));
1429 				else
1430 					diag = rte_eth_tx_queue_setup(pi, qi,
1431 						nb_txd,port->socket_id,
1432 						&(port->tx_conf));
1433 
1434 				if (diag == 0)
1435 					continue;
1436 
1437 				/* Fail to setup tx queue, return */
1438 				if (rte_atomic16_cmpset(&(port->port_status),
1439 							RTE_PORT_HANDLING,
1440 							RTE_PORT_STOPPED) == 0)
1441 					printf("Port %d can not be set back "
1442 							"to stopped\n", pi);
1443 				printf("Fail to configure port %d tx queues\n", pi);
1444 				/* try to reconfigure queues next time */
1445 				port->need_reconfig_queues = 1;
1446 				return -1;
1447 			}
1448 			/* setup rx queues */
1449 			for (qi = 0; qi < nb_rxq; qi++) {
1450 				if ((numa_support) &&
1451 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1452 					struct rte_mempool * mp =
1453 						mbuf_pool_find(rxring_numa[pi]);
1454 					if (mp == NULL) {
1455 						printf("Failed to setup RX queue: "
1456 							"No mempool allocation"
1457 							" on the socket %d\n",
1458 							rxring_numa[pi]);
1459 						return -1;
1460 					}
1461 
1462 					diag = rte_eth_rx_queue_setup(pi, qi,
1463 					     nb_rxd,rxring_numa[pi],
1464 					     &(port->rx_conf),mp);
1465 				} else {
1466 					struct rte_mempool *mp =
1467 						mbuf_pool_find(port->socket_id);
1468 					if (mp == NULL) {
1469 						printf("Failed to setup RX queue: "
1470 							"No mempool allocation"
1471 							" on the socket %d\n",
1472 							port->socket_id);
1473 						return -1;
1474 					}
1475 					diag = rte_eth_rx_queue_setup(pi, qi,
1476 					     nb_rxd,port->socket_id,
1477 					     &(port->rx_conf), mp);
1478 				}
1479 				if (diag == 0)
1480 					continue;
1481 
1482 				/* Fail to setup rx queue, return */
1483 				if (rte_atomic16_cmpset(&(port->port_status),
1484 							RTE_PORT_HANDLING,
1485 							RTE_PORT_STOPPED) == 0)
1486 					printf("Port %d can not be set back "
1487 							"to stopped\n", pi);
1488 				printf("Fail to configure port %d rx queues\n", pi);
1489 				/* try to reconfigure queues next time */
1490 				port->need_reconfig_queues = 1;
1491 				return -1;
1492 			}
1493 		}
1494 
1495 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1496 		     event_type < RTE_ETH_EVENT_MAX;
1497 		     event_type++) {
1498 			diag = rte_eth_dev_callback_register(pi,
1499 							event_type,
1500 							eth_event_callback,
1501 							NULL);
1502 			if (diag) {
1503 				printf("Failed to setup event callback for event %d\n",
1504 					event_type);
1505 				return -1;
1506 			}
1507 		}
1508 
1509 		/* start port */
1510 		if (rte_eth_dev_start(pi) < 0) {
1511 			printf("Fail to start port %d\n", pi);
1512 
1513 			/* Fail to setup rx queue, return */
1514 			if (rte_atomic16_cmpset(&(port->port_status),
1515 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1516 				printf("Port %d can not be set back to "
1517 							"stopped\n", pi);
1518 			continue;
1519 		}
1520 
1521 		if (rte_atomic16_cmpset(&(port->port_status),
1522 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1523 			printf("Port %d can not be set into started\n", pi);
1524 
1525 		rte_eth_macaddr_get(pi, &mac_addr);
1526 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1527 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1528 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1529 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1530 
1531 		/* at least one port started; need to check link status */
1532 		need_check_link_status = 1;
1533 	}
1534 
1535 	if (need_check_link_status == 1 && !no_link_check)
1536 		check_all_ports_link_status(RTE_PORT_ALL);
1537 	else if (need_check_link_status == 0)
1538 		printf("Please stop the ports first\n");
1539 
1540 	printf("Done\n");
1541 	return 0;
1542 }
1543 
1544 void
1545 stop_port(portid_t pid)
1546 {
1547 	portid_t pi;
1548 	struct rte_port *port;
1549 	int need_check_link_status = 0;
1550 
1551 	if (dcb_test) {
1552 		dcb_test = 0;
1553 		dcb_config = 0;
1554 	}
1555 
1556 	if (port_id_is_invalid(pid, ENABLED_WARN))
1557 		return;
1558 
1559 	printf("Stopping ports...\n");
1560 
1561 	RTE_ETH_FOREACH_DEV(pi) {
1562 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1563 			continue;
1564 
1565 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1566 			printf("Please remove port %d from forwarding configuration.\n", pi);
1567 			continue;
1568 		}
1569 
1570 		if (port_is_bonding_slave(pi)) {
1571 			printf("Please remove port %d from bonded device.\n", pi);
1572 			continue;
1573 		}
1574 
1575 		port = &ports[pi];
1576 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1577 						RTE_PORT_HANDLING) == 0)
1578 			continue;
1579 
1580 		rte_eth_dev_stop(pi);
1581 
1582 		if (rte_atomic16_cmpset(&(port->port_status),
1583 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1584 			printf("Port %d can not be set into stopped\n", pi);
1585 		need_check_link_status = 1;
1586 	}
1587 	if (need_check_link_status && !no_link_check)
1588 		check_all_ports_link_status(RTE_PORT_ALL);
1589 
1590 	printf("Done\n");
1591 }
1592 
1593 void
1594 close_port(portid_t pid)
1595 {
1596 	portid_t pi;
1597 	struct rte_port *port;
1598 
1599 	if (port_id_is_invalid(pid, ENABLED_WARN))
1600 		return;
1601 
1602 	printf("Closing ports...\n");
1603 
1604 	RTE_ETH_FOREACH_DEV(pi) {
1605 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1606 			continue;
1607 
1608 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1609 			printf("Please remove port %d from forwarding configuration.\n", pi);
1610 			continue;
1611 		}
1612 
1613 		if (port_is_bonding_slave(pi)) {
1614 			printf("Please remove port %d from bonded device.\n", pi);
1615 			continue;
1616 		}
1617 
1618 		port = &ports[pi];
1619 		if (rte_atomic16_cmpset(&(port->port_status),
1620 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1621 			printf("Port %d is already closed\n", pi);
1622 			continue;
1623 		}
1624 
1625 		if (rte_atomic16_cmpset(&(port->port_status),
1626 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1627 			printf("Port %d is not stopped\n", pi);
1628 			continue;
1629 		}
1630 
1631 		if (port->flow_list)
1632 			port_flow_flush(pi);
1633 		rte_eth_dev_close(pi);
1634 
1635 		if (rte_atomic16_cmpset(&(port->port_status),
1636 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1637 			printf("Port %d cannot be set to closed\n", pi);
1638 	}
1639 
1640 	printf("Done\n");
1641 }
1642 
1643 void
1644 attach_port(char *identifier)
1645 {
1646 	portid_t pi = 0;
1647 	unsigned int socket_id;
1648 
1649 	printf("Attaching a new port...\n");
1650 
1651 	if (identifier == NULL) {
1652 		printf("Invalid parameters are specified\n");
1653 		return;
1654 	}
1655 
1656 	if (rte_eth_dev_attach(identifier, &pi))
1657 		return;
1658 
1659 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1660 	/* if socket_id is invalid, set to 0 */
1661 	if (check_socket_id(socket_id) < 0)
1662 		socket_id = 0;
1663 	reconfig(pi, socket_id);
1664 	rte_eth_promiscuous_enable(pi);
1665 
1666 	nb_ports = rte_eth_dev_count();
1667 
1668 	ports[pi].port_status = RTE_PORT_STOPPED;
1669 
1670 	printf("Port %d is attached. Total ports: %d\n", pi, nb_ports);
1671 	printf("Done\n");
1672 }
1673 
1674 void
1675 detach_port(uint8_t port_id)
1676 {
1677 	char name[RTE_ETH_NAME_MAX_LEN];
1678 
1679 	printf("Detaching a port...\n");
1680 
1681 	if (!port_is_closed(port_id)) {
1682 		printf("Please close port first\n");
1683 		return;
1684 	}
1685 
1686 	if (ports[port_id].flow_list)
1687 		port_flow_flush(port_id);
1688 
1689 	if (rte_eth_dev_detach(port_id, name))
1690 		return;
1691 
1692 	nb_ports = rte_eth_dev_count();
1693 
1694 	printf("Port '%s' is detached. Total ports: %d\n",
1695 			name, nb_ports);
1696 	printf("Done\n");
1697 	return;
1698 }
1699 
1700 void
1701 pmd_test_exit(void)
1702 {
1703 	portid_t pt_id;
1704 
1705 	if (test_done == 0)
1706 		stop_packet_forwarding();
1707 
1708 	if (ports != NULL) {
1709 		no_link_check = 1;
1710 		RTE_ETH_FOREACH_DEV(pt_id) {
1711 			printf("\nShutting down port %d...\n", pt_id);
1712 			fflush(stdout);
1713 			stop_port(pt_id);
1714 			close_port(pt_id);
1715 		}
1716 	}
1717 	printf("\nBye...\n");
1718 }
1719 
1720 typedef void (*cmd_func_t)(void);
1721 struct pmd_test_command {
1722 	const char *cmd_name;
1723 	cmd_func_t cmd_func;
1724 };
1725 
1726 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1727 
1728 /* Check the link status of all ports for up to 9 s, then print their final status */
1729 static void
1730 check_all_ports_link_status(uint32_t port_mask)
1731 {
1732 #define CHECK_INTERVAL 100 /* 100ms */
1733 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1734 	uint8_t portid, count, all_ports_up, print_flag = 0;
1735 	struct rte_eth_link link;
1736 
1737 	printf("Checking link statuses...\n");
1738 	fflush(stdout);
1739 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1740 		all_ports_up = 1;
1741 		RTE_ETH_FOREACH_DEV(portid) {
1742 			if ((port_mask & (1 << portid)) == 0)
1743 				continue;
1744 			memset(&link, 0, sizeof(link));
1745 			rte_eth_link_get_nowait(portid, &link);
1746 			/* print link status if flag set */
1747 			if (print_flag == 1) {
1748 				if (link.link_status)
1749 					printf("Port %d Link Up - speed %u "
1750 						"Mbps - %s\n", (uint8_t)portid,
1751 						(unsigned)link.link_speed,
1752 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1753 					("full-duplex") : ("half-duplex"));
1754 				else
1755 					printf("Port %d Link Down\n",
1756 						(uint8_t)portid);
1757 				continue;
1758 			}
1759 			/* clear all_ports_up flag if any link down */
1760 			if (link.link_status == ETH_LINK_DOWN) {
1761 				all_ports_up = 0;
1762 				break;
1763 			}
1764 		}
1765 		/* after finally printing all link status, get out */
1766 		if (print_flag == 1)
1767 			break;
1768 
1769 		if (all_ports_up == 0) {
1770 			fflush(stdout);
1771 			rte_delay_ms(CHECK_INTERVAL);
1772 		}
1773 
1774 		/* set the print_flag if all ports up or timeout */
1775 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1776 			print_flag = 1;
1777 		}
1778 
1779 		if (lsc_interrupt)
1780 			break;
1781 	}
1782 }
1783 
1784 static void
1785 rmv_event_callback(void *arg)
1786 {
1787 	struct rte_eth_dev *dev;
1788 	struct rte_devargs *da;
1789 	char name[32] = "";
1790 	uint8_t port_id = (intptr_t)arg;
1791 
1792 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1793 	dev = &rte_eth_devices[port_id];
1794 	da = dev->device->devargs;
1795 
1796 	stop_port(port_id);
1797 	close_port(port_id);
1798 	if (da->type == RTE_DEVTYPE_VIRTUAL)
1799 		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1800 	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1801 		rte_pci_device_name(&da->pci.addr, name, sizeof(name));
1802 	printf("removing device %s\n", name);
1803 	rte_eal_dev_detach(name);
1804 	dev->state = RTE_ETH_DEV_UNUSED;
1805 }
1806 
1807 /* This function is used by the interrupt thread */
1808 static void
1809 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1810 {
1811 	static const char * const event_desc[] = {
1812 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1813 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1814 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1815 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1816 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1817 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1818 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1819 		[RTE_ETH_EVENT_MAX] = NULL,
1820 	};
1821 
1822 	RTE_SET_USED(param);
1823 
1824 	if (type >= RTE_ETH_EVENT_MAX) {
1825 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1826 			port_id, __func__, type);
1827 		fflush(stderr);
1828 	} else if (event_print_mask & (UINT32_C(1) << type)) {
1829 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1830 			event_desc[type]);
1831 		fflush(stdout);
1832 	}
1833 
1834 	switch (type) {
1835 	case RTE_ETH_EVENT_INTR_RMV:
1836 		if (rte_eal_alarm_set(100000,
1837 				rmv_event_callback, (void *)(intptr_t)port_id))
1838 			fprintf(stderr, "Could not set up deferred device removal\n");
1839 		break;
1840 	default:
1841 		break;
1842 	}
1843 }
1844 
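/*
 * Apply the user-supplied TX queue to stats-counter mappings that target
 * this port, and flag the port when at least one mapping took effect.
 * Returns the first error from rte_eth_dev_set_tx_queue_stats_mapping().
 */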
1845 static int
1846 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1847 {
1848 	uint16_t i;
1849 	int diag;
1850 	uint8_t mapping_found = 0;
1851 
1852 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1853 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1855 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1856 					tx_queue_stats_mappings[i].queue_id,
1857 					tx_queue_stats_mappings[i].stats_counter_id);
1858 			if (diag != 0)
1859 				return diag;
1860 			mapping_found = 1;
1861 		}
1862 	}
1863 	if (mapping_found)
1864 		port->tx_queue_stats_mapping_enabled = 1;
1865 	return 0;
1866 }
1867 
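/* RX counterpart of set_tx_queue_stats_mapping_registers(). */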
1868 static int
1869 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1870 {
1871 	uint16_t i;
1872 	int diag;
1873 	uint8_t mapping_found = 0;
1874 
1875 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1876 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1878 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1879 					rx_queue_stats_mappings[i].queue_id,
1880 					rx_queue_stats_mappings[i].stats_counter_id);
1881 			if (diag != 0)
1882 				return diag;
1883 			mapping_found = 1;
1884 		}
1885 	}
1886 	if (mapping_found)
1887 		port->rx_queue_stats_mapping_enabled = 1;
1888 	return 0;
1889 }
1890 
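/*
 * Program the TX and RX queue stats mappings of a port. -ENOTSUP only
 * disables the feature for that direction; any other error is fatal.
 */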
1891 static void
1892 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1893 {
1894 	int diag = 0;
1895 
1896 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1897 	if (diag != 0) {
1898 		if (diag == -ENOTSUP) {
1899 			port->tx_queue_stats_mapping_enabled = 0;
1900 			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
1903 			rte_exit(EXIT_FAILURE,
1904 					"set_tx_queue_stats_mapping_registers "
1905 					"failed for port id=%d diag=%d\n",
1906 					pi, diag);
1907 	}
1908 
1909 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1910 	if (diag != 0) {
1911 		if (diag == -ENOTSUP) {
1912 			port->rx_queue_stats_mapping_enabled = 0;
1913 			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
1916 			rte_exit(EXIT_FAILURE,
1917 					"set_rx_queue_stats_mapping_registers "
1918 					"failed for port id=%d diag=%d\n",
1919 					pi, diag);
1920 	}
1921 }
1922 
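/*
 * Seed the per-port RX/TX queue configuration from the driver defaults
 * in dev_info, then override each threshold or flag that was explicitly
 * set on the command line.
 */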
1923 static void
1924 rxtx_port_config(struct rte_port *port)
1925 {
1926 	port->rx_conf = port->dev_info.default_rxconf;
1927 	port->tx_conf = port->dev_info.default_txconf;
1928 
1929 	/* Check if any RX/TX parameters have been passed */
1930 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1931 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1932 
1933 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1934 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1935 
1936 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1937 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1938 
1939 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1940 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1941 
1942 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1943 		port->rx_conf.rx_drop_en = rx_drop_en;
1944 
1945 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1946 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1947 
1948 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1949 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1950 
1951 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1952 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1953 
1954 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1955 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1956 
1957 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1958 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1959 
1960 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1961 		port->tx_conf.txq_flags = txq_flags;
1962 }
1963 
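/*
 * Apply the default configuration to every probed port: RX mode, flow
 * director, RSS (only when more than one RX queue is used), queue
 * thresholds, MAC address, and the optional LSC/RMV interrupts.
 */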
1964 void
1965 init_port_config(void)
1966 {
1967 	portid_t pid;
1968 	struct rte_port *port;
1969 
1970 	RTE_ETH_FOREACH_DEV(pid) {
1971 		port = &ports[pid];
1972 		port->dev_conf.rxmode = rx_mode;
1973 		port->dev_conf.fdir_conf = fdir_conf;
1974 		if (nb_rxq > 1) {
1975 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1976 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1977 		} else {
1978 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1979 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1980 		}
1981 
1982 		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1984 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1985 			else
1986 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1987 		}
1988 
1989 		rxtx_port_config(port);
1990 
1991 		rte_eth_macaddr_get(pid, &port->eth_addr);
1992 
1993 		map_port_queue_stats_mapping_registers(pid, port);
1994 #ifdef RTE_NIC_BYPASS
1995 		rte_eth_dev_bypass_init(pid);
1996 #endif
1997 
1998 		if (lsc_interrupt &&
1999 		    (rte_eth_devices[pid].data->dev_flags &
2000 		     RTE_ETH_DEV_INTR_LSC))
2001 			port->dev_conf.intr_conf.lsc = 1;
2002 		if (rmv_interrupt &&
2003 		    (rte_eth_devices[pid].data->dev_flags &
2004 		     RTE_ETH_DEV_INTR_RMV))
2005 			port->dev_conf.intr_conf.rmv = 1;
2006 	}
2007 }
2008 
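/* Bonding helpers: mark, clear and query the slave status of a port. */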
2009 void set_port_slave_flag(portid_t slave_pid)
2010 {
2011 	struct rte_port *port;
2012 
2013 	port = &ports[slave_pid];
2014 	port->slave_flag = 1;
2015 }
2016 
2017 void clear_port_slave_flag(portid_t slave_pid)
2018 {
2019 	struct rte_port *port;
2020 
2021 	port = &ports[slave_pid];
2022 	port->slave_flag = 0;
2023 }
2024 
2025 uint8_t port_is_bonding_slave(portid_t slave_pid)
2026 {
2027 	struct rte_port *port;
2028 
2029 	port = &ports[slave_pid];
2030 	return port->slave_flag;
2031 }
2032 
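/* VLAN IDs mapped onto the VMDq pools in DCB+VT mode, see get_eth_dcb_conf(). */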
2033 const uint16_t vlan_tags[] = {
2034 		0,  1,  2,  3,  4,  5,  6,  7,
2035 		8,  9, 10, 11,  12, 13, 14, 15,
2036 		16, 17, 18, 19, 20, 21, 22, 23,
2037 		24, 25, 26, 27, 28, 29, 30, 31
2038 };
2039 
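/*
 * Fill eth_conf for DCB operation. With DCB_VT_ENABLED the VMDq+DCB
 * RX/TX paths are configured (32 pools with 4 TCs, 16 pools with 8 TCs);
 * otherwise plain DCB with RSS is used. PFC support is advertised only
 * when pfc_en is set.
 */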
static int
2041 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2042 		 enum dcb_mode_enable dcb_mode,
2043 		 enum rte_eth_nb_tcs num_tcs,
2044 		 uint8_t pfc_en)
2045 {
2046 	uint8_t i;
2047 
2048 	/*
	 * Build the correct configuration for DCB+VT based on the vlan_tags
	 * array given above and the number of traffic classes available for use.
2051 	 */
2052 	if (dcb_mode == DCB_VT_ENABLED) {
2053 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2054 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2055 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2056 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2057 
2058 		/* VMDQ+DCB RX and TX configurations */
2059 		vmdq_rx_conf->enable_default_pool = 0;
2060 		vmdq_rx_conf->default_pool = 0;
2061 		vmdq_rx_conf->nb_queue_pools =
2062 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2063 		vmdq_tx_conf->nb_queue_pools =
2064 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2065 
2066 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2067 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2068 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2069 			vmdq_rx_conf->pool_map[i].pools =
2070 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2071 		}
2072 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2073 			vmdq_rx_conf->dcb_tc[i] = i;
2074 			vmdq_tx_conf->dcb_tc[i] = i;
2075 		}
2076 
2077 		/* set DCB mode of RX and TX of multiple queues */
2078 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2079 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2080 	} else {
2081 		struct rte_eth_dcb_rx_conf *rx_conf =
2082 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2083 		struct rte_eth_dcb_tx_conf *tx_conf =
2084 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2085 
2086 		rx_conf->nb_tcs = num_tcs;
2087 		tx_conf->nb_tcs = num_tcs;
2088 
2089 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2090 			rx_conf->dcb_tc[i] = i % num_tcs;
2091 			tx_conf->dcb_tc[i] = i % num_tcs;
2092 		}
2093 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2094 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2095 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2096 	}
2097 
2098 	if (pfc_en)
2099 		eth_conf->dcb_capability_en =
2100 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2101 	else
2102 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2103 
2104 	return 0;
2105 }
2106 
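/*
 * Reconfigure a port for DCB testing: build the DCB configuration,
 * enable VLAN filtering on the given tags, and resize nb_rxq/nb_txq to
 * match the DCB queue layout before the queues are set up again.
 */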
2107 int
2108 init_port_dcb_config(portid_t pid,
2109 		     enum dcb_mode_enable dcb_mode,
2110 		     enum rte_eth_nb_tcs num_tcs,
2111 		     uint8_t pfc_en)
2112 {
2113 	struct rte_eth_conf port_conf;
2114 	struct rte_port *rte_port;
2115 	int retval;
2116 	uint16_t i;
2117 
2118 	rte_port = &ports[pid];
2119 
2120 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2121 	/* Enter DCB configuration status */
2122 	dcb_config = 1;
2123 
	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2125 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2126 	if (retval < 0)
2127 		return retval;
2128 	port_conf.rxmode.hw_vlan_filter = 1;
2129 
	/**
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be set up.
	 */
	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
	if (retval < 0)
		return retval;
2136 
2137 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2138 
2139 	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue IDs of the VMDq pools start after the PF queues.
2141 	 */
2142 	if (dcb_mode == DCB_VT_ENABLED &&
2143 	    rte_port->dev_info.vmdq_pool_base > 0) {
2144 		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
2146 		return -1;
2147 	}
2148 
	/* Assume the ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode.
2151 	 */
2152 	if (dcb_mode == DCB_VT_ENABLED) {
2153 		if (rte_port->dev_info.max_vfs > 0) {
2154 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2155 			nb_txq = rte_port->dev_info.nb_tx_queues;
2156 		} else {
2157 			nb_rxq = rte_port->dev_info.max_rx_queues;
2158 			nb_txq = rte_port->dev_info.max_tx_queues;
2159 		}
2160 	} else {
		/* If VT is disabled, use all PF queues */
2162 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2163 			nb_rxq = rte_port->dev_info.max_rx_queues;
2164 			nb_txq = rte_port->dev_info.max_tx_queues;
2165 		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
2170 	}
2171 	rx_free_thresh = 64;
2172 
2173 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2174 
2175 	rxtx_port_config(rte_port);
2176 	/* VLAN filter */
2177 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2178 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2179 		rx_vft_set(pid, vlan_tags[i], 1);
2180 
2181 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2182 	map_port_queue_stats_mapping_registers(pid, rte_port);
2183 
2184 	rte_port->dcb_flag = 1;
2185 
2186 	return 0;
2187 }
2188 
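/* Allocate the zeroed array of per-port state for all possible ports. */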
2189 static void
2190 init_port(void)
2191 {
2192 	/* Configuration of Ethernet ports. */
2193 	ports = rte_zmalloc("testpmd: ports",
2194 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2195 			    RTE_CACHE_LINE_SIZE);
2196 	if (ports == NULL) {
2197 		rte_exit(EXIT_FAILURE,
2198 				"rte_zmalloc(%d struct rte_port) failed\n",
2199 				RTE_MAX_ETHPORTS);
2200 	}
2201 }
2202 
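/* Teardown path shared by the fatal signals: close ports and prompt. */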
2203 static void
2204 force_quit(void)
2205 {
2206 	pmd_test_exit();
2207 	prompt_exit();
2208 }
2209 
2210 static void
2211 signal_handler(int signum)
2212 {
2213 	if (signum == SIGINT || signum == SIGTERM) {
2214 		printf("\nSignal %d received, preparing to exit...\n",
2215 				signum);
2216 #ifdef RTE_LIBRTE_PDUMP
2217 		/* uninitialize packet capture framework */
2218 		rte_pdump_uninit();
2219 #endif
2220 #ifdef RTE_LIBRTE_LATENCY_STATS
2221 		rte_latencystats_uninit();
2222 #endif
2223 		force_quit();
2224 		/* exit with the expected status */
2225 		signal(signum, SIG_DFL);
2226 		kill(getpid(), signum);
2227 	}
2228 }
2229 
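/*
 * Entry point: initialize the EAL and the optional stats libraries,
 * probe and configure the ports, put them in promiscuous mode, then
 * either run the interactive prompt or forward packets until the user
 * presses enter.
 */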
2230 int
2231 main(int argc, char** argv)
2232 {
2233 	int  diag;
2234 	uint8_t port_id;
2235 
2236 	signal(SIGINT, signal_handler);
2237 	signal(SIGTERM, signal_handler);
2238 
2239 	diag = rte_eal_init(argc, argv);
2240 	if (diag < 0)
2241 		rte_panic("Cannot init EAL\n");
2242 
2243 #ifdef RTE_LIBRTE_PDUMP
2244 	/* initialize packet capture framework */
2245 	rte_pdump_init(NULL);
2246 #endif
2247 
2248 	nb_ports = (portid_t) rte_eth_dev_count();
2249 	if (nb_ports == 0)
2250 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2251 
2252 	/* allocate port structures, and init them */
2253 	init_port();
2254 
2255 	set_def_fwd_config();
2256 	if (nb_lcores == 0)
2257 		rte_panic("Empty set of forwarding logical cores - check the "
2258 			  "core mask supplied in the command parameters\n");
2259 
2260 	/* Bitrate/latency stats disabled by default */
2261 #ifdef RTE_LIBRTE_BITRATE
2262 	bitrate_enabled = 0;
2263 #endif
2264 #ifdef RTE_LIBRTE_LATENCY_STATS
2265 	latencystats_enabled = 0;
2266 #endif
2267 
2268 	argc -= diag;
2269 	argv += diag;
2270 	if (argc > 1)
2271 		launch_args_parse(argc, argv);
2272 
2273 	if (!nb_rxq && !nb_txq)
2274 		printf("Warning: Either rx or tx queues should be non-zero\n");
2275 
2276 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2277 		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
2279 		       nb_rxq, nb_txq);
2280 
2281 	init_config();
2282 	if (start_port(RTE_PORT_ALL) != 0)
2283 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2284 
2285 	/* set all ports to promiscuous mode by default */
2286 	RTE_ETH_FOREACH_DEV(port_id)
2287 		rte_eth_promiscuous_enable(port_id);
2288 
2289 	/* Init metrics library */
2290 	rte_metrics_init(rte_socket_id());
2291 
2292 #ifdef RTE_LIBRTE_LATENCY_STATS
2293 	if (latencystats_enabled != 0) {
2294 		int ret = rte_latencystats_init(1, NULL);
2295 		if (ret)
2296 			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
2298 		printf("Latencystats running on lcore %d\n",
2299 			latencystats_lcore_id);
2300 	}
2301 #endif
2302 
2303 	/* Setup bitrate stats */
2304 #ifdef RTE_LIBRTE_BITRATE
2305 	if (bitrate_enabled != 0) {
2306 		bitrate_data = rte_stats_bitrate_create();
2307 		if (bitrate_data == NULL)
2308 			rte_exit(EXIT_FAILURE,
2309 				"Could not allocate bitrate data.\n");
2310 		rte_stats_bitrate_reg(bitrate_data);
2311 	}
2312 #endif
2313 
2314 #ifdef RTE_LIBRTE_CMDLINE
2315 	if (strlen(cmdline_filename) != 0)
2316 		cmdline_read_from_file(cmdline_filename);
2317 
2318 	if (interactive == 1) {
2319 		if (auto_start) {
2320 			printf("Start automatic packet forwarding\n");
2321 			start_packet_forwarding(0);
2322 		}
2323 		prompt();
2324 		pmd_test_exit();
2325 	} else
2326 #endif
2327 	{
2328 		char c;
2329 		int rc;
2330 
		printf("No interactive command line requested, starting packet forwarding\n");
2332 		start_packet_forwarding(0);
2333 		printf("Press enter to exit\n");
2334 		rc = read(0, &c, 1);
2335 		pmd_test_exit();
2336 		if (rc < 0)
2337 			return 1;
2338 	}
2339 
2340 	return 0;
2341 }
2342