xref: /dpdk/app/test-pmd/testpmd.c (revision 284c908cc588e6448e25199773aea998c7cccd78)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
88 #ifdef RTE_LIBRTE_LATENCY_STATS
89 #include <rte_latencystats.h>
90 #endif
91 
92 #include "testpmd.h"
93 
94 uint16_t verbose_level = 0; /**< Silent by default. */
95 
96 /* Use the master lcore for the command line? */
97 uint8_t interactive = 0;
98 uint8_t auto_start = 0;
99 
100 /*
101  * NUMA support configuration.
102  * When set, the NUMA support attempts to dispatch the allocation of the
103  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
104  * probed ports among the CPU sockets 0 and 1.
105  * Otherwise, all memory is allocated from CPU socket 0.
106  */
107 uint8_t numa_support = 0; /**< No numa support by default */
108 
109 /*
110  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
111  * not configured.
112  */
113 uint8_t socket_num = UMA_NO_CONFIG;
114 
115 /*
116  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
117  */
118 uint8_t mp_anon = 0;
119 
120 /*
121  * Record the Ethernet address of peer target ports to which packets are
122  * forwarded.
123  * Must be instantiated with the Ethernet addresses of the peer traffic generator
124  * ports.
125  */
126 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
127 portid_t nb_peer_eth_addrs = 0;
128 
129 /*
130  * Probed Target Environment.
131  */
132 struct rte_port *ports;	       /**< For all probed ethernet ports. */
133 portid_t nb_ports;             /**< Number of probed ethernet ports. */
134 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
135 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
136 
137 /*
138  * Test Forwarding Configuration.
139  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
140  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
141  */
142 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
143 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
144 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
145 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
146 
147 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
148 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
149 
150 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
151 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
152 
153 /*
154  * Forwarding engines.
155  */
156 struct fwd_engine * fwd_engines[] = {
157 	&io_fwd_engine,
158 	&mac_fwd_engine,
159 	&mac_swap_engine,
160 	&flow_gen_engine,
161 	&rx_only_engine,
162 	&tx_only_engine,
163 	&csum_fwd_engine,
164 	&icmp_echo_engine,
165 #ifdef RTE_LIBRTE_IEEE1588
166 	&ieee1588_fwd_engine,
167 #endif
168 	NULL,
169 };
170 
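/*
 * The active engine is selected at run time, e.g. with the interactive
 * "set fwd <mode>" command, which updates cur_fwd_eng below.
 */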
171 struct fwd_config cur_fwd_config;
172 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
173 uint32_t retry_enabled;
174 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
175 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
176 
177 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
178 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
179                                       * specified on command-line. */
180 
181 /*
182  * Configuration of packet segments used by the "txonly" processing engine.
183  */
184 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
185 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
186 	TXONLY_DEF_PACKET_LEN,
187 };
188 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
189 
190 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
191 /**< Split policy for packets to TX. */
192 
193 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
194 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
195 
196 /* Whether the current configuration is in DCB mode; 0 means non-DCB mode. */
197 uint8_t dcb_config = 0;
198 
199 /* Whether DCB is currently under test */
200 uint8_t dcb_test = 0;
201 
202 /*
203  * Configurable number of RX/TX queues.
204  */
205 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
206 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
207 
208 /*
209  * Configurable number of RX/TX ring descriptors.
210  */
211 #define RTE_TEST_RX_DESC_DEFAULT 128
212 #define RTE_TEST_TX_DESC_DEFAULT 512
213 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
214 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
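/*
 * Both values can also be changed at run time, e.g. with the interactive
 * "port config all rxd <n>" and "port config all txd <n>" commands.
 */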
215 
216 #define RTE_PMD_PARAM_UNSET -1
217 /*
218  * Configurable values of RX and TX ring threshold registers.
219  */
220 
221 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
223 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
224 
225 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
228 
229 /*
230  * Configurable value of RX free threshold.
231  */
232 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
233 
234 /*
235  * Configurable value of RX drop enable.
236  */
237 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
238 
239 /*
240  * Configurable value of TX free threshold.
241  */
242 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
243 
244 /*
245  * Configurable value of TX RS bit threshold.
246  */
247 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
248 
249 /*
250  * Configurable value of TX queue flags.
251  */
252 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
253 
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258 
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263 
264 /*
265  * Avoid flushing all the RX queues before starting forwarding.
266  */
267 uint8_t no_flush_rx = 0; /* flush by default */
268 
269 /*
270  * Avoid checking the link status when starting/stopping a port.
271  */
272 uint8_t no_link_check = 0; /* check by default */
273 
274 /*
275  * Enable link status change notification
276  */
277 uint8_t lsc_interrupt = 1; /* enabled by default */
278 
279 /*
280  * Enable device removal notification.
281  */
282 uint8_t rmv_interrupt = 1; /* enabled by default */
283 
284 /*
285  * NIC bypass mode configuration options.
286  */
287 #ifdef RTE_NIC_BYPASS
288 
289 /* The NIC bypass watchdog timeout. */
290 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
291 
292 #endif
293 
294 #ifdef RTE_LIBRTE_LATENCY_STATS
295 
296 /*
297  * Set when latency stats are enabled on the command line.
298  */
299 uint8_t latencystats_enabled;
300 
301 /*
302  * Lcore ID to serve latency statistics.
303  */
304 lcoreid_t latencystats_lcore_id = -1;
305 
306 #endif
307 
308 /*
309  * Ethernet device configuration.
310  */
311 struct rte_eth_rxmode rx_mode = {
312 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
313 	.split_hdr_size = 0,
314 	.header_split   = 0, /**< Header Split disabled. */
315 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
316 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
317 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
318 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
319 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
320 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
321 };
322 
323 struct rte_fdir_conf fdir_conf = {
324 	.mode = RTE_FDIR_MODE_NONE,
325 	.pballoc = RTE_FDIR_PBALLOC_64K,
326 	.status = RTE_FDIR_REPORT_STATUS,
327 	.mask = {
328 		.vlan_tci_mask = 0x0,
329 		.ipv4_mask     = {
330 			.src_ip = 0xFFFFFFFF,
331 			.dst_ip = 0xFFFFFFFF,
332 		},
333 		.ipv6_mask     = {
334 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
335 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
336 		},
337 		.src_port_mask = 0xFFFF,
338 		.dst_port_mask = 0xFFFF,
339 		.mac_addr_byte_mask = 0xFF,
340 		.tunnel_type_mask = 1,
341 		.tunnel_id_mask = 0xFFFFFFFF,
342 	},
343 	.drop_queue = 127,
344 };
345 
346 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
347 
348 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
349 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
350 
351 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
352 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
353 
354 uint16_t nb_tx_queue_stats_mappings = 0;
355 uint16_t nb_rx_queue_stats_mappings = 0;
356 
357 unsigned max_socket = 0;
358 
359 /* Bitrate statistics */
360 struct rte_stats_bitrates *bitrate_data;
361 
362 /* Forward function declarations */
363 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
364 static void check_all_ports_link_status(uint32_t port_mask);
365 static void eth_event_callback(uint8_t port_id,
366 			       enum rte_eth_event_type type,
367 			       void *param);
368 
369 /*
370  * Check if all the ports are started.
371  * If yes, return positive value. If not, return zero.
372  */
373 static int all_ports_started(void);
374 
375 /*
376  * Setup default configuration.
377  */
378 static void
379 set_default_fwd_lcores_config(void)
380 {
381 	unsigned int i;
382 	unsigned int nb_lc;
383 	unsigned int sock_num;
384 
385 	nb_lc = 0;
386 	for (i = 0; i < RTE_MAX_LCORE; i++) {
387 		sock_num = rte_lcore_to_socket_id(i) + 1;
388 		if (sock_num > max_socket) {
389 			if (sock_num > RTE_MAX_NUMA_NODES)
390 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
391 			max_socket = sock_num;
392 		}
393 		if (!rte_lcore_is_enabled(i))
394 			continue;
395 		if (i == rte_get_master_lcore())
396 			continue;
397 		fwd_lcores_cpuids[nb_lc++] = i;
398 	}
399 	nb_lcores = (lcoreid_t) nb_lc;
400 	nb_cfg_lcores = nb_lcores;
401 	nb_fwd_lcores = 1;
402 }
403 
404 static void
405 set_def_peer_eth_addrs(void)
406 {
407 	portid_t i;
408 
409 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
410 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
411 		peer_eth_addrs[i].addr_bytes[5] = i;
412 	}
413 }
414 
415 static void
416 set_default_fwd_ports_config(void)
417 {
418 	portid_t pt_id;
419 
420 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
421 		fwd_ports_ids[pt_id] = pt_id;
422 
423 	nb_cfg_ports = nb_ports;
424 	nb_fwd_ports = nb_ports;
425 }
426 
427 void
428 set_def_fwd_config(void)
429 {
430 	set_default_fwd_lcores_config();
431 	set_def_peer_eth_addrs();
432 	set_default_fwd_ports_config();
433 }
434 
435 /*
436  * Per-socket mbuf pool creation, done once at init time.
437  */
438 static void
439 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
440 		 unsigned int socket_id)
441 {
442 	char pool_name[RTE_MEMPOOL_NAMESIZE];
443 	struct rte_mempool *rte_mp = NULL;
444 	uint32_t mb_size;
445 
446 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
447 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
448 
449 	RTE_LOG(INFO, USER1,
450 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
451 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
452 
453 #ifdef RTE_LIBRTE_PMD_XENVIRT
454 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
455 		(unsigned) mb_mempool_cache,
456 		sizeof(struct rte_pktmbuf_pool_private),
457 		rte_pktmbuf_pool_init, NULL,
458 		rte_pktmbuf_init, NULL,
459 		socket_id, 0);
460 #endif
461 
462 	/* If the XEN allocation above failed, fall back to normal allocation. */
463 	if (rte_mp == NULL) {
464 		if (mp_anon != 0) {
465 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
466 				mb_size, (unsigned) mb_mempool_cache,
467 				sizeof(struct rte_pktmbuf_pool_private),
468 				socket_id, 0);
469 			if (rte_mp == NULL)
470 				goto err;
471 
472 			if (rte_mempool_populate_anon(rte_mp) == 0) {
473 				rte_mempool_free(rte_mp);
474 				rte_mp = NULL;
475 				goto err;
476 			}
477 			rte_pktmbuf_pool_init(rte_mp, NULL);
478 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
479 		} else {
480 			/* wrapper to rte_mempool_create() */
481 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
482 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
483 		}
484 	}
485 
486 err:
487 	if (rte_mp == NULL) {
488 		rte_exit(EXIT_FAILURE,
489 			"Creation of mbuf pool for socket %u failed: %s\n",
490 			socket_id, rte_strerror(rte_errno));
491 	} else if (verbose_level > 0) {
492 		rte_mempool_dump(stdout, rte_mp);
493 	}
494 }
495 
496 /*
497  * Check whether the given socket ID is valid in NUMA mode;
498  * return 0 if valid, -1 otherwise.
499  */
500 static int
501 check_socket_id(const unsigned int socket_id)
502 {
503 	static int warning_once = 0;
504 
505 	if (socket_id >= max_socket) {
506 		if (!warning_once && numa_support)
507 			printf("Warning: NUMA should be configured manually by"
508 			       " using --port-numa-config and"
509 			       " --ring-numa-config parameters along with"
510 			       " --numa.\n");
511 		warning_once = 1;
512 		return -1;
513 	}
514 	return 0;
515 }
516 
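/*
 * Global initialisation done once at startup: allocate the per-lcore
 * forwarding contexts, create the mbuf pool(s), fetch the device info of
 * every probed port and set up the default forwarding streams.
 */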
517 static void
518 init_config(void)
519 {
520 	portid_t pid;
521 	struct rte_port *port;
522 	struct rte_mempool *mbp;
523 	unsigned int nb_mbuf_per_pool;
524 	lcoreid_t  lc_id;
525 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
526 
527 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
528 	/* Configuration of logical cores. */
529 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
530 				sizeof(struct fwd_lcore *) * nb_lcores,
531 				RTE_CACHE_LINE_SIZE);
532 	if (fwd_lcores == NULL) {
533 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
534 							"failed\n", nb_lcores);
535 	}
536 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
537 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
538 					       sizeof(struct fwd_lcore),
539 					       RTE_CACHE_LINE_SIZE);
540 		if (fwd_lcores[lc_id] == NULL) {
541 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
542 								"failed\n");
543 		}
544 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
545 	}
546 
547 	/*
548 	 * Create pools of mbuf.
549 	 * If NUMA support is disabled, create a single pool of mbuf in
550 	 * socket 0 memory by default.
551 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
552 	 *
553 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
554 	 * nb_txd can be configured at run time.
555 	 */
556 	if (param_total_num_mbufs)
557 		nb_mbuf_per_pool = param_total_num_mbufs;
558 	else {
559 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
560 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
561 
562 		if (!numa_support)
563 			nb_mbuf_per_pool =
564 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
565 	}
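	/*
	 * Illustrative sizing, assuming the usual testpmd.h defaults of
	 * 2048 RX and 2048 TX descriptors, 512-packet bursts and a
	 * 250-mbuf cache: with 4 forwarding lcores a pool then needs
	 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs per port.
	 */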
566 
567 	if (!numa_support) {
568 		if (socket_num == UMA_NO_CONFIG)
569 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
570 		else
571 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
572 						 socket_num);
573 	}
574 
575 	RTE_ETH_FOREACH_DEV(pid) {
576 		port = &ports[pid];
577 		rte_eth_dev_info_get(pid, &port->dev_info);
578 
579 		if (numa_support) {
580 			if (port_numa[pid] != NUMA_NO_CONFIG)
581 				port_per_socket[port_numa[pid]]++;
582 			else {
583 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
584 
585 				/* if socket_id is invalid, set to 0 */
586 				if (check_socket_id(socket_id) < 0)
587 					socket_id = 0;
588 				port_per_socket[socket_id]++;
589 			}
590 		}
591 
592 		/* set flag to initialize port/queue */
593 		port->need_reconfig = 1;
594 		port->need_reconfig_queues = 1;
595 	}
596 
597 	if (numa_support) {
598 		uint8_t i;
599 		unsigned int nb_mbuf;
600 
601 		if (param_total_num_mbufs)
602 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
603 
604 		for (i = 0; i < max_socket; i++) {
605 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
606 			if (nb_mbuf)
607 				mbuf_pool_create(mbuf_data_size,
608 						nb_mbuf, i);
609 		}
610 	}
611 	init_port_config();
612 
613 	/*
614 	 * Records which Mbuf pool to use by each logical core, if needed.
615 	 */
616 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
617 		mbp = mbuf_pool_find(
618 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
619 
620 		if (mbp == NULL)
621 			mbp = mbuf_pool_find(0);
622 		fwd_lcores[lc_id]->mbp = mbp;
623 	}
624 
625 	/* Configuration of packet forwarding streams. */
626 	if (init_fwd_streams() < 0)
627 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
628 
629 	fwd_config_setup();
630 }
631 
632 
633 void
634 reconfig(portid_t new_port_id, unsigned socket_id)
635 {
636 	struct rte_port *port;
637 
638 	/* Reconfiguration of Ethernet ports. */
639 	port = &ports[new_port_id];
640 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
641 
642 	/* set flag to initialize port/queue */
643 	port->need_reconfig = 1;
644 	port->need_reconfig_queues = 1;
645 	port->socket_id = socket_id;
646 
647 	init_port_config();
648 }
649 
650 
651 int
652 init_fwd_streams(void)
653 {
654 	portid_t pid;
655 	struct rte_port *port;
656 	streamid_t sm_id, nb_fwd_streams_new;
657 	queueid_t q;
658 
659 	/* Set the socket ID according to whether NUMA is enabled. */
660 	RTE_ETH_FOREACH_DEV(pid) {
661 		port = &ports[pid];
662 		if (nb_rxq > port->dev_info.max_rx_queues) {
663 			printf("Fail: nb_rxq(%d) is greater than "
664 				"max_rx_queues(%d)\n", nb_rxq,
665 				port->dev_info.max_rx_queues);
666 			return -1;
667 		}
668 		if (nb_txq > port->dev_info.max_tx_queues) {
669 			printf("Fail: nb_txq(%d) is greater than "
670 				"max_tx_queues(%d)\n", nb_txq,
671 				port->dev_info.max_tx_queues);
672 			return -1;
673 		}
674 		if (numa_support) {
675 			if (port_numa[pid] != NUMA_NO_CONFIG)
676 				port->socket_id = port_numa[pid];
677 			else {
678 				port->socket_id = rte_eth_dev_socket_id(pid);
679 
680 				/* if socket_id is invalid, set to 0 */
681 				if (check_socket_id(port->socket_id) < 0)
682 					port->socket_id = 0;
683 			}
684 		} else {
686 			if (socket_num == UMA_NO_CONFIG)
687 				port->socket_id = 0;
688 			else
689 				port->socket_id = socket_num;
690 		}
691 	}
692 
693 	q = RTE_MAX(nb_rxq, nb_txq);
694 	if (q == 0) {
695 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
696 		return -1;
697 	}
698 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
699 	if (nb_fwd_streams_new == nb_fwd_streams)
700 		return 0;
701 	/* clear the old */
702 	if (fwd_streams != NULL) {
703 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
704 			if (fwd_streams[sm_id] == NULL)
705 				continue;
706 			rte_free(fwd_streams[sm_id]);
707 			fwd_streams[sm_id] = NULL;
708 		}
709 		rte_free(fwd_streams);
710 		fwd_streams = NULL;
711 	}
712 
713 	/* init new */
714 	nb_fwd_streams = nb_fwd_streams_new;
715 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
716 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
717 	if (fwd_streams == NULL)
718 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
719 						"failed\n", nb_fwd_streams);
720 
721 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
722 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
723 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
724 		if (fwd_streams[sm_id] == NULL)
725 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
726 								" failed\n");
727 	}
728 
729 	return 0;
730 }
731 
732 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
733 static void
734 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
735 {
736 	unsigned int total_burst;
737 	unsigned int nb_burst;
738 	unsigned int burst_stats[3];
739 	uint16_t pktnb_stats[3];
740 	uint16_t nb_pkt;
741 	int burst_percent[3];
742 
743 	/*
744 	 * First compute the total number of packet bursts and the
745 	 * two highest numbers of bursts of the same number of packets.
746 	 */
747 	total_burst = 0;
748 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
749 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
750 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
751 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
752 		if (nb_burst == 0)
753 			continue;
754 		total_burst += nb_burst;
755 		if (nb_burst > burst_stats[0]) {
756 			burst_stats[1] = burst_stats[0];
757 			pktnb_stats[1] = pktnb_stats[0];
758 			burst_stats[0] = nb_burst;
759 			pktnb_stats[0] = nb_pkt;
760 		} else if (nb_burst > burst_stats[1]) {
			/* also track the runner-up burst size, as the
			 * comment above promises */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
761 	}
762 	if (total_burst == 0)
763 		return;
764 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
765 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
766 	       burst_percent[0], (int) pktnb_stats[0]);
767 	if (burst_stats[0] == total_burst) {
768 		printf("]\n");
769 		return;
770 	}
771 	if (burst_stats[0] + burst_stats[1] == total_burst) {
772 		printf(" + %d%% of %d pkts]\n",
773 		       100 - burst_percent[0], pktnb_stats[1]);
774 		return;
775 	}
776 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
777 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
778 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
779 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
780 		return;
781 	}
782 	printf(" + %d%% of %d pkts + %d%% of others]\n",
783 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
784 }
785 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
786 
787 static void
788 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
789 {
790 	struct rte_port *port;
791 	uint8_t i;
792 
793 	static const char *fwd_stats_border = "----------------------";
794 
795 	port = &ports[port_id];
796 	printf("\n  %s Forward statistics for port %-2d %s\n",
797 	       fwd_stats_border, port_id, fwd_stats_border);
798 
799 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
800 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
801 		       "%-"PRIu64"\n",
802 		       stats->ipackets, stats->imissed,
803 		       (uint64_t) (stats->ipackets + stats->imissed));
804 
805 		if (cur_fwd_eng == &csum_fwd_engine)
806 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
807 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
808 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
809 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
810 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
811 		}
812 
813 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
814 		       "%-"PRIu64"\n",
815 		       stats->opackets, port->tx_dropped,
816 		       (uint64_t) (stats->opackets + port->tx_dropped));
817 	} else {
819 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
820 		       "%14"PRIu64"\n",
821 		       stats->ipackets, stats->imissed,
822 		       (uint64_t) (stats->ipackets + stats->imissed));
823 
824 		if (cur_fwd_eng == &csum_fwd_engine)
825 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
826 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
827 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
828 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
829 			printf("  RX-nombufs:             %14"PRIu64"\n",
830 			       stats->rx_nombuf);
831 		}
832 
833 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
834 		       "%14"PRIu64"\n",
835 		       stats->opackets, port->tx_dropped,
836 		       (uint64_t) (stats->opackets + port->tx_dropped));
837 	}
838 
839 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
840 	if (port->rx_stream)
841 		pkt_burst_stats_display("RX",
842 			&port->rx_stream->rx_burst_stats);
843 	if (port->tx_stream)
844 		pkt_burst_stats_display("TX",
845 			&port->tx_stream->tx_burst_stats);
846 #endif
847 
848 	if (port->rx_queue_stats_mapping_enabled) {
849 		printf("\n");
850 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
851 			printf("  Stats reg %2d RX-packets:%14"PRIu64
852 			       "     RX-errors:%14"PRIu64
853 			       "    RX-bytes:%14"PRIu64"\n",
854 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
855 		}
856 		printf("\n");
857 	}
858 	if (port->tx_queue_stats_mapping_enabled) {
859 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
860 			printf("  Stats reg %2d TX-packets:%14"PRIu64
861 			       "                                 TX-bytes:%14"PRIu64"\n",
862 			       i, stats->q_opackets[i], stats->q_obytes[i]);
863 		}
864 	}
865 
866 	printf("  %s--------------------------------%s\n",
867 	       fwd_stats_border, fwd_stats_border);
868 }
869 
870 static void
871 fwd_stream_stats_display(streamid_t stream_id)
872 {
873 	struct fwd_stream *fs;
874 	static const char *fwd_top_stats_border = "-------";
875 
876 	fs = fwd_streams[stream_id];
877 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
878 	    (fs->fwd_dropped == 0))
879 		return;
880 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
881 	       "TX Port=%2d/Queue=%2d %s\n",
882 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
883 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
884 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
885 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
886 
887 	/* If in checksum forwarding mode, also display checksum error counters. */
888 	if (cur_fwd_eng == &csum_fwd_engine) {
889 		printf("  RX - bad IP checksum: %-14u  RX - bad L4 checksum: "
890 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
891 	}
892 
893 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
894 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
895 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
896 #endif
897 }
898 
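/*
 * Drain any packets still sitting in the RX queues of the forwarding
 * ports so that a forwarding session starts from a clean state. Called
 * from start_packet_forwarding() unless --no-flush-rx was given.
 */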
899 static void
900 flush_fwd_rx_queues(void)
901 {
902 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
903 	portid_t  rxp;
904 	portid_t port_id;
905 	queueid_t rxq;
906 	uint16_t  nb_rx;
907 	uint16_t  i;
908 	uint8_t   j;
909 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
910 	uint64_t timer_period;
911 
912 	/* convert to number of cycles */
913 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
914 
915 	for (j = 0; j < 2; j++) {
916 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
917 			for (rxq = 0; rxq < nb_rxq; rxq++) {
918 				port_id = fwd_ports_ids[rxp];
919 				/*
920 				 * testpmd can get stuck in the do-while loop
921 				 * below if rte_eth_rx_burst() keeps returning
922 				 * a nonzero number of packets, so a 1-second
923 				 * timer is used to force an exit.
924 				 */
925 				prev_tsc = rte_rdtsc();
926 				do {
927 					nb_rx = rte_eth_rx_burst(port_id, rxq,
928 						pkts_burst, MAX_PKT_BURST);
929 					for (i = 0; i < nb_rx; i++)
930 						rte_pktmbuf_free(pkts_burst[i]);
931 
932 					cur_tsc = rte_rdtsc();
933 					diff_tsc = cur_tsc - prev_tsc;
934 					timer_tsc += diff_tsc;
935 				} while ((nb_rx > 0) &&
936 					(timer_tsc < timer_period));
937 				timer_tsc = 0;
938 			}
939 		}
940 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
941 	}
942 }
943 
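/*
 * Forwarding loop of one logical core: invoke the engine's forwarding
 * callback on every stream mapped to this lcore until the core is asked
 * to stop, refreshing the bitrate/latency statistics when those features
 * are compiled in.
 */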
944 static void
945 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
946 {
947 	struct fwd_stream **fsm;
948 	streamid_t nb_fs;
949 	streamid_t sm_id;
950 #ifdef RTE_LIBRTE_BITRATE
951 	uint64_t tics_per_1sec;
952 	uint64_t tics_datum;
953 	uint64_t tics_current;
954 	uint8_t idx_port, cnt_ports;
955 
956 	cnt_ports = rte_eth_dev_count();
957 	tics_datum = rte_rdtsc();
958 	tics_per_1sec = rte_get_timer_hz();
959 #endif
960 	fsm = &fwd_streams[fc->stream_idx];
961 	nb_fs = fc->stream_nb;
962 	do {
963 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
964 			(*pkt_fwd)(fsm[sm_id]);
965 #ifdef RTE_LIBRTE_BITRATE
966 		tics_current = rte_rdtsc();
967 		if (tics_current - tics_datum >= tics_per_1sec) {
968 			/* Periodic bitrate calculation */
969 			for (idx_port = 0; idx_port < cnt_ports; idx_port++)
970 				rte_stats_bitrate_calc(bitrate_data, idx_port);
971 			tics_datum = tics_current;
972 		}
973 #endif
974 #ifdef RTE_LIBRTE_LATENCY_STATS
975 		if (latencystats_lcore_id == rte_lcore_id())
976 			rte_latencystats_update();
977 #endif
978 
979 	} while (! fc->stopped);
980 }
981 
982 static int
983 start_pkt_forward_on_core(void *fwd_arg)
984 {
985 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
986 			     cur_fwd_config.fwd_eng->packet_fwd);
987 	return 0;
988 }
989 
990 /*
991  * Run the TXONLY packet forwarding engine to send a single burst of packets.
992  * Used to start communication flows in network loopback test configurations.
993  */
994 static int
995 run_one_txonly_burst_on_core(void *fwd_arg)
996 {
997 	struct fwd_lcore *fwd_lc;
998 	struct fwd_lcore tmp_lcore;
999 
1000 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1001 	tmp_lcore = *fwd_lc;
1002 	tmp_lcore.stopped = 1;
1003 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1004 	return 0;
1005 }
1006 
1007 /*
1008  * Launch packet forwarding:
1009  *     - Set up the per-port forwarding context.
1010  *     - Launch logical cores with their forwarding configuration.
1011  */
1012 static void
1013 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1014 {
1015 	port_fwd_begin_t port_fwd_begin;
1016 	unsigned int i;
1017 	unsigned int lc_id;
1018 	int diag;
1019 
1020 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1021 	if (port_fwd_begin != NULL) {
1022 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1023 			(*port_fwd_begin)(fwd_ports_ids[i]);
1024 	}
1025 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1026 		lc_id = fwd_lcores_cpuids[i];
1027 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1028 			fwd_lcores[i]->stopped = 0;
1029 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1030 						     fwd_lcores[i], lc_id);
1031 			if (diag != 0)
1032 				printf("launch lcore %u failed - diag=%d\n",
1033 				       lc_id, diag);
1034 		}
1035 	}
1036 }
1037 
1038 /*
1039  * Launch the packet forwarding configuration. When with_tx_first is
 * non-zero, run the TXONLY engine for that many bursts first.
1040  */
1041 void
1042 start_packet_forwarding(int with_tx_first)
1043 {
1044 	port_fwd_begin_t port_fwd_begin;
1045 	port_fwd_end_t  port_fwd_end;
1046 	struct rte_port *port;
1047 	unsigned int i;
1048 	portid_t   pt_id;
1049 	streamid_t sm_id;
1050 
1051 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1052 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1053 
1054 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1055 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1056 
1057 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1058 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1059 		(!nb_rxq || !nb_txq))
1060 		rte_exit(EXIT_FAILURE,
1061 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
1062 			cur_fwd_eng->fwd_mode_name);
1063 
1064 	if (all_ports_started() == 0) {
1065 		printf("Not all ports were started\n");
1066 		return;
1067 	}
1068 	if (test_done == 0) {
1069 		printf("Packet forwarding already started\n");
1070 		return;
1071 	}
1072 
1073 	if (init_fwd_streams() < 0) {
1074 		printf("Fail from init_fwd_streams()\n");
1075 		return;
1076 	}
1077 
1078 	if (dcb_test) {
1079 		for (i = 0; i < nb_fwd_ports; i++) {
1080 			pt_id = fwd_ports_ids[i];
1081 			port = &ports[pt_id];
1082 			if (!port->dcb_flag) {
1083 				printf("In DCB mode, all forwarding ports must "
1084 					"be configured in this mode.\n");
1085 				return;
1086 			}
1087 		}
1088 		if (nb_fwd_lcores == 1) {
1089 			printf("In DCB mode,the nb forwarding cores "
1090                                "should be larger than 1.\n");
1091 			return;
1092 		}
1093 	}
1094 	test_done = 0;
1095 
1096 	if (!no_flush_rx)
1097 		flush_fwd_rx_queues();
1098 
1099 	fwd_config_setup();
1100 	pkt_fwd_config_display(&cur_fwd_config);
1101 	rxtx_config_display();
1102 
1103 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1104 		pt_id = fwd_ports_ids[i];
1105 		port = &ports[pt_id];
1106 		rte_eth_stats_get(pt_id, &port->stats);
1107 		port->tx_dropped = 0;
1108 
1109 		map_port_queue_stats_mapping_registers(pt_id, port);
1110 	}
1111 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1112 		fwd_streams[sm_id]->rx_packets = 0;
1113 		fwd_streams[sm_id]->tx_packets = 0;
1114 		fwd_streams[sm_id]->fwd_dropped = 0;
1115 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1116 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1117 
1118 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1119 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1120 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1121 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1122 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1123 #endif
1124 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1125 		fwd_streams[sm_id]->core_cycles = 0;
1126 #endif
1127 	}
1128 	if (with_tx_first) {
1129 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1130 		if (port_fwd_begin != NULL) {
1131 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1132 				(*port_fwd_begin)(fwd_ports_ids[i]);
1133 		}
1134 		while (with_tx_first--) {
1135 			launch_packet_forwarding(
1136 					run_one_txonly_burst_on_core);
1137 			rte_eal_mp_wait_lcore();
1138 		}
1139 		port_fwd_end = tx_only_engine.port_fwd_end;
1140 		if (port_fwd_end != NULL) {
1141 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1142 				(*port_fwd_end)(fwd_ports_ids[i]);
1143 		}
1144 	}
1145 	launch_packet_forwarding(start_pkt_forward_on_core);
1146 }
1147 
1148 void
1149 stop_packet_forwarding(void)
1150 {
1151 	struct rte_eth_stats stats;
1152 	struct rte_port *port;
1153 	port_fwd_end_t  port_fwd_end;
1154 	int i;
1155 	portid_t   pt_id;
1156 	streamid_t sm_id;
1157 	lcoreid_t  lc_id;
1158 	uint64_t total_recv;
1159 	uint64_t total_xmit;
1160 	uint64_t total_rx_dropped;
1161 	uint64_t total_tx_dropped;
1162 	uint64_t total_rx_nombuf;
1163 	uint64_t tx_dropped;
1164 	uint64_t rx_bad_ip_csum;
1165 	uint64_t rx_bad_l4_csum;
1166 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1167 	uint64_t fwd_cycles;
1168 #endif
1169 	static const char *acc_stats_border = "+++++++++++++++";
1170 
1171 	if (test_done) {
1172 		printf("Packet forwarding not started\n");
1173 		return;
1174 	}
1175 	printf("Telling cores to stop...");
1176 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1177 		fwd_lcores[lc_id]->stopped = 1;
1178 	printf("\nWaiting for lcores to finish...\n");
1179 	rte_eal_mp_wait_lcore();
1180 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1181 	if (port_fwd_end != NULL) {
1182 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1183 			pt_id = fwd_ports_ids[i];
1184 			(*port_fwd_end)(pt_id);
1185 		}
1186 	}
1187 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1188 	fwd_cycles = 0;
1189 #endif
1190 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1191 		if (cur_fwd_config.nb_fwd_streams >
1192 		    cur_fwd_config.nb_fwd_ports) {
1193 			fwd_stream_stats_display(sm_id);
1194 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1195 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1196 		} else {
1197 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1198 				fwd_streams[sm_id];
1199 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1200 				fwd_streams[sm_id];
1201 		}
1202 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1203 		tx_dropped = (uint64_t) (tx_dropped +
1204 					 fwd_streams[sm_id]->fwd_dropped);
1205 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1206 
1207 		rx_bad_ip_csum =
1208 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1209 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1210 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1211 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1212 							rx_bad_ip_csum;
1213 
1214 		rx_bad_l4_csum =
1215 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1216 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1217 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1218 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1219 							rx_bad_l4_csum;
1220 
1221 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1222 		fwd_cycles = (uint64_t) (fwd_cycles +
1223 					 fwd_streams[sm_id]->core_cycles);
1224 #endif
1225 	}
1226 	total_recv = 0;
1227 	total_xmit = 0;
1228 	total_rx_dropped = 0;
1229 	total_tx_dropped = 0;
1230 	total_rx_nombuf  = 0;
1231 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1232 		pt_id = fwd_ports_ids[i];
1233 
1234 		port = &ports[pt_id];
1235 		rte_eth_stats_get(pt_id, &stats);
1236 		stats.ipackets -= port->stats.ipackets;
1237 		port->stats.ipackets = 0;
1238 		stats.opackets -= port->stats.opackets;
1239 		port->stats.opackets = 0;
1240 		stats.ibytes   -= port->stats.ibytes;
1241 		port->stats.ibytes = 0;
1242 		stats.obytes   -= port->stats.obytes;
1243 		port->stats.obytes = 0;
1244 		stats.imissed  -= port->stats.imissed;
1245 		port->stats.imissed = 0;
1246 		stats.oerrors  -= port->stats.oerrors;
1247 		port->stats.oerrors = 0;
1248 		stats.rx_nombuf -= port->stats.rx_nombuf;
1249 		port->stats.rx_nombuf = 0;
1250 
1251 		total_recv += stats.ipackets;
1252 		total_xmit += stats.opackets;
1253 		total_rx_dropped += stats.imissed;
1254 		total_tx_dropped += port->tx_dropped;
1255 		total_rx_nombuf  += stats.rx_nombuf;
1256 
1257 		fwd_port_stats_display(pt_id, &stats);
1258 	}
1259 	printf("\n  %s Accumulated forward statistics for all ports"
1260 	       "%s\n",
1261 	       acc_stats_border, acc_stats_border);
1262 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1263 	       "%-"PRIu64"\n"
1264 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1265 	       "%-"PRIu64"\n",
1266 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1267 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1268 	if (total_rx_nombuf > 0)
1269 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1270 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1271 	       "%s\n",
1272 	       acc_stats_border, acc_stats_border);
1273 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1274 	if (total_recv > 0)
1275 		printf("\n  CPU cycles/packet=%u (total cycles="
1276 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1277 		       (unsigned int)(fwd_cycles / total_recv),
1278 		       fwd_cycles, total_recv);
1279 #endif
1280 	printf("\nDone.\n");
1281 	test_done = 1;
1282 }
1283 
1284 void
1285 dev_set_link_up(portid_t pid)
1286 {
1287 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1288 		printf("\nSet link up fail.\n");
1289 }
1290 
1291 void
1292 dev_set_link_down(portid_t pid)
1293 {
1294 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1295 		printf("\nSet link down fail.\n");
1296 }
1297 
1298 static int
1299 all_ports_started(void)
1300 {
1301 	portid_t pi;
1302 	struct rte_port *port;
1303 
1304 	RTE_ETH_FOREACH_DEV(pi) {
1305 		port = &ports[pi];
1306 		/* Check if there is a port which is not started */
1307 		if ((port->port_status != RTE_PORT_STARTED) &&
1308 			(port->slave_flag == 0))
1309 			return 0;
1310 	}
1311 
1312 	/* All (non-slave) ports are started */
1313 	return 1;
1314 }
1315 
1316 int
1317 all_ports_stopped(void)
1318 {
1319 	portid_t pi;
1320 	struct rte_port *port;
1321 
1322 	RTE_ETH_FOREACH_DEV(pi) {
1323 		port = &ports[pi];
1324 		if ((port->port_status != RTE_PORT_STOPPED) &&
1325 			(port->slave_flag == 0))
1326 			return 0;
1327 	}
1328 
1329 	return 1;
1330 }
1331 
1332 int
1333 port_is_started(portid_t port_id)
1334 {
1335 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1336 		return 0;
1337 
1338 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1339 		return 0;
1340 
1341 	return 1;
1342 }
1343 
1344 static int
1345 port_is_closed(portid_t port_id)
1346 {
1347 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1348 		return 0;
1349 
1350 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1351 		return 0;
1352 
1353 	return 1;
1354 }
1355 
1356 int
1357 start_port(portid_t pid)
1358 {
1359 	int diag, need_check_link_status = -1;
1360 	portid_t pi;
1361 	queueid_t qi;
1362 	struct rte_port *port;
1363 	struct ether_addr mac_addr;
1364 	enum rte_eth_event_type event_type;
1365 
1366 	if (port_id_is_invalid(pid, ENABLED_WARN))
1367 		return 0;
1368 
1369 	if (dcb_config)
1370 		dcb_test = 1;
1371 	RTE_ETH_FOREACH_DEV(pi) {
1372 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1373 			continue;
1374 
1375 		need_check_link_status = 0;
1376 		port = &ports[pi];
1377 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1378 						 RTE_PORT_HANDLING) == 0) {
1379 			printf("Port %d is now not stopped\n", pi);
1380 			continue;
1381 		}
1382 
1383 		if (port->need_reconfig > 0) {
1384 			port->need_reconfig = 0;
1385 
1386 			printf("Configuring Port %d (socket %u)\n", pi,
1387 					port->socket_id);
1388 			/* configure port */
1389 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1390 						&(port->dev_conf));
1391 			if (diag != 0) {
1392 				if (rte_atomic16_cmpset(&(port->port_status),
1393 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1394 					printf("Port %d cannot be set back "
1395 							"to stopped\n", pi);
1396 				printf("Failed to configure port %d\n", pi);
1397 				/* try to reconfigure port next time */
1398 				port->need_reconfig = 1;
1399 				return -1;
1400 			}
1401 		}
1402 		if (port->need_reconfig_queues > 0) {
1403 			port->need_reconfig_queues = 0;
1404 			/* setup tx queues */
1405 			for (qi = 0; qi < nb_txq; qi++) {
1406 				if ((numa_support) &&
1407 					(txring_numa[pi] != NUMA_NO_CONFIG))
1408 					diag = rte_eth_tx_queue_setup(pi, qi,
1409 						nb_txd, txring_numa[pi],
1410 						&(port->tx_conf));
1411 				else
1412 					diag = rte_eth_tx_queue_setup(pi, qi,
1413 						nb_txd, port->socket_id,
1414 						&(port->tx_conf));
1415 
1416 				if (diag == 0)
1417 					continue;
1418 
1419 				/* Failed to set up a TX queue; return. */
1420 				if (rte_atomic16_cmpset(&(port->port_status),
1421 							RTE_PORT_HANDLING,
1422 							RTE_PORT_STOPPED) == 0)
1423 					printf("Port %d cannot be set back "
1424 							"to stopped\n", pi);
1425 				printf("Failed to configure port %d tx queues\n", pi);
1426 				/* try to reconfigure queues next time */
1427 				port->need_reconfig_queues = 1;
1428 				return -1;
1429 			}
1430 			/* setup rx queues */
1431 			for (qi = 0; qi < nb_rxq; qi++) {
1432 				if ((numa_support) &&
1433 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1434 					struct rte_mempool * mp =
1435 						mbuf_pool_find(rxring_numa[pi]);
1436 					if (mp == NULL) {
1437 						printf("Failed to set up RX queue: "
1438 							"no mempool allocation "
1439 							"on socket %d\n",
1440 							rxring_numa[pi]);
1441 						return -1;
1442 					}
1443 
1444 					diag = rte_eth_rx_queue_setup(pi, qi,
1445 					     nb_rxd, rxring_numa[pi],
1446 					     &(port->rx_conf), mp);
1447 				} else {
1448 					struct rte_mempool *mp =
1449 						mbuf_pool_find(port->socket_id);
1450 					if (mp == NULL) {
1451 						printf("Failed to set up RX queue: "
1452 							"no mempool allocation "
1453 							"on socket %d\n",
1454 							port->socket_id);
1455 						return -1;
1456 					}
1457 					diag = rte_eth_rx_queue_setup(pi, qi,
1458 					     nb_rxd, port->socket_id,
1459 					     &(port->rx_conf), mp);
1460 				}
1461 				if (diag == 0)
1462 					continue;
1463 
1464 				/* Failed to set up an RX queue; return. */
1465 				if (rte_atomic16_cmpset(&(port->port_status),
1466 							RTE_PORT_HANDLING,
1467 							RTE_PORT_STOPPED) == 0)
1468 					printf("Port %d cannot be set back "
1469 							"to stopped\n", pi);
1470 				printf("Failed to configure port %d rx queues\n", pi);
1471 				/* try to reconfigure queues next time */
1472 				port->need_reconfig_queues = 1;
1473 				return -1;
1474 			}
1475 		}
1476 
1477 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1478 		     event_type < RTE_ETH_EVENT_MAX;
1479 		     event_type++) {
1480 			diag = rte_eth_dev_callback_register(pi,
1481 							event_type,
1482 							eth_event_callback,
1483 							NULL);
1484 			if (diag) {
1485 				printf("Failed to setup even callback for event %d\n",
1486 					event_type);
1487 				return -1;
1488 			}
1489 		}
1490 
1491 		/* start port */
1492 		if (rte_eth_dev_start(pi) < 0) {
1493 			printf("Fail to start port %d\n", pi);
1494 
1495 			/* Fail to setup rx queue, return */
1496 			if (rte_atomic16_cmpset(&(port->port_status),
1497 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1498 				printf("Port %d can not be set back to "
1499 							"stopped\n", pi);
1500 			continue;
1501 		}
1502 
1503 		if (rte_atomic16_cmpset(&(port->port_status),
1504 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1505 			printf("Port %d can not be set into started\n", pi);
1506 
1507 		rte_eth_macaddr_get(pi, &mac_addr);
1508 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1509 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1510 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1511 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1512 
1513 		/* At least one port was started; the link status needs checking. */
1514 		need_check_link_status = 1;
1515 	}
1516 
1517 	if (need_check_link_status == 1 && !no_link_check)
1518 		check_all_ports_link_status(RTE_PORT_ALL);
1519 	else if (need_check_link_status == 0)
1520 		printf("Please stop the ports first\n");
1521 
1522 	printf("Done\n");
1523 	return 0;
1524 }
1525 
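/*
 * Stop the given port, or all ports when pid is RTE_PORT_ALL, skipping
 * ports that are still forwarding or attached as bonding slaves.
 */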
1526 void
1527 stop_port(portid_t pid)
1528 {
1529 	portid_t pi;
1530 	struct rte_port *port;
1531 	int need_check_link_status = 0;
1532 
1533 	if (dcb_test) {
1534 		dcb_test = 0;
1535 		dcb_config = 0;
1536 	}
1537 
1538 	if (port_id_is_invalid(pid, ENABLED_WARN))
1539 		return;
1540 
1541 	printf("Stopping ports...\n");
1542 
1543 	RTE_ETH_FOREACH_DEV(pi) {
1544 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1545 			continue;
1546 
1547 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1548 			printf("Please remove port %d from forwarding configuration.\n", pi);
1549 			continue;
1550 		}
1551 
1552 		if (port_is_bonding_slave(pi)) {
1553 			printf("Please remove port %d from bonded device.\n", pi);
1554 			continue;
1555 		}
1556 
1557 		port = &ports[pi];
1558 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1559 						RTE_PORT_HANDLING) == 0)
1560 			continue;
1561 
1562 		rte_eth_dev_stop(pi);
1563 
1564 		if (rte_atomic16_cmpset(&(port->port_status),
1565 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1566 			printf("Port %d can not be set into stopped\n", pi);
1567 		need_check_link_status = 1;
1568 	}
1569 	if (need_check_link_status && !no_link_check)
1570 		check_all_ports_link_status(RTE_PORT_ALL);
1571 
1572 	printf("Done\n");
1573 }
1574 
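/*
 * Close the given port, or all ports when pid is RTE_PORT_ALL: flush its
 * flow rules and release the device. The port must already be stopped.
 */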
1575 void
1576 close_port(portid_t pid)
1577 {
1578 	portid_t pi;
1579 	struct rte_port *port;
1580 
1581 	if (port_id_is_invalid(pid, ENABLED_WARN))
1582 		return;
1583 
1584 	printf("Closing ports...\n");
1585 
1586 	RTE_ETH_FOREACH_DEV(pi) {
1587 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1588 			continue;
1589 
1590 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1591 			printf("Please remove port %d from forwarding configuration.\n", pi);
1592 			continue;
1593 		}
1594 
1595 		if (port_is_bonding_slave(pi)) {
1596 			printf("Please remove port %d from bonded device.\n", pi);
1597 			continue;
1598 		}
1599 
1600 		port = &ports[pi];
1601 		if (rte_atomic16_cmpset(&(port->port_status),
1602 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1603 			printf("Port %d is already closed\n", pi);
1604 			continue;
1605 		}
1606 
1607 		if (rte_atomic16_cmpset(&(port->port_status),
1608 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1609 			printf("Port %d is now not stopped\n", pi);
1610 			continue;
1611 		}
1612 
1613 		if (port->flow_list)
1614 			port_flow_flush(pi);
1615 		rte_eth_dev_close(pi);
1616 
1617 		if (rte_atomic16_cmpset(&(port->port_status),
1618 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1619 			printf("Port %d cannot be set to closed\n", pi);
1620 	}
1621 
1622 	printf("Done\n");
1623 }
1624 
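/*
 * Hot-plug a device described by its devargs identifier (a PCI address
 * or virtual device name) and configure it with the current defaults.
 */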
1625 void
1626 attach_port(char *identifier)
1627 {
1628 	portid_t pi = 0;
1629 	unsigned int socket_id;
1630 
1631 	printf("Attaching a new port...\n");
1632 
1633 	if (identifier == NULL) {
1634 		printf("Invalid parameters are specified\n");
1635 		return;
1636 	}
1637 
1638 	if (rte_eth_dev_attach(identifier, &pi))
1639 		return;
1640 
1641 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1642 	/* if socket_id is invalid, set to 0 */
1643 	if (check_socket_id(socket_id) < 0)
1644 		socket_id = 0;
1645 	reconfig(pi, socket_id);
1646 	rte_eth_promiscuous_enable(pi);
1647 
1648 	nb_ports = rte_eth_dev_count();
1649 
1650 	ports[pi].port_status = RTE_PORT_STOPPED;
1651 
1652 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1653 	printf("Done\n");
1654 }
1655 
1656 void
1657 detach_port(uint8_t port_id)
1658 {
1659 	char name[RTE_ETH_NAME_MAX_LEN];
1660 
1661 	printf("Detaching a port...\n");
1662 
1663 	if (!port_is_closed(port_id)) {
1664 		printf("Please close port first\n");
1665 		return;
1666 	}
1667 
1668 	if (ports[port_id].flow_list)
1669 		port_flow_flush(port_id);
1670 
1671 	if (rte_eth_dev_detach(port_id, name))
1672 		return;
1673 
1674 	nb_ports = rte_eth_dev_count();
1675 
1676 	printf("Port '%s' is detached. Now total ports is %d\n",
1677 			name, nb_ports);
1678 	printf("Done\n");
1679 	return;
1680 }
1681 
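/*
 * Exit handler: stop a running forwarding session, then stop and close
 * every port before leaving testpmd.
 */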
1682 void
1683 pmd_test_exit(void)
1684 {
1685 	portid_t pt_id;
1686 
1687 	if (test_done == 0)
1688 		stop_packet_forwarding();
1689 
1690 	if (ports != NULL) {
1691 		no_link_check = 1;
1692 		RTE_ETH_FOREACH_DEV(pt_id) {
1693 			printf("\nShutting down port %d...\n", pt_id);
1694 			fflush(stdout);
1695 			stop_port(pt_id);
1696 			close_port(pt_id);
1697 		}
1698 	}
1699 	printf("\nBye...\n");
1700 }
1701 
1702 typedef void (*cmd_func_t)(void);
1703 struct pmd_test_command {
1704 	const char *cmd_name;
1705 	cmd_func_t cmd_func;
1706 };
1707 
1708 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1709 
1710 /* Check the link status of all ports for up to 9 seconds, then print it. */
1711 static void
1712 check_all_ports_link_status(uint32_t port_mask)
1713 {
1714 #define CHECK_INTERVAL 100 /* 100ms */
1715 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1716 	uint8_t portid, count, all_ports_up, print_flag = 0;
1717 	struct rte_eth_link link;
1718 
1719 	printf("Checking link statuses...\n");
1720 	fflush(stdout);
1721 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1722 		all_ports_up = 1;
1723 		RTE_ETH_FOREACH_DEV(portid) {
1724 			if ((port_mask & (1 << portid)) == 0)
1725 				continue;
1726 			memset(&link, 0, sizeof(link));
1727 			rte_eth_link_get_nowait(portid, &link);
1728 			/* print link status if flag set */
1729 			if (print_flag == 1) {
1730 				if (link.link_status)
1731 					printf("Port %d Link Up - speed %u "
1732 						"Mbps - %s\n", (uint8_t)portid,
1733 						(unsigned)link.link_speed,
1734 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1735 					("full-duplex") : ("half-duplex"));
1736 				else
1737 					printf("Port %d Link Down\n",
1738 						(uint8_t)portid);
1739 				continue;
1740 			}
1741 			/* clear all_ports_up flag if any link down */
1742 			if (link.link_status == ETH_LINK_DOWN) {
1743 				all_ports_up = 0;
1744 				break;
1745 			}
1746 		}
1747 		/* after finally printing all link status, get out */
1748 		if (print_flag == 1)
1749 			break;
1750 
1751 		if (all_ports_up == 0) {
1752 			fflush(stdout);
1753 			rte_delay_ms(CHECK_INTERVAL);
1754 		}
1755 
1756 		/* set the print_flag if all ports up or timeout */
1757 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1758 			print_flag = 1;
1759 		}
1760 
1761 		if (lsc_interrupt)
1762 			break;
1763 	}
1764 }
1765 
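/*
 * Deferred handler for device removal (RMV) interrupts: stop and close
 * the removed port, then detach the underlying device.
 */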
1766 static void
1767 rmv_event_callback(void *arg)
1768 {
1769 	struct rte_eth_dev *dev;
1770 	struct rte_devargs *da;
1771 	char name[32] = "";
1772 	uint8_t port_id = (intptr_t)arg;
1773 
1774 	RTE_ETH_VALID_PORTID_OR_RET(port_id);
1775 	dev = &rte_eth_devices[port_id];
1776 	da = dev->device->devargs;
1777 
1778 	stop_port(port_id);
1779 	close_port(port_id);
1780 	if (da->type == RTE_DEVTYPE_VIRTUAL)
1781 		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1782 	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1783 		rte_eal_pci_device_name(&da->pci.addr, name, sizeof(name));
1784 	printf("removing device %s\n", name);
1785 	rte_eal_dev_detach(name);
1786 	dev->state = RTE_ETH_DEV_UNUSED;
1787 }
1788 
1789 /* This function is used by the interrupt thread */
1790 static void
1791 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1792 {
1793 	static const char * const event_desc[] = {
1794 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1795 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1796 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1797 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1798 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1799 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1800 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1801 		[RTE_ETH_EVENT_MAX] = NULL,
1802 	};
1803 
1804 	RTE_SET_USED(param);
1805 
1806 	if (type >= RTE_ETH_EVENT_MAX) {
1807 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1808 			port_id, __func__, type);
1809 		fflush(stderr);
1810 	} else {
1811 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1812 			event_desc[type]);
1813 		fflush(stdout);
1814 	}
1815 
1816 	switch (type) {
1817 	case RTE_ETH_EVENT_INTR_RMV:
1818 		if (rte_eal_alarm_set(100000,
1819 				rmv_event_callback, (void *)(intptr_t)port_id))
1820 			fprintf(stderr, "Could not set up deferred device removal\n");
1821 		break;
1822 	default:
1823 		break;
1824 	}
1825 }
1826 
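/*
 * Program the TX queue to statistics-counter mappings given on the
 * command line into the device registers; return 0 on success or the
 * error code reported by the ethdev layer.
 */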
1827 static int
1828 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1829 {
1830 	uint16_t i;
1831 	int diag;
1832 	uint8_t mapping_found = 0;
1833 
1834 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1835 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1836 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1837 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1838 					tx_queue_stats_mappings[i].queue_id,
1839 					tx_queue_stats_mappings[i].stats_counter_id);
1840 			if (diag != 0)
1841 				return diag;
1842 			mapping_found = 1;
1843 		}
1844 	}
1845 	if (mapping_found)
1846 		port->tx_queue_stats_mapping_enabled = 1;
1847 	return 0;
1848 }
1849 
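/* RX counterpart of set_tx_queue_stats_mapping_registers() above. */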
1850 static int
1851 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1852 {
1853 	uint16_t i;
1854 	int diag;
1855 	uint8_t mapping_found = 0;
1856 
1857 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1858 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1859 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1860 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1861 					rx_queue_stats_mappings[i].queue_id,
1862 					rx_queue_stats_mappings[i].stats_counter_id);
1863 			if (diag != 0)
1864 				return diag;
1865 			mapping_found = 1;
1866 		}
1867 	}
1868 	if (mapping_found)
1869 		port->rx_queue_stats_mapping_enabled = 1;
1870 	return 0;
1871 }
1872 
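/*
 * Apply the TX and RX stats mappings to one port. -ENOTSUP only disables
 * the feature for that port; any other error is fatal.
 */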
1873 static void
1874 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1875 {
1876 	int diag = 0;
1877 
1878 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1879 	if (diag != 0) {
1880 		if (diag == -ENOTSUP) {
1881 			port->tx_queue_stats_mapping_enabled = 0;
1882 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
1883 		}
1884 		else
1885 			rte_exit(EXIT_FAILURE,
1886 					"set_tx_queue_stats_mapping_registers "
1887 					"failed for port id=%d diag=%d\n",
1888 					pi, diag);
1889 	}
1890 
1891 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1892 	if (diag != 0) {
1893 		if (diag == -ENOTSUP) {
1894 			port->rx_queue_stats_mapping_enabled = 0;
1895 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
1896 		}
1897 		else
1898 			rte_exit(EXIT_FAILURE,
1899 					"set_rx_queue_stats_mapping_registers "
1900 					"failed for port id=%d diag=%d\n",
1901 					pi, diag);
1902 	}
1903 }
1904 
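/*
 * Start from the PMD defaults reported in dev_info and override only those
 * RX/TX thresholds and flags the user set explicitly on the command line.
 */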
1905 static void
1906 rxtx_port_config(struct rte_port *port)
1907 {
1908 	port->rx_conf = port->dev_info.default_rxconf;
1909 	port->tx_conf = port->dev_info.default_txconf;
1910 
1911 	/* Check if any RX/TX parameters have been passed */
1912 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1913 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1914 
1915 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1916 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1917 
1918 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1919 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1920 
1921 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1922 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1923 
1924 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1925 		port->rx_conf.rx_drop_en = rx_drop_en;
1926 
1927 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1928 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1929 
1930 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1931 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1932 
1933 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1934 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1935 
1936 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1937 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1938 
1939 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1940 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1941 
1942 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1943 		port->tx_conf.txq_flags = txq_flags;
1944 }
1945 
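/*
 * Default per-port configuration: RSS when several RX queues are in use,
 * queue-stats mappings, and LSC/RMV interrupts when both requested and
 * supported by the device.
 */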
1946 void
1947 init_port_config(void)
1948 {
1949 	portid_t pid;
1950 	struct rte_port *port;
1951 
1952 	RTE_ETH_FOREACH_DEV(pid) {
1953 		port = &ports[pid];
1954 		port->dev_conf.rxmode = rx_mode;
1955 		port->dev_conf.fdir_conf = fdir_conf;
1956 		port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1957 		if (nb_rxq > 1)
1958 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1959 		else
1960 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1963 
1964 		if (port->dcb_flag == 0) {
1965 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1966 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1967 			else
1968 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1969 		}
1970 
1971 		rxtx_port_config(port);
1972 
1973 		rte_eth_macaddr_get(pid, &port->eth_addr);
1974 
1975 		map_port_queue_stats_mapping_registers(pid, port);
1976 #ifdef RTE_NIC_BYPASS
1977 		rte_eth_dev_bypass_init(pid);
1978 #endif
1979 
1980 		if (lsc_interrupt &&
1981 		    (rte_eth_devices[pid].data->dev_flags &
1982 		     RTE_ETH_DEV_INTR_LSC))
1983 			port->dev_conf.intr_conf.lsc = 1;
1984 		if (rmv_interrupt &&
1985 		    (rte_eth_devices[pid].data->dev_flags &
1986 		     RTE_ETH_DEV_INTR_RMV))
1987 			port->dev_conf.intr_conf.rmv = 1;
1988 	}
1989 }
1990 
1991 void set_port_slave_flag(portid_t slave_pid)
1992 {
1993 	struct rte_port *port;
1994 
1995 	port = &ports[slave_pid];
1996 	port->slave_flag = 1;
1997 }
1998 
1999 void clear_port_slave_flag(portid_t slave_pid)
2000 {
2001 	struct rte_port *port;
2002 
2003 	port = &ports[slave_pid];
2004 	port->slave_flag = 0;
2005 }
2006 
2007 uint8_t port_is_bonding_slave(portid_t slave_pid)
2008 {
2009 	struct rte_port *port;
2010 
2011 	port = &ports[slave_pid];
2012 	return port->slave_flag;
2013 }
2014 
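/*
 * VLAN ids mapped onto the VMDq pools in get_eth_dcb_conf() and installed
 * as RX VLAN filters in init_port_dcb_config() below.
 */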
2015 const uint16_t vlan_tags[] = {
2016 		0,  1,  2,  3,  4,  5,  6,  7,
2017 		8,  9, 10, 11,  12, 13, 14, 15,
2018 		16, 17, 18, 19, 20, 21, 22, 23,
2019 		24, 25, 26, 27, 28, 29, 30, 31
2020 };
2021 
2022 static int
2023 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2024 		 enum dcb_mode_enable dcb_mode,
2025 		 enum rte_eth_nb_tcs num_tcs,
2026 		 uint8_t pfc_en)
2027 {
2028 	uint8_t i;
2029 
2030 	/*
2031 	 * Build the correct configuration for DCB+VT based on the vlan_tags
2032 	 * array given above and on the number of traffic classes available.
2033 	 */
2034 	if (dcb_mode == DCB_VT_ENABLED) {
2035 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2036 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
2037 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2038 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2039 
2040 		/* VMDQ+DCB RX and TX configurations */
2041 		vmdq_rx_conf->enable_default_pool = 0;
2042 		vmdq_rx_conf->default_pool = 0;
2043 		vmdq_rx_conf->nb_queue_pools =
2044 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2045 		vmdq_tx_conf->nb_queue_pools =
2046 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2047 
2048 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2049 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2050 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2051 			vmdq_rx_conf->pool_map[i].pools =
2052 				1 << (i % vmdq_rx_conf->nb_queue_pools);
2053 		}
2054 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2055 			vmdq_rx_conf->dcb_tc[i] = i;
2056 			vmdq_tx_conf->dcb_tc[i] = i;
2057 		}
2058 
2059 		/* set DCB mode of RX and TX of multiple queues */
2060 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2061 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2062 	} else {
2063 		struct rte_eth_dcb_rx_conf *rx_conf =
2064 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2065 		struct rte_eth_dcb_tx_conf *tx_conf =
2066 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2067 
2068 		rx_conf->nb_tcs = num_tcs;
2069 		tx_conf->nb_tcs = num_tcs;
2070 
2071 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2072 			rx_conf->dcb_tc[i] = i % num_tcs;
2073 			tx_conf->dcb_tc[i] = i % num_tcs;
2074 		}
2075 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2076 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2077 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2078 	}
2079 
2080 	if (pfc_en)
2081 		eth_conf->dcb_capability_en =
2082 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2083 	else
2084 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2085 
2086 	return 0;
2087 }
2088 
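/*
 * Reconfigure a port for DCB: build the DCB configuration, re-derive the
 * global nb_rxq/nb_txq queue counts for the chosen mode and install the
 * VLAN filters used by the VMDq pool mapping.
 */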
2089 int
2090 init_port_dcb_config(portid_t pid,
2091 		     enum dcb_mode_enable dcb_mode,
2092 		     enum rte_eth_nb_tcs num_tcs,
2093 		     uint8_t pfc_en)
2094 {
2095 	struct rte_eth_conf port_conf;
2096 	struct rte_port *rte_port;
2097 	int retval;
2098 	uint16_t i;
2099 
2100 	rte_port = &ports[pid];
2101 
2102 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2103 	/* Enter DCB configuration status */
2104 	dcb_config = 1;
2105 
2106 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2107 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2108 	if (retval < 0)
2109 		return retval;
2110 	port_conf.rxmode.hw_vlan_filter = 1;
2111 
2112 	/**
2113 	 * Write the configuration into the device.
2114 	 * Set the numbers of RX & TX queues to 0, so
2115 	 * the RX & TX queues will not be set up.
2116 	 */
2117 	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2118 
2119 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2120 
2121 	/* If dev_info.vmdq_pool_base is greater than 0,
2122 	 * the queue ids of the VMDq pools start after the PF queues.
2123 	 */
2124 	if (dcb_mode == DCB_VT_ENABLED &&
2125 	    rte_port->dev_info.vmdq_pool_base > 0) {
2126 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2127 			" for port %d.\n", pid);
2128 		return -1;
2129 	}
2130 
2131 	/* Assume all ports in testpmd have the same DCB capability
2132 	 * and the same number of RX/TX queues in DCB mode.
2133 	 */
2134 	if (dcb_mode == DCB_VT_ENABLED) {
2135 		if (rte_port->dev_info.max_vfs > 0) {
2136 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2137 			nb_txq = rte_port->dev_info.nb_tx_queues;
2138 		} else {
2139 			nb_rxq = rte_port->dev_info.max_rx_queues;
2140 			nb_txq = rte_port->dev_info.max_tx_queues;
2141 		}
2142 	} else {
2143 		/* If VT is disabled, use all PF queues */
2144 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2145 			nb_rxq = rte_port->dev_info.max_rx_queues;
2146 			nb_txq = rte_port->dev_info.max_tx_queues;
2147 		} else {
2148 			nb_rxq = (queueid_t)num_tcs;
2149 			nb_txq = (queueid_t)num_tcs;
2150 		}
2152 	}
2153 	rx_free_thresh = 64;
2154 
2155 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2156 
2157 	rxtx_port_config(rte_port);
2158 	/* VLAN filter */
2159 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2160 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2161 		rx_vft_set(pid, vlan_tags[i], 1);
2162 
2163 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2164 	map_port_queue_stats_mapping_registers(pid, rte_port);
2165 
2166 	rte_port->dcb_flag = 1;
2167 
2168 	return 0;
2169 }
2170 
2171 static void
2172 init_port(void)
2173 {
2174 	/* Configuration of Ethernet ports. */
2175 	ports = rte_zmalloc("testpmd: ports",
2176 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2177 			    RTE_CACHE_LINE_SIZE);
2178 	if (ports == NULL) {
2179 		rte_exit(EXIT_FAILURE,
2180 				"rte_zmalloc(%d struct rte_port) failed\n",
2181 				RTE_MAX_ETHPORTS);
2182 	}
2183 }
2184 
2185 static void
2186 force_quit(void)
2187 {
2188 	pmd_test_exit();
2189 	prompt_exit();
2190 }
2191 
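/*
 * SIGINT/SIGTERM handler: tear down the capture and latency frameworks,
 * stop all ports, then re-raise the signal with its default action so the
 * shell sees the expected exit status.
 */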
2192 static void
2193 signal_handler(int signum)
2194 {
2195 	if (signum == SIGINT || signum == SIGTERM) {
2196 		printf("\nSignal %d received, preparing to exit...\n",
2197 				signum);
2198 #ifdef RTE_LIBRTE_PDUMP
2199 		/* uninitialize packet capture framework */
2200 		rte_pdump_uninit();
2201 #endif
2202 #ifdef RTE_LIBRTE_LATENCY_STATS
2203 		rte_latencystats_uninit();
2204 #endif
2205 		force_quit();
2206 		/* exit with the expected status */
2207 		signal(signum, SIG_DFL);
2208 		kill(getpid(), signum);
2209 	}
2210 }
2211 
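/*
 * testpmd entry point: EAL init, port allocation and configuration, then
 * either the interactive command prompt or immediate packet forwarding.
 */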
2212 int
2213 main(int argc, char** argv)
2214 {
2215 	int  diag;
2216 	uint8_t port_id;
2217 
2218 	signal(SIGINT, signal_handler);
2219 	signal(SIGTERM, signal_handler);
2220 
2221 	diag = rte_eal_init(argc, argv);
2222 	if (diag < 0)
2223 		rte_panic("Cannot init EAL\n");
2224 
2225 #ifdef RTE_LIBRTE_PDUMP
2226 	/* initialize packet capture framework */
2227 	rte_pdump_init(NULL);
2228 #endif
2229 
2230 	nb_ports = (portid_t) rte_eth_dev_count();
2231 	if (nb_ports == 0)
2232 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2233 
2234 	/* allocate port structures, and init them */
2235 	init_port();
2236 
2237 	set_def_fwd_config();
2238 	if (nb_lcores == 0)
2239 		rte_panic("Empty set of forwarding logical cores - check the "
2240 			  "core mask supplied in the command parameters\n");
2241 
2242 	argc -= diag;
2243 	argv += diag;
2244 	if (argc > 1)
2245 		launch_args_parse(argc, argv);
2246 
2247 	if (!nb_rxq && !nb_txq)
2248 		printf("Warning: Either rx or tx queue count should be non-zero\n");
2249 
2250 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2251 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2252 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2253 		       nb_rxq, nb_txq);
2254 
2255 	init_config();
2256 	if (start_port(RTE_PORT_ALL) != 0)
2257 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2258 
2259 	/* set all ports to promiscuous mode by default */
2260 	RTE_ETH_FOREACH_DEV(port_id)
2261 		rte_eth_promiscuous_enable(port_id);
2262 
2263 	/* Init metrics library */
2264 	rte_metrics_init(rte_socket_id());
2265 
2266 #ifdef RTE_LIBRTE_LATENCY_STATS
2267 	if (latencystats_enabled != 0) {
2268 		int ret = rte_latencystats_init(1, NULL);
2269 		if (ret)
2270 			printf("Warning: latencystats init()"
2271 				" returned error %d\n", ret);
2272 		printf("Latencystats running on lcore %d\n",
2273 			latencystats_lcore_id);
2274 	}
2275 #endif
2276 
2277 	/* Setup bitrate stats */
2278 #ifdef RTE_LIBRTE_BITRATE
2279 	bitrate_data = rte_stats_bitrate_create();
2280 	if (bitrate_data == NULL)
2281 		rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
2282 	rte_stats_bitrate_reg(bitrate_data);
2283 #endif
2284 
2286 #ifdef RTE_LIBRTE_CMDLINE
2287 	if (interactive == 1) {
2288 		if (auto_start) {
2289 			printf("Start automatic packet forwarding\n");
2290 			start_packet_forwarding(0);
2291 		}
2292 		prompt();
2293 	} else
2294 #endif
2295 	{
2296 		char c;
2297 		int rc;
2298 
2299 		printf("No commandline core given, starting packet forwarding\n");
2300 		start_packet_forwarding(0);
2301 		printf("Press enter to exit\n");
2302 		rc = read(0, &c, 1);
2303 		pmd_test_exit();
2304 		if (rc < 0)
2305 			return 1;
2306 	}
2307 
2308 	return 0;
2309 }
2310