xref: /dpdk/app/test-pmd/testpmd.c (revision 76ad4a2d82d4d72c3a7ed4675d77268b5fae3cc9)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 #ifdef RTE_LIBRTE_PDUMP
79 #include <rte_pdump.h>
80 #endif
81 #include <rte_flow.h>
82 #include <rte_metrics.h>
83 #ifdef RTE_LIBRTE_BITRATE
84 #include <rte_bitrate.h>
85 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90 
91 #include "testpmd.h"
92 
93 uint16_t verbose_level = 0; /**< Silent by default. */
94 
95 /* Use the master core for the command line? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 
99 /*
100  * NUMA support configuration.
101  * When set, NUMA support attempts to dispatch the allocation of the
102  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
103  * probed ports, across the detected CPU sockets.
104  * Otherwise, all memory is allocated from CPU socket 0.
105  */
106 uint8_t numa_support = 0; /**< No numa support by default */
107 
108 /*
109  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
110  * not configured.
111  */
112 uint8_t socket_num = UMA_NO_CONFIG;
113 
114 /*
115  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
116  */
117 uint8_t mp_anon = 0;
118 
119 /*
120  * Record the Ethernet address of peer target ports to which packets are
121  * forwarded.
122  * Must be instantiated with the ethernet addresses of peer traffic generator
123  * ports.
124  */
125 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
126 portid_t nb_peer_eth_addrs = 0;
127 
128 /*
129  * Probed Target Environment.
130  */
131 struct rte_port *ports;	       /**< For all probed ethernet ports. */
132 portid_t nb_ports;             /**< Number of probed ethernet ports. */
133 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
134 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
135 
136 /*
137  * Test Forwarding Configuration.
138  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
139  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
140  */
141 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
142 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
143 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
144 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
145 
146 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
147 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
148 
149 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
150 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
151 
152 /*
153  * Forwarding engines.
154  */
155 struct fwd_engine *fwd_engines[] = {
156 	&io_fwd_engine,
157 	&mac_fwd_engine,
158 	&mac_swap_engine,
159 	&flow_gen_engine,
160 	&rx_only_engine,
161 	&tx_only_engine,
162 	&csum_fwd_engine,
163 	&icmp_echo_engine,
164 #ifdef RTE_LIBRTE_IEEE1588
165 	&ieee1588_fwd_engine,
166 #endif
167 	NULL,
168 };
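
/*
 * Illustrative sketch, not part of the original file: the NULL-terminated
 * fwd_engines[] table above is typically scanned by name when the user
 * selects a forwarding mode (testpmd's CLI code performs a similar walk).
 * The helper name below is hypothetical:
 *
 *	static struct fwd_engine *
 *	find_fwd_engine(const char *name)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; fwd_engines[i] != NULL; i++)
 *			if (strcmp(fwd_engines[i]->fwd_mode_name, name) == 0)
 *				return fwd_engines[i];
 *		return NULL;
 *	}
 */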
169 
170 struct fwd_config cur_fwd_config;
171 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
172 uint32_t retry_enabled;
173 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
174 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
175 
176 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
177 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
178                                       * specified on command-line. */
179 
180 /*
181  * Configuration of packet segments used by the "txonly" processing engine.
182  */
183 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
184 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
185 	TXONLY_DEF_PACKET_LEN,
186 };
187 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
188 
189 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
190 /**< Split policy for packets to TX. */
191 
192 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
193 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
194 
195 /* Whether the current configuration is in DCB mode; 0 means it is not. */
196 uint8_t dcb_config = 0;
197 
198 /* Whether DCB is in testing status. */
199 uint8_t dcb_test = 0;
200 
201 /*
202  * Configurable number of RX/TX queues.
203  */
204 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
205 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
206 
207 /*
208  * Configurable number of RX/TX ring descriptors.
209  */
210 #define RTE_TEST_RX_DESC_DEFAULT 128
211 #define RTE_TEST_TX_DESC_DEFAULT 512
212 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
213 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
214 
215 #define RTE_PMD_PARAM_UNSET -1
216 /*
217  * Configurable values of RX and TX ring threshold registers.
218  */
219 
220 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
221 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
223 
224 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
227 
228 /*
229  * Configurable value of RX free threshold.
230  */
231 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
232 
233 /*
234  * Configurable value of RX drop enable.
235  */
236 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
237 
238 /*
239  * Configurable value of TX free threshold.
240  */
241 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
242 
243 /*
244  * Configurable value of TX RS bit threshold.
245  */
246 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
247 
248 /*
249  * Configurable value of TX queue flags.
250  */
251 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
252 
253 /*
254  * Receive Side Scaling (RSS) configuration.
255  */
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
257 
258 /*
259  * Port topology configuration
260  */
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
262 
263 /*
264  * Avoid flushing all the RX streams before starting forwarding.
265  */
266 uint8_t no_flush_rx = 0; /* flush by default */
267 
268 /*
269  * Avoid checking the link status when starting/stopping a port.
270  */
271 uint8_t no_link_check = 0; /* check by default */
272 
273 /*
274  * NIC bypass mode configuration options.
275  */
276 #ifdef RTE_NIC_BYPASS
277 
278 /* The NIC bypass watchdog timeout. */
279 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
280 
281 #endif
282 
283 #ifdef RTE_LIBRTE_LATENCY_STATS
284 
285 /*
286  * Set when latency stats are enabled on the command line.
287  */
288 uint8_t latencystats_enabled;
289 
290 /*
291  * Lcore ID to serve latency statistics.
292  */
293 lcoreid_t latencystats_lcore_id = -1;
294 
295 #endif
296 
297 /*
298  * Ethernet device configuration.
299  */
300 struct rte_eth_rxmode rx_mode = {
301 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
302 	.split_hdr_size = 0,
303 	.header_split   = 0, /**< Header Split disabled. */
304 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
305 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
306 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
307 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
308 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
309 	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
310 };
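
/*
 * Illustrative example, not part of the original file: with this rxmode,
 * accepting jumbo frames would also require raising the maximum frame
 * length, e.g. (the 9000-byte value is assumed here for illustration):
 *
 *	rx_mode.jumbo_frame    = 1;
 *	rx_mode.max_rx_pkt_len = 9000;
 */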
311 
312 struct rte_fdir_conf fdir_conf = {
313 	.mode = RTE_FDIR_MODE_NONE,
314 	.pballoc = RTE_FDIR_PBALLOC_64K,
315 	.status = RTE_FDIR_REPORT_STATUS,
316 	.mask = {
317 		.vlan_tci_mask = 0x0,
318 		.ipv4_mask     = {
319 			.src_ip = 0xFFFFFFFF,
320 			.dst_ip = 0xFFFFFFFF,
321 		},
322 		.ipv6_mask     = {
323 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
324 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
325 		},
326 		.src_port_mask = 0xFFFF,
327 		.dst_port_mask = 0xFFFF,
328 		.mac_addr_byte_mask = 0xFF,
329 		.tunnel_type_mask = 1,
330 		.tunnel_id_mask = 0xFFFFFFFF,
331 	},
332 	.drop_queue = 127,
333 };
334 
335 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
336 
337 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
338 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
339 
340 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
341 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
342 
343 uint16_t nb_tx_queue_stats_mappings = 0;
344 uint16_t nb_rx_queue_stats_mappings = 0;
345 
346 unsigned max_socket = 0;
347 
348 /* Bitrate statistics */
349 struct rte_stats_bitrates *bitrate_data;
350 
351 /* Forward function declarations */
352 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
353 static void check_all_ports_link_status(uint32_t port_mask);
354 static void eth_event_callback(uint8_t port_id,
355 			       enum rte_eth_event_type type,
356 			       void *param);
357 
358 /*
359  * Check if all the ports are started.
360  * If yes, return a positive value; if not, return zero.
361  */
362 static int all_ports_started(void);
363 
364 /*
365  * Setup default configuration.
366  */
367 static void
368 set_default_fwd_lcores_config(void)
369 {
370 	unsigned int i;
371 	unsigned int nb_lc;
372 	unsigned int sock_num;
373 
374 	nb_lc = 0;
375 	for (i = 0; i < RTE_MAX_LCORE; i++) {
376 		sock_num = rte_lcore_to_socket_id(i) + 1; /* socket count, not 0-based id */
377 		if (sock_num > max_socket) {
378 			if (sock_num > RTE_MAX_NUMA_NODES)
379 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
380 			max_socket = sock_num;
381 		}
382 		if (!rte_lcore_is_enabled(i))
383 			continue;
384 		if (i == rte_get_master_lcore())
385 			continue;
386 		fwd_lcores_cpuids[nb_lc++] = i;
387 	}
388 	nb_lcores = (lcoreid_t) nb_lc;
389 	nb_cfg_lcores = nb_lcores;
390 	nb_fwd_lcores = 1;
391 }
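
/*
 * Illustrative example, not part of the original file: with EAL lcores 0-3
 * enabled and lcore 0 as the master, the loop above skips lcore 0 and
 * yields fwd_lcores_cpuids = {1, 2, 3}, nb_lcores = nb_cfg_lcores = 3 and
 * nb_fwd_lcores = 1.
 */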
392 
393 static void
394 set_def_peer_eth_addrs(void)
395 {
396 	portid_t i;
397 
398 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
399 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
400 		peer_eth_addrs[i].addr_bytes[5] = i;
401 	}
402 }
403 
404 static void
405 set_default_fwd_ports_config(void)
406 {
407 	portid_t pt_id;
408 
409 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
410 		fwd_ports_ids[pt_id] = pt_id;
411 
412 	nb_cfg_ports = nb_ports;
413 	nb_fwd_ports = nb_ports;
414 }
415 
416 void
417 set_def_fwd_config(void)
418 {
419 	set_default_fwd_lcores_config();
420 	set_def_peer_eth_addrs();
421 	set_default_fwd_ports_config();
422 }
423 
424 /*
425  * Configuration initialisation done once at init time.
426  */
427 static void
428 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
429 		 unsigned int socket_id)
430 {
431 	char pool_name[RTE_MEMPOOL_NAMESIZE];
432 	struct rte_mempool *rte_mp = NULL;
433 	uint32_t mb_size;
434 
435 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
436 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
437 
438 	RTE_LOG(INFO, USER1,
439 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
440 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
441 
442 #ifdef RTE_LIBRTE_PMD_XENVIRT
443 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
444 		(unsigned) mb_mempool_cache,
445 		sizeof(struct rte_pktmbuf_pool_private),
446 		rte_pktmbuf_pool_init, NULL,
447 		rte_pktmbuf_init, NULL,
448 		socket_id, 0);
449 #endif
450 
451 	/* If the former Xen allocation failed, fall back to normal allocation. */
452 	if (rte_mp == NULL) {
453 		if (mp_anon != 0) {
454 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
455 				mb_size, (unsigned) mb_mempool_cache,
456 				sizeof(struct rte_pktmbuf_pool_private),
457 				socket_id, 0);
458 			if (rte_mp == NULL)
459 				goto err;
460 
461 			if (rte_mempool_populate_anon(rte_mp) == 0) {
462 				rte_mempool_free(rte_mp);
463 				rte_mp = NULL;
464 				goto err;
465 			}
466 			rte_pktmbuf_pool_init(rte_mp, NULL);
467 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
468 		} else {
469 			/* wrapper to rte_mempool_create() */
470 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
471 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
472 		}
473 	}
474 
475 err:
476 	if (rte_mp == NULL) {
477 		rte_exit(EXIT_FAILURE,
478 			"Creation of mbuf pool for socket %u failed: %s\n",
479 			socket_id, rte_strerror(rte_errno));
480 	} else if (verbose_level > 0) {
481 		rte_mempool_dump(stdout, rte_mp);
482 	}
483 }
484 
485 /*
486  * Check whether a given socket id is valid in NUMA mode;
487  * if valid, return 0, else return -1.
488  */
489 static int
490 check_socket_id(const unsigned int socket_id)
491 {
492 	static int warning_once = 0;
493 
494 	if (socket_id >= max_socket) {
495 		if (!warning_once && numa_support)
496 			printf("Warning: NUMA should be configured manually by"
497 			       " using --port-numa-config and"
498 			       " --ring-numa-config parameters along with"
499 			       " --numa.\n");
500 		warning_once = 1;
501 		return -1;
502 	}
503 	return 0;
504 }
505 
506 static void
507 init_config(void)
508 {
509 	portid_t pid;
510 	struct rte_port *port;
511 	struct rte_mempool *mbp;
512 	unsigned int nb_mbuf_per_pool;
513 	lcoreid_t  lc_id;
514 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
515 
516 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
517 	/* Configuration of logical cores. */
518 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
519 				sizeof(struct fwd_lcore *) * nb_lcores,
520 				RTE_CACHE_LINE_SIZE);
521 	if (fwd_lcores == NULL) {
522 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
523 							"failed\n", nb_lcores);
524 	}
525 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
526 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
527 					       sizeof(struct fwd_lcore),
528 					       RTE_CACHE_LINE_SIZE);
529 		if (fwd_lcores[lc_id] == NULL) {
530 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
531 								"failed\n");
532 		}
533 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
534 	}
535 
536 	/*
537 	 * Create mbuf pools.
538 	 * If NUMA support is disabled, create a single pool of mbufs in
539 	 * socket 0 memory by default. Otherwise, create a pool of mbufs in
540 	 * the memory of each detected socket.
541 	 *
542 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
543 	 * nb_txd can be configured at run time.
544 	 */
545 	if (param_total_num_mbufs)
546 		nb_mbuf_per_pool = param_total_num_mbufs;
547 	else {
548 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
549 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
550 
551 		if (!numa_support)
552 			nb_mbuf_per_pool =
553 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
554 	}
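
	/*
	 * Worked example, not part of the original file, using default values
	 * assumed from testpmd.h (RTE_TEST_RX_DESC_MAX and RTE_TEST_TX_DESC_MAX
	 * of 2048, MAX_PKT_BURST of 512, a 250-mbuf mempool cache): with 4
	 * forwarding lcores the estimate above gives
	 * 2048 + 4 * 250 + 2048 + 512 = 5608 mbufs per pool, further
	 * multiplied by RTE_MAX_ETHPORTS when NUMA support is disabled.
	 */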
555 
556 	if (!numa_support) {
557 		if (socket_num == UMA_NO_CONFIG)
558 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
559 		else
560 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
561 						 socket_num);
562 	}
563 
564 	RTE_ETH_FOREACH_DEV(pid) {
565 		port = &ports[pid];
566 		rte_eth_dev_info_get(pid, &port->dev_info);
567 
568 		if (numa_support) {
569 			if (port_numa[pid] != NUMA_NO_CONFIG)
570 				port_per_socket[port_numa[pid]]++;
571 			else {
572 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
573 
574 				/* if socket_id is invalid, set to 0 */
575 				if (check_socket_id(socket_id) < 0)
576 					socket_id = 0;
577 				port_per_socket[socket_id]++;
578 			}
579 		}
580 
581 		/* set flag to initialize port/queue */
582 		port->need_reconfig = 1;
583 		port->need_reconfig_queues = 1;
584 	}
585 
586 	if (numa_support) {
587 		uint8_t i;
588 		unsigned int nb_mbuf;
589 
590 		if (param_total_num_mbufs)
591 			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
592 
593 		for (i = 0; i < max_socket; i++) {
594 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
595 			if (nb_mbuf)
596 				mbuf_pool_create(mbuf_data_size,
597 						nb_mbuf, i);
598 		}
599 	}
600 	init_port_config();
601 
602 	/*
603 	 * Record which mbuf pool each logical core should use, if needed.
604 	 */
605 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
606 		mbp = mbuf_pool_find(
607 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
608 
609 		if (mbp == NULL)
610 			mbp = mbuf_pool_find(0);
611 		fwd_lcores[lc_id]->mbp = mbp;
612 	}
613 
614 	/* Configuration of packet forwarding streams. */
615 	if (init_fwd_streams() < 0)
616 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
617 
618 	fwd_config_setup();
619 }
620 
621 
622 void
623 reconfig(portid_t new_port_id, unsigned socket_id)
624 {
625 	struct rte_port *port;
626 
627 	/* Reconfiguration of Ethernet ports. */
628 	port = &ports[new_port_id];
629 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
630 
631 	/* set flag to initialize port/queue */
632 	port->need_reconfig = 1;
633 	port->need_reconfig_queues = 1;
634 	port->socket_id = socket_id;
635 
636 	init_port_config();
637 }
638 
639 
640 int
641 init_fwd_streams(void)
642 {
643 	portid_t pid;
644 	struct rte_port *port;
645 	streamid_t sm_id, nb_fwd_streams_new;
646 	queueid_t q;
647 
648 	/* Set the socket id of each port according to the NUMA configuration. */
649 	RTE_ETH_FOREACH_DEV(pid) {
650 		port = &ports[pid];
651 		if (nb_rxq > port->dev_info.max_rx_queues) {
652 			printf("Fail: nb_rxq(%d) is greater than "
653 				"max_rx_queues(%d)\n", nb_rxq,
654 				port->dev_info.max_rx_queues);
655 			return -1;
656 		}
657 		if (nb_txq > port->dev_info.max_tx_queues) {
658 			printf("Fail: nb_txq(%d) is greater than "
659 				"max_tx_queues(%d)\n", nb_txq,
660 				port->dev_info.max_tx_queues);
661 			return -1;
662 		}
663 		if (numa_support) {
664 			if (port_numa[pid] != NUMA_NO_CONFIG)
665 				port->socket_id = port_numa[pid];
666 			else {
667 				port->socket_id = rte_eth_dev_socket_id(pid);
668 
669 				/* if socket_id is invalid, set to 0 */
670 				if (check_socket_id(port->socket_id) < 0)
671 					port->socket_id = 0;
672 			}
673 		}
674 		else {
675 			if (socket_num == UMA_NO_CONFIG)
676 				port->socket_id = 0;
677 			else
678 				port->socket_id = socket_num;
679 		}
680 	}
681 
682 	q = RTE_MAX(nb_rxq, nb_txq);
683 	if (q == 0) {
684 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
685 		return -1;
686 	}
687 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
688 	if (nb_fwd_streams_new == nb_fwd_streams)
689 		return 0;
690 	/* clear the old */
691 	if (fwd_streams != NULL) {
692 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
693 			if (fwd_streams[sm_id] == NULL)
694 				continue;
695 			rte_free(fwd_streams[sm_id]);
696 			fwd_streams[sm_id] = NULL;
697 		}
698 		rte_free(fwd_streams);
699 		fwd_streams = NULL;
700 	}
701 
702 	/* init new */
703 	nb_fwd_streams = nb_fwd_streams_new;
704 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
705 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
706 	if (fwd_streams == NULL)
707 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
708 						"failed\n", nb_fwd_streams);
709 
710 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
711 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
712 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
713 		if (fwd_streams[sm_id] == NULL)
714 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
715 								" failed\n");
716 	}
717 
718 	return 0;
719 }
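
/*
 * Illustrative example, not part of the original file: with 2 ports,
 * nb_rxq = 4 and nb_txq = 1, q = RTE_MAX(4, 1) = 4, so
 * nb_fwd_streams = 2 * 4 = 8 stream structures are allocated above.
 */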
720 
721 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
722 static void
723 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
724 {
725 	unsigned int total_burst;
726 	unsigned int nb_burst;
727 	unsigned int burst_stats[3];
728 	uint16_t pktnb_stats[3];
729 	uint16_t nb_pkt;
730 	int burst_percent[3];
731 
732 	/*
733 	 * First compute the total number of packet bursts and the
734 	 * two most frequent burst sizes (in packets per burst).
735 	 */
736 	total_burst = 0;
737 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
738 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
739 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
740 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
741 		if (nb_burst == 0)
742 			continue;
743 		total_burst += nb_burst;
744 		if (nb_burst > burst_stats[0]) {
745 			burst_stats[1] = burst_stats[0];
746 			pktnb_stats[1] = pktnb_stats[0];
747 			burst_stats[0] = nb_burst;
748 			pktnb_stats[0] = nb_pkt;
749 		}
750 	}
751 	if (total_burst == 0)
752 		return;
753 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
754 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
755 	       burst_percent[0], (int) pktnb_stats[0]);
756 	if (burst_stats[0] == total_burst) {
757 		printf("]\n");
758 		return;
759 	}
760 	if (burst_stats[0] + burst_stats[1] == total_burst) {
761 		printf(" + %d%% of %d pkts]\n",
762 		       100 - burst_percent[0], pktnb_stats[1]);
763 		return;
764 	}
765 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
766 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
767 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
768 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
769 		return;
770 	}
771 	printf(" + %d%% of %d pkts + %d%% of others]\n",
772 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
773 }
774 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
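
/*
 * Illustrative example, not part of the original file: if 1000 bursts were
 * recorded, 900 of them of 32 packets and 100 of them of 1 packet,
 * pkt_burst_stats_display() above would print something like:
 *
 *   RX-bursts : 1000 [90% of 32 pkts + 10% of 1 pkts]
 */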
775 
776 static void
777 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
778 {
779 	struct rte_port *port;
780 	uint8_t i;
781 
782 	static const char *fwd_stats_border = "----------------------";
783 
784 	port = &ports[port_id];
785 	printf("\n  %s Forward statistics for port %-2d %s\n",
786 	       fwd_stats_border, port_id, fwd_stats_border);
787 
788 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
789 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
790 		       "%-"PRIu64"\n",
791 		       stats->ipackets, stats->imissed,
792 		       (uint64_t) (stats->ipackets + stats->imissed));
793 
794 		if (cur_fwd_eng == &csum_fwd_engine)
795 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
796 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
797 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
798 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
799 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
800 		}
801 
802 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
803 		       "%-"PRIu64"\n",
804 		       stats->opackets, port->tx_dropped,
805 		       (uint64_t) (stats->opackets + port->tx_dropped));
806 	}
807 	else {
808 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
809 		       "%14"PRIu64"\n",
810 		       stats->ipackets, stats->imissed,
811 		       (uint64_t) (stats->ipackets + stats->imissed));
812 
813 		if (cur_fwd_eng == &csum_fwd_engine)
814 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
815 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
816 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
817 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
818 			printf("  RX-nombufs:             %14"PRIu64"\n",
819 			       stats->rx_nombuf);
820 		}
821 
822 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
823 		       "%14"PRIu64"\n",
824 		       stats->opackets, port->tx_dropped,
825 		       (uint64_t) (stats->opackets + port->tx_dropped));
826 	}
827 
828 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
829 	if (port->rx_stream)
830 		pkt_burst_stats_display("RX",
831 			&port->rx_stream->rx_burst_stats);
832 	if (port->tx_stream)
833 		pkt_burst_stats_display("TX",
834 			&port->tx_stream->tx_burst_stats);
835 #endif
836 
837 	if (port->rx_queue_stats_mapping_enabled) {
838 		printf("\n");
839 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
840 			printf("  Stats reg %2d RX-packets:%14"PRIu64
841 			       "     RX-errors:%14"PRIu64
842 			       "    RX-bytes:%14"PRIu64"\n",
843 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
844 		}
845 		printf("\n");
846 	}
847 	if (port->tx_queue_stats_mapping_enabled) {
848 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
849 			printf("  Stats reg %2d TX-packets:%14"PRIu64
850 			       "                                 TX-bytes:%14"PRIu64"\n",
851 			       i, stats->q_opackets[i], stats->q_obytes[i]);
852 		}
853 	}
854 
855 	printf("  %s--------------------------------%s\n",
856 	       fwd_stats_border, fwd_stats_border);
857 }
858 
859 static void
860 fwd_stream_stats_display(streamid_t stream_id)
861 {
862 	struct fwd_stream *fs;
863 	static const char *fwd_top_stats_border = "-------";
864 
865 	fs = fwd_streams[stream_id];
866 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
867 	    (fs->fwd_dropped == 0))
868 		return;
869 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
870 	       "TX Port=%2d/Queue=%2d %s\n",
871 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
872 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
873 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
874 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
875 
876 	/* if checksum mode */
877 	if (cur_fwd_eng == &csum_fwd_engine) {
878 	       printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
879 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
880 	}
881 
882 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
883 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
884 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
885 #endif
886 }
887 
888 static void
889 flush_fwd_rx_queues(void)
890 {
891 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
892 	portid_t  rxp;
893 	portid_t port_id;
894 	queueid_t rxq;
895 	uint16_t  nb_rx;
896 	uint16_t  i;
897 	uint8_t   j;
898 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
899 	uint64_t timer_period;
900 
901 	/* convert to number of cycles */
902 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
903 
904 	for (j = 0; j < 2; j++) {
905 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
906 			for (rxq = 0; rxq < nb_rxq; rxq++) {
907 				port_id = fwd_ports_ids[rxp];
908 				/*
909 				 * testpmd can get stuck in the do-while loop
910 				 * below if rte_eth_rx_burst() always returns
911 				 * nonzero packets, so a timer is used to exit
912 				 * the loop after a 1-second timeout.
913 				 */
914 				prev_tsc = rte_rdtsc();
915 				do {
916 					nb_rx = rte_eth_rx_burst(port_id, rxq,
917 						pkts_burst, MAX_PKT_BURST);
918 					for (i = 0; i < nb_rx; i++)
919 						rte_pktmbuf_free(pkts_burst[i]);
920 
921 					cur_tsc = rte_rdtsc();
922 					diff_tsc = cur_tsc - prev_tsc;
923 					timer_tsc += diff_tsc;
924 				} while ((nb_rx > 0) &&
925 					(timer_tsc < timer_period));
926 				timer_tsc = 0;
927 			}
928 		}
929 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
930 	}
931 }
932 
933 static void
934 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
935 {
936 	struct fwd_stream **fsm;
937 	streamid_t nb_fs;
938 	streamid_t sm_id;
939 #ifdef RTE_LIBRTE_BITRATE
940 	uint64_t tics_per_1sec;
941 	uint64_t tics_datum;
942 	uint64_t tics_current;
943 	uint8_t idx_port, cnt_ports;
944 
945 	cnt_ports = rte_eth_dev_count();
946 	tics_datum = rte_rdtsc();
947 	tics_per_1sec = rte_get_timer_hz();
948 #endif
949 	fsm = &fwd_streams[fc->stream_idx];
950 	nb_fs = fc->stream_nb;
951 	do {
952 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
953 			(*pkt_fwd)(fsm[sm_id]);
954 #ifdef RTE_LIBRTE_BITRATE
955 		tics_current = rte_rdtsc();
956 		if (tics_current - tics_datum >= tics_per_1sec) {
957 			/* Periodic bitrate calculation */
958 			for (idx_port = 0; idx_port < cnt_ports; idx_port++)
959 				rte_stats_bitrate_calc(bitrate_data, idx_port);
960 			tics_datum = tics_current;
961 		}
962 #endif
963 #ifdef RTE_LIBRTE_LATENCY_STATS
964 		if (latencystats_lcore_id == rte_lcore_id())
965 			rte_latencystats_update();
966 #endif
967 
968 	} while (! fc->stopped);
969 }
970 
971 static int
972 start_pkt_forward_on_core(void *fwd_arg)
973 {
974 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
975 			     cur_fwd_config.fwd_eng->packet_fwd);
976 	return 0;
977 }
978 
979 /*
980  * Run the TXONLY packet forwarding engine to send a single burst of packets.
981  * Used to start communication flows in network loopback test configurations.
982  */
983 static int
984 run_one_txonly_burst_on_core(void *fwd_arg)
985 {
986 	struct fwd_lcore *fwd_lc;
987 	struct fwd_lcore tmp_lcore;
988 
989 	fwd_lc = (struct fwd_lcore *) fwd_arg;
990 	tmp_lcore = *fwd_lc;
991 	tmp_lcore.stopped = 1;
992 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
993 	return 0;
994 }
995 
996 /*
997  * Launch packet forwarding:
998  *     - Setup per-port forwarding context.
999  *     - launch logical cores with their forwarding configuration.
1000  */
1001 static void
1002 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1003 {
1004 	port_fwd_begin_t port_fwd_begin;
1005 	unsigned int i;
1006 	unsigned int lc_id;
1007 	int diag;
1008 
1009 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1010 	if (port_fwd_begin != NULL) {
1011 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1012 			(*port_fwd_begin)(fwd_ports_ids[i]);
1013 	}
1014 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1015 		lc_id = fwd_lcores_cpuids[i];
1016 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1017 			fwd_lcores[i]->stopped = 0;
1018 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1019 						     fwd_lcores[i], lc_id);
1020 			if (diag != 0)
1021 				printf("launch lcore %u failed - diag=%d\n",
1022 				       lc_id, diag);
1023 		}
1024 	}
1025 }
1026 
1027 /*
1028  * Launch packet forwarding configuration.
1029  */
1030 void
1031 start_packet_forwarding(int with_tx_first)
1032 {
1033 	port_fwd_begin_t port_fwd_begin;
1034 	port_fwd_end_t  port_fwd_end;
1035 	struct rte_port *port;
1036 	unsigned int i;
1037 	portid_t   pt_id;
1038 	streamid_t sm_id;
1039 
1040 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1041 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1042 
1043 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1044 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1045 
1046 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1047 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1048 		(!nb_rxq || !nb_txq))
1049 		rte_exit(EXIT_FAILURE,
1050 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1051 			cur_fwd_eng->fwd_mode_name);
1052 
1053 	if (all_ports_started() == 0) {
1054 		printf("Not all ports were started\n");
1055 		return;
1056 	}
1057 	if (test_done == 0) {
1058 		printf("Packet forwarding already started\n");
1059 		return;
1060 	}
1061 
1062 	if (init_fwd_streams() < 0) {
1063 		printf("Fail from init_fwd_streams()\n");
1064 		return;
1065 	}
1066 
1067 	if (dcb_test) {
1068 		for (i = 0; i < nb_fwd_ports; i++) {
1069 			pt_id = fwd_ports_ids[i];
1070 			port = &ports[pt_id];
1071 			if (!port->dcb_flag) {
1072 				printf("In DCB mode, all forwarding ports must "
1073                                        "be configured in this mode.\n");
1074 				return;
1075 			}
1076 		}
1077 		if (nb_fwd_lcores == 1) {
1078 			printf("In DCB mode, the number of forwarding cores "
1079                                "should be larger than 1.\n");
1080 			return;
1081 		}
1082 	}
1083 	test_done = 0;
1084 
1085 	if (!no_flush_rx)
1086 		flush_fwd_rx_queues();
1087 
1088 	fwd_config_setup();
1089 	pkt_fwd_config_display(&cur_fwd_config);
1090 	rxtx_config_display();
1091 
1092 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1093 		pt_id = fwd_ports_ids[i];
1094 		port = &ports[pt_id];
1095 		rte_eth_stats_get(pt_id, &port->stats);
1096 		port->tx_dropped = 0;
1097 
1098 		map_port_queue_stats_mapping_registers(pt_id, port);
1099 	}
1100 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1101 		fwd_streams[sm_id]->rx_packets = 0;
1102 		fwd_streams[sm_id]->tx_packets = 0;
1103 		fwd_streams[sm_id]->fwd_dropped = 0;
1104 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1105 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1106 
1107 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1108 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1109 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1110 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1111 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1112 #endif
1113 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1114 		fwd_streams[sm_id]->core_cycles = 0;
1115 #endif
1116 	}
1117 	if (with_tx_first) {
1118 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1119 		if (port_fwd_begin != NULL) {
1120 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1121 				(*port_fwd_begin)(fwd_ports_ids[i]);
1122 		}
1123 		while (with_tx_first--) {
1124 			launch_packet_forwarding(
1125 					run_one_txonly_burst_on_core);
1126 			rte_eal_mp_wait_lcore();
1127 		}
1128 		port_fwd_end = tx_only_engine.port_fwd_end;
1129 		if (port_fwd_end != NULL) {
1130 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1131 				(*port_fwd_end)(fwd_ports_ids[i]);
1132 		}
1133 	}
1134 	launch_packet_forwarding(start_pkt_forward_on_core);
1135 }
1136 
1137 void
1138 stop_packet_forwarding(void)
1139 {
1140 	struct rte_eth_stats stats;
1141 	struct rte_port *port;
1142 	port_fwd_end_t  port_fwd_end;
1143 	int i;
1144 	portid_t   pt_id;
1145 	streamid_t sm_id;
1146 	lcoreid_t  lc_id;
1147 	uint64_t total_recv;
1148 	uint64_t total_xmit;
1149 	uint64_t total_rx_dropped;
1150 	uint64_t total_tx_dropped;
1151 	uint64_t total_rx_nombuf;
1152 	uint64_t tx_dropped;
1153 	uint64_t rx_bad_ip_csum;
1154 	uint64_t rx_bad_l4_csum;
1155 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1156 	uint64_t fwd_cycles;
1157 #endif
1158 	static const char *acc_stats_border = "+++++++++++++++";
1159 
1160 	if (test_done) {
1161 		printf("Packet forwarding not started\n");
1162 		return;
1163 	}
1164 	printf("Telling cores to stop...");
1165 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1166 		fwd_lcores[lc_id]->stopped = 1;
1167 	printf("\nWaiting for lcores to finish...\n");
1168 	rte_eal_mp_wait_lcore();
1169 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1170 	if (port_fwd_end != NULL) {
1171 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1172 			pt_id = fwd_ports_ids[i];
1173 			(*port_fwd_end)(pt_id);
1174 		}
1175 	}
1176 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1177 	fwd_cycles = 0;
1178 #endif
1179 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1180 		if (cur_fwd_config.nb_fwd_streams >
1181 		    cur_fwd_config.nb_fwd_ports) {
1182 			fwd_stream_stats_display(sm_id);
1183 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1184 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1185 		} else {
1186 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1187 				fwd_streams[sm_id];
1188 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1189 				fwd_streams[sm_id];
1190 		}
1191 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1192 		tx_dropped = (uint64_t) (tx_dropped +
1193 					 fwd_streams[sm_id]->fwd_dropped);
1194 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1195 
1196 		rx_bad_ip_csum =
1197 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1198 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1199 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1200 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1201 							rx_bad_ip_csum;
1202 
1203 		rx_bad_l4_csum =
1204 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1205 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1206 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1207 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1208 							rx_bad_l4_csum;
1209 
1210 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1211 		fwd_cycles = (uint64_t) (fwd_cycles +
1212 					 fwd_streams[sm_id]->core_cycles);
1213 #endif
1214 	}
1215 	total_recv = 0;
1216 	total_xmit = 0;
1217 	total_rx_dropped = 0;
1218 	total_tx_dropped = 0;
1219 	total_rx_nombuf  = 0;
1220 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1221 		pt_id = fwd_ports_ids[i];
1222 
1223 		port = &ports[pt_id];
1224 		rte_eth_stats_get(pt_id, &stats);
1225 		stats.ipackets -= port->stats.ipackets;
1226 		port->stats.ipackets = 0;
1227 		stats.opackets -= port->stats.opackets;
1228 		port->stats.opackets = 0;
1229 		stats.ibytes   -= port->stats.ibytes;
1230 		port->stats.ibytes = 0;
1231 		stats.obytes   -= port->stats.obytes;
1232 		port->stats.obytes = 0;
1233 		stats.imissed  -= port->stats.imissed;
1234 		port->stats.imissed = 0;
1235 		stats.oerrors  -= port->stats.oerrors;
1236 		port->stats.oerrors = 0;
1237 		stats.rx_nombuf -= port->stats.rx_nombuf;
1238 		port->stats.rx_nombuf = 0;
1239 
1240 		total_recv += stats.ipackets;
1241 		total_xmit += stats.opackets;
1242 		total_rx_dropped += stats.imissed;
1243 		total_tx_dropped += port->tx_dropped;
1244 		total_rx_nombuf  += stats.rx_nombuf;
1245 
1246 		fwd_port_stats_display(pt_id, &stats);
1247 	}
1248 	printf("\n  %s Accumulated forward statistics for all ports"
1249 	       "%s\n",
1250 	       acc_stats_border, acc_stats_border);
1251 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1252 	       "%-"PRIu64"\n"
1253 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1254 	       "%-"PRIu64"\n",
1255 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1256 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1257 	if (total_rx_nombuf > 0)
1258 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1259 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1260 	       "%s\n",
1261 	       acc_stats_border, acc_stats_border);
1262 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1263 	if (total_recv > 0)
1264 		printf("\n  CPU cycles/packet=%u (total cycles="
1265 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1266 		       (unsigned int)(fwd_cycles / total_recv),
1267 		       fwd_cycles, total_recv);
1268 #endif
1269 	printf("\nDone.\n");
1270 	test_done = 1;
1271 }
1272 
1273 void
1274 dev_set_link_up(portid_t pid)
1275 {
1276 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1277 		printf("\nSet link up fail.\n");
1278 }
1279 
1280 void
1281 dev_set_link_down(portid_t pid)
1282 {
1283 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1284 		printf("\nSet link down fail.\n");
1285 }
1286 
1287 static int
1288 all_ports_started(void)
1289 {
1290 	portid_t pi;
1291 	struct rte_port *port;
1292 
1293 	RTE_ETH_FOREACH_DEV(pi) {
1294 		port = &ports[pi];
1295 		/* Check if there is a port which is not started */
1296 		if ((port->port_status != RTE_PORT_STARTED) &&
1297 			(port->slave_flag == 0))
1298 			return 0;
1299 	}
1300 
1301 	/* All ports are started. */
1302 	return 1;
1303 }
1304 
1305 int
1306 all_ports_stopped(void)
1307 {
1308 	portid_t pi;
1309 	struct rte_port *port;
1310 
1311 	RTE_ETH_FOREACH_DEV(pi) {
1312 		port = &ports[pi];
1313 		if ((port->port_status != RTE_PORT_STOPPED) &&
1314 			(port->slave_flag == 0))
1315 			return 0;
1316 	}
1317 
1318 	return 1;
1319 }
1320 
1321 int
1322 port_is_started(portid_t port_id)
1323 {
1324 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1325 		return 0;
1326 
1327 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1328 		return 0;
1329 
1330 	return 1;
1331 }
1332 
1333 static int
1334 port_is_closed(portid_t port_id)
1335 {
1336 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1337 		return 0;
1338 
1339 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1340 		return 0;
1341 
1342 	return 1;
1343 }
1344 
1345 int
1346 start_port(portid_t pid)
1347 {
1348 	int diag, need_check_link_status = -1;
1349 	portid_t pi;
1350 	queueid_t qi;
1351 	struct rte_port *port;
1352 	struct ether_addr mac_addr;
1353 	enum rte_eth_event_type event_type;
1354 
1355 	if (port_id_is_invalid(pid, ENABLED_WARN))
1356 		return 0;
1357 
1358 	if (dcb_config)
1359 		dcb_test = 1;
1360 	RTE_ETH_FOREACH_DEV(pi) {
1361 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1362 			continue;
1363 
1364 		need_check_link_status = 0;
1365 		port = &ports[pi];
1366 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1367 						 RTE_PORT_HANDLING) == 0) {
1368 			printf("Port %d is not stopped\n", pi);
1369 			continue;
1370 		}
1371 
1372 		if (port->need_reconfig > 0) {
1373 			port->need_reconfig = 0;
1374 
1375 			printf("Configuring Port %d (socket %u)\n", pi,
1376 					port->socket_id);
1377 			/* configure port */
1378 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1379 						&(port->dev_conf));
1380 			if (diag != 0) {
1381 				if (rte_atomic16_cmpset(&(port->port_status),
1382 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1383 					printf("Port %d can not be set back "
1384 							"to stopped\n", pi);
1385 				printf("Fail to configure port %d\n", pi);
1386 				/* try to reconfigure port next time */
1387 				port->need_reconfig = 1;
1388 				return -1;
1389 			}
1390 		}
1391 		if (port->need_reconfig_queues > 0) {
1392 			port->need_reconfig_queues = 0;
1393 			/* setup tx queues */
1394 			for (qi = 0; qi < nb_txq; qi++) {
1395 				if ((numa_support) &&
1396 					(txring_numa[pi] != NUMA_NO_CONFIG))
1397 					diag = rte_eth_tx_queue_setup(pi, qi,
1398 						nb_txd, txring_numa[pi],
1399 						&(port->tx_conf));
1400 				else
1401 					diag = rte_eth_tx_queue_setup(pi, qi,
1402 						nb_txd, port->socket_id,
1403 						&(port->tx_conf));
1404 
1405 				if (diag == 0)
1406 					continue;
1407 
1408 				/* Fail to setup tx queue, return */
1409 				if (rte_atomic16_cmpset(&(port->port_status),
1410 							RTE_PORT_HANDLING,
1411 							RTE_PORT_STOPPED) == 0)
1412 					printf("Port %d can not be set back "
1413 							"to stopped\n", pi);
1414 				printf("Fail to configure port %d tx queues\n", pi);
1415 				/* try to reconfigure queues next time */
1416 				port->need_reconfig_queues = 1;
1417 				return -1;
1418 			}
1419 			/* setup rx queues */
1420 			for (qi = 0; qi < nb_rxq; qi++) {
1421 				if ((numa_support) &&
1422 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1423 					struct rte_mempool *mp =
1424 						mbuf_pool_find(rxring_numa[pi]);
1425 					if (mp == NULL) {
1426 						printf("Failed to setup RX queue: "
1427 							"no mempool allocated "
1428 							"on socket %d\n",
1429 							rxring_numa[pi]);
1430 						return -1;
1431 					}
1432 
1433 					diag = rte_eth_rx_queue_setup(pi, qi,
1434 					     nb_rxd, rxring_numa[pi],
1435 					     &(port->rx_conf), mp);
1436 				} else {
1437 					struct rte_mempool *mp =
1438 						mbuf_pool_find(port->socket_id);
1439 					if (mp == NULL) {
1440 					if (mp == NULL) {
1441 						printf("Failed to setup RX queue: "
1442 							"no mempool allocated "
1443 							"on socket %d\n",
1444 							port->socket_id);
1445 						return -1;
1446 					}
1447 					diag = rte_eth_rx_queue_setup(pi, qi,
1448 					     nb_rxd, port->socket_id,
1449 					     &(port->rx_conf), mp);
1449 				}
1450 				if (diag == 0)
1451 					continue;
1452 
1453 				/* Fail to setup rx queue, return */
1454 				if (rte_atomic16_cmpset(&(port->port_status),
1455 							RTE_PORT_HANDLING,
1456 							RTE_PORT_STOPPED) == 0)
1457 					printf("Port %d can not be set back "
1458 							"to stopped\n", pi);
1459 				printf("Fail to configure port %d rx queues\n", pi);
1460 				/* try to reconfigure queues next time */
1461 				port->need_reconfig_queues = 1;
1462 				return -1;
1463 			}
1464 		}
1465 
1466 		for (event_type = RTE_ETH_EVENT_UNKNOWN;
1467 		     event_type < RTE_ETH_EVENT_MAX;
1468 		     event_type++) {
1469 			diag = rte_eth_dev_callback_register(pi,
1470 							event_type,
1471 							eth_event_callback,
1472 							NULL);
1473 			if (diag) {
1474 				printf("Failed to set up event callback for event %d\n",
1475 					event_type);
1476 				return -1;
1477 			}
1478 		}
1479 
1480 		/* start port */
1481 		if (rte_eth_dev_start(pi) < 0) {
1482 			printf("Fail to start port %d\n", pi);
1483 
1484 			/* Fail to setup rx queue, return */
1485 			if (rte_atomic16_cmpset(&(port->port_status),
1486 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1487 				printf("Port %d can not be set back to "
1488 							"stopped\n", pi);
1489 			continue;
1490 		}
1491 
1492 		if (rte_atomic16_cmpset(&(port->port_status),
1493 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1494 			printf("Port %d can not be set into started\n", pi);
1495 
1496 		rte_eth_macaddr_get(pi, &mac_addr);
1497 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1498 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1499 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1500 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1501 
1502 		/* At least one port started; need to check link status. */
1503 		need_check_link_status = 1;
1504 	}
1505 
1506 	if (need_check_link_status == 1 && !no_link_check)
1507 		check_all_ports_link_status(RTE_PORT_ALL);
1508 	else if (need_check_link_status == 0)
1509 		printf("Please stop the ports first\n");
1510 
1511 	printf("Done\n");
1512 	return 0;
1513 }
1514 
1515 void
1516 stop_port(portid_t pid)
1517 {
1518 	portid_t pi;
1519 	struct rte_port *port;
1520 	int need_check_link_status = 0;
1521 
1522 	if (dcb_test) {
1523 		dcb_test = 0;
1524 		dcb_config = 0;
1525 	}
1526 
1527 	if (port_id_is_invalid(pid, ENABLED_WARN))
1528 		return;
1529 
1530 	printf("Stopping ports...\n");
1531 
1532 	RTE_ETH_FOREACH_DEV(pi) {
1533 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1534 			continue;
1535 
1536 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1537 			printf("Please remove port %d from forwarding configuration.\n", pi);
1538 			continue;
1539 		}
1540 
1541 		if (port_is_bonding_slave(pi)) {
1542 			printf("Please remove port %d from bonded device.\n", pi);
1543 			continue;
1544 		}
1545 
1546 		port = &ports[pi];
1547 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1548 						RTE_PORT_HANDLING) == 0)
1549 			continue;
1550 
1551 		rte_eth_dev_stop(pi);
1552 
1553 		if (rte_atomic16_cmpset(&(port->port_status),
1554 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1555 			printf("Port %d can not be set into stopped\n", pi);
1556 		need_check_link_status = 1;
1557 	}
1558 	if (need_check_link_status && !no_link_check)
1559 		check_all_ports_link_status(RTE_PORT_ALL);
1560 
1561 	printf("Done\n");
1562 }
1563 
1564 void
1565 close_port(portid_t pid)
1566 {
1567 	portid_t pi;
1568 	struct rte_port *port;
1569 
1570 	if (port_id_is_invalid(pid, ENABLED_WARN))
1571 		return;
1572 
1573 	printf("Closing ports...\n");
1574 
1575 	RTE_ETH_FOREACH_DEV(pi) {
1576 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1577 			continue;
1578 
1579 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1580 			printf("Please remove port %d from forwarding configuration.\n", pi);
1581 			continue;
1582 		}
1583 
1584 		if (port_is_bonding_slave(pi)) {
1585 			printf("Please remove port %d from bonded device.\n", pi);
1586 			continue;
1587 		}
1588 
1589 		port = &ports[pi];
1590 		if (rte_atomic16_cmpset(&(port->port_status),
1591 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1592 			printf("Port %d is already closed\n", pi);
1593 			continue;
1594 		}
1595 
1596 		if (rte_atomic16_cmpset(&(port->port_status),
1597 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1598 			printf("Port %d is not stopped\n", pi);
1599 			continue;
1600 		}
1601 
1602 		if (port->flow_list)
1603 			port_flow_flush(pi);
1604 		rte_eth_dev_close(pi);
1605 
1606 		if (rte_atomic16_cmpset(&(port->port_status),
1607 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1608 			printf("Port %d cannot be set to closed\n", pi);
1609 	}
1610 
1611 	printf("Done\n");
1612 }
1613 
1614 void
1615 attach_port(char *identifier)
1616 {
1617 	portid_t pi = 0;
1618 	unsigned int socket_id;
1619 
1620 	printf("Attaching a new port...\n");
1621 
1622 	if (identifier == NULL) {
1623 		printf("Invalid parameters are specified\n");
1624 		return;
1625 	}
1626 
1627 	if (rte_eth_dev_attach(identifier, &pi))
1628 		return;
1629 
1630 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1631 	/* if socket_id is invalid, set to 0 */
1632 	if (check_socket_id(socket_id) < 0)
1633 		socket_id = 0;
1634 	reconfig(pi, socket_id);
1635 	rte_eth_promiscuous_enable(pi);
1636 
1637 	nb_ports = rte_eth_dev_count();
1638 
1639 	ports[pi].port_status = RTE_PORT_STOPPED;
1640 
1641 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1642 	printf("Done\n");
1643 }
1644 
1645 void
1646 detach_port(uint8_t port_id)
1647 {
1648 	char name[RTE_ETH_NAME_MAX_LEN];
1649 
1650 	printf("Detaching a port...\n");
1651 
1652 	if (!port_is_closed(port_id)) {
1653 		printf("Please close port first\n");
1654 		return;
1655 	}
1656 
1657 	if (ports[port_id].flow_list)
1658 		port_flow_flush(port_id);
1659 
1660 	if (rte_eth_dev_detach(port_id, name))
1661 		return;
1662 
1663 	nb_ports = rte_eth_dev_count();
1664 
1665 	printf("Port '%s' is detached. Now total ports is %d\n",
1666 			name, nb_ports);
1667 	printf("Done\n");
1668 	return;
1669 }
1670 
1671 void
1672 pmd_test_exit(void)
1673 {
1674 	portid_t pt_id;
1675 
1676 	if (test_done == 0)
1677 		stop_packet_forwarding();
1678 
1679 	if (ports != NULL) {
1680 		no_link_check = 1;
1681 		RTE_ETH_FOREACH_DEV(pt_id) {
1682 			printf("\nShutting down port %d...\n", pt_id);
1683 			fflush(stdout);
1684 			stop_port(pt_id);
1685 			close_port(pt_id);
1686 		}
1687 	}
1688 	printf("\nBye...\n");
1689 }
1690 
1691 typedef void (*cmd_func_t)(void);
1692 struct pmd_test_command {
1693 	const char *cmd_name;
1694 	cmd_func_t cmd_func;
1695 };
1696 
1697 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1698 
1699 /* Check the link status of all ports for up to 9s, then print the results. */
1700 static void
1701 check_all_ports_link_status(uint32_t port_mask)
1702 {
1703 #define CHECK_INTERVAL 100 /* 100ms */
1704 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1705 	uint8_t portid, count, all_ports_up, print_flag = 0;
1706 	struct rte_eth_link link;
1707 
1708 	printf("Checking link statuses...\n");
1709 	fflush(stdout);
1710 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1711 		all_ports_up = 1;
1712 		RTE_ETH_FOREACH_DEV(portid) {
1713 			if ((port_mask & (1 << portid)) == 0)
1714 				continue;
1715 			memset(&link, 0, sizeof(link));
1716 			rte_eth_link_get_nowait(portid, &link);
1717 			/* print link status if flag set */
1718 			if (print_flag == 1) {
1719 				if (link.link_status)
1720 					printf("Port %d Link Up - speed %u "
1721 						"Mbps - %s\n", (uint8_t)portid,
1722 						(unsigned)link.link_speed,
1723 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1724 					("full-duplex") : ("half-duplex"));
1725 				else
1726 					printf("Port %d Link Down\n",
1727 						(uint8_t)portid);
1728 				continue;
1729 			}
1730 			/* clear all_ports_up flag if any link down */
1731 			if (link.link_status == ETH_LINK_DOWN) {
1732 				all_ports_up = 0;
1733 				break;
1734 			}
1735 		}
1736 		/* after finally printing all link status, get out */
1737 		if (print_flag == 1)
1738 			break;
1739 
1740 		if (all_ports_up == 0) {
1741 			fflush(stdout);
1742 			rte_delay_ms(CHECK_INTERVAL);
1743 		}
1744 
1745 		/* set the print_flag if all ports up or timeout */
1746 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1747 			print_flag = 1;
1748 		}
1749 	}
1750 }
1751 
1752 /* This function is used by the interrupt thread */
1753 static void
1754 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1755 {
1756 	static const char * const event_desc[] = {
1757 		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1758 		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
1759 		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1760 		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1761 		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1762 		[RTE_ETH_EVENT_MACSEC] = "MACsec",
1763 		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
1764 		[RTE_ETH_EVENT_MAX] = NULL,
1765 	};
1766 
1767 	RTE_SET_USED(param);
1768 
1769 	if (type >= RTE_ETH_EVENT_MAX) {
1770 		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1771 			port_id, __func__, type);
1772 		fflush(stderr);
1773 	} else {
1774 		printf("\nPort %" PRIu8 ": %s event\n", port_id,
1775 			event_desc[type]);
1776 		fflush(stdout);
1777 	}
1778 }
1779 
1780 static int
1781 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1782 {
1783 	uint16_t i;
1784 	int diag;
1785 	uint8_t mapping_found = 0;
1786 
1787 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1788 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1789 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1790 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1791 					tx_queue_stats_mappings[i].queue_id,
1792 					tx_queue_stats_mappings[i].stats_counter_id);
1793 			if (diag != 0)
1794 				return diag;
1795 			mapping_found = 1;
1796 		}
1797 	}
1798 	if (mapping_found)
1799 		port->tx_queue_stats_mapping_enabled = 1;
1800 	return 0;
1801 }
1802 
1803 static int
1804 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1805 {
1806 	uint16_t i;
1807 	int diag;
1808 	uint8_t mapping_found = 0;
1809 
1810 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1811 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1812 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1813 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1814 					rx_queue_stats_mappings[i].queue_id,
1815 					rx_queue_stats_mappings[i].stats_counter_id);
1816 			if (diag != 0)
1817 				return diag;
1818 			mapping_found = 1;
1819 		}
1820 	}
1821 	if (mapping_found)
1822 		port->rx_queue_stats_mapping_enabled = 1;
1823 	return 0;
1824 }
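
/*
 * Editorial note: the tx/rx_queue_stats_mappings[] arrays walked by the
 * two helpers above are built from command-line options. Illustrative
 * invocation (hypothetical values):
 *
 *	testpmd ... -- --rx-queue-stats-mapping="(0,0,0),(0,1,1)" \
 *		       --tx-queue-stats-mapping="(0,0,0)"
 *
 * maps RX queues 0 and 1 of port 0 onto stats counters 0 and 1 and TX
 * queue 0 onto counter 0; entries whose queue_id is not below
 * nb_rxq/nb_txq are silently skipped by the checks above.
 */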
1825 
1826 static void
1827 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1828 {
1829 	int diag = 0;
1830 
1831 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1832 	if (diag != 0) {
1833 		if (diag == -ENOTSUP) {
1834 			port->tx_queue_stats_mapping_enabled = 0;
1835 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
1836 		}
1837 		else
1838 			rte_exit(EXIT_FAILURE,
1839 					"set_tx_queue_stats_mapping_registers "
1840 					"failed for port id=%d diag=%d\n",
1841 					pi, diag);
1842 	}
1843 
1844 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1845 	if (diag != 0) {
1846 		if (diag == -ENOTSUP) {
1847 			port->rx_queue_stats_mapping_enabled = 0;
1848 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
1849 		}
1850 		else
1851 			rte_exit(EXIT_FAILURE,
1852 					"set_rx_queue_stats_mapping_registers "
1853 					"failed for port id=%d diag=%d\n",
1854 					pi, diag);
1855 	}
1856 }
1857 
1858 static void
1859 rxtx_port_config(struct rte_port *port)
1860 {
1861 	port->rx_conf = port->dev_info.default_rxconf;
1862 	port->tx_conf = port->dev_info.default_txconf;
1863 
1864 	/* Check if any RX/TX parameters have been passed */
1865 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1866 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1867 
1868 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1869 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1870 
1871 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1872 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1873 
1874 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1875 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1876 
1877 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1878 		port->rx_conf.rx_drop_en = rx_drop_en;
1879 
1880 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1881 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1882 
1883 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1884 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1885 
1886 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1887 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1888 
1889 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1890 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1891 
1892 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1893 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1894 
1895 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1896 		port->tx_conf.txq_flags = txq_flags;
1897 }
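
/*
 * Editorial example: because every field is guarded by an
 * RTE_PMD_PARAM_UNSET check, only values given on the command line
 * override the PMD defaults from dev_info, e.g. (hypothetical values):
 *
 *	testpmd ... -- --rxfreet=32 --txpt=36 --txht=0 --txwt=0
 *
 * changes just those four thresholds and leaves the rest of rx_conf and
 * tx_conf at default_rxconf/default_txconf.
 */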
1898 
1899 void
1900 init_port_config(void)
1901 {
1902 	portid_t pid;
1903 	struct rte_port *port;
1904 
1905 	RTE_ETH_FOREACH_DEV(pid) {
1906 		port = &ports[pid];
1907 		port->dev_conf.rxmode = rx_mode;
1908 		port->dev_conf.fdir_conf = fdir_conf;
1909 		if (nb_rxq > 1) {
1910 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1911 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1912 		} else {
1913 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1914 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1915 		}
1916 
1917 		if (port->dcb_flag == 0) {
1918 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1919 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1920 			else
1921 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1922 		}
1923 
1924 		rxtx_port_config(port);
1925 
1926 		rte_eth_macaddr_get(pid, &port->eth_addr);
1927 
1928 		map_port_queue_stats_mapping_registers(pid, port);
1929 #ifdef RTE_NIC_BYPASS
1930 		rte_eth_dev_bypass_init(pid);
1931 #endif
1932 	}
1933 }
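
/*
 * Editorial summary of the mq_mode choice above for non-DCB ports:
 *
 *	nb_rxq > 1 and rss_hf != 0  ->  ETH_MQ_RX_RSS
 *	otherwise                   ->  ETH_MQ_RX_NONE
 *
 * i.e. RSS is turned on automatically as soon as several RX queues are
 * requested and a non-empty hash-function set is configured.
 */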
1934 
1935 void set_port_slave_flag(portid_t slave_pid)
1936 {
1937 	struct rte_port *port;
1938 
1939 	port = &ports[slave_pid];
1940 	port->slave_flag = 1;
1941 }
1942 
1943 void clear_port_slave_flag(portid_t slave_pid)
1944 {
1945 	struct rte_port *port;
1946 
1947 	port = &ports[slave_pid];
1948 	port->slave_flag = 0;
1949 }
1950 
1951 uint8_t port_is_bonding_slave(portid_t slave_pid)
1952 {
1953 	struct rte_port *port;
1954 
1955 	port = &ports[slave_pid];
1956 	return port->slave_flag;
1957 }
1958 
1959 const uint16_t vlan_tags[] = {
1960 		0,  1,  2,  3,  4,  5,  6,  7,
1961 		8,  9, 10, 11, 12, 13, 14, 15,
1962 		16, 17, 18, 19, 20, 21, 22, 23,
1963 		24, 25, 26, 27, 28, 29, 30, 31
1964 };
1965 
1966 static int
1967 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1968 		 enum dcb_mode_enable dcb_mode,
1969 		 enum rte_eth_nb_tcs num_tcs,
1970 		 uint8_t pfc_en)
1971 {
1972 	uint8_t i;
1973 
1974 	/*
1975 	 * Builds up the correct configuration for DCB+VT based on the vlan_tags
1976 	 * array given above, and the number of traffic classes available for use.
1977 	 */
1978 	if (dcb_mode == DCB_VT_ENABLED) {
1979 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1980 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
1981 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1982 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1983 
1984 		/* VMDQ+DCB RX and TX configurations */
1985 		vmdq_rx_conf->enable_default_pool = 0;
1986 		vmdq_rx_conf->default_pool = 0;
1987 		vmdq_rx_conf->nb_queue_pools =
1988 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1989 		vmdq_tx_conf->nb_queue_pools =
1990 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1991 
1992 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1993 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1994 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1995 			vmdq_rx_conf->pool_map[i].pools =
1996 				1 << (i % vmdq_rx_conf->nb_queue_pools);
1997 		}
1998 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1999 			vmdq_rx_conf->dcb_tc[i] = i;
2000 			vmdq_tx_conf->dcb_tc[i] = i;
2001 		}
2002 
2003 		/* set DCB mode of RX and TX of multiple queues */
2004 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2005 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2006 	} else {
2007 		struct rte_eth_dcb_rx_conf *rx_conf =
2008 				&eth_conf->rx_adv_conf.dcb_rx_conf;
2009 		struct rte_eth_dcb_tx_conf *tx_conf =
2010 				&eth_conf->tx_adv_conf.dcb_tx_conf;
2011 
2012 		rx_conf->nb_tcs = num_tcs;
2013 		tx_conf->nb_tcs = num_tcs;
2014 
2015 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2016 			rx_conf->dcb_tc[i] = i % num_tcs;
2017 			tx_conf->dcb_tc[i] = i % num_tcs;
2018 		}
2019 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2020 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2021 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2022 	}
2023 
2024 	if (pfc_en)
2025 		eth_conf->dcb_capability_en =
2026 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2027 	else
2028 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2029 
2030 	return 0;
2031 }
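
/*
 * Worked example (editorial): with DCB_VT_ENABLED and num_tcs ==
 * ETH_4_TCS, 32 pools are configured and each of the 32 vlan_tags[]
 * entries maps 1:1 onto a pool (1 << (i % 32) == 1 << i), so VLAN 5 is
 * steered to pool 5. With ETH_8_TCS only 16 pools exist, so only the
 * first 16 vlan_tags[] entries are mapped.
 */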
2032 
2033 int
2034 init_port_dcb_config(portid_t pid,
2035 		     enum dcb_mode_enable dcb_mode,
2036 		     enum rte_eth_nb_tcs num_tcs,
2037 		     uint8_t pfc_en)
2038 {
2039 	struct rte_eth_conf port_conf;
2040 	struct rte_port *rte_port;
2041 	int retval;
2042 	uint16_t i;
2043 
2044 	rte_port = &ports[pid];
2045 
2046 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2047 	/* Enter DCB configuration status */
2048 	dcb_config = 1;
2049 
2050 	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2051 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2052 	if (retval < 0)
2053 		return retval;
2054 	port_conf.rxmode.hw_vlan_filter = 1;
2055 
2056 	/**
2057 	 * Write the configuration into the device.
2058 	 * Set the numbers of RX & TX queues to 0, so
2059 	 * the RX & TX queues will not be set up.
2060 	 */
2061 	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
2062 	if (retval < 0)
		return retval;

2063 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2064 
2065 	/* If dev_info.vmdq_pool_base is greater than 0,
2066 	 * the queue IDs of the VMDq pools start after the PF queues.
2067 	 */
2068 	if (dcb_mode == DCB_VT_ENABLED &&
2069 	    rte_port->dev_info.vmdq_pool_base > 0) {
2070 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2071 			" for port %d.\n", pid);
2072 		return -1;
2073 	}
2074 
2075 	/* Assume the ports in testpmd have the same DCB capability
2076 	 * and the same number of rxq and txq in DCB mode.
2077 	 */
2078 	if (dcb_mode == DCB_VT_ENABLED) {
2079 		if (rte_port->dev_info.max_vfs > 0) {
2080 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2081 			nb_txq = rte_port->dev_info.nb_tx_queues;
2082 		} else {
2083 			nb_rxq = rte_port->dev_info.max_rx_queues;
2084 			nb_txq = rte_port->dev_info.max_tx_queues;
2085 		}
2086 	} else {
2087 		/* If VT is disabled, use all PF queues */
2088 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2089 			nb_rxq = rte_port->dev_info.max_rx_queues;
2090 			nb_txq = rte_port->dev_info.max_tx_queues;
2091 		} else {
2092 			nb_rxq = (queueid_t)num_tcs;
2093 			nb_txq = (queueid_t)num_tcs;
2095 		}
2096 	}
2097 	rx_free_thresh = 64; /* force a fixed RX free threshold for DCB setup */
2098 
2099 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2100 
2101 	rxtx_port_config(rte_port);
2102 	/* VLAN filter */
2103 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2104 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2105 		rx_vft_set(pid, vlan_tags[i], 1);
2106 
2107 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2108 	map_port_queue_stats_mapping_registers(pid, rte_port);
2109 
2110 	rte_port->dcb_flag = 1;
2111 
2112 	return 0;
2113 }
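
/*
 * Editorial note: in interactive mode this function is reached through
 * the testpmd command line, e.g. (assuming port 0 is stopped and its
 * PMD supports DCB):
 *
 *	testpmd> port config 0 dcb vt off 4 pfc on
 *	testpmd> port start 0
 *
 * which requests four traffic classes with priority flow control
 * enabled, then applies the configuration built above on port start.
 */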
2114 
2115 static void
2116 init_port(void)
2117 {
2118 	/* Configuration of Ethernet ports. */
2119 	ports = rte_zmalloc("testpmd: ports",
2120 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2121 			    RTE_CACHE_LINE_SIZE);
2122 	if (ports == NULL) {
2123 		rte_exit(EXIT_FAILURE,
2124 				"rte_zmalloc(%d struct rte_port) failed\n",
2125 				RTE_MAX_ETHPORTS);
2126 	}
2127 }
2128 
2129 static void
2130 force_quit(void)
2131 {
2132 	pmd_test_exit();
2133 	prompt_exit();
2134 }
2135 
2136 static void
2137 signal_handler(int signum)
2138 {
2139 	if (signum == SIGINT || signum == SIGTERM) {
2140 		printf("\nSignal %d received, preparing to exit...\n",
2141 				signum);
2142 #ifdef RTE_LIBRTE_PDUMP
2143 		/* uninitialize packet capture framework */
2144 		rte_pdump_uninit();
2145 #endif
2146 #ifdef RTE_LIBRTE_LATENCY_STATS
2147 		rte_latencystats_uninit();
2148 #endif
2149 		force_quit();
2150 		/* exit with the expected status */
2151 		signal(signum, SIG_DFL);
2152 		kill(getpid(), signum);
2153 	}
2154 }
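
/*
 * Editorial note: restoring SIG_DFL and re-raising the signal makes the
 * process terminate *by* the signal rather than via exit(0), so a parent
 * shell observes the conventional status 128 + signum (e.g. 130 for
 * SIGINT).
 */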
2155 
2156 int
2157 main(int argc, char** argv)
2158 {
2159 	int  diag;
2160 	uint8_t port_id;
2161 
2162 	signal(SIGINT, signal_handler);
2163 	signal(SIGTERM, signal_handler);
2164 
2165 	diag = rte_eal_init(argc, argv);
2166 	if (diag < 0)
2167 		rte_panic("Cannot init EAL\n");
2168 
2169 #ifdef RTE_LIBRTE_PDUMP
2170 	/* initialize packet capture framework */
2171 	rte_pdump_init(NULL);
2172 #endif
2173 
2174 	nb_ports = (portid_t) rte_eth_dev_count();
2175 	if (nb_ports == 0)
2176 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2177 
2178 	/* allocate port structures, and init them */
2179 	init_port();
2180 
2181 	set_def_fwd_config();
2182 	if (nb_lcores == 0)
2183 		rte_panic("Empty set of forwarding logical cores - check the "
2184 			  "core mask supplied in the command parameters\n");
2185 
2186 	argc -= diag;
2187 	argv += diag;
2188 	if (argc > 1)
2189 		launch_args_parse(argc, argv);
2190 
2191 	if (!nb_rxq && !nb_txq)
2192 		printf("Warning: Either RX or TX queues should be non-zero\n");
2193 
2194 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2195 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2196 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2197 		       nb_rxq, nb_txq);
2198 
2199 	init_config();
2200 	if (start_port(RTE_PORT_ALL) != 0)
2201 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2202 
2203 	/* set all ports to promiscuous mode by default */
2204 	RTE_ETH_FOREACH_DEV(port_id)
2205 		rte_eth_promiscuous_enable(port_id);
2206 
2207 	/* Init metrics library */
2208 	rte_metrics_init(rte_socket_id());
2209 
2210 #ifdef RTE_LIBRTE_LATENCY_STATS
2211 	if (latencystats_enabled != 0) {
2212 		int ret = rte_latencystats_init(1, NULL);
2213 		if (ret)
2214 			printf("Warning: latencystats init()"
2215 				" returned error %d\n", ret);
2216 		else
2217 			printf("Latencystats running on lcore %d\n", latencystats_lcore_id);
2218 	}
2219 #endif
2220 
2221 	/* Setup bitrate stats */
2222 #ifdef RTE_LIBRTE_BITRATE
2223 	bitrate_data = rte_stats_bitrate_create();
2224 	if (bitrate_data == NULL)
2225 		rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
2226 	rte_stats_bitrate_reg(bitrate_data);
2227 #endif
2228 
2230 #ifdef RTE_LIBRTE_CMDLINE
2231 	if (interactive == 1) {
2232 		if (auto_start) {
2233 			printf("Start automatic packet forwarding\n");
2234 			start_packet_forwarding(0);
2235 		}
2236 		prompt();
2237 	} else
2238 #endif
2239 	{
2240 		char c;
2241 		int rc;
2242 
2243 		printf("No command-line core given, starting packet forwarding\n");
2244 		start_packet_forwarding(0);
2245 		printf("Press enter to exit\n");
2246 		rc = read(0, &c, 1);
2247 		pmd_test_exit();
2248 		if (rc < 0)
2249 			return 1;
2250 	}
2251 
2252 	return 0;
2253 }
2254