xref: /dpdk/app/test-pmd/testpmd.c (revision 62d3216d61945ec6a97a67b5e7292fd108f38087)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 #ifdef RTE_LIBRTE_PDUMP
79 #include <rte_pdump.h>
80 #endif
81 #include <rte_flow.h>
82 #include <rte_metrics.h>
83 #ifdef RTE_LIBRTE_BITRATE
84 #include <rte_bitrate.h>
85 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90 
91 #include "testpmd.h"
92 
93 uint16_t verbose_level = 0; /**< Silent by default. */
94 
/* use master core for command line? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 
99 /*
100  * NUMA support configuration.
101  * When set, the NUMA support attempts to dispatch the allocation of the
102  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
103  * probed ports among the CPU sockets 0 and 1.
104  * Otherwise, all memory is allocated from CPU socket 0.
105  */
106 uint8_t numa_support = 0; /**< No numa support by default */
107 
108 /*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
110  * not configured.
111  */
112 uint8_t socket_num = UMA_NO_CONFIG;
113 
114 /*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
116  */
117 uint8_t mp_anon = 0;
118 
119 /*
120  * Record the Ethernet address of peer target ports to which packets are
121  * forwarded.
122  * Must be instantiated with the ethernet addresses of peer traffic generator
123  * ports.
124  */
125 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
126 portid_t nb_peer_eth_addrs = 0;
127 
128 /*
129  * Probed Target Environment.
130  */
131 struct rte_port *ports;	       /**< For all probed ethernet ports. */
132 portid_t nb_ports;             /**< Number of probed ethernet ports. */
133 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
134 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
135 
136 /*
137  * Test Forwarding Configuration.
138  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
139  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
140  */
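/*
 * For example (illustrative): with four lcores enabled in the EAL core
 * mask, the master lcore is kept for the command line, leaving
 * nb_lcores = 3; the "set nb-cores 2" runtime command then selects
 * nb_fwd_lcores = 2, which respects the inequalities above.
 */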
141 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
142 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
143 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
144 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
145 
146 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
147 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
148 
149 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
150 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
151 
152 /*
153  * Forwarding engines.
154  */
struct fwd_engine *fwd_engines[] = {
156 	&io_fwd_engine,
157 	&mac_fwd_engine,
158 	&mac_swap_engine,
159 	&flow_gen_engine,
160 	&rx_only_engine,
161 	&tx_only_engine,
162 	&csum_fwd_engine,
163 	&icmp_echo_engine,
164 #ifdef RTE_LIBRTE_IEEE1588
165 	&ieee1588_fwd_engine,
166 #endif
167 	NULL,
168 };
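
/*
 * A forwarding engine is selected by name at run time ("set fwd io",
 * "set fwd csum", ...). A minimal sketch of that lookup (the actual
 * implementation lives in config.c) might be:
 *
 *	struct fwd_engine *e;
 *	unsigned int i;
 *
 *	for (i = 0; (e = fwd_engines[i]) != NULL; i++)
 *		if (strcmp(e->fwd_mode_name, name) == 0)
 *			cur_fwd_eng = e;
 */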
169 
170 struct fwd_config cur_fwd_config;
171 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
172 uint32_t retry_enabled;
173 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
174 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
175 
176 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
177 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
178                                       * specified on command-line. */
179 
180 /*
181  * Configuration of packet segments used by the "txonly" processing engine.
182  */
183 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
184 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
185 	TXONLY_DEF_PACKET_LEN,
186 };
187 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
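
/*
 * Example (illustrative): "set txpkts 64,64" would split each TXONLY
 * packet into two 64-byte segments, i.e. tx_pkt_seg_lengths[] = {64, 64},
 * tx_pkt_nb_segs = 2 and a total tx_pkt_length of 128 bytes.
 */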
188 
189 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
190 /**< Split policy for packets to TX. */
191 
192 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
193 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
194 
/* Whether the current configuration is in DCB mode; 0 means it is not */
196 uint8_t dcb_config = 0;
197 
/* Whether DCB is in testing status */
199 uint8_t dcb_test = 0;
200 
201 /*
202  * Configurable number of RX/TX queues.
203  */
204 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
205 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
206 
207 /*
208  * Configurable number of RX/TX ring descriptors.
209  */
210 #define RTE_TEST_RX_DESC_DEFAULT 128
211 #define RTE_TEST_TX_DESC_DEFAULT 512
212 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
213 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
214 
215 #define RTE_PMD_PARAM_UNSET -1
216 /*
217  * Configurable values of RX and TX ring threshold registers.
218  */
219 
220 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
221 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
223 
224 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
225 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
227 
228 /*
229  * Configurable value of RX free threshold.
230  */
231 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
232 
233 /*
234  * Configurable value of RX drop enable.
235  */
236 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
237 
238 /*
239  * Configurable value of TX free threshold.
240  */
241 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
242 
243 /*
244  * Configurable value of TX RS bit threshold.
245  */
246 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
247 
248 /*
249  * Configurable value of TX queue flags.
250  */
251 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
252 
253 /*
254  * Receive Side Scaling (RSS) configuration.
255  */
256 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
257 
258 /*
259  * Port topology configuration
260  */
261 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
262 
263 /*
 * Avoid flushing all the RX streams before forwarding starts.
265  */
266 uint8_t no_flush_rx = 0; /* flush by default */
267 
268 /*
 * Avoid checking the link status when starting/stopping a port.
270  */
271 uint8_t no_link_check = 0; /* check by default */
272 
273 /*
274  * NIC bypass mode configuration options.
275  */
276 #ifdef RTE_NIC_BYPASS
277 
278 /* The NIC bypass watchdog timeout. */
279 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
280 
281 #endif
282 
283 #ifdef RTE_LIBRTE_LATENCY_STATS
284 
285 /*
 * Set when latency stats are enabled on the command line
287  */
288 uint8_t latencystats_enabled;
289 
290 /*
 * Lcore ID to serve latency statistics.
292  */
293 lcoreid_t latencystats_lcore_id = -1;
294 
295 #endif
296 
297 /*
298  * Ethernet device configuration.
299  */
300 struct rte_eth_rxmode rx_mode = {
301 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
302 	.split_hdr_size = 0,
303 	.header_split   = 0, /**< Header Split disabled. */
304 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
305 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
306 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
307 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
308 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
309 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
310 };
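
/*
 * Note that VLAN filtering and stripping are enabled by default above;
 * both can be toggled per port at run time, e.g. with the "vlan set
 * strip on|off <port_id>" testpmd command (shown for illustration; see
 * the testpmd user guide for the exact syntax).
 */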
311 
312 struct rte_fdir_conf fdir_conf = {
313 	.mode = RTE_FDIR_MODE_NONE,
314 	.pballoc = RTE_FDIR_PBALLOC_64K,
315 	.status = RTE_FDIR_REPORT_STATUS,
316 	.mask = {
317 		.vlan_tci_mask = 0x0,
318 		.ipv4_mask     = {
319 			.src_ip = 0xFFFFFFFF,
320 			.dst_ip = 0xFFFFFFFF,
321 		},
322 		.ipv6_mask     = {
323 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
324 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
325 		},
326 		.src_port_mask = 0xFFFF,
327 		.dst_port_mask = 0xFFFF,
328 		.mac_addr_byte_mask = 0xFF,
329 		.tunnel_type_mask = 1,
330 		.tunnel_id_mask = 0xFFFFFFFF,
331 	},
332 	.drop_queue = 127,
333 };
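
/*
 * The all-ones masks above make every field of the incoming packet
 * significant for flow director matching. The drop_queue value of 127
 * follows the convention of NICs such as ixgbe, which reserve the last
 * RX queue for flow director drop traffic.
 */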
334 
335 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
336 
337 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
338 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
339 
340 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
341 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
342 
343 uint16_t nb_tx_queue_stats_mappings = 0;
344 uint16_t nb_rx_queue_stats_mappings = 0;
345 
346 unsigned max_socket = 0;
347 
348 /* Bitrate statistics */
349 struct rte_stats_bitrates *bitrate_data;
350 
351 /* Forward function declarations */
352 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
353 static void check_all_ports_link_status(uint32_t port_mask);
354 
355 /*
356  * Check if all the ports are started.
357  * If yes, return positive value. If not, return zero.
358  */
359 static int all_ports_started(void);
360 
361 /*
362  * Find next enabled port
363  */
364 portid_t
365 find_next_port(portid_t p, struct rte_port *ports, int size)
366 {
367 	if (ports == NULL)
368 		rte_exit(-EINVAL, "failed to find a next port id\n");
369 
370 	while ((p < size) && (ports[p].enabled == 0))
371 		p++;
372 	return p;
373 }
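
/*
 * find_next_port() is normally used through the FOREACH_PORT() iteration
 * macro from testpmd.h, which expands approximately to:
 *
 *	#define FOREACH_PORT(p, ports) \
 *		for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS); \
 *		    p < RTE_MAX_ETHPORTS; \
 *		    p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
 */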
374 
375 /*
376  * Setup default configuration.
377  */
378 static void
379 set_default_fwd_lcores_config(void)
380 {
381 	unsigned int i;
382 	unsigned int nb_lc;
383 	unsigned int sock_num;
384 
385 	nb_lc = 0;
386 	for (i = 0; i < RTE_MAX_LCORE; i++) {
387 		sock_num = rte_lcore_to_socket_id(i) + 1;
388 		if (sock_num > max_socket) {
389 			if (sock_num > RTE_MAX_NUMA_NODES)
390 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
391 			max_socket = sock_num;
392 		}
393 		if (!rte_lcore_is_enabled(i))
394 			continue;
395 		if (i == rte_get_master_lcore())
396 			continue;
397 		fwd_lcores_cpuids[nb_lc++] = i;
398 	}
399 	nb_lcores = (lcoreid_t) nb_lc;
400 	nb_cfg_lcores = nb_lcores;
401 	nb_fwd_lcores = 1;
402 }
403 
404 static void
405 set_def_peer_eth_addrs(void)
406 {
407 	portid_t i;
408 
409 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
410 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
411 		peer_eth_addrs[i].addr_bytes[5] = i;
412 	}
413 }
414 
415 static void
416 set_default_fwd_ports_config(void)
417 {
418 	portid_t pt_id;
419 
420 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
421 		fwd_ports_ids[pt_id] = pt_id;
422 
423 	nb_cfg_ports = nb_ports;
424 	nb_fwd_ports = nb_ports;
425 }
426 
427 void
428 set_def_fwd_config(void)
429 {
430 	set_default_fwd_lcores_config();
431 	set_def_peer_eth_addrs();
432 	set_default_fwd_ports_config();
433 }
434 
435 /*
436  * Configuration initialisation done once at init time.
437  */
438 static void
439 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
440 		 unsigned int socket_id)
441 {
442 	char pool_name[RTE_MEMPOOL_NAMESIZE];
443 	struct rte_mempool *rte_mp = NULL;
444 	uint32_t mb_size;
445 
446 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
447 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
448 
449 	RTE_LOG(INFO, USER1,
450 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
451 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
452 
453 #ifdef RTE_LIBRTE_PMD_XENVIRT
454 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
455 		(unsigned) mb_mempool_cache,
456 		sizeof(struct rte_pktmbuf_pool_private),
457 		rte_pktmbuf_pool_init, NULL,
458 		rte_pktmbuf_init, NULL,
459 		socket_id, 0);
460 #endif
461 
	/* if the former XEN allocation failed, fall back to normal allocation */
463 	if (rte_mp == NULL) {
464 		if (mp_anon != 0) {
465 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
466 				mb_size, (unsigned) mb_mempool_cache,
467 				sizeof(struct rte_pktmbuf_pool_private),
468 				socket_id, 0);
469 			if (rte_mp == NULL)
470 				goto err;
471 
472 			if (rte_mempool_populate_anon(rte_mp) == 0) {
473 				rte_mempool_free(rte_mp);
474 				rte_mp = NULL;
475 				goto err;
476 			}
477 			rte_pktmbuf_pool_init(rte_mp, NULL);
478 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
479 		} else {
480 			/* wrapper to rte_mempool_create() */
481 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
482 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
483 		}
484 	}
485 
486 err:
487 	if (rte_mp == NULL) {
488 		rte_exit(EXIT_FAILURE,
489 			"Creation of mbuf pool for socket %u failed: %s\n",
490 			socket_id, rte_strerror(rte_errno));
491 	} else if (verbose_level > 0) {
492 		rte_mempool_dump(stdout, rte_mp);
493 	}
494 }
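
/*
 * Sizing example (assuming the testpmd.h default mbuf_seg_size of
 * RTE_MBUF_DEFAULT_BUF_SIZE, i.e. a 2048-byte data room plus 128 bytes of
 * headroom): mb_size = sizeof(struct rte_mbuf) + 2176 bytes per mbuf.
 */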
495 
496 /*
 * Check whether the given socket ID is valid in NUMA mode;
 * return 0 if valid, -1 otherwise.
499  */
500 static int
501 check_socket_id(const unsigned int socket_id)
502 {
503 	static int warning_once = 0;
504 
505 	if (socket_id >= max_socket) {
506 		if (!warning_once && numa_support)
507 			printf("Warning: NUMA should be configured manually by"
508 			       " using --port-numa-config and"
509 			       " --ring-numa-config parameters along with"
510 			       " --numa.\n");
511 		warning_once = 1;
512 		return -1;
513 	}
514 	return 0;
515 }
516 
517 static void
518 init_config(void)
519 {
520 	portid_t pid;
521 	struct rte_port *port;
522 	struct rte_mempool *mbp;
523 	unsigned int nb_mbuf_per_pool;
524 	lcoreid_t  lc_id;
525 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
526 
	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
528 	/* Configuration of logical cores. */
529 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
530 				sizeof(struct fwd_lcore *) * nb_lcores,
531 				RTE_CACHE_LINE_SIZE);
532 	if (fwd_lcores == NULL) {
533 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
534 							"failed\n", nb_lcores);
535 	}
536 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
537 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
538 					       sizeof(struct fwd_lcore),
539 					       RTE_CACHE_LINE_SIZE);
540 		if (fwd_lcores[lc_id] == NULL) {
541 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
542 								"failed\n");
543 		}
544 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
545 	}
546 
547 	/*
548 	 * Create pools of mbuf.
549 	 * If NUMA support is disabled, create a single pool of mbuf in
550 	 * socket 0 memory by default.
551 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
552 	 *
553 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
554 	 * nb_txd can be configured at run time.
555 	 */
556 	if (param_total_num_mbufs)
557 		nb_mbuf_per_pool = param_total_num_mbufs;
558 	else {
559 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
560 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
561 
562 		if (!numa_support)
563 			nb_mbuf_per_pool =
564 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
565 	}
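
	/*
	 * Worked example (assuming the testpmd.h defaults of
	 * RTE_TEST_RX_DESC_MAX = 2048, RTE_TEST_TX_DESC_MAX = 2048,
	 * DEF_MBUF_CACHE = 250 and MAX_PKT_BURST = 512): one lcore gives
	 * 2048 + 250 + 2048 + 512 = 4858 mbufs per pool.
	 */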
566 
567 	if (!numa_support) {
568 		if (socket_num == UMA_NO_CONFIG)
569 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
570 		else
571 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
572 						 socket_num);
573 	}
574 
575 	FOREACH_PORT(pid, ports) {
576 		port = &ports[pid];
577 		rte_eth_dev_info_get(pid, &port->dev_info);
578 
579 		if (numa_support) {
580 			if (port_numa[pid] != NUMA_NO_CONFIG)
581 				port_per_socket[port_numa[pid]]++;
582 			else {
583 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
584 
585 				/* if socket_id is invalid, set to 0 */
586 				if (check_socket_id(socket_id) < 0)
587 					socket_id = 0;
588 				port_per_socket[socket_id]++;
589 			}
590 		}
591 
592 		/* set flag to initialize port/queue */
593 		port->need_reconfig = 1;
594 		port->need_reconfig_queues = 1;
595 	}
596 
597 	if (numa_support) {
598 		uint8_t i;
599 		unsigned int nb_mbuf;
600 
601 		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
603 
604 		for (i = 0; i < max_socket; i++) {
605 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
606 			if (nb_mbuf)
607 				mbuf_pool_create(mbuf_data_size,
						nb_mbuf, i);
609 		}
610 	}
611 	init_port_config();
612 
613 	/*
614 	 * Records which Mbuf pool to use by each logical core, if needed.
615 	 */
616 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
617 		mbp = mbuf_pool_find(
618 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
619 
620 		if (mbp == NULL)
621 			mbp = mbuf_pool_find(0);
622 		fwd_lcores[lc_id]->mbp = mbp;
623 	}
624 
625 	/* Configuration of packet forwarding streams. */
626 	if (init_fwd_streams() < 0)
627 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
628 
629 	fwd_config_setup();
630 }
631 
632 
633 void
634 reconfig(portid_t new_port_id, unsigned socket_id)
635 {
636 	struct rte_port *port;
637 
638 	/* Reconfiguration of Ethernet ports. */
639 	port = &ports[new_port_id];
640 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
641 
642 	/* set flag to initialize port/queue */
643 	port->need_reconfig = 1;
644 	port->need_reconfig_queues = 1;
645 	port->socket_id = socket_id;
646 
647 	init_port_config();
648 }
649 
650 
651 int
652 init_fwd_streams(void)
653 {
654 	portid_t pid;
655 	struct rte_port *port;
656 	streamid_t sm_id, nb_fwd_streams_new;
657 	queueid_t q;
658 
659 	/* set socket id according to numa or not */
660 	FOREACH_PORT(pid, ports) {
661 		port = &ports[pid];
662 		if (nb_rxq > port->dev_info.max_rx_queues) {
663 			printf("Fail: nb_rxq(%d) is greater than "
664 				"max_rx_queues(%d)\n", nb_rxq,
665 				port->dev_info.max_rx_queues);
666 			return -1;
667 		}
668 		if (nb_txq > port->dev_info.max_tx_queues) {
669 			printf("Fail: nb_txq(%d) is greater than "
670 				"max_tx_queues(%d)\n", nb_txq,
671 				port->dev_info.max_tx_queues);
672 			return -1;
673 		}
674 		if (numa_support) {
675 			if (port_numa[pid] != NUMA_NO_CONFIG)
676 				port->socket_id = port_numa[pid];
677 			else {
678 				port->socket_id = rte_eth_dev_socket_id(pid);
679 
680 				/* if socket_id is invalid, set to 0 */
681 				if (check_socket_id(port->socket_id) < 0)
682 					port->socket_id = 0;
683 			}
684 		}
685 		else {
686 			if (socket_num == UMA_NO_CONFIG)
687 				port->socket_id = 0;
688 			else
689 				port->socket_id = socket_num;
690 		}
691 	}
692 
693 	q = RTE_MAX(nb_rxq, nb_txq);
694 	if (q == 0) {
695 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
696 		return -1;
697 	}
698 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
699 	if (nb_fwd_streams_new == nb_fwd_streams)
700 		return 0;
701 	/* clear the old */
702 	if (fwd_streams != NULL) {
703 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
704 			if (fwd_streams[sm_id] == NULL)
705 				continue;
706 			rte_free(fwd_streams[sm_id]);
707 			fwd_streams[sm_id] = NULL;
708 		}
709 		rte_free(fwd_streams);
710 		fwd_streams = NULL;
711 	}
712 
713 	/* init new */
714 	nb_fwd_streams = nb_fwd_streams_new;
715 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
716 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
717 	if (fwd_streams == NULL)
718 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
719 						"failed\n", nb_fwd_streams);
720 
721 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
722 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
723 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
724 		if (fwd_streams[sm_id] == NULL)
725 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
726 								" failed\n");
727 	}
728 
729 	return 0;
730 }
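
/*
 * Stream-count example (illustrative): with 2 forwarding ports, nb_rxq = 4
 * and nb_txq = 2, q = RTE_MAX(4, 2) = 4 and nb_fwd_streams = 2 * 4 = 8.
 */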
731 
732 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
733 static void
734 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
735 {
736 	unsigned int total_burst;
737 	unsigned int nb_burst;
738 	unsigned int burst_stats[3];
739 	uint16_t pktnb_stats[3];
740 	uint16_t nb_pkt;
741 	int burst_percent[3];
742 
743 	/*
744 	 * First compute the total number of packet bursts and the
745 	 * two highest numbers of bursts of the same number of packets.
746 	 */
747 	total_burst = 0;
748 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
749 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
750 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
751 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
752 		if (nb_burst == 0)
753 			continue;
754 		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* also record the second-highest burst count */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
761 	}
762 	if (total_burst == 0)
763 		return;
764 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
765 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
766 	       burst_percent[0], (int) pktnb_stats[0]);
767 	if (burst_stats[0] == total_burst) {
768 		printf("]\n");
769 		return;
770 	}
771 	if (burst_stats[0] + burst_stats[1] == total_burst) {
772 		printf(" + %d%% of %d pkts]\n",
773 		       100 - burst_percent[0], pktnb_stats[1]);
774 		return;
775 	}
776 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
777 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
778 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
779 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
780 		return;
781 	}
782 	printf(" + %d%% of %d pkts + %d%% of others]\n",
783 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
784 }
785 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
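
/*
 * pkt_burst_stats_display() prints a line of the form (illustrative):
 *   RX-bursts : 10000 [90% of 32 pkts + 10% of 16 pkts]
 */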
786 
787 static void
788 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
789 {
790 	struct rte_port *port;
791 	uint8_t i;
792 
793 	static const char *fwd_stats_border = "----------------------";
794 
795 	port = &ports[port_id];
796 	printf("\n  %s Forward statistics for port %-2d %s\n",
797 	       fwd_stats_border, port_id, fwd_stats_border);
798 
799 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
800 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
801 		       "%-"PRIu64"\n",
802 		       stats->ipackets, stats->imissed,
803 		       (uint64_t) (stats->ipackets + stats->imissed));
804 
805 		if (cur_fwd_eng == &csum_fwd_engine)
806 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
807 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
808 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
809 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
810 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
811 		}
812 
813 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
814 		       "%-"PRIu64"\n",
815 		       stats->opackets, port->tx_dropped,
816 		       (uint64_t) (stats->opackets + port->tx_dropped));
817 	}
818 	else {
819 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
820 		       "%14"PRIu64"\n",
821 		       stats->ipackets, stats->imissed,
822 		       (uint64_t) (stats->ipackets + stats->imissed));
823 
824 		if (cur_fwd_eng == &csum_fwd_engine)
825 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
826 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
827 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
828 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
829 			printf("  RX-nombufs:             %14"PRIu64"\n",
830 			       stats->rx_nombuf);
831 		}
832 
833 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
834 		       "%14"PRIu64"\n",
835 		       stats->opackets, port->tx_dropped,
836 		       (uint64_t) (stats->opackets + port->tx_dropped));
837 	}
838 
839 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
840 	if (port->rx_stream)
841 		pkt_burst_stats_display("RX",
842 			&port->rx_stream->rx_burst_stats);
843 	if (port->tx_stream)
844 		pkt_burst_stats_display("TX",
845 			&port->tx_stream->tx_burst_stats);
846 #endif
847 
848 	if (port->rx_queue_stats_mapping_enabled) {
849 		printf("\n");
850 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
851 			printf("  Stats reg %2d RX-packets:%14"PRIu64
852 			       "     RX-errors:%14"PRIu64
853 			       "    RX-bytes:%14"PRIu64"\n",
854 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
855 		}
856 		printf("\n");
857 	}
858 	if (port->tx_queue_stats_mapping_enabled) {
859 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
860 			printf("  Stats reg %2d TX-packets:%14"PRIu64
861 			       "                                 TX-bytes:%14"PRIu64"\n",
862 			       i, stats->q_opackets[i], stats->q_obytes[i]);
863 		}
864 	}
865 
866 	printf("  %s--------------------------------%s\n",
867 	       fwd_stats_border, fwd_stats_border);
868 }
869 
870 static void
871 fwd_stream_stats_display(streamid_t stream_id)
872 {
873 	struct fwd_stream *fs;
874 	static const char *fwd_top_stats_border = "-------";
875 
876 	fs = fwd_streams[stream_id];
877 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
878 	    (fs->fwd_dropped == 0))
879 		return;
880 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
881 	       "TX Port=%2d/Queue=%2d %s\n",
882 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
883 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
884 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
885 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
886 
887 	/* if checksum mode */
888 	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
891 	}
892 
893 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
894 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
895 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
896 #endif
897 }
898 
899 static void
900 flush_fwd_rx_queues(void)
901 {
902 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
903 	portid_t  rxp;
904 	portid_t port_id;
905 	queueid_t rxq;
906 	uint16_t  nb_rx;
907 	uint16_t  i;
908 	uint8_t   j;
909 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
910 	uint64_t timer_period;
911 
912 	/* convert to number of cycles */
913 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
914 
915 	for (j = 0; j < 2; j++) {
916 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
917 			for (rxq = 0; rxq < nb_rxq; rxq++) {
918 				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() always returns
				 * a nonzero number of packets, so a timer is
				 * used to exit the loop after the 1-second
				 * timeout expires.
				 */
925 				prev_tsc = rte_rdtsc();
926 				do {
927 					nb_rx = rte_eth_rx_burst(port_id, rxq,
928 						pkts_burst, MAX_PKT_BURST);
929 					for (i = 0; i < nb_rx; i++)
930 						rte_pktmbuf_free(pkts_burst[i]);
931 
932 					cur_tsc = rte_rdtsc();
933 					diff_tsc = cur_tsc - prev_tsc;
934 					timer_tsc += diff_tsc;
935 				} while ((nb_rx > 0) &&
936 					(timer_tsc < timer_period));
937 				timer_tsc = 0;
938 			}
939 		}
		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
941 	}
942 }
943 
944 static void
945 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
946 {
947 	struct fwd_stream **fsm;
948 	streamid_t nb_fs;
949 	streamid_t sm_id;
950 #ifdef RTE_LIBRTE_BITRATE
951 	uint64_t tics_per_1sec;
952 	uint64_t tics_datum;
953 	uint64_t tics_current;
954 	uint8_t idx_port, cnt_ports;
955 
956 	cnt_ports = rte_eth_dev_count();
957 	tics_datum = rte_rdtsc();
958 	tics_per_1sec = rte_get_timer_hz();
959 #endif
960 	fsm = &fwd_streams[fc->stream_idx];
961 	nb_fs = fc->stream_nb;
962 	do {
963 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
964 			(*pkt_fwd)(fsm[sm_id]);
965 #ifdef RTE_LIBRTE_BITRATE
966 		tics_current = rte_rdtsc();
967 		if (tics_current - tics_datum >= tics_per_1sec) {
968 			/* Periodic bitrate calculation */
969 			for (idx_port = 0; idx_port < cnt_ports; idx_port++)
970 				rte_stats_bitrate_calc(bitrate_data, idx_port);
971 			tics_datum = tics_current;
972 		}
973 #endif
974 #ifdef RTE_LIBRTE_LATENCY_STATS
975 		if (latencystats_lcore_id == rte_lcore_id())
976 			rte_latencystats_update();
977 #endif
978 
979 	} while (! fc->stopped);
980 }
981 
982 static int
983 start_pkt_forward_on_core(void *fwd_arg)
984 {
985 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
986 			     cur_fwd_config.fwd_eng->packet_fwd);
987 	return 0;
988 }
989 
990 /*
991  * Run the TXONLY packet forwarding engine to send a single burst of packets.
992  * Used to start communication flows in network loopback test configurations.
993  */
994 static int
995 run_one_txonly_burst_on_core(void *fwd_arg)
996 {
997 	struct fwd_lcore *fwd_lc;
998 	struct fwd_lcore tmp_lcore;
999 
1000 	fwd_lc = (struct fwd_lcore *) fwd_arg;
1001 	tmp_lcore = *fwd_lc;
1002 	tmp_lcore.stopped = 1;
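	/*
	 * With stopped pre-set to 1, the do/while loop in
	 * run_pkt_fwd_on_lcore() executes exactly one iteration,
	 * i.e. a single burst per stream.
	 */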
1003 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1004 	return 0;
1005 }
1006 
1007 /*
1008  * Launch packet forwarding:
1009  *     - Setup per-port forwarding context.
1010  *     - launch logical cores with their forwarding configuration.
1011  */
1012 static void
1013 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1014 {
1015 	port_fwd_begin_t port_fwd_begin;
1016 	unsigned int i;
1017 	unsigned int lc_id;
1018 	int diag;
1019 
1020 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1021 	if (port_fwd_begin != NULL) {
1022 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1023 			(*port_fwd_begin)(fwd_ports_ids[i]);
1024 	}
1025 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1026 		lc_id = fwd_lcores_cpuids[i];
1027 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1028 			fwd_lcores[i]->stopped = 0;
1029 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1030 						     fwd_lcores[i], lc_id);
1031 			if (diag != 0)
1032 				printf("launch lcore %u failed - diag=%d\n",
1033 				       lc_id, diag);
1034 		}
1035 	}
1036 }
1037 
1038 /*
1039  * Launch packet forwarding configuration.
1040  */
1041 void
1042 start_packet_forwarding(int with_tx_first)
1043 {
1044 	port_fwd_begin_t port_fwd_begin;
1045 	port_fwd_end_t  port_fwd_end;
1046 	struct rte_port *port;
1047 	unsigned int i;
1048 	portid_t   pt_id;
1049 	streamid_t sm_id;
1050 
1051 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1052 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1053 
1054 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1055 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1056 
1057 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1058 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1059 		(!nb_rxq || !nb_txq))
1060 		rte_exit(EXIT_FAILURE,
1061 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
1062 			cur_fwd_eng->fwd_mode_name);
1063 
1064 	if (all_ports_started() == 0) {
1065 		printf("Not all ports were started\n");
1066 		return;
1067 	}
1068 	if (test_done == 0) {
1069 		printf("Packet forwarding already started\n");
1070 		return;
1071 	}
1072 
1073 	if (init_fwd_streams() < 0) {
1074 		printf("Fail from init_fwd_streams()\n");
1075 		return;
1076 	}
1077 
	if (dcb_test) {
1079 		for (i = 0; i < nb_fwd_ports; i++) {
1080 			pt_id = fwd_ports_ids[i];
1081 			port = &ports[pt_id];
1082 			if (!port->dcb_flag) {
1083 				printf("In DCB mode, all forwarding ports must "
1084                                        "be configured in this mode.\n");
1085 				return;
1086 			}
1087 		}
1088 		if (nb_fwd_lcores == 1) {
1089 			printf("In DCB mode,the nb forwarding cores "
1090                                "should be larger than 1.\n");
1091 			return;
1092 		}
1093 	}
1094 	test_done = 0;
1095 
	if (!no_flush_rx)
1097 		flush_fwd_rx_queues();
1098 
1099 	fwd_config_setup();
1100 	pkt_fwd_config_display(&cur_fwd_config);
1101 	rxtx_config_display();
1102 
1103 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1104 		pt_id = fwd_ports_ids[i];
1105 		port = &ports[pt_id];
1106 		rte_eth_stats_get(pt_id, &port->stats);
1107 		port->tx_dropped = 0;
1108 
1109 		map_port_queue_stats_mapping_registers(pt_id, port);
1110 	}
1111 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1112 		fwd_streams[sm_id]->rx_packets = 0;
1113 		fwd_streams[sm_id]->tx_packets = 0;
1114 		fwd_streams[sm_id]->fwd_dropped = 0;
1115 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1116 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1117 
1118 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1119 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1120 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1121 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1122 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1123 #endif
1124 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1125 		fwd_streams[sm_id]->core_cycles = 0;
1126 #endif
1127 	}
1128 	if (with_tx_first) {
1129 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1130 		if (port_fwd_begin != NULL) {
1131 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1132 				(*port_fwd_begin)(fwd_ports_ids[i]);
1133 		}
1134 		while (with_tx_first--) {
1135 			launch_packet_forwarding(
1136 					run_one_txonly_burst_on_core);
1137 			rte_eal_mp_wait_lcore();
1138 		}
1139 		port_fwd_end = tx_only_engine.port_fwd_end;
1140 		if (port_fwd_end != NULL) {
1141 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1142 				(*port_fwd_end)(fwd_ports_ids[i]);
1143 		}
1144 	}
1145 	launch_packet_forwarding(start_pkt_forward_on_core);
1146 }
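
/*
 * Usage example (illustrative): the "start tx_first" command invokes this
 * function with a non-zero with_tx_first value, so each forwarding port
 * first sends that many TXONLY burst rounds to seed traffic in loopback
 * topologies before the configured forwarding engine is launched.
 */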
1147 
1148 void
1149 stop_packet_forwarding(void)
1150 {
1151 	struct rte_eth_stats stats;
1152 	struct rte_port *port;
1153 	port_fwd_end_t  port_fwd_end;
1154 	int i;
1155 	portid_t   pt_id;
1156 	streamid_t sm_id;
1157 	lcoreid_t  lc_id;
1158 	uint64_t total_recv;
1159 	uint64_t total_xmit;
1160 	uint64_t total_rx_dropped;
1161 	uint64_t total_tx_dropped;
1162 	uint64_t total_rx_nombuf;
1163 	uint64_t tx_dropped;
1164 	uint64_t rx_bad_ip_csum;
1165 	uint64_t rx_bad_l4_csum;
1166 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1167 	uint64_t fwd_cycles;
1168 #endif
1169 	static const char *acc_stats_border = "+++++++++++++++";
1170 
1171 	if (test_done) {
1172 		printf("Packet forwarding not started\n");
1173 		return;
1174 	}
1175 	printf("Telling cores to stop...");
1176 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1177 		fwd_lcores[lc_id]->stopped = 1;
1178 	printf("\nWaiting for lcores to finish...\n");
1179 	rte_eal_mp_wait_lcore();
1180 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1181 	if (port_fwd_end != NULL) {
1182 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1183 			pt_id = fwd_ports_ids[i];
1184 			(*port_fwd_end)(pt_id);
1185 		}
1186 	}
1187 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1188 	fwd_cycles = 0;
1189 #endif
1190 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1191 		if (cur_fwd_config.nb_fwd_streams >
1192 		    cur_fwd_config.nb_fwd_ports) {
1193 			fwd_stream_stats_display(sm_id);
1194 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1195 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1196 		} else {
1197 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1198 				fwd_streams[sm_id];
1199 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1200 				fwd_streams[sm_id];
1201 		}
1202 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1203 		tx_dropped = (uint64_t) (tx_dropped +
1204 					 fwd_streams[sm_id]->fwd_dropped);
1205 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1206 
1207 		rx_bad_ip_csum =
1208 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1209 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1210 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1211 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1212 							rx_bad_ip_csum;
1213 
1214 		rx_bad_l4_csum =
1215 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1216 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1217 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1218 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1219 							rx_bad_l4_csum;
1220 
1221 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1222 		fwd_cycles = (uint64_t) (fwd_cycles +
1223 					 fwd_streams[sm_id]->core_cycles);
1224 #endif
1225 	}
1226 	total_recv = 0;
1227 	total_xmit = 0;
1228 	total_rx_dropped = 0;
1229 	total_tx_dropped = 0;
1230 	total_rx_nombuf  = 0;
1231 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1232 		pt_id = fwd_ports_ids[i];
1233 
1234 		port = &ports[pt_id];
1235 		rte_eth_stats_get(pt_id, &stats);
1236 		stats.ipackets -= port->stats.ipackets;
1237 		port->stats.ipackets = 0;
1238 		stats.opackets -= port->stats.opackets;
1239 		port->stats.opackets = 0;
1240 		stats.ibytes   -= port->stats.ibytes;
1241 		port->stats.ibytes = 0;
1242 		stats.obytes   -= port->stats.obytes;
1243 		port->stats.obytes = 0;
1244 		stats.imissed  -= port->stats.imissed;
1245 		port->stats.imissed = 0;
1246 		stats.oerrors  -= port->stats.oerrors;
1247 		port->stats.oerrors = 0;
1248 		stats.rx_nombuf -= port->stats.rx_nombuf;
1249 		port->stats.rx_nombuf = 0;
1250 
1251 		total_recv += stats.ipackets;
1252 		total_xmit += stats.opackets;
1253 		total_rx_dropped += stats.imissed;
1254 		total_tx_dropped += port->tx_dropped;
1255 		total_rx_nombuf  += stats.rx_nombuf;
1256 
1257 		fwd_port_stats_display(pt_id, &stats);
1258 	}
1259 	printf("\n  %s Accumulated forward statistics for all ports"
1260 	       "%s\n",
1261 	       acc_stats_border, acc_stats_border);
1262 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1263 	       "%-"PRIu64"\n"
1264 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1265 	       "%-"PRIu64"\n",
1266 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1267 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1268 	if (total_rx_nombuf > 0)
1269 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1270 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1271 	       "%s\n",
1272 	       acc_stats_border, acc_stats_border);
1273 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1274 	if (total_recv > 0)
1275 		printf("\n  CPU cycles/packet=%u (total cycles="
1276 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1277 		       (unsigned int)(fwd_cycles / total_recv),
1278 		       fwd_cycles, total_recv);
1279 #endif
1280 	printf("\nDone.\n");
1281 	test_done = 1;
1282 }
1283 
1284 void
1285 dev_set_link_up(portid_t pid)
1286 {
1287 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1288 		printf("\nSet link up fail.\n");
1289 }
1290 
1291 void
1292 dev_set_link_down(portid_t pid)
1293 {
1294 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1295 		printf("\nSet link down fail.\n");
1296 }
1297 
1298 static int
1299 all_ports_started(void)
1300 {
1301 	portid_t pi;
1302 	struct rte_port *port;
1303 
1304 	FOREACH_PORT(pi, ports) {
1305 		port = &ports[pi];
1306 		/* Check if there is a port which is not started */
1307 		if ((port->port_status != RTE_PORT_STARTED) &&
1308 			(port->slave_flag == 0))
1309 			return 0;
1310 	}
1311 
	/* All (non-slave) ports are started */
1313 	return 1;
1314 }
1315 
1316 int
1317 all_ports_stopped(void)
1318 {
1319 	portid_t pi;
1320 	struct rte_port *port;
1321 
1322 	FOREACH_PORT(pi, ports) {
1323 		port = &ports[pi];
1324 		if ((port->port_status != RTE_PORT_STOPPED) &&
1325 			(port->slave_flag == 0))
1326 			return 0;
1327 	}
1328 
1329 	return 1;
1330 }
1331 
1332 int
1333 port_is_started(portid_t port_id)
1334 {
1335 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1336 		return 0;
1337 
1338 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1339 		return 0;
1340 
1341 	return 1;
1342 }
1343 
1344 static int
1345 port_is_closed(portid_t port_id)
1346 {
1347 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1348 		return 0;
1349 
1350 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1351 		return 0;
1352 
1353 	return 1;
1354 }
1355 
1356 int
1357 start_port(portid_t pid)
1358 {
1359 	int diag, need_check_link_status = -1;
1360 	portid_t pi;
1361 	queueid_t qi;
1362 	struct rte_port *port;
1363 	struct ether_addr mac_addr;
1364 
1365 	if (port_id_is_invalid(pid, ENABLED_WARN))
1366 		return 0;
1367 
	if (dcb_config)
1369 		dcb_test = 1;
1370 	FOREACH_PORT(pi, ports) {
1371 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1372 			continue;
1373 
1374 		need_check_link_status = 0;
1375 		port = &ports[pi];
1376 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1377 						 RTE_PORT_HANDLING) == 0) {
1378 			printf("Port %d is now not stopped\n", pi);
1379 			continue;
1380 		}
1381 
1382 		if (port->need_reconfig > 0) {
1383 			port->need_reconfig = 0;
1384 
1385 			printf("Configuring Port %d (socket %u)\n", pi,
1386 					port->socket_id);
1387 			/* configure port */
1388 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1389 						&(port->dev_conf));
1390 			if (diag != 0) {
1391 				if (rte_atomic16_cmpset(&(port->port_status),
1392 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1393 					printf("Port %d can not be set back "
1394 							"to stopped\n", pi);
1395 				printf("Fail to configure port %d\n", pi);
1396 				/* try to reconfigure port next time */
1397 				port->need_reconfig = 1;
1398 				return -1;
1399 			}
1400 		}
1401 		if (port->need_reconfig_queues > 0) {
1402 			port->need_reconfig_queues = 0;
1403 			/* setup tx queues */
1404 			for (qi = 0; qi < nb_txq; qi++) {
1405 				if ((numa_support) &&
1406 					(txring_numa[pi] != NUMA_NO_CONFIG))
1407 					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
1409 						&(port->tx_conf));
1410 				else
1411 					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
1413 						&(port->tx_conf));
1414 
1415 				if (diag == 0)
1416 					continue;
1417 
1418 				/* Fail to setup tx queue, return */
1419 				if (rte_atomic16_cmpset(&(port->port_status),
1420 							RTE_PORT_HANDLING,
1421 							RTE_PORT_STOPPED) == 0)
1422 					printf("Port %d can not be set back "
1423 							"to stopped\n", pi);
1424 				printf("Fail to configure port %d tx queues\n", pi);
1425 				/* try to reconfigure queues next time */
1426 				port->need_reconfig_queues = 1;
1427 				return -1;
1428 			}
1429 			/* setup rx queues */
1430 			for (qi = 0; qi < nb_rxq; qi++) {
1431 				if ((numa_support) &&
1432 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							" No mempool allocation"
							" on socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd, rxring_numa[pi],
					     &(port->rx_conf), mp);
1446 				} else {
1447 					struct rte_mempool *mp =
1448 						mbuf_pool_find(port->socket_id);
1449 					if (mp == NULL) {
1450 						printf("Failed to setup RX queue:"
1451 							"No mempool allocation"
1452 							" on the socket %d\n",
1453 							port->socket_id);
1454 						return -1;
1455 					}
1456 					diag = rte_eth_rx_queue_setup(pi, qi,
1457 					     nb_rxd,port->socket_id,
1458 					     &(port->rx_conf), mp);
1459 				}
1460 				if (diag == 0)
1461 					continue;
1462 
1463 				/* Fail to setup rx queue, return */
1464 				if (rte_atomic16_cmpset(&(port->port_status),
1465 							RTE_PORT_HANDLING,
1466 							RTE_PORT_STOPPED) == 0)
1467 					printf("Port %d can not be set back "
1468 							"to stopped\n", pi);
1469 				printf("Fail to configure port %d rx queues\n", pi);
1470 				/* try to reconfigure queues next time */
1471 				port->need_reconfig_queues = 1;
1472 				return -1;
1473 			}
1474 		}
1475 		/* start port */
1476 		if (rte_eth_dev_start(pi) < 0) {
1477 			printf("Fail to start port %d\n", pi);
1478 
1479 			/* Fail to setup rx queue, return */
1480 			if (rte_atomic16_cmpset(&(port->port_status),
1481 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1482 				printf("Port %d can not be set back to "
1483 							"stopped\n", pi);
1484 			continue;
1485 		}
1486 
1487 		if (rte_atomic16_cmpset(&(port->port_status),
1488 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1489 			printf("Port %d can not be set into started\n", pi);
1490 
1491 		rte_eth_macaddr_get(pi, &mac_addr);
1492 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1493 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1494 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1495 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1496 
		/* at least one port started, need to check link status */
1498 		need_check_link_status = 1;
1499 	}
1500 
1501 	if (need_check_link_status == 1 && !no_link_check)
1502 		check_all_ports_link_status(RTE_PORT_ALL);
1503 	else if (need_check_link_status == 0)
1504 		printf("Please stop the ports first\n");
1505 
1506 	printf("Done\n");
1507 	return 0;
1508 }
1509 
1510 void
1511 stop_port(portid_t pid)
1512 {
1513 	portid_t pi;
1514 	struct rte_port *port;
1515 	int need_check_link_status = 0;
1516 
1517 	if (dcb_test) {
1518 		dcb_test = 0;
1519 		dcb_config = 0;
1520 	}
1521 
1522 	if (port_id_is_invalid(pid, ENABLED_WARN))
1523 		return;
1524 
1525 	printf("Stopping ports...\n");
1526 
1527 	FOREACH_PORT(pi, ports) {
1528 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1529 			continue;
1530 
1531 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1532 			printf("Please remove port %d from forwarding configuration.\n", pi);
1533 			continue;
1534 		}
1535 
1536 		if (port_is_bonding_slave(pi)) {
1537 			printf("Please remove port %d from bonded device.\n", pi);
1538 			continue;
1539 		}
1540 
1541 		port = &ports[pi];
1542 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1543 						RTE_PORT_HANDLING) == 0)
1544 			continue;
1545 
1546 		rte_eth_dev_stop(pi);
1547 
1548 		if (rte_atomic16_cmpset(&(port->port_status),
1549 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1550 			printf("Port %d can not be set into stopped\n", pi);
1551 		need_check_link_status = 1;
1552 	}
1553 	if (need_check_link_status && !no_link_check)
1554 		check_all_ports_link_status(RTE_PORT_ALL);
1555 
1556 	printf("Done\n");
1557 }
1558 
1559 void
1560 close_port(portid_t pid)
1561 {
1562 	portid_t pi;
1563 	struct rte_port *port;
1564 
1565 	if (port_id_is_invalid(pid, ENABLED_WARN))
1566 		return;
1567 
1568 	printf("Closing ports...\n");
1569 
1570 	FOREACH_PORT(pi, ports) {
1571 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1572 			continue;
1573 
1574 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1575 			printf("Please remove port %d from forwarding configuration.\n", pi);
1576 			continue;
1577 		}
1578 
1579 		if (port_is_bonding_slave(pi)) {
1580 			printf("Please remove port %d from bonded device.\n", pi);
1581 			continue;
1582 		}
1583 
1584 		port = &ports[pi];
1585 		if (rte_atomic16_cmpset(&(port->port_status),
1586 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1587 			printf("Port %d is already closed\n", pi);
1588 			continue;
1589 		}
1590 
1591 		if (rte_atomic16_cmpset(&(port->port_status),
1592 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1593 			printf("Port %d is now not stopped\n", pi);
1594 			continue;
1595 		}
1596 
1597 		if (port->flow_list)
1598 			port_flow_flush(pi);
1599 		rte_eth_dev_close(pi);
1600 
1601 		if (rte_atomic16_cmpset(&(port->port_status),
1602 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1603 			printf("Port %d cannot be set to closed\n", pi);
1604 	}
1605 
1606 	printf("Done\n");
1607 }
1608 
1609 void
1610 attach_port(char *identifier)
1611 {
1612 	portid_t pi = 0;
1613 	unsigned int socket_id;
1614 
1615 	printf("Attaching a new port...\n");
1616 
1617 	if (identifier == NULL) {
1618 		printf("Invalid parameters are specified\n");
1619 		return;
1620 	}
1621 
1622 	if (rte_eth_dev_attach(identifier, &pi))
1623 		return;
1624 
1625 	ports[pi].enabled = 1;
1626 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1627 	/* if socket_id is invalid, set to 0 */
1628 	if (check_socket_id(socket_id) < 0)
1629 		socket_id = 0;
1630 	reconfig(pi, socket_id);
1631 	rte_eth_promiscuous_enable(pi);
1632 
1633 	nb_ports = rte_eth_dev_count();
1634 
1635 	ports[pi].port_status = RTE_PORT_STOPPED;
1636 
1637 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1638 	printf("Done\n");
1639 }
1640 
1641 void
1642 detach_port(uint8_t port_id)
1643 {
1644 	char name[RTE_ETH_NAME_MAX_LEN];
1645 
1646 	printf("Detaching a port...\n");
1647 
1648 	if (!port_is_closed(port_id)) {
1649 		printf("Please close port first\n");
1650 		return;
1651 	}
1652 
1653 	if (ports[port_id].flow_list)
1654 		port_flow_flush(port_id);
1655 
1656 	if (rte_eth_dev_detach(port_id, name))
1657 		return;
1658 
1659 	ports[port_id].enabled = 0;
1660 	nb_ports = rte_eth_dev_count();
1661 
1662 	printf("Port '%s' is detached. Now total ports is %d\n",
1663 			name, nb_ports);
1664 	printf("Done\n");
1665 	return;
1666 }
1667 
1668 void
1669 pmd_test_exit(void)
1670 {
1671 	portid_t pt_id;
1672 
1673 	if (test_done == 0)
1674 		stop_packet_forwarding();
1675 
1676 	if (ports != NULL) {
1677 		no_link_check = 1;
1678 		FOREACH_PORT(pt_id, ports) {
1679 			printf("\nShutting down port %d...\n", pt_id);
1680 			fflush(stdout);
1681 			stop_port(pt_id);
1682 			close_port(pt_id);
1683 		}
1684 	}
1685 	printf("\nBye...\n");
1686 }
1687 
1688 typedef void (*cmd_func_t)(void);
1689 struct pmd_test_command {
1690 	const char *cmd_name;
1691 	cmd_func_t cmd_func;
1692 };
1693 
1694 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1695 
/* Check the link status of all ports in up to 9s, and print the final status */
1697 static void
1698 check_all_ports_link_status(uint32_t port_mask)
1699 {
1700 #define CHECK_INTERVAL 100 /* 100ms */
1701 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1702 	uint8_t portid, count, all_ports_up, print_flag = 0;
1703 	struct rte_eth_link link;
1704 
1705 	printf("Checking link statuses...\n");
1706 	fflush(stdout);
1707 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1708 		all_ports_up = 1;
1709 		FOREACH_PORT(portid, ports) {
1710 			if ((port_mask & (1 << portid)) == 0)
1711 				continue;
1712 			memset(&link, 0, sizeof(link));
1713 			rte_eth_link_get_nowait(portid, &link);
1714 			/* print link status if flag set */
1715 			if (print_flag == 1) {
1716 				if (link.link_status)
1717 					printf("Port %d Link Up - speed %u "
1718 						"Mbps - %s\n", (uint8_t)portid,
1719 						(unsigned)link.link_speed,
1720 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1721 					("full-duplex") : ("half-duplex\n"));
1722 				else
1723 					printf("Port %d Link Down\n",
1724 						(uint8_t)portid);
1725 				continue;
1726 			}
1727 			/* clear all_ports_up flag if any link down */
1728 			if (link.link_status == ETH_LINK_DOWN) {
1729 				all_ports_up = 0;
1730 				break;
1731 			}
1732 		}
1733 		/* after finally printing all link status, get out */
1734 		if (print_flag == 1)
1735 			break;
1736 
1737 		if (all_ports_up == 0) {
1738 			fflush(stdout);
1739 			rte_delay_ms(CHECK_INTERVAL);
1740 		}
1741 
1742 		/* set the print_flag if all ports up or timeout */
1743 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1744 			print_flag = 1;
1745 		}
1746 	}
1747 }
1748 
1749 static int
1750 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1751 {
1752 	uint16_t i;
1753 	int diag;
1754 	uint8_t mapping_found = 0;
1755 
1756 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1757 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1759 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1760 					tx_queue_stats_mappings[i].queue_id,
1761 					tx_queue_stats_mappings[i].stats_counter_id);
1762 			if (diag != 0)
1763 				return diag;
1764 			mapping_found = 1;
1765 		}
1766 	}
1767 	if (mapping_found)
1768 		port->tx_queue_stats_mapping_enabled = 1;
1769 	return 0;
1770 }
1771 
1772 static int
1773 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1774 {
1775 	uint16_t i;
1776 	int diag;
1777 	uint8_t mapping_found = 0;
1778 
1779 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1780 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1782 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1783 					rx_queue_stats_mappings[i].queue_id,
1784 					rx_queue_stats_mappings[i].stats_counter_id);
1785 			if (diag != 0)
1786 				return diag;
1787 			mapping_found = 1;
1788 		}
1789 	}
1790 	if (mapping_found)
1791 		port->rx_queue_stats_mapping_enabled = 1;
1792 	return 0;
1793 }
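
/*
 * The mapping arrays are filled from the --tx-queue-stats-mapping and
 * --rx-queue-stats-mapping command-line options; e.g. (illustrative)
 * "--tx-queue-stats-mapping=(0,0,0),(0,1,1)" maps TX queues 0 and 1 of
 * port 0 to statistics registers 0 and 1.
 */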
1794 
1795 static void
1796 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1797 {
1798 	int diag = 0;
1799 
1800 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1801 	if (diag != 0) {
1802 		if (diag == -ENOTSUP) {
1803 			port->tx_queue_stats_mapping_enabled = 0;
1804 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1805 		}
1806 		else
1807 			rte_exit(EXIT_FAILURE,
1808 					"set_tx_queue_stats_mapping_registers "
1809 					"failed for port id=%d diag=%d\n",
1810 					pi, diag);
1811 	}
1812 
1813 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1814 	if (diag != 0) {
1815 		if (diag == -ENOTSUP) {
1816 			port->rx_queue_stats_mapping_enabled = 0;
1817 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1818 		}
1819 		else
1820 			rte_exit(EXIT_FAILURE,
1821 					"set_rx_queue_stats_mapping_registers "
1822 					"failed for port id=%d diag=%d\n",
1823 					pi, diag);
1824 	}
1825 }
1826 
1827 static void
1828 rxtx_port_config(struct rte_port *port)
1829 {
1830 	port->rx_conf = port->dev_info.default_rxconf;
1831 	port->tx_conf = port->dev_info.default_txconf;
1832 
1833 	/* Check if any RX/TX parameters have been passed */
1834 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1835 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1836 
1837 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1838 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1839 
1840 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1841 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1842 
1843 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1844 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1845 
1846 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1847 		port->rx_conf.rx_drop_en = rx_drop_en;
1848 
1849 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1850 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1851 
1852 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1853 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1854 
1855 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1856 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1857 
1858 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1859 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1860 
1861 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1862 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1863 
1864 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1865 		port->tx_conf.txq_flags = txq_flags;
1866 }
1867 
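/*
 * Default per-port configuration: RSS is enabled whenever more than one
 * RX queue is used (unless DCB was requested for the port), and the queue
 * stats mappings, if any, are programmed into the device.
 */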
1868 void
1869 init_port_config(void)
1870 {
1871 	portid_t pid;
1872 	struct rte_port *port;
1873 
1874 	FOREACH_PORT(pid, ports) {
1875 		port = &ports[pid];
1876 		port->dev_conf.rxmode = rx_mode;
1877 		port->dev_conf.fdir_conf = fdir_conf;
		port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
		port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
			(nb_rxq > 1) ? rss_hf : 0;
1885 
1886 		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1888 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1889 			else
1890 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1891 		}
1892 
1893 		rxtx_port_config(port);
1894 
1895 		rte_eth_macaddr_get(pid, &port->eth_addr);
1896 
1897 		map_port_queue_stats_mapping_registers(pid, port);
1898 #ifdef RTE_NIC_BYPASS
1899 		rte_eth_dev_bypass_init(pid);
1900 #endif
1901 	}
1902 }
1903 
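/*
 * Bonding helpers: ports enslaved to a bonded device are flagged here so
 * that the rest of testpmd can treat them specially (e.g. skip them when
 * starting or stopping all ports).
 */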
void
set_port_slave_flag(portid_t slave_pid)
1905 {
1906 	struct rte_port *port;
1907 
1908 	port = &ports[slave_pid];
1909 	port->slave_flag = 1;
1910 }
1911 
void
clear_port_slave_flag(portid_t slave_pid)
1913 {
1914 	struct rte_port *port;
1915 
1916 	port = &ports[slave_pid];
1917 	port->slave_flag = 0;
1918 }
1919 
uint8_t
port_is_bonding_slave(portid_t slave_pid)
1921 {
1922 	struct rte_port *port;
1923 
1924 	port = &ports[slave_pid];
1925 	return port->slave_flag;
1926 }
1927 
1928 const uint16_t vlan_tags[] = {
1929 		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11, 12, 13, 14, 15,
1931 		16, 17, 18, 19, 20, 21, 22, 23,
1932 		24, 25, 26, 27, 28, 29, 30, 31
1933 };
1934 
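/*
 * Build an rte_eth_conf for DCB: VMDq+DCB (one pool per VLAN tag from the
 * vlan_tags[] array above) when dcb_mode is DCB_VT_ENABLED, otherwise plain
 * DCB with RSS, mapping the user priorities round-robin onto the requested
 * number of traffic classes.
 */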
static int
1936 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1937 		 enum dcb_mode_enable dcb_mode,
1938 		 enum rte_eth_nb_tcs num_tcs,
1939 		 uint8_t pfc_en)
1940 {
1941 	uint8_t i;
1942 
	/*
	 * Build the correct configuration for DCB+VT, based on the vlan_tags
	 * array given above and on the number of traffic classes available
	 * for use.
	 */
1947 	if (dcb_mode == DCB_VT_ENABLED) {
1948 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1949 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
1950 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1951 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1952 
1953 		/* VMDQ+DCB RX and TX configurations */
1954 		vmdq_rx_conf->enable_default_pool = 0;
1955 		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1960 
1961 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1962 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1963 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1964 			vmdq_rx_conf->pool_map[i].pools =
1965 				1 << (i % vmdq_rx_conf->nb_queue_pools);
1966 		}
1967 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1968 			vmdq_rx_conf->dcb_tc[i] = i;
1969 			vmdq_tx_conf->dcb_tc[i] = i;
1970 		}
1971 
1972 		/* set DCB mode of RX and TX of multiple queues */
1973 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1974 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1975 	} else {
1976 		struct rte_eth_dcb_rx_conf *rx_conf =
1977 				&eth_conf->rx_adv_conf.dcb_rx_conf;
1978 		struct rte_eth_dcb_tx_conf *tx_conf =
1979 				&eth_conf->tx_adv_conf.dcb_tx_conf;
1980 
1981 		rx_conf->nb_tcs = num_tcs;
1982 		tx_conf->nb_tcs = num_tcs;
1983 
1984 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1985 			rx_conf->dcb_tc[i] = i % num_tcs;
1986 			tx_conf->dcb_tc[i] = i % num_tcs;
1987 		}
1988 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1989 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1990 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1991 	}
1992 
1993 	if (pfc_en)
1994 		eth_conf->dcb_capability_en =
1995 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1996 	else
1997 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1998 
1999 	return 0;
2000 }
2001 
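/*
 * Reconfigure a port for DCB operation. The caller is expected to have
 * stopped the port; the global nb_rxq/nb_txq values are adjusted to the DCB
 * queue layout, so the forwarding configuration must be regenerated
 * afterwards.
 */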
2002 int
2003 init_port_dcb_config(portid_t pid,
2004 		     enum dcb_mode_enable dcb_mode,
2005 		     enum rte_eth_nb_tcs num_tcs,
2006 		     uint8_t pfc_en)
2007 {
2008 	struct rte_eth_conf port_conf;
2009 	struct rte_port *rte_port;
2010 	int retval;
2011 	uint16_t i;
2012 
2013 	rte_port = &ports[pid];
2014 
2015 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2016 	/* Enter DCB configuration status */
2017 	dcb_config = 1;
2018 
	/* Set configuration of DCB in VT mode and DCB in non-VT mode */
2020 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2021 	if (retval < 0)
2022 		return retval;
2023 	port_conf.rxmode.hw_vlan_filter = 1;
2024 
	/*
	 * Write the configuration into the device.
	 * Set the number of RX and TX queues to 0, so
	 * the RX and TX queues will not be set up here.
	 */
	retval = rte_eth_dev_configure(pid, 0, 0, &port_conf);
	if (retval < 0)
		return retval;
2031 
2032 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
2033 
	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue IDs of the VMDq pools start after the PF queues.
	 */
2037 	if (dcb_mode == DCB_VT_ENABLED &&
2038 	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
2041 		return -1;
2042 	}
2043 
	/* Assume all ports in testpmd have the same DCB capability
	 * and the same number of RX and TX queues in DCB mode.
	 */
2047 	if (dcb_mode == DCB_VT_ENABLED) {
2048 		if (rte_port->dev_info.max_vfs > 0) {
2049 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2050 			nb_txq = rte_port->dev_info.nb_tx_queues;
2051 		} else {
2052 			nb_rxq = rte_port->dev_info.max_rx_queues;
2053 			nb_txq = rte_port->dev_info.max_tx_queues;
2054 		}
2055 	} else {
		/* If VT is disabled, use all PF queues */
2057 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2058 			nb_rxq = rte_port->dev_info.max_rx_queues;
2059 			nb_txq = rte_port->dev_info.max_tx_queues;
2060 		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
2065 	}
2066 	rx_free_thresh = 64;
2067 
2068 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2069 
2070 	rxtx_port_config(rte_port);
2071 	/* VLAN filter */
2072 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2073 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2074 		rx_vft_set(pid, vlan_tags[i], 1);
2075 
2076 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2077 	map_port_queue_stats_mapping_registers(pid, rte_port);
2078 
2079 	rte_port->dcb_flag = 1;
2080 
2081 	return 0;
2082 }
2083 
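/*
 * Allocate the zeroed array of port structures for all possible ports and
 * mark the ports probed by the EAL as enabled.
 */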
2084 static void
2085 init_port(void)
2086 {
2087 	portid_t pid;
2088 
2089 	/* Configuration of Ethernet ports. */
2090 	ports = rte_zmalloc("testpmd: ports",
2091 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2092 			    RTE_CACHE_LINE_SIZE);
2093 	if (ports == NULL) {
2094 		rte_exit(EXIT_FAILURE,
2095 				"rte_zmalloc(%d struct rte_port) failed\n",
2096 				RTE_MAX_ETHPORTS);
2097 	}
2098 
	/* mark all detected ports as enabled */
2100 	for (pid = 0; pid < nb_ports; pid++)
2101 		ports[pid].enabled = 1;
2102 }
2103 
2104 static void
2105 force_quit(void)
2106 {
2107 	pmd_test_exit();
2108 	prompt_exit();
2109 }
2110 
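/*
 * SIGINT/SIGTERM handler: tear down the optional pdump and latency-stats
 * frameworks, stop the ports and the prompt, then re-raise the signal with
 * the default handler so that the process terminates with the expected
 * signal status.
 */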
2111 static void
2112 signal_handler(int signum)
2113 {
2114 	if (signum == SIGINT || signum == SIGTERM) {
2115 		printf("\nSignal %d received, preparing to exit...\n",
2116 				signum);
2117 #ifdef RTE_LIBRTE_PDUMP
2118 		/* uninitialize packet capture framework */
2119 		rte_pdump_uninit();
2120 #endif
2121 #ifdef RTE_LIBRTE_LATENCY_STATS
2122 		rte_latencystats_uninit();
2123 #endif
2124 		force_quit();
2125 		/* exit with the expected status */
2126 		signal(signum, SIG_DFL);
2127 		kill(getpid(), signum);
2128 	}
2129 }
2130 
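/*
 * Startup sequence: initialize the EAL, count the probed ports, parse the
 * application arguments, configure and start all ports in promiscuous mode,
 * then hand control to the interactive prompt or, in non-interactive mode,
 * forward packets until the user presses enter.
 */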
2131 int
2132 main(int argc, char** argv)
2133 {
2134 	int  diag;
2135 	uint8_t port_id;
2136 
2137 	signal(SIGINT, signal_handler);
2138 	signal(SIGTERM, signal_handler);
2139 
2140 	diag = rte_eal_init(argc, argv);
2141 	if (diag < 0)
2142 		rte_panic("Cannot init EAL\n");
2143 
2144 #ifdef RTE_LIBRTE_PDUMP
2145 	/* initialize packet capture framework */
2146 	rte_pdump_init(NULL);
2147 #endif
2148 
2149 	nb_ports = (portid_t) rte_eth_dev_count();
2150 	if (nb_ports == 0)
2151 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2152 
2153 	/* allocate port structures, and init them */
2154 	init_port();
2155 
2156 	set_def_fwd_config();
2157 	if (nb_lcores == 0)
2158 		rte_panic("Empty set of forwarding logical cores - check the "
2159 			  "core mask supplied in the command parameters\n");
2160 
2161 	argc -= diag;
2162 	argv += diag;
2163 	if (argc > 1)
2164 		launch_args_parse(argc, argv);
2165 
2166 	if (!nb_rxq && !nb_txq)
		printf("Warning: both rx and tx queue counts are zero; "
			"at least one should be non-zero\n");
2168 
2169 	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);
2173 
2174 	init_config();
2175 	if (start_port(RTE_PORT_ALL) != 0)
2176 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2177 
2178 	/* set all ports to promiscuous mode by default */
2179 	FOREACH_PORT(port_id, ports)
2180 		rte_eth_promiscuous_enable(port_id);
2181 
2182 	/* Init metrics library */
2183 	rte_metrics_init(rte_socket_id());
2184 
2185 #ifdef RTE_LIBRTE_LATENCY_STATS
2186 	if (latencystats_enabled != 0) {
2187 		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: rte_latencystats_init() "
				"returned error %d\n", ret);
		else
			printf("Latencystats running on lcore %d\n",
				latencystats_lcore_id);
2193 	}
2194 #endif
2195 
	/* Set up bitrate stats */
2197 #ifdef RTE_LIBRTE_BITRATE
2198 	bitrate_data = rte_stats_bitrate_create();
2199 	if (bitrate_data == NULL)
2200 		rte_exit(EXIT_FAILURE, "Could not allocate bitrate data.\n");
2201 	rte_stats_bitrate_reg(bitrate_data);
2202 #endif
2203 
2205 #ifdef RTE_LIBRTE_CMDLINE
2206 	if (interactive == 1) {
2207 		if (auto_start) {
2208 			printf("Start automatic packet forwarding\n");
2209 			start_packet_forwarding(0);
2210 		}
2211 		prompt();
2212 	} else
2213 #endif
2214 	{
2215 		char c;
2216 		int rc;
2217 
		printf("No command-line core given, starting packet forwarding\n");
2219 		start_packet_forwarding(0);
2220 		printf("Press enter to exit\n");
2221 		rc = read(0, &c, 1);
2222 		pmd_test_exit();
2223 		if (rc < 0)
2224 			return 1;
2225 	}
2226 
2227 	return 0;
2228 }
2229