xref: /dpdk/app/test-pmd/testpmd.c (revision b19a0c75a0d4ce1ddfe9c5f2c7d65e8a1f209663)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_eal.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_ring.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 
79 #include "testpmd.h"
80 
81 uint16_t verbose_level = 0; /**< Silent by default. */
82 
83 /* Use the master core for the command line? */
84 uint8_t interactive = 0;
85 uint8_t auto_start = 0;
86 
87 /*
88  * NUMA support configuration.
89  * When set, NUMA support dispatches the allocation of the RX and TX
90  * memory rings and of the DMA memory buffers (mbufs) of the probed
91  * ports among CPU sockets 0 and 1.
92  * Otherwise, all memory is allocated from CPU socket 0.
93  */
94 uint8_t numa_support = 0; /**< No numa support by default */
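
/*
 * Example (option spellings assumed from the testpmd parameter parser):
 *
 *     ./testpmd -c 0xf -n 4 -- --numa --port-numa-config="(0,0),(1,1)"
 *
 * would ask for port 0 resources on socket 0 and port 1 resources on
 * socket 1.
 */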
95 
96 /*
97  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
98  * not configured.
99  */
100 uint8_t socket_num = UMA_NO_CONFIG;
101 
102 /*
103  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
104  */
105 uint8_t mp_anon = 0;
106 
107 /*
108  * Record the Ethernet address of peer target ports to which packets are
109  * forwarded.
110  * Must be instantiated with the Ethernet addresses of peer traffic
111  * ports.
112  */
113 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
114 portid_t nb_peer_eth_addrs = 0;
115 
116 /*
117  * Probed Target Environment.
118  */
119 struct rte_port *ports;	       /**< For all probed ethernet ports. */
120 portid_t nb_ports;             /**< Number of probed ethernet ports. */
121 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
122 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
123 
124 /*
125  * Test Forwarding Configuration.
126  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
127  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
128  */
129 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
130 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
131 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
132 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
133 
134 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
135 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
136 
137 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
138 streamid_t nb_fwd_streams;       /**< Is equal to nb_ports * max(nb_rxq, nb_txq). */
139 
140 /*
141  * Forwarding engines.
142  */
143 struct fwd_engine * fwd_engines[] = {
144 	&io_fwd_engine,
145 	&mac_fwd_engine,
146 	&mac_retry_fwd_engine,
147 	&mac_swap_engine,
148 	&flow_gen_engine,
149 	&rx_only_engine,
150 	&tx_only_engine,
151 	&csum_fwd_engine,
152 	&icmp_echo_engine,
153 #ifdef RTE_LIBRTE_IEEE1588
154 	&ieee1588_fwd_engine,
155 #endif
156 	NULL,
157 };
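
/*
 * A minimal sketch (illustrative, not the actual command handler) of how
 * a forwarding engine can be selected by name from this NULL-terminated
 * list, as the "set fwd <mode>" runtime command does:
 *
 *	struct fwd_engine *fe;
 *	unsigned int i;
 *
 *	for (i = 0; (fe = fwd_engines[i]) != NULL; i++)
 *		if (strcmp(fe->fwd_mode_name, "mac") == 0)
 *			cur_fwd_eng = fe;
 */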
158 
159 struct fwd_config cur_fwd_config;
160 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
161 
162 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
163 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
164                                       * specified on command-line. */
165 
166 /*
167  * Configuration of packet segments used by the "txonly" processing engine.
168  */
169 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
170 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
171 	TXONLY_DEF_PACKET_LEN,
172 };
173 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
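
/*
 * Example: "set txpkts 60,60,8" at the testpmd prompt stores {60, 60, 8}
 * in tx_pkt_seg_lengths and 3 in tx_pkt_nb_segs, so each TXONLY packet
 * is built from three chained mbuf segments totalling 128 bytes.
 */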
174 
175 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
176 /**< Split policy for packets to TX. */
177 
178 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
179 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
180 
181 /* whether the current configuration is in DCB mode; 0 means it is not */
182 uint8_t dcb_config = 0;
183 
184 /* Whether DCB is currently under test */
185 uint8_t dcb_test = 0;
186 
187 /*
188  * Configurable number of RX/TX queues.
189  */
190 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
191 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
192 
193 /*
194  * Configurable number of RX/TX ring descriptors.
195  */
196 #define RTE_TEST_RX_DESC_DEFAULT 128
197 #define RTE_TEST_TX_DESC_DEFAULT 512
198 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
199 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
200 
201 #define RTE_PMD_PARAM_UNSET -1
202 /*
203  * Configurable values of RX and TX ring threshold registers.
204  */
205 
206 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
207 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
208 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
209 
210 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
211 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
212 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
213 
214 /*
215  * Configurable value of RX free threshold.
216  */
217 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
218 
219 /*
220  * Configurable value of RX drop enable.
221  */
222 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
223 
224 /*
225  * Configurable value of TX free threshold.
226  */
227 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
228 
229 /*
230  * Configurable value of TX RS bit threshold.
231  */
232 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
233 
234 /*
235  * Configurable value of TX queue flags.
236  */
237 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
238 
239 /*
240  * Receive Side Scaling (RSS) configuration.
241  */
242 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
243 
244 /*
245  * Port topology configuration
246  */
247 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
248 
249 /*
250  * Avoid flushing all the RX queues before forwarding starts.
251  */
252 uint8_t no_flush_rx = 0; /* flush by default */
253 
254 /*
255  * Avoid checking the link status when starting/stopping a port.
256  */
257 uint8_t no_link_check = 0; /* check by default */
258 
259 /*
260  * NIC bypass mode configuration options.
261  */
262 #ifdef RTE_NIC_BYPASS
263 
264 /* The NIC bypass watchdog timeout. */
265 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
266 
267 #endif
268 
269 /*
270  * Ethernet device configuration.
271  */
272 struct rte_eth_rxmode rx_mode = {
273 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
274 	.split_hdr_size = 0,
275 	.header_split   = 0, /**< Header Split disabled. */
276 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
277 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
278 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
279 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
280 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
281 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
282 };
283 
284 struct rte_fdir_conf fdir_conf = {
285 	.mode = RTE_FDIR_MODE_NONE,
286 	.pballoc = RTE_FDIR_PBALLOC_64K,
287 	.status = RTE_FDIR_REPORT_STATUS,
288 	.mask = {
289 		.vlan_tci_mask = 0x0,
290 		.ipv4_mask     = {
291 			.src_ip = 0xFFFFFFFF,
292 			.dst_ip = 0xFFFFFFFF,
293 		},
294 		.ipv6_mask     = {
295 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
296 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
297 		},
298 		.src_port_mask = 0xFFFF,
299 		.dst_port_mask = 0xFFFF,
300 		.mac_addr_byte_mask = 0xFF,
301 		.tunnel_type_mask = 1,
302 		.tunnel_id_mask = 0xFFFFFFFF,
303 	},
304 	.drop_queue = 127,
305 };
306 
307 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
308 
309 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
310 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
311 
312 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
313 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
314 
315 uint16_t nb_tx_queue_stats_mappings = 0;
316 uint16_t nb_rx_queue_stats_mappings = 0;
317 
318 unsigned max_socket = 0;
319 
320 /* Forward function declarations */
321 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
322 static void check_all_ports_link_status(uint32_t port_mask);
323 
324 /*
325  * Check if all the ports are started.
326  * If yes, return positive value. If not, return zero.
327  */
328 static int all_ports_started(void);
329 
330 /*
331  * Find next enabled port
332  */
333 portid_t
334 find_next_port(portid_t p, struct rte_port *ports, int size)
335 {
336 	if (ports == NULL)
337 		rte_exit(-EINVAL, "failed to find a next port id\n");
338 
339 	while ((p < size) && (ports[p].enabled == 0))
340 		p++;
341 	return p;
342 }
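
/*
 * A sketch of how iteration over enabled ports builds on find_next_port();
 * the FOREACH_PORT macro used throughout this file (defined in testpmd.h)
 * follows this pattern:
 *
 *	for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS);
 *	     p < RTE_MAX_ETHPORTS;
 *	     p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
 *		... use port p ...
 */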
343 
344 /*
345  * Setup default configuration.
346  */
347 static void
348 set_default_fwd_lcores_config(void)
349 {
350 	unsigned int i;
351 	unsigned int nb_lc;
352 	unsigned int sock_num;
353 
354 	nb_lc = 0;
355 	for (i = 0; i < RTE_MAX_LCORE; i++) {
356 		sock_num = rte_lcore_to_socket_id(i) + 1;
357 		if (sock_num > max_socket) {
358 			if (sock_num > RTE_MAX_NUMA_NODES)
359 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
360 			max_socket = sock_num;
361 		}
362 		if (!rte_lcore_is_enabled(i))
363 			continue;
364 		if (i == rte_get_master_lcore())
365 			continue;
366 		fwd_lcores_cpuids[nb_lc++] = i;
367 	}
368 	nb_lcores = (lcoreid_t) nb_lc;
369 	nb_cfg_lcores = nb_lcores;
370 	nb_fwd_lcores = 1;
371 }
372 
373 static void
374 set_def_peer_eth_addrs(void)
375 {
376 	portid_t i;
377 
378 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
379 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
380 		peer_eth_addrs[i].addr_bytes[5] = i;
381 	}
382 }
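
/*
 * With ETHER_LOCAL_ADMIN_ADDR (0x02) in the first address byte, the
 * default peer addresses come out as 02:00:00:00:00:00 for port 0,
 * 02:00:00:00:00:01 for port 1, and so on.
 */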
383 
384 static void
385 set_default_fwd_ports_config(void)
386 {
387 	portid_t pt_id;
388 
389 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
390 		fwd_ports_ids[pt_id] = pt_id;
391 
392 	nb_cfg_ports = nb_ports;
393 	nb_fwd_ports = nb_ports;
394 }
395 
396 void
397 set_def_fwd_config(void)
398 {
399 	set_default_fwd_lcores_config();
400 	set_def_peer_eth_addrs();
401 	set_default_fwd_ports_config();
402 }
403 
404 /*
405  * Creation of an mbuf pool for a given socket, done once at init time.
406  */
407 static void
408 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
409 		 unsigned int socket_id)
410 {
411 	char pool_name[RTE_MEMPOOL_NAMESIZE];
412 	struct rte_mempool *rte_mp = NULL;
413 	uint32_t mb_size;
414 
415 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
416 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
417 
418 #ifdef RTE_LIBRTE_PMD_XENVIRT
419 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
420 		(unsigned) mb_mempool_cache,
421 		sizeof(struct rte_pktmbuf_pool_private),
422 		rte_pktmbuf_pool_init, NULL,
423 		rte_pktmbuf_init, NULL,
424 		socket_id, 0);
425 #endif
426 
427 	/* if the former XEN allocation failed, fall back to normal allocation */
428 	if (rte_mp == NULL) {
429 		if (mp_anon != 0) {
430 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
431 				mb_size, (unsigned) mb_mempool_cache,
432 				sizeof(struct rte_pktmbuf_pool_private),
433 				socket_id, 0);
434 
435 			/* only initialize the pool if it was populated */
436 			if (rte_mp == NULL ||
437 			    rte_mempool_populate_anon(rte_mp) == 0) {
438 				rte_mempool_free(rte_mp);
439 				rte_mp = NULL;
440 			} else {
441 				rte_pktmbuf_pool_init(rte_mp, NULL);
442 				rte_mempool_obj_iter(rte_mp,
443 						     rte_pktmbuf_init, NULL);
444 			}
441 		} else {
442 			/* wrapper to rte_mempool_create() */
443 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
444 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
445 		}
446 	}
447 
448 	if (rte_mp == NULL) {
449 		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
450 						"failed\n", socket_id);
451 	} else if (verbose_level > 0) {
452 		rte_mempool_dump(stdout, rte_mp);
453 	}
454 }
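
/*
 * Example call (values illustrative): create a pool of 16384 mbufs with
 * the default data room size in socket 0 memory:
 *
 *	mbuf_pool_create(DEFAULT_MBUF_DATA_SIZE, 16384, 0);
 */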
455 
456 /*
457  * Check whether a given socket id is valid in NUMA mode;
458  * return 0 if valid, -1 otherwise.
459  */
460 static int
461 check_socket_id(const unsigned int socket_id)
462 {
463 	static int warning_once = 0;
464 
465 	if (socket_id >= max_socket) {
466 		if (!warning_once && numa_support)
467 			printf("Warning: NUMA should be configured manually by"
468 			       " using --port-numa-config and"
469 			       " --ring-numa-config parameters along with"
470 			       " --numa.\n");
471 		warning_once = 1;
472 		return -1;
473 	}
474 	return 0;
475 }
476 
477 static void
478 init_config(void)
479 {
480 	portid_t pid;
481 	struct rte_port *port;
482 	struct rte_mempool *mbp;
483 	unsigned int nb_mbuf_per_pool;
484 	lcoreid_t  lc_id;
485 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
486 
487 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
488 	/* Configuration of logical cores. */
489 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
490 				sizeof(struct fwd_lcore *) * nb_lcores,
491 				RTE_CACHE_LINE_SIZE);
492 	if (fwd_lcores == NULL) {
493 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
494 							"failed\n", nb_lcores);
495 	}
496 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
497 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
498 					       sizeof(struct fwd_lcore),
499 					       RTE_CACHE_LINE_SIZE);
500 		if (fwd_lcores[lc_id] == NULL) {
501 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
502 								"failed\n");
503 		}
504 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
505 	}
506 
507 	/*
508 	 * Create mbuf pools.
509 	 * If NUMA support is disabled, create a single mbuf pool in
510 	 * socket 0 memory by default.
511 	 * Otherwise, create an mbuf pool in the memory of sockets 0 and 1.
512 	 *
513 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
514 	 * nb_txd can be configured at run time.
515 	 */
516 	if (param_total_num_mbufs)
517 		nb_mbuf_per_pool = param_total_num_mbufs;
518 	else {
519 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
520 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
521 
522 		if (!numa_support)
523 			nb_mbuf_per_pool =
524 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
525 	}
526 
527 	if (!numa_support) {
528 		if (socket_num == UMA_NO_CONFIG)
529 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
530 		else
531 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
532 						 socket_num);
533 	}
534 
535 	FOREACH_PORT(pid, ports) {
536 		port = &ports[pid];
537 		rte_eth_dev_info_get(pid, &port->dev_info);
538 
539 		if (numa_support) {
540 			if (port_numa[pid] != NUMA_NO_CONFIG)
541 				port_per_socket[port_numa[pid]]++;
542 			else {
543 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
544 
545 				/* if socket_id is invalid, set to 0 */
546 				if (check_socket_id(socket_id) < 0)
547 					socket_id = 0;
548 				port_per_socket[socket_id]++;
549 			}
550 		}
551 
552 		/* set flag to initialize port/queue */
553 		port->need_reconfig = 1;
554 		port->need_reconfig_queues = 1;
555 	}
556 
557 	if (numa_support) {
558 		uint8_t i;
559 		unsigned int nb_mbuf;
560 
561 		if (param_total_num_mbufs)
562 			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
563 
564 		for (i = 0; i < max_socket; i++) {
565 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
566 			if (nb_mbuf)
567 				mbuf_pool_create(mbuf_data_size,
568 						nb_mbuf, i);
569 		}
570 	}
571 	init_port_config();
572 
573 	/*
574 	 * Record which mbuf pool each logical core uses, if needed.
575 	 */
576 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
577 		mbp = mbuf_pool_find(
578 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
579 
580 		if (mbp == NULL)
581 			mbp = mbuf_pool_find(0);
582 		fwd_lcores[lc_id]->mbp = mbp;
583 	}
584 
585 	/* Configuration of packet forwarding streams. */
586 	if (init_fwd_streams() < 0)
587 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
588 }
589 
590 
591 void
592 reconfig(portid_t new_port_id, unsigned socket_id)
593 {
594 	struct rte_port *port;
595 
596 	/* Reconfiguration of Ethernet ports. */
597 	port = &ports[new_port_id];
598 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
599 
600 	/* set flag to initialize port/queue */
601 	port->need_reconfig = 1;
602 	port->need_reconfig_queues = 1;
603 	port->socket_id = socket_id;
604 
605 	init_port_config();
606 }
607 
608 
609 int
610 init_fwd_streams(void)
611 {
612 	portid_t pid;
613 	struct rte_port *port;
614 	streamid_t sm_id, nb_fwd_streams_new;
615 	queueid_t q;
616 
617 	/* set the socket id according to the NUMA configuration */
618 	FOREACH_PORT(pid, ports) {
619 		port = &ports[pid];
620 		if (nb_rxq > port->dev_info.max_rx_queues) {
621 			printf("Fail: nb_rxq(%d) is greater than "
622 				"max_rx_queues(%d)\n", nb_rxq,
623 				port->dev_info.max_rx_queues);
624 			return -1;
625 		}
626 		if (nb_txq > port->dev_info.max_tx_queues) {
627 			printf("Fail: nb_txq(%d) is greater than "
628 				"max_tx_queues(%d)\n", nb_txq,
629 				port->dev_info.max_tx_queues);
630 			return -1;
631 		}
632 		if (numa_support) {
633 			if (port_numa[pid] != NUMA_NO_CONFIG)
634 				port->socket_id = port_numa[pid];
635 			else {
636 				port->socket_id = rte_eth_dev_socket_id(pid);
637 
638 				/* if socket_id is invalid, set to 0 */
639 				if (check_socket_id(port->socket_id) < 0)
640 					port->socket_id = 0;
641 			}
642 		}
643 		else {
644 			if (socket_num == UMA_NO_CONFIG)
645 				port->socket_id = 0;
646 			else
647 				port->socket_id = socket_num;
648 		}
649 	}
650 
651 	q = RTE_MAX(nb_rxq, nb_txq);
652 	if (q == 0) {
653 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
654 		return -1;
655 	}
656 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
657 	if (nb_fwd_streams_new == nb_fwd_streams)
658 		return 0;
659 	/* clear the old */
660 	if (fwd_streams != NULL) {
661 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
662 			if (fwd_streams[sm_id] == NULL)
663 				continue;
664 			rte_free(fwd_streams[sm_id]);
665 			fwd_streams[sm_id] = NULL;
666 		}
667 		rte_free(fwd_streams);
668 		fwd_streams = NULL;
669 	}
670 
671 	/* init new */
672 	nb_fwd_streams = nb_fwd_streams_new;
673 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
674 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
675 	if (fwd_streams == NULL)
676 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
677 						"failed\n", nb_fwd_streams);
678 
679 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
680 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
681 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
682 		if (fwd_streams[sm_id] == NULL)
683 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
684 								" failed\n");
685 	}
686 
687 	return 0;
688 }
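
/*
 * Example: with nb_ports = 2, nb_rxq = 4 and nb_txq = 2, the code above
 * computes q = RTE_MAX(4, 2) = 4 and allocates 2 * 4 = 8 forwarding
 * streams.
 */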
689 
690 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
691 static void
692 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
693 {
694 	unsigned int total_burst;
695 	unsigned int nb_burst;
696 	unsigned int burst_stats[3];
697 	uint16_t pktnb_stats[3];
698 	uint16_t nb_pkt;
699 	int burst_percent[3];
700 
701 	/*
702 	 * First compute the total number of packet bursts and the
703 	 * two highest numbers of bursts of the same number of packets.
704 	 */
705 	total_burst = 0;
706 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
707 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
708 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
709 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
710 		if (nb_burst == 0)
711 			continue;
712 		total_burst += nb_burst;
713 		if (nb_burst > burst_stats[0]) {
714 			burst_stats[1] = burst_stats[0];
715 			pktnb_stats[1] = pktnb_stats[0];
716 			burst_stats[0] = nb_burst;
717 			pktnb_stats[0] = nb_pkt;
718 		} else if (nb_burst > burst_stats[1]) {
719 			burst_stats[1] = nb_burst;
720 			pktnb_stats[1] = nb_pkt;
721 		}
722 	}
720 	if (total_burst == 0)
721 		return;
722 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
723 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
724 	       burst_percent[0], (int) pktnb_stats[0]);
725 	if (burst_stats[0] == total_burst) {
726 		printf("]\n");
727 		return;
728 	}
729 	if (burst_stats[0] + burst_stats[1] == total_burst) {
730 		printf(" + %d%% of %d pkts]\n",
731 		       100 - burst_percent[0], pktnb_stats[1]);
732 		return;
733 	}
734 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
735 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
736 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
737 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
738 		return;
739 	}
740 	printf(" + %d%% of %d pkts + %d%% of others]\n",
741 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
742 }
743 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
744 
745 static void
746 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
747 {
748 	struct rte_port *port;
749 	uint8_t i;
750 
751 	static const char *fwd_stats_border = "----------------------";
752 
753 	port = &ports[port_id];
754 	printf("\n  %s Forward statistics for port %-2d %s\n",
755 	       fwd_stats_border, port_id, fwd_stats_border);
756 
757 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
758 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
759 		       "%-"PRIu64"\n",
760 		       stats->ipackets, stats->imissed,
761 		       (uint64_t) (stats->ipackets + stats->imissed));
762 
763 		if (cur_fwd_eng == &csum_fwd_engine)
764 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
765 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
766 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
767 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
768 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
769 		}
770 
771 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
772 		       "%-"PRIu64"\n",
773 		       stats->opackets, port->tx_dropped,
774 		       (uint64_t) (stats->opackets + port->tx_dropped));
775 	}
776 	else {
777 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
778 		       "%14"PRIu64"\n",
779 		       stats->ipackets, stats->imissed,
780 		       (uint64_t) (stats->ipackets + stats->imissed));
781 
782 		if (cur_fwd_eng == &csum_fwd_engine)
783 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
784 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
785 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
786 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
787 			printf("  RX-nombufs:             %14"PRIu64"\n",
788 			       stats->rx_nombuf);
789 		}
790 
791 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
792 		       "%14"PRIu64"\n",
793 		       stats->opackets, port->tx_dropped,
794 		       (uint64_t) (stats->opackets + port->tx_dropped));
795 	}
796 
797 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
798 	if (port->rx_stream)
799 		pkt_burst_stats_display("RX",
800 			&port->rx_stream->rx_burst_stats);
801 	if (port->tx_stream)
802 		pkt_burst_stats_display("TX",
803 			&port->tx_stream->tx_burst_stats);
804 #endif
805 
806 	if (port->rx_queue_stats_mapping_enabled) {
807 		printf("\n");
808 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
809 			printf("  Stats reg %2d RX-packets:%14"PRIu64
810 			       "     RX-errors:%14"PRIu64
811 			       "    RX-bytes:%14"PRIu64"\n",
812 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
813 		}
814 		printf("\n");
815 	}
816 	if (port->tx_queue_stats_mapping_enabled) {
817 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
818 			printf("  Stats reg %2d TX-packets:%14"PRIu64
819 			       "                                 TX-bytes:%14"PRIu64"\n",
820 			       i, stats->q_opackets[i], stats->q_obytes[i]);
821 		}
822 	}
823 
824 	printf("  %s--------------------------------%s\n",
825 	       fwd_stats_border, fwd_stats_border);
826 }
827 
828 static void
829 fwd_stream_stats_display(streamid_t stream_id)
830 {
831 	struct fwd_stream *fs;
832 	static const char *fwd_top_stats_border = "-------";
833 
834 	fs = fwd_streams[stream_id];
835 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
836 	    (fs->fwd_dropped == 0))
837 		return;
838 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
839 	       "TX Port=%2d/Queue=%2d %s\n",
840 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
841 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
842 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
843 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
844 
845 	/* if running the checksum forwarding engine */
846 	if (cur_fwd_eng == &csum_fwd_engine) {
847 		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
848 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
849 	}
850 
851 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
852 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
853 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
854 #endif
855 }
856 
857 static void
858 flush_fwd_rx_queues(void)
859 {
860 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
861 	portid_t  rxp;
862 	portid_t port_id;
863 	queueid_t rxq;
864 	uint16_t  nb_rx;
865 	uint16_t  i;
866 	uint8_t   j;
867 
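	/*
	 * Drain in two rounds, with a short delay in between, so that
	 * packets still in flight at the end of the first round are
	 * flushed as well.
	 */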
868 	for (j = 0; j < 2; j++) {
869 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
870 			for (rxq = 0; rxq < nb_rxq; rxq++) {
871 				port_id = fwd_ports_ids[rxp];
872 				do {
873 					nb_rx = rte_eth_rx_burst(port_id, rxq,
874 						pkts_burst, MAX_PKT_BURST);
875 					for (i = 0; i < nb_rx; i++)
876 						rte_pktmbuf_free(pkts_burst[i]);
877 				} while (nb_rx > 0);
878 			}
879 		}
880 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
881 	}
882 }
883 
884 static void
885 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
886 {
887 	struct fwd_stream **fsm;
888 	streamid_t nb_fs;
889 	streamid_t sm_id;
890 
891 	fsm = &fwd_streams[fc->stream_idx];
892 	nb_fs = fc->stream_nb;
893 	do {
894 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
895 			(*pkt_fwd)(fsm[sm_id]);
896 	} while (! fc->stopped);
897 }
898 
899 static int
900 start_pkt_forward_on_core(void *fwd_arg)
901 {
902 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
903 			     cur_fwd_config.fwd_eng->packet_fwd);
904 	return 0;
905 }
906 
907 /*
908  * Run the TXONLY packet forwarding engine to send a single burst of packets.
909  * Used to start communication flows in network loopback test configurations.
910  */
911 static int
912 run_one_txonly_burst_on_core(void *fwd_arg)
913 {
914 	struct fwd_lcore *fwd_lc;
915 	struct fwd_lcore tmp_lcore;
916 
917 	fwd_lc = (struct fwd_lcore *) fwd_arg;
918 	tmp_lcore = *fwd_lc;
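	/*
	 * Marking the copy as stopped makes the do/while loop in
	 * run_pkt_fwd_on_lcore() execute exactly one pass, i.e. send a
	 * single burst on each stream.
	 */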
919 	tmp_lcore.stopped = 1;
920 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
921 	return 0;
922 }
923 
924 /*
925  * Launch packet forwarding:
926  *     - Setup per-port forwarding context.
927  *     - launch logical cores with their forwarding configuration.
928  */
929 static void
930 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
931 {
932 	port_fwd_begin_t port_fwd_begin;
933 	unsigned int i;
934 	unsigned int lc_id;
935 	int diag;
936 
937 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
938 	if (port_fwd_begin != NULL) {
939 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
940 			(*port_fwd_begin)(fwd_ports_ids[i]);
941 	}
942 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
943 		lc_id = fwd_lcores_cpuids[i];
944 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
945 			fwd_lcores[i]->stopped = 0;
946 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
947 						     fwd_lcores[i], lc_id);
948 			if (diag != 0)
949 				printf("launch lcore %u failed - diag=%d\n",
950 				       lc_id, diag);
951 		}
952 	}
953 }
954 
955 /*
956  * Launch packet forwarding configuration.
957  */
958 void
959 start_packet_forwarding(int with_tx_first)
960 {
961 	port_fwd_begin_t port_fwd_begin;
962 	port_fwd_end_t  port_fwd_end;
963 	struct rte_port *port;
964 	unsigned int i;
965 	portid_t   pt_id;
966 	streamid_t sm_id;
967 
968 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
969 		rte_exit(EXIT_FAILURE, "Number of RX queues is 0; cannot use the rxonly fwd mode\n");
970 
971 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
972 		rte_exit(EXIT_FAILURE, "Number of TX queues is 0; cannot use the txonly fwd mode\n");
973 
974 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
975 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
976 		(!nb_rxq || !nb_txq))
977 		rte_exit(EXIT_FAILURE,
978 			"The number of RX or TX queues is 0; cannot use the %s fwd mode\n",
979 			cur_fwd_eng->fwd_mode_name);
980 
981 	if (all_ports_started() == 0) {
982 		printf("Not all ports were started\n");
983 		return;
984 	}
985 	if (test_done == 0) {
986 		printf("Packet forwarding already started\n");
987 		return;
988 	}
989 	if (dcb_test) {
990 		for (i = 0; i < nb_fwd_ports; i++) {
991 			pt_id = fwd_ports_ids[i];
992 			port = &ports[pt_id];
993 			if (!port->dcb_flag) {
994 				printf("In DCB mode, all forwarding ports must "
995                                        "be configured in this mode.\n");
996 				return;
997 			}
998 		}
999 		if (nb_fwd_lcores == 1) {
1000 			printf("In DCB mode, the number of forwarding cores "
1001                                "should be larger than 1.\n");
1002 			return;
1003 		}
1004 	}
1005 	test_done = 0;
1006 
1007 	if (!no_flush_rx)
1008 		flush_fwd_rx_queues();
1009 
1010 	fwd_config_setup();
1011 	rxtx_config_display();
1012 
1013 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1014 		pt_id = fwd_ports_ids[i];
1015 		port = &ports[pt_id];
1016 		rte_eth_stats_get(pt_id, &port->stats);
1017 		port->tx_dropped = 0;
1018 
1019 		map_port_queue_stats_mapping_registers(pt_id, port);
1020 	}
1021 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1022 		fwd_streams[sm_id]->rx_packets = 0;
1023 		fwd_streams[sm_id]->tx_packets = 0;
1024 		fwd_streams[sm_id]->fwd_dropped = 0;
1025 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1026 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1027 
1028 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1029 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1030 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1031 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1032 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1033 #endif
1034 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1035 		fwd_streams[sm_id]->core_cycles = 0;
1036 #endif
1037 	}
1038 	if (with_tx_first) {
1039 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1040 		if (port_fwd_begin != NULL) {
1041 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1042 				(*port_fwd_begin)(fwd_ports_ids[i]);
1043 		}
1044 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1045 		rte_eal_mp_wait_lcore();
1046 		port_fwd_end = tx_only_engine.port_fwd_end;
1047 		if (port_fwd_end != NULL) {
1048 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1049 				(*port_fwd_end)(fwd_ports_ids[i]);
1050 		}
1051 	}
1052 	launch_packet_forwarding(start_pkt_forward_on_core);
1053 }
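
/*
 * Example: the "start tx_first" runtime command reaches
 * start_packet_forwarding() with with_tx_first != 0, so every port first
 * sends one TXONLY burst to prime loopback setups before the configured
 * engine takes over.
 */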
1054 
1055 void
1056 stop_packet_forwarding(void)
1057 {
1058 	struct rte_eth_stats stats;
1059 	struct rte_port *port;
1060 	port_fwd_end_t  port_fwd_end;
1061 	int i;
1062 	portid_t   pt_id;
1063 	streamid_t sm_id;
1064 	lcoreid_t  lc_id;
1065 	uint64_t total_recv;
1066 	uint64_t total_xmit;
1067 	uint64_t total_rx_dropped;
1068 	uint64_t total_tx_dropped;
1069 	uint64_t total_rx_nombuf;
1070 	uint64_t tx_dropped;
1071 	uint64_t rx_bad_ip_csum;
1072 	uint64_t rx_bad_l4_csum;
1073 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1074 	uint64_t fwd_cycles;
1075 #endif
1076 	static const char *acc_stats_border = "+++++++++++++++";
1077 
1078 	if (all_ports_started() == 0) {
1079 		printf("Not all ports were started\n");
1080 		return;
1081 	}
1082 	if (test_done) {
1083 		printf("Packet forwarding not started\n");
1084 		return;
1085 	}
1086 	printf("Telling cores to stop...");
1087 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1088 		fwd_lcores[lc_id]->stopped = 1;
1089 	printf("\nWaiting for lcores to finish...\n");
1090 	rte_eal_mp_wait_lcore();
1091 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1092 	if (port_fwd_end != NULL) {
1093 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1094 			pt_id = fwd_ports_ids[i];
1095 			(*port_fwd_end)(pt_id);
1096 		}
1097 	}
1098 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1099 	fwd_cycles = 0;
1100 #endif
1101 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1102 		if (cur_fwd_config.nb_fwd_streams >
1103 		    cur_fwd_config.nb_fwd_ports) {
1104 			fwd_stream_stats_display(sm_id);
1105 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1106 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1107 		} else {
1108 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1109 				fwd_streams[sm_id];
1110 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1111 				fwd_streams[sm_id];
1112 		}
1113 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1114 		tx_dropped = (uint64_t) (tx_dropped +
1115 					 fwd_streams[sm_id]->fwd_dropped);
1116 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1117 
1118 		rx_bad_ip_csum =
1119 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1120 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1121 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1122 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1123 							rx_bad_ip_csum;
1124 
1125 		rx_bad_l4_csum =
1126 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1127 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1128 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1129 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1130 							rx_bad_l4_csum;
1131 
1132 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1133 		fwd_cycles = (uint64_t) (fwd_cycles +
1134 					 fwd_streams[sm_id]->core_cycles);
1135 #endif
1136 	}
1137 	total_recv = 0;
1138 	total_xmit = 0;
1139 	total_rx_dropped = 0;
1140 	total_tx_dropped = 0;
1141 	total_rx_nombuf  = 0;
1142 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1143 		pt_id = fwd_ports_ids[i];
1144 
1145 		port = &ports[pt_id];
1146 		rte_eth_stats_get(pt_id, &stats);
1147 		stats.ipackets -= port->stats.ipackets;
1148 		port->stats.ipackets = 0;
1149 		stats.opackets -= port->stats.opackets;
1150 		port->stats.opackets = 0;
1151 		stats.ibytes   -= port->stats.ibytes;
1152 		port->stats.ibytes = 0;
1153 		stats.obytes   -= port->stats.obytes;
1154 		port->stats.obytes = 0;
1155 		stats.imissed  -= port->stats.imissed;
1156 		port->stats.imissed = 0;
1157 		stats.oerrors  -= port->stats.oerrors;
1158 		port->stats.oerrors = 0;
1159 		stats.rx_nombuf -= port->stats.rx_nombuf;
1160 		port->stats.rx_nombuf = 0;
1161 
1162 		total_recv += stats.ipackets;
1163 		total_xmit += stats.opackets;
1164 		total_rx_dropped += stats.imissed;
1165 		total_tx_dropped += port->tx_dropped;
1166 		total_rx_nombuf  += stats.rx_nombuf;
1167 
1168 		fwd_port_stats_display(pt_id, &stats);
1169 	}
1170 	printf("\n  %s Accumulated forward statistics for all ports"
1171 	       "%s\n",
1172 	       acc_stats_border, acc_stats_border);
1173 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1174 	       "%-"PRIu64"\n"
1175 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1176 	       "%-"PRIu64"\n",
1177 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1178 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1179 	if (total_rx_nombuf > 0)
1180 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1181 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1182 	       "%s\n",
1183 	       acc_stats_border, acc_stats_border);
1184 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1185 	if (total_recv > 0)
1186 		printf("\n  CPU cycles/packet=%u (total cycles="
1187 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1188 		       (unsigned int)(fwd_cycles / total_recv),
1189 		       fwd_cycles, total_recv);
1190 #endif
1191 	printf("\nDone.\n");
1192 	test_done = 1;
1193 }
1194 
1195 void
1196 dev_set_link_up(portid_t pid)
1197 {
1198 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1199 		printf("\nFailed to set the link up.\n");
1200 }
1201 
1202 void
1203 dev_set_link_down(portid_t pid)
1204 {
1205 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1206 		printf("\nFailed to set the link down.\n");
1207 }
1208 
1209 static int
1210 all_ports_started(void)
1211 {
1212 	portid_t pi;
1213 	struct rte_port *port;
1214 
1215 	FOREACH_PORT(pi, ports) {
1216 		port = &ports[pi];
1217 		/* Check if there is a port which is not started */
1218 		if ((port->port_status != RTE_PORT_STARTED) &&
1219 			(port->slave_flag == 0))
1220 			return 0;
1221 	}
1222 
1223 	/* All (non-slave) ports are started */
1224 	return 1;
1225 }
1226 
1227 int
1228 all_ports_stopped(void)
1229 {
1230 	portid_t pi;
1231 	struct rte_port *port;
1232 
1233 	FOREACH_PORT(pi, ports) {
1234 		port = &ports[pi];
1235 		if ((port->port_status != RTE_PORT_STOPPED) &&
1236 			(port->slave_flag == 0))
1237 			return 0;
1238 	}
1239 
1240 	return 1;
1241 }
1242 
1243 int
1244 port_is_started(portid_t port_id)
1245 {
1246 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1247 		return 0;
1248 
1249 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1250 		return 0;
1251 
1252 	return 1;
1253 }
1254 
1255 static int
1256 port_is_closed(portid_t port_id)
1257 {
1258 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1259 		return 0;
1260 
1261 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1262 		return 0;
1263 
1264 	return 1;
1265 }
1266 
1267 int
1268 start_port(portid_t pid)
1269 {
1270 	int diag, need_check_link_status = -1;
1271 	portid_t pi;
1272 	queueid_t qi;
1273 	struct rte_port *port;
1274 	struct ether_addr mac_addr;
1275 
1276 	if (test_done == 0) {
1277 		printf("Please stop forwarding first\n");
1278 		return -1;
1279 	}
1280 
1281 	if (port_id_is_invalid(pid, ENABLED_WARN))
1282 		return 0;
1283 
1284 	if (init_fwd_streams() < 0) {
1285 		printf("Fail from init_fwd_streams()\n");
1286 		return -1;
1287 	}
1288 
1289 	if (dcb_config)
1290 		dcb_test = 1;
1291 	FOREACH_PORT(pi, ports) {
1292 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1293 			continue;
1294 
1295 		need_check_link_status = 0;
1296 		port = &ports[pi];
1297 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1298 						 RTE_PORT_HANDLING) == 0) {
1299 			printf("Port %d is not stopped\n", pi);
1300 			continue;
1301 		}
1302 
1303 		if (port->need_reconfig > 0) {
1304 			port->need_reconfig = 0;
1305 
1306 			printf("Configuring Port %d (socket %u)\n", pi,
1307 					port->socket_id);
1308 			/* configure port */
1309 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1310 						&(port->dev_conf));
1311 			if (diag != 0) {
1312 				if (rte_atomic16_cmpset(&(port->port_status),
1313 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1314 					printf("Port %d cannot be set back "
1315 							"to stopped\n", pi);
1316 				printf("Failed to configure port %d\n", pi);
1317 				/* try to reconfigure port next time */
1318 				port->need_reconfig = 1;
1319 				return -1;
1320 			}
1321 		}
1322 		if (port->need_reconfig_queues > 0) {
1323 			port->need_reconfig_queues = 0;
1324 			/* setup tx queues */
1325 			for (qi = 0; qi < nb_txq; qi++) {
1326 				if ((numa_support) &&
1327 					(txring_numa[pi] != NUMA_NO_CONFIG))
1328 					diag = rte_eth_tx_queue_setup(pi, qi,
1329 						nb_txd, txring_numa[pi],
1330 						&(port->tx_conf));
1331 				else
1332 					diag = rte_eth_tx_queue_setup(pi, qi,
1333 						nb_txd, port->socket_id,
1334 						&(port->tx_conf));
1335 
1336 				if (diag == 0)
1337 					continue;
1338 
1339 				/* Fail to setup tx queue, return */
1340 				if (rte_atomic16_cmpset(&(port->port_status),
1341 							RTE_PORT_HANDLING,
1342 							RTE_PORT_STOPPED) == 0)
1343 					printf("Port %d cannot be set back "
1344 							"to stopped\n", pi);
1345 				printf("Failed to configure port %d TX queues\n", pi);
1346 				/* try to reconfigure queues next time */
1347 				port->need_reconfig_queues = 1;
1348 				return -1;
1349 			}
1350 			/* setup rx queues */
1351 			for (qi = 0; qi < nb_rxq; qi++) {
1352 				if ((numa_support) &&
1353 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1354 					struct rte_mempool * mp =
1355 						mbuf_pool_find(rxring_numa[pi]);
1356 					if (mp == NULL) {
1357 						printf("Failed to setup RX queue: "
1358 							"no mempool allocation "
1359 							"on socket %d\n",
1360 							rxring_numa[pi]);
1361 						return -1;
1362 					}
1363 
1364 					diag = rte_eth_rx_queue_setup(pi, qi,
1365 					     nb_rxd, rxring_numa[pi],
1366 					     &(port->rx_conf), mp);
1367 				}
1368 				else
1369 					diag = rte_eth_rx_queue_setup(pi, qi,
1370 					     nb_rxd, port->socket_id,
1371 					     &(port->rx_conf),
1372 					     mbuf_pool_find(port->socket_id));
1373 
1374 				if (diag == 0)
1375 					continue;
1376 
1378 				/* Fail to setup rx queue, return */
1379 				if (rte_atomic16_cmpset(&(port->port_status),
1380 							RTE_PORT_HANDLING,
1381 							RTE_PORT_STOPPED) == 0)
1382 					printf("Port %d cannot be set back "
1383 							"to stopped\n", pi);
1384 				printf("Failed to configure port %d RX queues\n", pi);
1385 				/* try to reconfigure queues next time */
1386 				port->need_reconfig_queues = 1;
1387 				return -1;
1388 			}
1389 		}
1390 		/* start port */
1391 		if (rte_eth_dev_start(pi) < 0) {
1392 			printf("Failed to start port %d\n", pi);
1393 
1394 			/* Failed to start; set the port status back to stopped */
1395 			if (rte_atomic16_cmpset(&(port->port_status),
1396 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1397 				printf("Port %d cannot be set back to "
1398 							"stopped\n", pi);
1399 			continue;
1400 		}
1401 
1402 		if (rte_atomic16_cmpset(&(port->port_status),
1403 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1404 			printf("Port %d cannot be set to started\n", pi);
1405 
1406 		rte_eth_macaddr_get(pi, &mac_addr);
1407 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1408 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1409 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1410 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1411 
1412 		/* at least one port started, need to check link status */
1413 		need_check_link_status = 1;
1414 	}
1415 
1416 	if (need_check_link_status == 1 && !no_link_check)
1417 		check_all_ports_link_status(RTE_PORT_ALL);
1418 	else if (need_check_link_status == 0)
1419 		printf("Please stop the ports first\n");
1420 
1421 	printf("Done\n");
1422 	return 0;
1423 }
1424 
1425 void
1426 stop_port(portid_t pid)
1427 {
1428 	portid_t pi;
1429 	struct rte_port *port;
1430 	int need_check_link_status = 0;
1431 
1432 	if (test_done == 0) {
1433 		printf("Please stop forwarding first\n");
1434 		return;
1435 	}
1436 	if (dcb_test) {
1437 		dcb_test = 0;
1438 		dcb_config = 0;
1439 	}
1440 
1441 	if (port_id_is_invalid(pid, ENABLED_WARN))
1442 		return;
1443 
1444 	printf("Stopping ports...\n");
1445 
1446 	FOREACH_PORT(pi, ports) {
1447 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1448 			continue;
1449 
1450 		port = &ports[pi];
1451 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1452 						RTE_PORT_HANDLING) == 0)
1453 			continue;
1454 
1455 		rte_eth_dev_stop(pi);
1456 
1457 		if (rte_atomic16_cmpset(&(port->port_status),
1458 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1459 			printf("Port %d cannot be set to stopped\n", pi);
1460 		need_check_link_status = 1;
1461 	}
1462 	if (need_check_link_status && !no_link_check)
1463 		check_all_ports_link_status(RTE_PORT_ALL);
1464 
1465 	printf("Done\n");
1466 }
1467 
1468 void
1469 close_port(portid_t pid)
1470 {
1471 	portid_t pi;
1472 	struct rte_port *port;
1473 
1474 	if (test_done == 0) {
1475 		printf("Please stop forwarding first\n");
1476 		return;
1477 	}
1478 
1479 	if (port_id_is_invalid(pid, ENABLED_WARN))
1480 		return;
1481 
1482 	printf("Closing ports...\n");
1483 
1484 	FOREACH_PORT(pi, ports) {
1485 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1486 			continue;
1487 
1488 		port = &ports[pi];
1489 		if (rte_atomic16_cmpset(&(port->port_status),
1490 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1491 			printf("Port %d is already closed\n", pi);
1492 			continue;
1493 		}
1494 
1495 		if (rte_atomic16_cmpset(&(port->port_status),
1496 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1497 			printf("Port %d is not stopped\n", pi);
1498 			continue;
1499 		}
1500 
1501 		rte_eth_dev_close(pi);
1502 
1503 		if (rte_atomic16_cmpset(&(port->port_status),
1504 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1505 			printf("Port %d cannot be set to closed\n", pi);
1506 	}
1507 
1508 	printf("Done\n");
1509 }
1510 
1511 void
1512 attach_port(char *identifier)
1513 {
1514 	portid_t i, j, pi = 0;
1515 
1516 	printf("Attaching a new port...\n");
1517 
1518 	if (identifier == NULL) {
1519 		printf("Invalid parameter specified\n");
1520 		return;
1521 	}
1522 
1523 	if (test_done == 0) {
1524 		printf("Please stop forwarding first\n");
1525 		return;
1526 	}
1527 
1528 	if (rte_eth_dev_attach(identifier, &pi))
1529 		return;
1530 
1531 	ports[pi].enabled = 1;
1532 	reconfig(pi, rte_eth_dev_socket_id(pi));
1533 	rte_eth_promiscuous_enable(pi);
1534 
1535 	nb_ports = rte_eth_dev_count();
1536 
1537 	/* set_default_fwd_ports_config(); */
1538 	memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1539 	i = 0;
1540 	FOREACH_PORT(j, ports) {
1541 		fwd_ports_ids[i] = j;
1542 		i++;
1543 	}
1544 	nb_cfg_ports = nb_ports;
1545 	nb_fwd_ports++;
1546 
1547 	ports[pi].port_status = RTE_PORT_STOPPED;
1548 
1549 	printf("Port %d is attached. The total number of ports is now %d\n", pi, nb_ports);
1550 	printf("Done\n");
1551 }
1552 
1553 void
1554 detach_port(uint8_t port_id)
1555 {
1556 	portid_t i, pi = 0;
1557 	char name[RTE_ETH_NAME_MAX_LEN];
1558 
1559 	printf("Detaching a port...\n");
1560 
1561 	if (!port_is_closed(port_id)) {
1562 		printf("Please close port first\n");
1563 		return;
1564 	}
1565 
1566 	if (rte_eth_dev_detach(port_id, name))
1567 		return;
1568 
1569 	ports[port_id].enabled = 0;
1570 	nb_ports = rte_eth_dev_count();
1571 
1572 	/* set_default_fwd_ports_config(); */
1573 	memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1574 	i = 0;
1575 	FOREACH_PORT(pi, ports) {
1576 		fwd_ports_ids[i] = pi;
1577 		i++;
1578 	}
1579 	nb_cfg_ports = nb_ports;
1580 	nb_fwd_ports--;
1581 
1582 	printf("Port '%s' is detached. The total number of ports is now %d\n",
1583 			name, nb_ports);
1584 	printf("Done\n");
1585 	return;
1586 }
1587 
1588 void
1589 pmd_test_exit(void)
1590 {
1591 	portid_t pt_id;
1592 
1593 	if (test_done == 0)
1594 		stop_packet_forwarding();
1595 
1596 	if (ports != NULL) {
1597 		no_link_check = 1;
1598 		FOREACH_PORT(pt_id, ports) {
1599 			printf("\nShutting down port %d...\n", pt_id);
1600 			fflush(stdout);
1601 			stop_port(pt_id);
1602 			close_port(pt_id);
1603 		}
1604 	}
1605 	printf("\nBye...\n");
1606 }
1607 
1608 typedef void (*cmd_func_t)(void);
1609 struct pmd_test_command {
1610 	const char *cmd_name;
1611 	cmd_func_t cmd_func;
1612 };
1613 
1614 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1615 
1616 /* Check the link status of all ports in up to 9 s, and print the status when done */
1617 static void
1618 check_all_ports_link_status(uint32_t port_mask)
1619 {
1620 #define CHECK_INTERVAL 100 /* 100ms */
1621 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1622 	uint8_t portid, count, all_ports_up, print_flag = 0;
1623 	struct rte_eth_link link;
1624 
1625 	printf("Checking link statuses...\n");
1626 	fflush(stdout);
1627 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1628 		all_ports_up = 1;
1629 		FOREACH_PORT(portid, ports) {
1630 			if ((port_mask & (1 << portid)) == 0)
1631 				continue;
1632 			memset(&link, 0, sizeof(link));
1633 			rte_eth_link_get_nowait(portid, &link);
1634 			/* print link status if flag set */
1635 			if (print_flag == 1) {
1636 				if (link.link_status)
1637 					printf("Port %d Link Up - speed %u "
1638 						"Mbps - %s\n", (uint8_t)portid,
1639 						(unsigned)link.link_speed,
1640 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1641 					("full-duplex") : ("half-duplex"));
1642 				else
1643 					printf("Port %d Link Down\n",
1644 						(uint8_t)portid);
1645 				continue;
1646 			}
1647 			/* clear all_ports_up flag if any link down */
1648 			if (link.link_status == ETH_LINK_DOWN) {
1649 				all_ports_up = 0;
1650 				break;
1651 			}
1652 		}
1653 		/* after finally printing all link status, get out */
1654 		if (print_flag == 1)
1655 			break;
1656 
1657 		if (all_ports_up == 0) {
1658 			fflush(stdout);
1659 			rte_delay_ms(CHECK_INTERVAL);
1660 		}
1661 
1662 		/* set the print_flag if all ports up or timeout */
1663 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1664 			print_flag = 1;
1665 		}
1666 	}
1667 }
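
/*
 * Example: with CHECK_INTERVAL = 100 ms and MAX_CHECK_TIME = 90, the loop
 * above polls for up to ~9 seconds before printing whatever status the
 * ports have reached.
 */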
1668 
1669 static int
1670 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1671 {
1672 	uint16_t i;
1673 	int diag;
1674 	uint8_t mapping_found = 0;
1675 
1676 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1677 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1678 				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1679 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1680 					tx_queue_stats_mappings[i].queue_id,
1681 					tx_queue_stats_mappings[i].stats_counter_id);
1682 			if (diag != 0)
1683 				return diag;
1684 			mapping_found = 1;
1685 		}
1686 	}
1687 	if (mapping_found)
1688 		port->tx_queue_stats_mapping_enabled = 1;
1689 	return 0;
1690 }
1691 
1692 static int
1693 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1694 {
1695 	uint16_t i;
1696 	int diag;
1697 	uint8_t mapping_found = 0;
1698 
1699 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1700 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1701 				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1702 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1703 					rx_queue_stats_mappings[i].queue_id,
1704 					rx_queue_stats_mappings[i].stats_counter_id);
1705 			if (diag != 0)
1706 				return diag;
1707 			mapping_found = 1;
1708 		}
1709 	}
1710 	if (mapping_found)
1711 		port->rx_queue_stats_mapping_enabled = 1;
1712 	return 0;
1713 }
1714 
1715 static void
1716 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1717 {
1718 	int diag = 0;
1719 
1720 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1721 	if (diag != 0) {
1722 		if (diag == -ENOTSUP) {
1723 			port->tx_queue_stats_mapping_enabled = 0;
1724 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1725 		}
1726 		else
1727 			rte_exit(EXIT_FAILURE,
1728 					"set_tx_queue_stats_mapping_registers "
1729 					"failed for port id=%d diag=%d\n",
1730 					pi, diag);
1731 	}
1732 
1733 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1734 	if (diag != 0) {
1735 		if (diag == -ENOTSUP) {
1736 			port->rx_queue_stats_mapping_enabled = 0;
1737 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1738 		}
1739 		else
1740 			rte_exit(EXIT_FAILURE,
1741 					"set_rx_queue_stats_mapping_registers "
1742 					"failed for port id=%d diag=%d\n",
1743 					pi, diag);
1744 	}
1745 }
1746 
1747 static void
1748 rxtx_port_config(struct rte_port *port)
1749 {
1750 	port->rx_conf = port->dev_info.default_rxconf;
1751 	port->tx_conf = port->dev_info.default_txconf;
1752 
1753 	/* Check if any RX/TX parameters have been passed */
1754 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1755 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1756 
1757 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1758 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1759 
1760 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1761 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1762 
1763 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1764 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1765 
1766 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1767 		port->rx_conf.rx_drop_en = rx_drop_en;
1768 
1769 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1770 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1771 
1772 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1773 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1774 
1775 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1776 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1777 
1778 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1779 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1780 
1781 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1782 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1783 
1784 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1785 		port->tx_conf.txq_flags = txq_flags;
1786 }
1787 
1788 void
1789 init_port_config(void)
1790 {
1791 	portid_t pid;
1792 	struct rte_port *port;
1793 
1794 	FOREACH_PORT(pid, ports) {
1795 		port = &ports[pid];
1796 		port->dev_conf.rxmode = rx_mode;
1797 		port->dev_conf.fdir_conf = fdir_conf;
1798 		if (nb_rxq > 1) {
1799 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1800 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1801 		} else {
1802 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1803 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1804 		}
1805 
1806 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1807 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1808 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1809 			else
1810 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1811 		}
1812 
1813 		if (port->dev_info.max_vfs != 0) {
1814 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1815 				port->dev_conf.rxmode.mq_mode =
1816 					ETH_MQ_RX_VMDQ_RSS;
1817 			else
1818 				port->dev_conf.rxmode.mq_mode =
1819 					ETH_MQ_RX_NONE;
1820 
1821 			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1822 		}
1823 
1824 		rxtx_port_config(port);
1825 
1826 		rte_eth_macaddr_get(pid, &port->eth_addr);
1827 
1828 		map_port_queue_stats_mapping_registers(pid, port);
1829 #ifdef RTE_NIC_BYPASS
1830 		rte_eth_dev_bypass_init(pid);
1831 #endif
1832 	}
1833 }
1834 
1835 void set_port_slave_flag(portid_t slave_pid)
1836 {
1837 	struct rte_port *port;
1838 
1839 	port = &ports[slave_pid];
1840 	port->slave_flag = 1;
1841 }
1842 
1843 void clear_port_slave_flag(portid_t slave_pid)
1844 {
1845 	struct rte_port *port;
1846 
1847 	port = &ports[slave_pid];
1848 	port->slave_flag = 0;
1849 }
1850 
1851 const uint16_t vlan_tags[] = {
1852 		0,  1,  2,  3,  4,  5,  6,  7,
1853 		8,  9, 10, 11, 12, 13, 14, 15,
1854 		16, 17, 18, 19, 20, 21, 22, 23,
1855 		24, 25, 26, 27, 28, 29, 30, 31
1856 };
1857 
1858 static int
1859 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1860 		 enum dcb_mode_enable dcb_mode,
1861 		 enum rte_eth_nb_tcs num_tcs,
1862 		 uint8_t pfc_en)
1863 {
1864 	uint8_t i;
1865 
1866 	/*
1867 	 * Build the configuration for DCB+VT based on the vlan_tags array
1868 	 * given above and on the number of traffic classes available for use.
1869 	 */
1870 	if (dcb_mode == DCB_VT_ENABLED) {
1871 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1872 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
1873 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1874 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1875 
1876 		/* VMDQ+DCB RX and TX configurations */
1877 		vmdq_rx_conf->enable_default_pool = 0;
1878 		vmdq_rx_conf->default_pool = 0;
1879 		vmdq_rx_conf->nb_queue_pools =
1880 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1881 		vmdq_tx_conf->nb_queue_pools =
1882 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1883 
1884 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1885 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1886 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1887 			vmdq_rx_conf->pool_map[i].pools =
1888 				1 << (i % vmdq_rx_conf->nb_queue_pools);
1889 		}
1890 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1891 			vmdq_rx_conf->dcb_tc[i] = i;
1892 			vmdq_tx_conf->dcb_tc[i] = i;
1893 		}
1894 
1895 		/* set DCB mode of RX and TX of multiple queues */
1896 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1897 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1898 	} else {
1899 		struct rte_eth_dcb_rx_conf *rx_conf =
1900 				&eth_conf->rx_adv_conf.dcb_rx_conf;
1901 		struct rte_eth_dcb_tx_conf *tx_conf =
1902 				&eth_conf->tx_adv_conf.dcb_tx_conf;
1903 
1904 		rx_conf->nb_tcs = num_tcs;
1905 		tx_conf->nb_tcs = num_tcs;
1906 
1907 		for (i = 0; i < num_tcs; i++) {
1908 			rx_conf->dcb_tc[i] = i;
1909 			tx_conf->dcb_tc[i] = i;
1910 		}
1911 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1912 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1913 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1914 	}
1915 
1916 	if (pfc_en)
1917 		eth_conf->dcb_capability_en =
1918 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1919 	else
1920 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1921 
1922 	return 0;
1923 }
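
/*
 * Worked example of the VT pool mapping built above: with ETH_4_TCS,
 * nb_queue_pools is ETH_32_POOLS, so nb_pool_maps is 32 and the modulo
 * is a no-op; every VLAN tag gets its own pool:
 *
 *	pool_map[0]  = { .vlan_id = 0,  .pools = 1 << 0  }
 *	pool_map[1]  = { .vlan_id = 1,  .pools = 1 << 1  }
 *	...
 *	pool_map[31] = { .vlan_id = 31, .pools = 1 << 31 }
 *
 * With ETH_8_TCS only 16 pools are used, so VLANs 0..15 map one-to-one
 * onto pools 0..15.
 */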
1924 
1925 int
1926 init_port_dcb_config(portid_t pid,
1927 		     enum dcb_mode_enable dcb_mode,
1928 		     enum rte_eth_nb_tcs num_tcs,
1929 		     uint8_t pfc_en)
1930 {
1931 	struct rte_eth_conf port_conf;
1932 	struct rte_eth_dev_info dev_info;
1933 	struct rte_port *rte_port;
1934 	int retval;
1935 	uint16_t i;
1936 
1937 	rte_eth_dev_info_get(pid, &dev_info);
1938 
1939 	/* If dev_info.vmdq_pool_base is greater than 0,
1940 	 * the queue IDs of the VMDq pools start after the PF queues.
1941 	 */
1942 	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1943 		printf("VMDQ_DCB multi-queue mode is nonsensical"
1944 			" for port %d.\n", pid);
1945 		return -1;
1946 	}
1947 
1948 	/* Assume the ports in testpmd have the same DCB capability
1949 	 * and the same number of rxq and txq in DCB mode.
1950 	 */
1951 	if (dcb_mode == DCB_VT_ENABLED) {
1952 		nb_rxq = dev_info.max_rx_queues;
1953 		nb_txq = dev_info.max_tx_queues;
1954 	} else {
1955 		/* if VT is disabled, use all PF queues */
1956 		if (dev_info.vmdq_pool_base == 0) {
1957 			nb_rxq = dev_info.max_rx_queues;
1958 			nb_txq = dev_info.max_tx_queues;
1959 		} else {
1960 			nb_rxq = (queueid_t)num_tcs;
1961 			nb_txq = (queueid_t)num_tcs;
1963 		}
1964 	}
1965 	rx_free_thresh = 64;
1966 
1967 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1968 	/* Enter DCB configuration status */
1969 	dcb_config = 1;
1970 
1971 	/* set configuration of DCB in VT mode and DCB in non-VT mode */
1972 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1973 	if (retval < 0)
1974 		return retval;
1975 
1976 	rte_port = &ports[pid];
1977 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1978 
1979 	rxtx_port_config(rte_port);
1980 	/* VLAN filter */
1981 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1982 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
1983 		rx_vft_set(pid, vlan_tags[i], 1);
1984 
1985 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1986 	map_port_queue_stats_mapping_registers(pid, rte_port);
1987 
1988 	rte_port->dcb_flag = 1;
1989 
1990 	return 0;
1991 }
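
/*
 * init_port_dcb_config() is normally reached from the interactive
 * prompt; the sequence below is a sketch of the typical usage (see the
 * DCB config command in cmdline.c for the exact syntax), with the port
 * stopped before it is reconfigured:
 *
 *	testpmd> port stop 0
 *	testpmd> port config 0 dcb vt off 4 pfc on
 *	testpmd> port start 0
 */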
1992 
1993 static void
1994 init_port(void)
1995 {
1996 	portid_t pid;
1997 
1998 	/* Configuration of Ethernet ports. */
1999 	ports = rte_zmalloc("testpmd: ports",
2000 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2001 			    RTE_CACHE_LINE_SIZE);
2002 	if (ports == NULL) {
2003 		rte_exit(EXIT_FAILURE,
2004 				"rte_zmalloc(%d struct rte_port) failed\n",
2005 				RTE_MAX_ETHPORTS);
2006 	}
2007 
2008 	/* enable the allocated ports */
2009 	for (pid = 0; pid < nb_ports; pid++)
2010 		ports[pid].enabled = 1;
2011 }
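
/*
 * ports[] is zero-initialized by rte_zmalloc(), so only the first
 * nb_ports entries are marked enabled here; FOREACH_PORT() skips any
 * entry whose enabled flag is still 0.
 */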
2012 
2013 static void
2014 force_quit(void)
2015 {
2016 	pmd_test_exit();
2017 	prompt_exit();
2018 }
2019 
2020 static void
2021 signal_handler(int signum)
2022 {
2023 	if (signum == SIGINT || signum == SIGTERM) {
2024 		printf("\nSignal %d received, preparing to exit...\n",
2025 				signum);
2026 		force_quit();
2027 		/* exit with the expected status */
2028 		signal(signum, SIG_DFL);
2029 		kill(getpid(), signum);
2030 	}
2031 }
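
/*
 * Restoring SIG_DFL and re-raising the signal (above) terminates the
 * process "by signal" rather than through exit(), so callers observe
 * the conventional wait status; a POSIX shell, for instance, reports
 * 128 + signum:
 *
 *	$ ./testpmd -c 0x3 -n 4 -- -i    (then press Ctrl-C)
 *	$ echo $?
 *	130                              (128 + SIGINT = 128 + 2)
 */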
2032 
2033 int
2034 main(int argc, char** argv)
2035 {
2036 	int  diag;
2037 	uint8_t port_id;
2038 
2039 	signal(SIGINT, signal_handler);
2040 	signal(SIGTERM, signal_handler);
2041 
2042 	diag = rte_eal_init(argc, argv);
2043 	if (diag < 0)
2044 		rte_panic("Cannot init EAL\n");
2045 
2046 	nb_ports = (portid_t) rte_eth_dev_count();
2047 	if (nb_ports == 0)
2048 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2049 
2050 	/* allocate port structures, and init them */
2051 	init_port();
2052 
2053 	set_def_fwd_config();
2054 	if (nb_lcores == 0)
2055 		rte_panic("Empty set of forwarding logical cores - check the "
2056 			  "core mask supplied in the command parameters\n");
2057 
2058 	argc -= diag;
2059 	argv += diag;
2060 	if (argc > 1)
2061 		launch_args_parse(argc, argv);
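
	/*
	 * rte_eal_init() returns the number of arguments it consumed, so
	 * after the adjustment above argv holds only testpmd's own options,
	 * i.e. everything after the "--" separator:
	 *
	 *	./testpmd -c 0x3 -n 4 -- -i --rxq=2 --txq=2
	 *	(EAL options)            (launch_args_parse() options)
	 */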
2062 
2063 	if (!nb_rxq && !nb_txq)
2064 		printf("Warning: either rx or tx queue count should be non-zero\n");
2065 
2066 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2067 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2068 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2069 		       nb_rxq, nb_txq);
2070 
2071 	init_config();
2072 	if (start_port(RTE_PORT_ALL) != 0)
2073 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2074 
2075 	/* set all ports to promiscuous mode by default */
2076 	FOREACH_PORT(port_id, ports)
2077 		rte_eth_promiscuous_enable(port_id);
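
	/*
	 * The setting can be read back with rte_eth_promiscuous_get(),
	 * which returns 1 once the mode is enabled; an illustrative
	 * check, not part of the original flow:
	 *
	 *	FOREACH_PORT(port_id, ports)
	 *		if (rte_eth_promiscuous_get(port_id) != 1)
	 *			printf("port %d not promiscuous\n", port_id);
	 */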
2078 
2079 #ifdef RTE_LIBRTE_CMDLINE
2080 	if (interactive == 1) {
2081 		if (auto_start) {
2082 			printf("Start automatic packet forwarding\n");
2083 			start_packet_forwarding(0);
2084 		}
2085 		prompt();
2086 	} else
2087 #endif
2088 	{
2089 		char c;
2090 		int rc;
2091 
2092 		printf("No interactive command line, starting packet forwarding\n");
2093 		start_packet_forwarding(0);
2094 		printf("Press enter to exit\n");
2095 		rc = read(0, &c, 1);
2096 		pmd_test_exit();
2097 		if (rc < 0)
2098 			return 1;
2099 	}
2100 
2101 	return 0;
2102 }
2103