xref: /dpdk/app/test-pmd/testpmd.c (revision f56620dddb11be9602e89206a60186cbb2bd1b13)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 
83 #include "testpmd.h"
84 
85 uint16_t verbose_level = 0; /**< Silent by default. */
86 
87 /* Use the master core for the command line? */
88 uint8_t interactive = 0;
89 uint8_t auto_start = 0;
90 
91 /*
92  * NUMA support configuration.
93  * When set, the NUMA support attempts to dispatch the allocation of the
94  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
95  * probed ports among the detected CPU sockets.
96  * Otherwise, all memory is allocated from CPU socket 0.
97  */
98 uint8_t numa_support = 0; /**< No numa support by default */
99 
100 /*
101  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
102  * not configured.
103  */
104 uint8_t socket_num = UMA_NO_CONFIG;
105 
106 /*
107  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
108  */
109 uint8_t mp_anon = 0;
110 
111 /*
112  * Record the Ethernet address of peer target ports to which packets are
113  * forwarded.
114  * Must be instantiated with the Ethernet addresses of peer traffic generator
115  * ports.
116  */
117 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
118 portid_t nb_peer_eth_addrs = 0;
119 
120 /*
121  * Probed Target Environment.
122  */
123 struct rte_port *ports;	       /**< For all probed ethernet ports. */
124 portid_t nb_ports;             /**< Number of probed ethernet ports. */
125 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
126 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
127 
128 /*
129  * Test Forwarding Configuration.
130  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
131  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
132  */
133 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
134 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
135 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
136 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
137 
138 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
139 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
140 
141 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
142 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
143 
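/*
 * Example (a sketch, assuming the default paired topology): with 2 probed
 * ports and nb_rxq = nb_txq = 1, init_fwd_streams() allocates
 * nb_ports * max(nb_rxq, nb_txq) = 2 streams, which fwd_config_setup()
 * then spreads across the forwarding lcores.
 */
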
144 /*
145  * Forwarding engines.
146  */
147 struct fwd_engine *fwd_engines[] = {
148 	&io_fwd_engine,
149 	&mac_fwd_engine,
150 	&mac_swap_engine,
151 	&flow_gen_engine,
152 	&rx_only_engine,
153 	&tx_only_engine,
154 	&csum_fwd_engine,
155 	&icmp_echo_engine,
156 #ifdef RTE_LIBRTE_IEEE1588
157 	&ieee1588_fwd_engine,
158 #endif
159 	NULL,
160 };
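
/*
 * NULL-terminated table scanned by the "set fwd <mode>" runtime command
 * (e.g., at the testpmd prompt: "set fwd mac"); cur_fwd_eng below points
 * at the currently selected engine.
 */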
161 
162 struct fwd_config cur_fwd_config;
163 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
164 uint32_t retry_enabled;
165 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
166 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
167 
168 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
169 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
170                                       * specified on command-line. */
171 
172 /*
173  * Configuration of packet segments used by the "txonly" processing engine.
174  */
175 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
176 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
177 	TXONLY_DEF_PACKET_LEN,
178 };
179 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
180 
181 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
182 /**< Split policy for packets to TX. */
183 
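/*
 * Example: at the testpmd prompt, "set txpkts 64,128" makes the txonly
 * engine emit 192-byte packets split into two segments.
 */
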
184 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
185 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
186 
187 /* Whether the current configuration is in DCB mode; 0 means not in DCB mode. */
188 uint8_t dcb_config = 0;
189 
190 /* Whether DCB is in testing status. */
191 uint8_t dcb_test = 0;
192 
193 /*
194  * Configurable number of RX/TX queues.
195  */
196 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
197 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
198 
199 /*
200  * Configurable number of RX/TX ring descriptors.
201  */
202 #define RTE_TEST_RX_DESC_DEFAULT 128
203 #define RTE_TEST_TX_DESC_DEFAULT 512
204 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
205 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
206 
207 #define RTE_PMD_PARAM_UNSET -1
208 /*
209  * Configurable values of RX and TX ring threshold registers.
210  */
211 
212 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
213 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
214 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
215 
216 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
217 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
218 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
219 
220 /*
221  * Configurable value of RX free threshold.
222  */
223 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
224 
225 /*
226  * Configurable value of RX drop enable.
227  */
228 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
229 
230 /*
231  * Configurable value of TX free threshold.
232  */
233 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
234 
235 /*
236  * Configurable value of TX RS bit threshold.
237  */
238 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
239 
240 /*
241  * Configurable value of TX queue flags.
242  */
243 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
244 
245 /*
246  * Receive Side Scaling (RSS) configuration.
247  */
248 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
249 
250 /*
251  * Port topology configuration
252  */
253 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
254 
255 /*
256  * Avoid flushing all the RX queues before starting forwarding.
257  */
258 uint8_t no_flush_rx = 0; /* flush by default */
259 
260 /*
261  * Avoid checking the link status when starting/stopping a port.
262  */
263 uint8_t no_link_check = 0; /* check by default */
264 
265 /*
266  * NIC bypass mode configuration options.
267  */
268 #ifdef RTE_NIC_BYPASS
269 
270 /* The NIC bypass watchdog timeout. */
271 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
272 
273 #endif
274 
275 /*
276  * Ethernet device configuration.
277  */
278 struct rte_eth_rxmode rx_mode = {
279 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
280 	.split_hdr_size = 0,
281 	.header_split   = 0, /**< Header Split disabled. */
282 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
283 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
284 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
285 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
286 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
287 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
288 };
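
/*
 * These RX mode defaults can be overridden from the testpmd command line
 * (see the option handling in parameters.c) before the ports are
 * configured.
 */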
289 
290 struct rte_fdir_conf fdir_conf = {
291 	.mode = RTE_FDIR_MODE_NONE,
292 	.pballoc = RTE_FDIR_PBALLOC_64K,
293 	.status = RTE_FDIR_REPORT_STATUS,
294 	.mask = {
295 		.vlan_tci_mask = 0x0,
296 		.ipv4_mask     = {
297 			.src_ip = 0xFFFFFFFF,
298 			.dst_ip = 0xFFFFFFFF,
299 		},
300 		.ipv6_mask     = {
301 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
302 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
303 		},
304 		.src_port_mask = 0xFFFF,
305 		.dst_port_mask = 0xFFFF,
306 		.mac_addr_byte_mask = 0xFF,
307 		.tunnel_type_mask = 1,
308 		.tunnel_id_mask = 0xFFFFFFFF,
309 	},
310 	.drop_queue = 127,
311 };
312 
313 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
314 
315 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
316 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
317 
318 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
319 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
320 
321 uint16_t nb_tx_queue_stats_mappings = 0;
322 uint16_t nb_rx_queue_stats_mappings = 0;
323 
324 unsigned max_socket = 0;
325 
326 /* Forward function declarations */
327 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
328 static void check_all_ports_link_status(uint32_t port_mask);
329 
330 /*
331  * Check if all the ports are started.
332  * If yes, return positive value. If not, return zero.
333  */
334 static int all_ports_started(void);
335 
336 /*
337  * Find the next enabled port at or after index p (used by FOREACH_PORT).
338  */
339 portid_t
340 find_next_port(portid_t p, struct rte_port *ports, int size)
341 {
342 	if (ports == NULL)
343 		rte_exit(-EINVAL, "failed to find a next port id\n");
344 
345 	while ((p < size) && (ports[p].enabled == 0))
346 		p++;
347 	return p;
348 }
349 
350 /*
351  * Setup default configuration.
352  */
353 static void
354 set_default_fwd_lcores_config(void)
355 {
356 	unsigned int i;
357 	unsigned int nb_lc;
358 	unsigned int sock_num;
359 
360 	nb_lc = 0;
361 	for (i = 0; i < RTE_MAX_LCORE; i++) {
362 		sock_num = rte_lcore_to_socket_id(i) + 1;
363 		if (sock_num > max_socket) {
364 			if (sock_num > RTE_MAX_NUMA_NODES)
365 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
366 			max_socket = sock_num;
367 		}
368 		if (!rte_lcore_is_enabled(i))
369 			continue;
370 		if (i == rte_get_master_lcore())
371 			continue;
372 		fwd_lcores_cpuids[nb_lc++] = i;
373 	}
374 	nb_lcores = (lcoreid_t) nb_lc;
375 	nb_cfg_lcores = nb_lcores;
376 	nb_fwd_lcores = 1;
377 }
378 
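/*
 * Give each possible peer port a default locally administered MAC address
 * (02:00:00:00:00:<port index>).
 */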
379 static void
380 set_def_peer_eth_addrs(void)
381 {
382 	portid_t i;
383 
384 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
385 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
386 		peer_eth_addrs[i].addr_bytes[5] = i;
387 	}
388 }
389 
390 static void
391 set_default_fwd_ports_config(void)
392 {
393 	portid_t pt_id;
394 
395 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
396 		fwd_ports_ids[pt_id] = pt_id;
397 
398 	nb_cfg_ports = nb_ports;
399 	nb_fwd_ports = nb_ports;
400 }
401 
402 void
403 set_def_fwd_config(void)
404 {
405 	set_default_fwd_lcores_config();
406 	set_def_peer_eth_addrs();
407 	set_default_fwd_ports_config();
408 }
409 
410 /*
411  * Create an mbuf pool for a given socket; done once per socket at init time.
412  */
413 static void
414 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
415 		 unsigned int socket_id)
416 {
417 	char pool_name[RTE_MEMPOOL_NAMESIZE];
418 	struct rte_mempool *rte_mp = NULL;
419 	uint32_t mb_size;
420 
421 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
422 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
423 
424 	RTE_LOG(INFO, USER1,
425 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
426 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
427 
428 #ifdef RTE_LIBRTE_PMD_XENVIRT
429 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
430 		(unsigned) mb_mempool_cache,
431 		sizeof(struct rte_pktmbuf_pool_private),
432 		rte_pktmbuf_pool_init, NULL,
433 		rte_pktmbuf_init, NULL,
434 		socket_id, 0);
435 #endif
436 
437 	/* if the former XEN allocation failed fall back to normal allocation */
438 	if (rte_mp == NULL) {
439 		if (mp_anon != 0) {
440 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
441 				mb_size, (unsigned) mb_mempool_cache,
442 				sizeof(struct rte_pktmbuf_pool_private),
443 				socket_id, 0);
444 
			if (rte_mp != NULL &&
			    rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
			}
			/*
			 * A return of 0 from rte_mempool_populate_anon()
			 * means nothing was populated. Initialize the pool
			 * only when creation and population both succeeded,
			 * so the NULL check below reports the failure
			 * instead of dereferencing a NULL pool here.
			 */
			if (rte_mp != NULL) {
				rte_pktmbuf_pool_init(rte_mp, NULL);
				rte_mempool_obj_iter(rte_mp,
						rte_pktmbuf_init, NULL);
			}
451 		} else {
452 			/* wrapper to rte_mempool_create() */
453 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
454 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
455 		}
456 	}
457 
458 	if (rte_mp == NULL) {
459 		rte_exit(EXIT_FAILURE,
460 			"Creation of mbuf pool for socket %u failed: %s\n",
461 			socket_id, rte_strerror(rte_errno));
462 	} else if (verbose_level > 0) {
463 		rte_mempool_dump(stdout, rte_mp);
464 	}
465 }
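
/*
 * Pool names encode the socket ID (see mbuf_poolname_build() above); this
 * is how mbuf_pool_find() later retrieves the right pool for each lcore.
 */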
466 
467 /*
468  * Check whether a given socket ID is valid in NUMA mode;
469  * return 0 if valid, -1 otherwise.
470  */
471 static int
472 check_socket_id(const unsigned int socket_id)
473 {
474 	static int warning_once = 0;
475 
476 	if (socket_id >= max_socket) {
477 		if (!warning_once && numa_support)
478 			printf("Warning: NUMA should be configured manually by"
479 			       " using --port-numa-config and"
480 			       " --ring-numa-config parameters along with"
481 			       " --numa.\n");
482 		warning_once = 1;
483 		return -1;
484 	}
485 	return 0;
486 }
487 
488 static void
489 init_config(void)
490 {
491 	portid_t pid;
492 	struct rte_port *port;
493 	struct rte_mempool *mbp;
494 	unsigned int nb_mbuf_per_pool;
495 	lcoreid_t  lc_id;
496 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
497 
498 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
499 	/* Configuration of logical cores. */
500 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
501 				sizeof(struct fwd_lcore *) * nb_lcores,
502 				RTE_CACHE_LINE_SIZE);
503 	if (fwd_lcores == NULL) {
504 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
505 							"failed\n", nb_lcores);
506 	}
507 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
508 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
509 					       sizeof(struct fwd_lcore),
510 					       RTE_CACHE_LINE_SIZE);
511 		if (fwd_lcores[lc_id] == NULL) {
512 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
513 								"failed\n");
514 		}
515 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
516 	}
517 
518 	/*
519 	 * Create mbuf pools.
520 	 * If NUMA support is disabled, create a single pool of mbufs in
521 	 * socket 0 memory by default.
522 	 * Otherwise, create one pool of mbufs per detected CPU socket.
523 	 *
524 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
525 	 * nb_txd can be configured at run time.
526 	 */
527 	if (param_total_num_mbufs)
528 		nb_mbuf_per_pool = param_total_num_mbufs;
529 	else {
530 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
531 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
532 
533 		if (!numa_support)
534 			nb_mbuf_per_pool =
535 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
536 	}
537 
538 	if (!numa_support) {
539 		if (socket_num == UMA_NO_CONFIG)
540 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
541 		else
542 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
543 						 socket_num);
544 	}
545 
546 	FOREACH_PORT(pid, ports) {
547 		port = &ports[pid];
548 		rte_eth_dev_info_get(pid, &port->dev_info);
549 
550 		if (numa_support) {
551 			if (port_numa[pid] != NUMA_NO_CONFIG)
552 				port_per_socket[port_numa[pid]]++;
553 			else {
554 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
555 
556 				/* if socket_id is invalid, set to 0 */
557 				if (check_socket_id(socket_id) < 0)
558 					socket_id = 0;
559 				port_per_socket[socket_id]++;
560 			}
561 		}
562 
563 		/* set flag to initialize port/queue */
564 		port->need_reconfig = 1;
565 		port->need_reconfig_queues = 1;
566 	}
567 
568 	if (numa_support) {
569 		uint8_t i;
570 		unsigned int nb_mbuf;
571 
572 		if (param_total_num_mbufs)
573 			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
574 
575 		for (i = 0; i < max_socket; i++) {
576 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
577 			if (nb_mbuf)
578 				mbuf_pool_create(mbuf_data_size,
579 						nb_mbuf, i);
580 		}
581 	}
582 	init_port_config();
583 
584 	/*
585 	 * Record which mbuf pool each logical core should use, if needed.
586 	 */
587 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
588 		mbp = mbuf_pool_find(
589 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
590 
591 		if (mbp == NULL)
592 			mbp = mbuf_pool_find(0);
593 		fwd_lcores[lc_id]->mbp = mbp;
594 	}
595 
596 	/* Configuration of packet forwarding streams. */
597 	if (init_fwd_streams() < 0)
598 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
599 
600 	fwd_config_setup();
601 }
602 
603 
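/*
 * Re-initialize the configuration of a port attached at run time
 * (see attach_port() below) on the given NUMA socket.
 */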
604 void
605 reconfig(portid_t new_port_id, unsigned socket_id)
606 {
607 	struct rte_port *port;
608 
609 	/* Reconfiguration of Ethernet ports. */
610 	port = &ports[new_port_id];
611 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
612 
613 	/* set flag to initialize port/queue */
614 	port->need_reconfig = 1;
615 	port->need_reconfig_queues = 1;
616 	port->socket_id = socket_id;
617 
618 	init_port_config();
619 }
620 
621 
622 int
623 init_fwd_streams(void)
624 {
625 	portid_t pid;
626 	struct rte_port *port;
627 	streamid_t sm_id, nb_fwd_streams_new;
628 	queueid_t q;
629 
630 	/* Set the socket ID according to whether NUMA is enabled. */
631 	FOREACH_PORT(pid, ports) {
632 		port = &ports[pid];
633 		if (nb_rxq > port->dev_info.max_rx_queues) {
634 			printf("Fail: nb_rxq(%d) is greater than "
635 				"max_rx_queues(%d)\n", nb_rxq,
636 				port->dev_info.max_rx_queues);
637 			return -1;
638 		}
639 		if (nb_txq > port->dev_info.max_tx_queues) {
640 			printf("Fail: nb_txq(%d) is greater than "
641 				"max_tx_queues(%d)\n", nb_txq,
642 				port->dev_info.max_tx_queues);
643 			return -1;
644 		}
645 		if (numa_support) {
646 			if (port_numa[pid] != NUMA_NO_CONFIG)
647 				port->socket_id = port_numa[pid];
648 			else {
649 				port->socket_id = rte_eth_dev_socket_id(pid);
650 
651 				/* if socket_id is invalid, set to 0 */
652 				if (check_socket_id(port->socket_id) < 0)
653 					port->socket_id = 0;
654 			}
655 		}
656 		else {
657 			if (socket_num == UMA_NO_CONFIG)
658 				port->socket_id = 0;
659 			else
660 				port->socket_id = socket_num;
661 		}
662 	}
663 
664 	q = RTE_MAX(nb_rxq, nb_txq);
665 	if (q == 0) {
666 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
667 		return -1;
668 	}
669 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
670 	if (nb_fwd_streams_new == nb_fwd_streams)
671 		return 0;
672 	/* clear the old */
673 	if (fwd_streams != NULL) {
674 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
675 			if (fwd_streams[sm_id] == NULL)
676 				continue;
677 			rte_free(fwd_streams[sm_id]);
678 			fwd_streams[sm_id] = NULL;
679 		}
680 		rte_free(fwd_streams);
681 		fwd_streams = NULL;
682 	}
683 
684 	/* init new */
685 	nb_fwd_streams = nb_fwd_streams_new;
686 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
687 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
688 	if (fwd_streams == NULL)
689 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
690 						"failed\n", nb_fwd_streams);
691 
692 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
693 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
694 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
695 		if (fwd_streams[sm_id] == NULL)
696 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
697 								" failed\n");
698 	}
699 
700 	return 0;
701 }
702 
703 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
704 static void
705 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
706 {
707 	unsigned int total_burst;
708 	unsigned int nb_burst;
709 	unsigned int burst_stats[3];
710 	uint16_t pktnb_stats[3];
711 	uint16_t nb_pkt;
712 	int burst_percent[3];
713 
714 	/*
715 	 * First compute the total number of packet bursts and the
716 	 * two highest numbers of bursts of the same number of packets.
717 	 */
718 	total_burst = 0;
719 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
720 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
721 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
722 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
723 		if (nb_burst == 0)
724 			continue;
725 		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* also track the second-highest burst count, as the
			 * display logic below relies on it */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
732 	}
733 	if (total_burst == 0)
734 		return;
735 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
736 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
737 	       burst_percent[0], (int) pktnb_stats[0]);
738 	if (burst_stats[0] == total_burst) {
739 		printf("]\n");
740 		return;
741 	}
742 	if (burst_stats[0] + burst_stats[1] == total_burst) {
743 		printf(" + %d%% of %d pkts]\n",
744 		       100 - burst_percent[0], pktnb_stats[1]);
745 		return;
746 	}
747 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
748 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
749 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
750 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
751 		return;
752 	}
753 	printf(" + %d%% of %d pkts + %d%% of others]\n",
754 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
755 }
756 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
757 
758 static void
759 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
760 {
761 	struct rte_port *port;
762 	uint8_t i;
763 
764 	static const char *fwd_stats_border = "----------------------";
765 
766 	port = &ports[port_id];
767 	printf("\n  %s Forward statistics for port %-2d %s\n",
768 	       fwd_stats_border, port_id, fwd_stats_border);
769 
770 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
771 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
772 		       "%-"PRIu64"\n",
773 		       stats->ipackets, stats->imissed,
774 		       (uint64_t) (stats->ipackets + stats->imissed));
775 
776 		if (cur_fwd_eng == &csum_fwd_engine)
777 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
778 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
779 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
780 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
781 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
782 		}
783 
784 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
785 		       "%-"PRIu64"\n",
786 		       stats->opackets, port->tx_dropped,
787 		       (uint64_t) (stats->opackets + port->tx_dropped));
788 	}
789 	else {
790 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
791 		       "%14"PRIu64"\n",
792 		       stats->ipackets, stats->imissed,
793 		       (uint64_t) (stats->ipackets + stats->imissed));
794 
795 		if (cur_fwd_eng == &csum_fwd_engine)
796 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
797 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
798 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
799 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
800 			printf("  RX-nombufs:             %14"PRIu64"\n",
801 			       stats->rx_nombuf);
802 		}
803 
804 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
805 		       "%14"PRIu64"\n",
806 		       stats->opackets, port->tx_dropped,
807 		       (uint64_t) (stats->opackets + port->tx_dropped));
808 	}
809 
810 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
811 	if (port->rx_stream)
812 		pkt_burst_stats_display("RX",
813 			&port->rx_stream->rx_burst_stats);
814 	if (port->tx_stream)
815 		pkt_burst_stats_display("TX",
816 			&port->tx_stream->tx_burst_stats);
817 #endif
818 
819 	if (port->rx_queue_stats_mapping_enabled) {
820 		printf("\n");
821 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
822 			printf("  Stats reg %2d RX-packets:%14"PRIu64
823 			       "     RX-errors:%14"PRIu64
824 			       "    RX-bytes:%14"PRIu64"\n",
825 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
826 		}
827 		printf("\n");
828 	}
829 	if (port->tx_queue_stats_mapping_enabled) {
830 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
831 			printf("  Stats reg %2d TX-packets:%14"PRIu64
832 			       "                                 TX-bytes:%14"PRIu64"\n",
833 			       i, stats->q_opackets[i], stats->q_obytes[i]);
834 		}
835 	}
836 
837 	printf("  %s--------------------------------%s\n",
838 	       fwd_stats_border, fwd_stats_border);
839 }
840 
841 static void
842 fwd_stream_stats_display(streamid_t stream_id)
843 {
844 	struct fwd_stream *fs;
845 	static const char *fwd_top_stats_border = "-------";
846 
847 	fs = fwd_streams[stream_id];
848 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
849 	    (fs->fwd_dropped == 0))
850 		return;
851 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
852 	       "TX Port=%2d/Queue=%2d %s\n",
853 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
854 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
855 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
856 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
857 
858 	/* if checksum mode */
859 	if (cur_fwd_eng == &csum_fwd_engine) {
860 	       printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
861 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
862 	}
863 
864 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
865 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
866 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
867 #endif
868 }
869 
870 static void
871 flush_fwd_rx_queues(void)
872 {
873 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
874 	portid_t  rxp;
875 	portid_t port_id;
876 	queueid_t rxq;
877 	uint16_t  nb_rx;
878 	uint16_t  i;
879 	uint8_t   j;
880 
881 	for (j = 0; j < 2; j++) {
882 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
883 			for (rxq = 0; rxq < nb_rxq; rxq++) {
884 				port_id = fwd_ports_ids[rxp];
885 				do {
886 					nb_rx = rte_eth_rx_burst(port_id, rxq,
887 						pkts_burst, MAX_PKT_BURST);
888 					for (i = 0; i < nb_rx; i++)
889 						rte_pktmbuf_free(pkts_burst[i]);
890 				} while (nb_rx > 0);
891 			}
892 		}
893 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
894 	}
895 }
896 
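/*
 * Per-lcore forwarding loop: apply the engine's packet_fwd callback to
 * every stream assigned to this lcore until fc->stopped is set.
 */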
897 static void
898 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
899 {
900 	struct fwd_stream **fsm;
901 	streamid_t nb_fs;
902 	streamid_t sm_id;
903 
904 	fsm = &fwd_streams[fc->stream_idx];
905 	nb_fs = fc->stream_nb;
906 	do {
907 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
908 			(*pkt_fwd)(fsm[sm_id]);
909 	} while (! fc->stopped);
910 }
911 
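/*
 * Entry point launched on each forwarding lcore: run the packet_fwd
 * callback of the currently configured forwarding engine.
 */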
912 static int
913 start_pkt_forward_on_core(void *fwd_arg)
914 {
915 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
916 			     cur_fwd_config.fwd_eng->packet_fwd);
917 	return 0;
918 }
919 
920 /*
921  * Run the TXONLY packet forwarding engine to send a single burst of packets.
922  * Used to start communication flows in network loopback test configurations.
923  */
924 static int
925 run_one_txonly_burst_on_core(void *fwd_arg)
926 {
927 	struct fwd_lcore *fwd_lc;
928 	struct fwd_lcore tmp_lcore;
929 
930 	fwd_lc = (struct fwd_lcore *) fwd_arg;
931 	tmp_lcore = *fwd_lc;
932 	tmp_lcore.stopped = 1;
933 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
934 	return 0;
935 }
936 
937 /*
938  * Launch packet forwarding:
939  *     - Setup per-port forwarding context.
940  *     - Launch logical cores with their forwarding configuration.
941  */
942 static void
943 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
944 {
945 	port_fwd_begin_t port_fwd_begin;
946 	unsigned int i;
947 	unsigned int lc_id;
948 	int diag;
949 
950 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
951 	if (port_fwd_begin != NULL) {
952 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
953 			(*port_fwd_begin)(fwd_ports_ids[i]);
954 	}
955 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
956 		lc_id = fwd_lcores_cpuids[i];
957 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
958 			fwd_lcores[i]->stopped = 0;
959 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
960 						     fwd_lcores[i], lc_id);
961 			if (diag != 0)
962 				printf("launch lcore %u failed - diag=%d\n",
963 				       lc_id, diag);
964 		}
965 	}
966 }
967 
968 /*
969  * Launch packet forwarding configuration.
970  */
971 void
972 start_packet_forwarding(int with_tx_first)
973 {
974 	port_fwd_begin_t port_fwd_begin;
975 	port_fwd_end_t  port_fwd_end;
976 	struct rte_port *port;
977 	unsigned int i;
978 	portid_t   pt_id;
979 	streamid_t sm_id;
980 
981 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
982 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
983 
984 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
985 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
986 
987 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
988 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
989 		(!nb_rxq || !nb_txq))
990 		rte_exit(EXIT_FAILURE,
991 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
992 			cur_fwd_eng->fwd_mode_name);
993 
994 	if (all_ports_started() == 0) {
995 		printf("Not all ports were started\n");
996 		return;
997 	}
998 	if (test_done == 0) {
999 		printf("Packet forwarding already started\n");
1000 		return;
1001 	}
1002 
1003 	if (init_fwd_streams() < 0) {
1004 		printf("Fail from init_fwd_streams()\n");
1005 		return;
1006 	}
1007 
1008 	if (dcb_test) {
1009 		for (i = 0; i < nb_fwd_ports; i++) {
1010 			pt_id = fwd_ports_ids[i];
1011 			port = &ports[pt_id];
1012 			if (!port->dcb_flag) {
1013 				printf("In DCB mode, all forwarding ports must "
1014                                        "be configured in this mode.\n");
1015 				return;
1016 			}
1017 		}
1018 		if (nb_fwd_lcores == 1) {
1019 			printf("In DCB mode, the number of forwarding cores "
1020 			       "should be larger than 1.\n");
1021 			return;
1022 		}
1023 	}
1024 	test_done = 0;
1025 
1026 	if (!no_flush_rx)
1027 		flush_fwd_rx_queues();
1028 
1029 	fwd_config_setup();
1030 	pkt_fwd_config_display(&cur_fwd_config);
1031 	rxtx_config_display();
1032 
1033 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1034 		pt_id = fwd_ports_ids[i];
1035 		port = &ports[pt_id];
1036 		rte_eth_stats_get(pt_id, &port->stats);
1037 		port->tx_dropped = 0;
1038 
1039 		map_port_queue_stats_mapping_registers(pt_id, port);
1040 	}
1041 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1042 		fwd_streams[sm_id]->rx_packets = 0;
1043 		fwd_streams[sm_id]->tx_packets = 0;
1044 		fwd_streams[sm_id]->fwd_dropped = 0;
1045 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1046 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1047 
1048 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1049 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1050 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1051 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1052 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1053 #endif
1054 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1055 		fwd_streams[sm_id]->core_cycles = 0;
1056 #endif
1057 	}
1058 	if (with_tx_first) {
1059 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1060 		if (port_fwd_begin != NULL) {
1061 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1062 				(*port_fwd_begin)(fwd_ports_ids[i]);
1063 		}
1064 		while (with_tx_first--) {
1065 			launch_packet_forwarding(
1066 					run_one_txonly_burst_on_core);
1067 			rte_eal_mp_wait_lcore();
1068 		}
1069 		port_fwd_end = tx_only_engine.port_fwd_end;
1070 		if (port_fwd_end != NULL) {
1071 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1072 				(*port_fwd_end)(fwd_ports_ids[i]);
1073 		}
1074 	}
1075 	launch_packet_forwarding(start_pkt_forward_on_core);
1076 }
1077 
1078 void
1079 stop_packet_forwarding(void)
1080 {
1081 	struct rte_eth_stats stats;
1082 	struct rte_port *port;
1083 	port_fwd_end_t  port_fwd_end;
1084 	int i;
1085 	portid_t   pt_id;
1086 	streamid_t sm_id;
1087 	lcoreid_t  lc_id;
1088 	uint64_t total_recv;
1089 	uint64_t total_xmit;
1090 	uint64_t total_rx_dropped;
1091 	uint64_t total_tx_dropped;
1092 	uint64_t total_rx_nombuf;
1093 	uint64_t tx_dropped;
1094 	uint64_t rx_bad_ip_csum;
1095 	uint64_t rx_bad_l4_csum;
1096 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1097 	uint64_t fwd_cycles;
1098 #endif
1099 	static const char *acc_stats_border = "+++++++++++++++";
1100 
1101 	if (test_done) {
1102 		printf("Packet forwarding not started\n");
1103 		return;
1104 	}
1105 	printf("Telling cores to stop...");
1106 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1107 		fwd_lcores[lc_id]->stopped = 1;
1108 	printf("\nWaiting for lcores to finish...\n");
1109 	rte_eal_mp_wait_lcore();
1110 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1111 	if (port_fwd_end != NULL) {
1112 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1113 			pt_id = fwd_ports_ids[i];
1114 			(*port_fwd_end)(pt_id);
1115 		}
1116 	}
1117 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1118 	fwd_cycles = 0;
1119 #endif
1120 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1121 		if (cur_fwd_config.nb_fwd_streams >
1122 		    cur_fwd_config.nb_fwd_ports) {
1123 			fwd_stream_stats_display(sm_id);
1124 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1125 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1126 		} else {
1127 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1128 				fwd_streams[sm_id];
1129 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1130 				fwd_streams[sm_id];
1131 		}
1132 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1133 		tx_dropped = (uint64_t) (tx_dropped +
1134 					 fwd_streams[sm_id]->fwd_dropped);
1135 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1136 
1137 		rx_bad_ip_csum =
1138 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1139 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1140 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1141 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1142 							rx_bad_ip_csum;
1143 
1144 		rx_bad_l4_csum =
1145 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1146 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1147 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1148 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1149 							rx_bad_l4_csum;
1150 
1151 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1152 		fwd_cycles = (uint64_t) (fwd_cycles +
1153 					 fwd_streams[sm_id]->core_cycles);
1154 #endif
1155 	}
1156 	total_recv = 0;
1157 	total_xmit = 0;
1158 	total_rx_dropped = 0;
1159 	total_tx_dropped = 0;
1160 	total_rx_nombuf  = 0;
1161 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1162 		pt_id = fwd_ports_ids[i];
1163 
1164 		port = &ports[pt_id];
1165 		rte_eth_stats_get(pt_id, &stats);
1166 		stats.ipackets -= port->stats.ipackets;
1167 		port->stats.ipackets = 0;
1168 		stats.opackets -= port->stats.opackets;
1169 		port->stats.opackets = 0;
1170 		stats.ibytes   -= port->stats.ibytes;
1171 		port->stats.ibytes = 0;
1172 		stats.obytes   -= port->stats.obytes;
1173 		port->stats.obytes = 0;
1174 		stats.imissed  -= port->stats.imissed;
1175 		port->stats.imissed = 0;
1176 		stats.oerrors  -= port->stats.oerrors;
1177 		port->stats.oerrors = 0;
1178 		stats.rx_nombuf -= port->stats.rx_nombuf;
1179 		port->stats.rx_nombuf = 0;
1180 
1181 		total_recv += stats.ipackets;
1182 		total_xmit += stats.opackets;
1183 		total_rx_dropped += stats.imissed;
1184 		total_tx_dropped += port->tx_dropped;
1185 		total_rx_nombuf  += stats.rx_nombuf;
1186 
1187 		fwd_port_stats_display(pt_id, &stats);
1188 	}
1189 	printf("\n  %s Accumulated forward statistics for all ports"
1190 	       "%s\n",
1191 	       acc_stats_border, acc_stats_border);
1192 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1193 	       "%-"PRIu64"\n"
1194 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1195 	       "%-"PRIu64"\n",
1196 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1197 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1198 	if (total_rx_nombuf > 0)
1199 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1200 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1201 	       "%s\n",
1202 	       acc_stats_border, acc_stats_border);
1203 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1204 	if (total_recv > 0)
1205 		printf("\n  CPU cycles/packet=%u (total cycles="
1206 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1207 		       (unsigned int)(fwd_cycles / total_recv),
1208 		       fwd_cycles, total_recv);
1209 #endif
1210 	printf("\nDone.\n");
1211 	test_done = 1;
1212 }
1213 
1214 void
1215 dev_set_link_up(portid_t pid)
1216 {
1217 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1218 		printf("\nFailed to set link up.\n");
1219 }
1220 
1221 void
1222 dev_set_link_down(portid_t pid)
1223 {
1224 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1225 		printf("\nFailed to set link down.\n");
1226 }
1227 
1228 static int
1229 all_ports_started(void)
1230 {
1231 	portid_t pi;
1232 	struct rte_port *port;
1233 
1234 	FOREACH_PORT(pi, ports) {
1235 		port = &ports[pi];
1236 		/* Check if there is a port which is not started */
1237 		if ((port->port_status != RTE_PORT_STARTED) &&
1238 			(port->slave_flag == 0))
1239 			return 0;
1240 	}
1241 
1242 	/* All non-slave ports are started */
1243 	return 1;
1244 }
1245 
1246 int
1247 all_ports_stopped(void)
1248 {
1249 	portid_t pi;
1250 	struct rte_port *port;
1251 
1252 	FOREACH_PORT(pi, ports) {
1253 		port = &ports[pi];
1254 		if ((port->port_status != RTE_PORT_STOPPED) &&
1255 			(port->slave_flag == 0))
1256 			return 0;
1257 	}
1258 
1259 	return 1;
1260 }
1261 
1262 int
1263 port_is_started(portid_t port_id)
1264 {
1265 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1266 		return 0;
1267 
1268 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1269 		return 0;
1270 
1271 	return 1;
1272 }
1273 
1274 static int
1275 port_is_closed(portid_t port_id)
1276 {
1277 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1278 		return 0;
1279 
1280 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1281 		return 0;
1282 
1283 	return 1;
1284 }
1285 
1286 int
1287 start_port(portid_t pid)
1288 {
1289 	int diag, need_check_link_status = -1;
1290 	portid_t pi;
1291 	queueid_t qi;
1292 	struct rte_port *port;
1293 	struct ether_addr mac_addr;
1294 
1295 	if (port_id_is_invalid(pid, ENABLED_WARN))
1296 		return 0;
1297 
1298 	if (dcb_config)
1299 		dcb_test = 1;
1300 	FOREACH_PORT(pi, ports) {
1301 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1302 			continue;
1303 
1304 		need_check_link_status = 0;
1305 		port = &ports[pi];
1306 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1307 						 RTE_PORT_HANDLING) == 0) {
1308 			printf("Port %d is now not stopped\n", pi);
1309 			continue;
1310 		}
1311 
1312 		if (port->need_reconfig > 0) {
1313 			port->need_reconfig = 0;
1314 
1315 			printf("Configuring Port %d (socket %u)\n", pi,
1316 					port->socket_id);
1317 			/* configure port */
1318 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1319 						&(port->dev_conf));
1320 			if (diag != 0) {
1321 				if (rte_atomic16_cmpset(&(port->port_status),
1322 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1323 					printf("Port %d can not be set back "
1324 							"to stopped\n", pi);
1325 				printf("Fail to configure port %d\n", pi);
1326 				/* try to reconfigure port next time */
1327 				port->need_reconfig = 1;
1328 				return -1;
1329 			}
1330 		}
1331 		if (port->need_reconfig_queues > 0) {
1332 			port->need_reconfig_queues = 0;
1333 			/* setup tx queues */
1334 			for (qi = 0; qi < nb_txq; qi++) {
1335 				if ((numa_support) &&
1336 					(txring_numa[pi] != NUMA_NO_CONFIG))
1337 					diag = rte_eth_tx_queue_setup(pi, qi,
1338 						nb_txd, txring_numa[pi],
1339 						&(port->tx_conf));
1340 				else
1341 					diag = rte_eth_tx_queue_setup(pi, qi,
1342 						nb_txd, port->socket_id,
1343 						&(port->tx_conf));
1344 
1345 				if (diag == 0)
1346 					continue;
1347 
1348 				/* Fail to setup tx queue, return */
1349 				if (rte_atomic16_cmpset(&(port->port_status),
1350 							RTE_PORT_HANDLING,
1351 							RTE_PORT_STOPPED) == 0)
1352 					printf("Port %d can not be set back "
1353 							"to stopped\n", pi);
1354 				printf("Fail to configure port %d tx queues\n", pi);
1355 				/* try to reconfigure queues next time */
1356 				port->need_reconfig_queues = 1;
1357 				return -1;
1358 			}
1359 			/* setup rx queues */
1360 			for (qi = 0; qi < nb_rxq; qi++) {
1361 				if ((numa_support) &&
1362 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1363 					struct rte_mempool * mp =
1364 						mbuf_pool_find(rxring_numa[pi]);
1365 					if (mp == NULL) {
1366 						printf("Failed to set up RX queue: "
1367 							"no mempool allocated "
1368 							"on socket %d\n",
1369 							rxring_numa[pi]);
1370 						return -1;
1371 					}
1372 
1373 					diag = rte_eth_rx_queue_setup(pi, qi,
1374 					     nb_rxd, rxring_numa[pi],
1375 					     &(port->rx_conf), mp);
1376 				} else {
1377 					struct rte_mempool *mp =
1378 						mbuf_pool_find(port->socket_id);
1379 					if (mp == NULL) {
1380 						printf("Failed to set up RX queue: "
1381 							"no mempool allocated "
1382 							"on socket %d\n",
1383 							port->socket_id);
1384 						return -1;
1385 					}
1386 					diag = rte_eth_rx_queue_setup(pi, qi,
1387 					     nb_rxd, port->socket_id,
1388 					     &(port->rx_conf), mp);
1389 				}
1390 				if (diag == 0)
1391 					continue;
1392 
1393 				/* Fail to setup rx queue, return */
1394 				if (rte_atomic16_cmpset(&(port->port_status),
1395 							RTE_PORT_HANDLING,
1396 							RTE_PORT_STOPPED) == 0)
1397 					printf("Port %d can not be set back "
1398 							"to stopped\n", pi);
1399 				printf("Fail to configure port %d rx queues\n", pi);
1400 				/* try to reconfigure queues next time */
1401 				port->need_reconfig_queues = 1;
1402 				return -1;
1403 			}
1404 		}
1405 		/* start port */
1406 		if (rte_eth_dev_start(pi) < 0) {
1407 			printf("Fail to start port %d\n", pi);
1408 
1409 			/* Fail to setup rx queue, return */
1410 			if (rte_atomic16_cmpset(&(port->port_status),
1411 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1412 				printf("Port %d can not be set back to "
1413 							"stopped\n", pi);
1414 			continue;
1415 		}
1416 
1417 		if (rte_atomic16_cmpset(&(port->port_status),
1418 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1419 			printf("Port %d can not be set into started\n", pi);
1420 
1421 		rte_eth_macaddr_get(pi, &mac_addr);
1422 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1423 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1424 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1425 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1426 
1427 		/* at least one port started; need to check link status */
1428 		need_check_link_status = 1;
1429 	}
1430 
1431 	if (need_check_link_status == 1 && !no_link_check)
1432 		check_all_ports_link_status(RTE_PORT_ALL);
1433 	else if (need_check_link_status == 0)
1434 		printf("Please stop the ports first\n");
1435 
1436 	printf("Done\n");
1437 	return 0;
1438 }
1439 
1440 void
1441 stop_port(portid_t pid)
1442 {
1443 	portid_t pi;
1444 	struct rte_port *port;
1445 	int need_check_link_status = 0;
1446 
1447 	if (dcb_test) {
1448 		dcb_test = 0;
1449 		dcb_config = 0;
1450 	}
1451 
1452 	if (port_id_is_invalid(pid, ENABLED_WARN))
1453 		return;
1454 
1455 	printf("Stopping ports...\n");
1456 
1457 	FOREACH_PORT(pi, ports) {
1458 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1459 			continue;
1460 
1461 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1462 			printf("Please remove port %d from forwarding configuration.\n", pi);
1463 			continue;
1464 		}
1465 
1466 		if (port_is_bonding_slave(pi)) {
1467 			printf("Please remove port %d from bonded device.\n", pi);
1468 			continue;
1469 		}
1470 
1471 		port = &ports[pi];
1472 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1473 						RTE_PORT_HANDLING) == 0)
1474 			continue;
1475 
1476 		rte_eth_dev_stop(pi);
1477 
1478 		if (rte_atomic16_cmpset(&(port->port_status),
1479 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1480 			printf("Port %d can not be set into stopped\n", pi);
1481 		need_check_link_status = 1;
1482 	}
1483 	if (need_check_link_status && !no_link_check)
1484 		check_all_ports_link_status(RTE_PORT_ALL);
1485 
1486 	printf("Done\n");
1487 }
1488 
1489 void
1490 close_port(portid_t pid)
1491 {
1492 	portid_t pi;
1493 	struct rte_port *port;
1494 
1495 	if (port_id_is_invalid(pid, ENABLED_WARN))
1496 		return;
1497 
1498 	printf("Closing ports...\n");
1499 
1500 	FOREACH_PORT(pi, ports) {
1501 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1502 			continue;
1503 
1504 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1505 			printf("Please remove port %d from forwarding configuration.\n", pi);
1506 			continue;
1507 		}
1508 
1509 		if (port_is_bonding_slave(pi)) {
1510 			printf("Please remove port %d from bonded device.\n", pi);
1511 			continue;
1512 		}
1513 
1514 		port = &ports[pi];
1515 		if (rte_atomic16_cmpset(&(port->port_status),
1516 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1517 			printf("Port %d is already closed\n", pi);
1518 			continue;
1519 		}
1520 
1521 		if (rte_atomic16_cmpset(&(port->port_status),
1522 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1523 			printf("Port %d is now not stopped\n", pi);
1524 			continue;
1525 		}
1526 
1527 		rte_eth_dev_close(pi);
1528 
1529 		if (rte_atomic16_cmpset(&(port->port_status),
1530 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1531 			printf("Port %d cannot be set to closed\n", pi);
1532 	}
1533 
1534 	printf("Done\n");
1535 }
1536 
1537 void
1538 attach_port(char *identifier)
1539 {
1540 	portid_t pi = 0;
1541 	unsigned int socket_id;
1542 
1543 	printf("Attaching a new port...\n");
1544 
1545 	if (identifier == NULL) {
1546 		printf("Invalid parameters are specified\n");
1547 		return;
1548 	}
1549 
1550 	if (rte_eth_dev_attach(identifier, &pi))
1551 		return;
1552 
1553 	ports[pi].enabled = 1;
1554 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1555 	/* if socket_id is invalid, set to 0 */
1556 	if (check_socket_id(socket_id) < 0)
1557 		socket_id = 0;
1558 	reconfig(pi, socket_id);
1559 	rte_eth_promiscuous_enable(pi);
1560 
1561 	nb_ports = rte_eth_dev_count();
1562 
1563 	ports[pi].port_status = RTE_PORT_STOPPED;
1564 
1565 	printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1566 	printf("Done\n");
1567 }
1568 
1569 void
1570 detach_port(uint8_t port_id)
1571 {
1572 	char name[RTE_ETH_NAME_MAX_LEN];
1573 
1574 	printf("Detaching a port...\n");
1575 
1576 	if (!port_is_closed(port_id)) {
1577 		printf("Please close port first\n");
1578 		return;
1579 	}
1580 
1581 	if (rte_eth_dev_detach(port_id, name))
1582 		return;
1583 
1584 	ports[port_id].enabled = 0;
1585 	nb_ports = rte_eth_dev_count();
1586 
1587 	printf("Port '%s' is detached. Total number of ports is now %d\n",
1588 			name, nb_ports);
1589 	printf("Done\n");
1590 	return;
1591 }
1592 
1593 void
1594 pmd_test_exit(void)
1595 {
1596 	portid_t pt_id;
1597 
1598 	if (test_done == 0)
1599 		stop_packet_forwarding();
1600 
1601 	if (ports != NULL) {
1602 		no_link_check = 1;
1603 		FOREACH_PORT(pt_id, ports) {
1604 			printf("\nShutting down port %d...\n", pt_id);
1605 			fflush(stdout);
1606 			stop_port(pt_id);
1607 			close_port(pt_id);
1608 		}
1609 	}
1610 	printf("\nBye...\n");
1611 }
1612 
1613 typedef void (*cmd_func_t)(void);
1614 struct pmd_test_command {
1615 	const char *cmd_name;
1616 	cmd_func_t cmd_func;
1617 };
1618 
1619 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1620 
1621 /* Check the link status of all ports for up to 9 s, and print the final status */
1622 static void
1623 check_all_ports_link_status(uint32_t port_mask)
1624 {
1625 #define CHECK_INTERVAL 100 /* 100ms */
1626 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1627 	uint8_t portid, count, all_ports_up, print_flag = 0;
1628 	struct rte_eth_link link;
1629 
1630 	printf("Checking link statuses...\n");
1631 	fflush(stdout);
1632 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1633 		all_ports_up = 1;
1634 		FOREACH_PORT(portid, ports) {
1635 			if ((port_mask & (1 << portid)) == 0)
1636 				continue;
1637 			memset(&link, 0, sizeof(link));
1638 			rte_eth_link_get_nowait(portid, &link);
1639 			/* print link status if flag set */
1640 			if (print_flag == 1) {
1641 				if (link.link_status)
1642 					printf("Port %d Link Up - speed %u "
1643 						"Mbps - %s\n", (uint8_t)portid,
1644 						(unsigned)link.link_speed,
1645 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1646 					("full-duplex") : ("half-duplex"));
1647 				else
1648 					printf("Port %d Link Down\n",
1649 						(uint8_t)portid);
1650 				continue;
1651 			}
1652 			/* clear all_ports_up flag if any link down */
1653 			if (link.link_status == ETH_LINK_DOWN) {
1654 				all_ports_up = 0;
1655 				break;
1656 			}
1657 		}
1658 		/* after finally printing all link status, get out */
1659 		if (print_flag == 1)
1660 			break;
1661 
1662 		if (all_ports_up == 0) {
1663 			fflush(stdout);
1664 			rte_delay_ms(CHECK_INTERVAL);
1665 		}
1666 
1667 		/* set the print_flag if all ports up or timeout */
1668 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1669 			print_flag = 1;
1670 		}
1671 	}
1672 }
1673 
1674 static int
1675 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1676 {
1677 	uint16_t i;
1678 	int diag;
1679 	uint8_t mapping_found = 0;
1680 
1681 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1682 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1683 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1684 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1685 					tx_queue_stats_mappings[i].queue_id,
1686 					tx_queue_stats_mappings[i].stats_counter_id);
1687 			if (diag != 0)
1688 				return diag;
1689 			mapping_found = 1;
1690 		}
1691 	}
1692 	if (mapping_found)
1693 		port->tx_queue_stats_mapping_enabled = 1;
1694 	return 0;
1695 }
1696 
1697 static int
1698 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1699 {
1700 	uint16_t i;
1701 	int diag;
1702 	uint8_t mapping_found = 0;
1703 
1704 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1705 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1706 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1707 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1708 					rx_queue_stats_mappings[i].queue_id,
1709 					rx_queue_stats_mappings[i].stats_counter_id);
1710 			if (diag != 0)
1711 				return diag;
1712 			mapping_found = 1;
1713 		}
1714 	}
1715 	if (mapping_found)
1716 		port->rx_queue_stats_mapping_enabled = 1;
1717 	return 0;
1718 }
1719 
1720 static void
1721 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1722 {
1723 	int diag = 0;
1724 
1725 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1726 	if (diag != 0) {
1727 		if (diag == -ENOTSUP) {
1728 			port->tx_queue_stats_mapping_enabled = 0;
1729 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1730 		}
1731 		else
1732 			rte_exit(EXIT_FAILURE,
1733 					"set_tx_queue_stats_mapping_registers "
1734 					"failed for port id=%d diag=%d\n",
1735 					pi, diag);
1736 	}
1737 
1738 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1739 	if (diag != 0) {
1740 		if (diag == -ENOTSUP) {
1741 			port->rx_queue_stats_mapping_enabled = 0;
1742 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1743 		}
1744 		else
1745 			rte_exit(EXIT_FAILURE,
1746 					"set_rx_queue_stats_mapping_registers "
1747 					"failed for port id=%d diag=%d\n",
1748 					pi, diag);
1749 	}
1750 }
1751 
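/*
 * Start from the PMD default RX/TX queue configuration and override any
 * threshold explicitly given on the command line.
 */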
1752 static void
1753 rxtx_port_config(struct rte_port *port)
1754 {
1755 	port->rx_conf = port->dev_info.default_rxconf;
1756 	port->tx_conf = port->dev_info.default_txconf;
1757 
1758 	/* Check if any RX/TX parameters have been passed */
1759 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1760 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1761 
1762 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1763 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1764 
1765 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1766 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1767 
1768 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1769 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1770 
1771 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1772 		port->rx_conf.rx_drop_en = rx_drop_en;
1773 
1774 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1775 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1776 
1777 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1778 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1779 
1780 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1781 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1782 
1783 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1784 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1785 
1786 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1787 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1788 
1789 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1790 		port->tx_conf.txq_flags = txq_flags;
1791 }
1792 
1793 void
1794 init_port_config(void)
1795 {
1796 	portid_t pid;
1797 	struct rte_port *port;
1798 
1799 	FOREACH_PORT(pid, ports) {
1800 		port = &ports[pid];
1801 		port->dev_conf.rxmode = rx_mode;
1802 		port->dev_conf.fdir_conf = fdir_conf;
1803 		if (nb_rxq > 1) {
1804 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1805 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1806 		} else {
1807 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1808 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1809 		}
1810 
1811 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1812 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1813 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1814 			else
1815 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1816 		}
1817 
1818 		if (port->dev_info.max_vfs != 0) {
1819 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1820 				port->dev_conf.rxmode.mq_mode =
1821 					ETH_MQ_RX_VMDQ_RSS;
1822 			else
1823 				port->dev_conf.rxmode.mq_mode =
1824 					ETH_MQ_RX_NONE;
1825 
1826 			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1827 		}
1828 
1829 		rxtx_port_config(port);
1830 
1831 		rte_eth_macaddr_get(pid, &port->eth_addr);
1832 
1833 		map_port_queue_stats_mapping_registers(pid, port);
1834 #ifdef RTE_NIC_BYPASS
1835 		rte_eth_dev_bypass_init(pid);
1836 #endif
1837 	}
1838 }
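
/*
 * Summary of the mq_mode selection performed above (descriptive note;
 * the authoritative logic is the code in init_port_config()):
 *
 *	no VFs, no DCB, rss_hf != 0 (i.e. nb_rxq > 1)  ->  ETH_MQ_RX_RSS
 *	no VFs, no DCB, rss_hf == 0                    ->  ETH_MQ_RX_NONE
 *	max_vfs != 0,   rss_hf != 0                    ->  ETH_MQ_RX_VMDQ_RSS
 *	max_vfs != 0,   rss_hf == 0                    ->  ETH_MQ_RX_NONE
 */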

void
set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void
clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t
port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}
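
/*
 * Note added for clarity: the slave flag marks ports that have been
 * enslaved to a bonding device, so that testpmd can avoid configuring
 * or starting them individually -- such ports are managed through the
 * bond master port instead.
 */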

const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};

static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Build up the correct configuration for DCB+VT based on the VLAN
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < num_tcs; i++) {
			rx_conf->dcb_tc[i] = i;
			tx_conf->dcb_tc[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
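
/*
 * Worked example (derived from the code above): with num_tcs == ETH_4_TCS
 * the device is split into 32 pools of 4 queues, and all 32 entries of
 * vlan_tags[] are mapped one tag per pool (pools == 1 << i).  With
 * num_tcs == ETH_8_TCS there are only 16 pools of 8 queues, so just
 * vlan_tags[0..15] are mapped, again one tag per pool.
 */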

int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_eth_dev_info_get(pid, &dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue IDs of the VMDq pools start after the PF queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
		return -1;
	}

	/* Assume all ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		nb_rxq = dev_info.max_rx_queues;
		nb_txq = dev_info.max_tx_queues;
	} else {
		/* If VT is disabled, use all PF queues. */
		if (dev_info.vmdq_pool_base == 0) {
			nb_rxq = dev_info.max_rx_queues;
			nb_txq = dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* Set configuration of DCB in VT mode and DCB in non-VT mode. */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
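
/*
 * Usage sketch (interactive testpmd; the exact command syntax lives in
 * cmdline.c and may differ between releases):
 *
 *	testpmd> port stop 0
 *	testpmd> port config 0 dcb vt off 4 pfc on
 *	testpmd> port start 0
 *
 * which reaches init_port_dcb_config(0, DCB_ENABLED, ETH_4_TCS, 1) and
 * reconfigures the port with 4 traffic classes plus priority flow control.
 */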

static void
init_port(void)
{
	portid_t pid;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}

	/* Enable allocated ports. */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;
}
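
/*
 * Note: rte_zmalloc() returns zeroed memory, so every rte_port field
 * starts at 0; only the first nb_ports entries are flagged enabled,
 * and FOREACH_PORT() iterates over exactly those entries.
 */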

static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
		force_quit();
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
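
/*
 * Re-raising the signal with the default handler restored (SIG_DFL +
 * kill above) terminates the process with the conventional
 * "killed by signal" status once force_quit() has cleaned up, e.g. a
 * parent shell observes exit status 128 + SIGTERM = 143.
 */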

int
main(int argc, char **argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");
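
	/*
	 * rte_eal_init() returned the number of arguments it consumed, so
	 * skipping that many leaves only the application arguments for
	 * launch_args_parse().  Example invocation (EAL arguments before
	 * "--", testpmd arguments after it):
	 *
	 *	./testpmd -c 0x3 -n 4 -- -i --rxq=2 --txq=2
	 */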
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent RSS from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No interactive command line requested; starting packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}