xref: /dpdk/app/test-pmd/testpmd.c (revision a997a33b2a0145ad3e6320ea1fc7df8d51a2fcdf)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 #ifdef RTE_LIBRTE_PDUMP
79 #include <rte_pdump.h>
80 #endif
81 #include <rte_flow.h>
82 
83 #include "testpmd.h"
84 
85 uint16_t verbose_level = 0; /**< Silent by default. */
86 
87 /* use the master core for the command line? */
88 uint8_t interactive = 0;
89 uint8_t auto_start = 0;
90 
91 /*
92  * NUMA support configuration.
93  * When set, the NUMA support attempts to dispatch the allocation of the
94  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
95  * probed ports among the CPU sockets 0 and 1.
96  * Otherwise, all memory is allocated from CPU socket 0.
97  */
98 uint8_t numa_support = 0; /**< No numa support by default */
99 
100 /*
101  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
102  * not configured.
103  */
104 uint8_t socket_num = UMA_NO_CONFIG;
105 
106 /*
107  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
108  */
109 uint8_t mp_anon = 0;
110 
111 /*
112  * Record the Ethernet address of peer target ports to which packets are
113  * forwarded.
114  * Must be instantiated with the Ethernet addresses of the peer traffic generator
115  * ports.
116  */
117 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
118 portid_t nb_peer_eth_addrs = 0;
119 
120 /*
121  * Probed Target Environment.
122  */
123 struct rte_port *ports;	       /**< For all probed ethernet ports. */
124 portid_t nb_ports;             /**< Number of probed ethernet ports. */
125 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
126 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
127 
128 /*
129  * Test Forwarding Configuration.
130  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
131  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
132  */
133 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
134 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
135 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
136 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
137 
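/*
 * Editorial note (not in the original source): at the testpmd> prompt,
 * the "set nbcore <n>" and "set nbport <n>" commands (handled in
 * cmdline.c) adjust nb_fwd_lcores and nb_fwd_ports within the bounds
 * stated above.
 */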
138 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
139 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
140 
141 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
142 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
143 
144 /*
145  * Forwarding engines.
146  */
147 struct fwd_engine *fwd_engines[] = {
148 	&io_fwd_engine,
149 	&mac_fwd_engine,
150 	&mac_swap_engine,
151 	&flow_gen_engine,
152 	&rx_only_engine,
153 	&tx_only_engine,
154 	&csum_fwd_engine,
155 	&icmp_echo_engine,
156 #ifdef RTE_LIBRTE_IEEE1588
157 	&ieee1588_fwd_engine,
158 #endif
159 	NULL,
160 };
161 
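/*
 * Illustrative usage (assumption; the engines themselves live in their
 * own files such as iofwd.c and csumonly.c): the active engine is
 * switched at runtime with the "set fwd <mode>" command, matched against
 * each engine's fwd_mode_name.
 */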
162 struct fwd_config cur_fwd_config;
163 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
164 uint32_t retry_enabled;
165 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
166 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
167 
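/*
 * Sketch (illustration only, compiled out) of how the retry knobs above
 * are meant to be used by a forwarding engine when the TX ring fills up;
 * the real retry logic lives in the individual engines:
 */
#if 0
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts, nb_rx);
	if (unlikely(nb_tx < nb_rx) && retry_enabled) {
		uint32_t retry = 0;

		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts[nb_tx], nb_rx - nb_tx);
		}
	}
#endif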
168 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
169 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
170                                       * specified on command-line. */
171 
172 /*
173  * Configuration of packet segments used by the "txonly" processing engine.
174  */
175 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
176 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
177 	TXONLY_DEF_PACKET_LEN,
178 };
179 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
180 
181 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
182 /**< Split policy for packets to TX. */
183 
184 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
185 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
186 
187 /* whether the current configuration is in DCB mode; 0 means it is not */
188 uint8_t dcb_config = 0;
189 
190 /* Whether DCB is in testing state */
191 uint8_t dcb_test = 0;
192 
193 /*
194  * Configurable number of RX/TX queues.
195  */
196 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
197 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
198 
199 /*
200  * Configurable number of RX/TX ring descriptors.
201  */
202 #define RTE_TEST_RX_DESC_DEFAULT 128
203 #define RTE_TEST_TX_DESC_DEFAULT 512
204 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
205 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
206 
207 #define RTE_PMD_PARAM_UNSET -1
208 /*
209  * Configurable values of RX and TX ring threshold registers.
210  */
211 
212 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
213 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
214 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
215 
216 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
217 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
218 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
219 
220 /*
221  * Configurable value of RX free threshold.
222  */
223 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
224 
225 /*
226  * Configurable value of RX drop enable.
227  */
228 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
229 
230 /*
231  * Configurable value of TX free threshold.
232  */
233 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
234 
235 /*
236  * Configurable value of TX RS bit threshold.
237  */
238 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
239 
240 /*
241  * Configurable value of TX queue flags.
242  */
243 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
244 
245 /*
246  * Receive Side Scaling (RSS) configuration.
247  */
248 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
249 
250 /*
251  * Port topology configuration
252  */
253 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
254 
255 /*
256  * Avoid flushing all the RX streams before forwarding starts.
257  */
258 uint8_t no_flush_rx = 0; /* flush by default */
259 
260 /*
261  * Avoid checking the link status when starting/stopping a port.
262  */
263 uint8_t no_link_check = 0; /* check by default */
264 
265 /*
266  * NIC bypass mode configuration options.
267  */
268 #ifdef RTE_NIC_BYPASS
269 
270 /* The NIC bypass watchdog timeout. */
271 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
272 
273 #endif
274 
275 /*
276  * Ethernet device configuration.
277  */
278 struct rte_eth_rxmode rx_mode = {
279 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
280 	.split_hdr_size = 0,
281 	.header_split   = 0, /**< Header Split disabled. */
282 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
283 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
284 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
285 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
286 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
287 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
288 };
289 
290 struct rte_fdir_conf fdir_conf = {
291 	.mode = RTE_FDIR_MODE_NONE,
292 	.pballoc = RTE_FDIR_PBALLOC_64K,
293 	.status = RTE_FDIR_REPORT_STATUS,
294 	.mask = {
295 		.vlan_tci_mask = 0x0,
296 		.ipv4_mask     = {
297 			.src_ip = 0xFFFFFFFF,
298 			.dst_ip = 0xFFFFFFFF,
299 		},
300 		.ipv6_mask     = {
301 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
302 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
303 		},
304 		.src_port_mask = 0xFFFF,
305 		.dst_port_mask = 0xFFFF,
306 		.mac_addr_byte_mask = 0xFF,
307 		.tunnel_type_mask = 1,
308 		.tunnel_id_mask = 0xFFFFFFFF,
309 	},
310 	.drop_queue = 127,
311 };
312 
313 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
314 
315 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
316 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
317 
318 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
319 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
320 
321 uint16_t nb_tx_queue_stats_mappings = 0;
322 uint16_t nb_rx_queue_stats_mappings = 0;
323 
324 unsigned max_socket = 0;
325 
326 /* Forward function declarations */
327 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
328 static void check_all_ports_link_status(uint32_t port_mask);
329 
330 /*
331  * Check if all the ports are started.
332  * If yes, return a positive value. If not, return zero.
333  */
334 static int all_ports_started(void);
335 
336 /*
337  * Find next enabled port
338  */
339 portid_t
340 find_next_port(portid_t p, struct rte_port *ports, int size)
341 {
342 	if (ports == NULL)
343 		rte_exit(-EINVAL, "failed to find a next port id\n");
344 
345 	while ((p < size) && (ports[p].enabled == 0))
346 		p++;
347 	return p;
348 }
349 
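/*
 * For reference (definition assumed from testpmd.h): the FOREACH_PORT()
 * macro used throughout this file is built on find_next_port(), roughly:
 *
 *	#define FOREACH_PORT(p, ports) \
 *		for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS); \
 *		     p < RTE_MAX_ETHPORTS; \
 *		     p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
 */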
350 /*
351  * Setup default configuration.
352  */
353 static void
354 set_default_fwd_lcores_config(void)
355 {
356 	unsigned int i;
357 	unsigned int nb_lc;
358 	unsigned int sock_num;
359 
360 	nb_lc = 0;
361 	for (i = 0; i < RTE_MAX_LCORE; i++) {
362 		sock_num = rte_lcore_to_socket_id(i) + 1;
363 		if (sock_num > max_socket) {
364 			if (sock_num > RTE_MAX_NUMA_NODES)
365 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
366 			max_socket = sock_num;
367 		}
368 		if (!rte_lcore_is_enabled(i))
369 			continue;
370 		if (i == rte_get_master_lcore())
371 			continue;
372 		fwd_lcores_cpuids[nb_lc++] = i;
373 	}
374 	nb_lcores = (lcoreid_t) nb_lc;
375 	nb_cfg_lcores = nb_lcores;
376 	nb_fwd_lcores = 1;
377 }
378 
379 static void
380 set_def_peer_eth_addrs(void)
381 {
382 	portid_t i;
383 
384 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
385 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
386 		peer_eth_addrs[i].addr_bytes[5] = i;
387 	}
388 }
389 
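/*
 * Since ETHER_LOCAL_ADMIN_ADDR is the locally-administered bit (0x02)
 * and peer_eth_addrs is a zero-initialized file-scope array, the default
 * peer address of port i works out to 02:00:00:00:00:<i>.
 */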
390 static void
391 set_default_fwd_ports_config(void)
392 {
393 	portid_t pt_id;
394 
395 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
396 		fwd_ports_ids[pt_id] = pt_id;
397 
398 	nb_cfg_ports = nb_ports;
399 	nb_fwd_ports = nb_ports;
400 }
401 
402 void
403 set_def_fwd_config(void)
404 {
405 	set_default_fwd_lcores_config();
406 	set_def_peer_eth_addrs();
407 	set_default_fwd_ports_config();
408 }
409 
410 /*
411  * Configuration initialisation, done once at start-up.
412  */
413 static void
414 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
415 		 unsigned int socket_id)
416 {
417 	char pool_name[RTE_MEMPOOL_NAMESIZE];
418 	struct rte_mempool *rte_mp = NULL;
419 	uint32_t mb_size;
420 
421 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
422 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
423 
424 	RTE_LOG(INFO, USER1,
425 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
426 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
427 
428 #ifdef RTE_LIBRTE_PMD_XENVIRT
429 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
430 		(unsigned) mb_mempool_cache,
431 		sizeof(struct rte_pktmbuf_pool_private),
432 		rte_pktmbuf_pool_init, NULL,
433 		rte_pktmbuf_init, NULL,
434 		socket_id, 0);
435 #endif
436 
437 	/* if the former XEN allocation failed, fall back to normal allocation */
438 	if (rte_mp == NULL) {
439 		if (mp_anon != 0) {
440 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
441 				mb_size, (unsigned) mb_mempool_cache,
442 				sizeof(struct rte_pktmbuf_pool_private),
443 				socket_id, 0);
444 			if (rte_mp == NULL)
445 				goto err;
446 
447 			if (rte_mempool_populate_anon(rte_mp) == 0) {
448 				rte_mempool_free(rte_mp);
449 				rte_mp = NULL;
450 				goto err;
451 			}
452 			rte_pktmbuf_pool_init(rte_mp, NULL);
453 			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
454 		} else {
455 			/* wrapper to rte_mempool_create() */
456 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
457 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
458 		}
459 	}
460 
461 err:
462 	if (rte_mp == NULL) {
463 		rte_exit(EXIT_FAILURE,
464 			"Creation of mbuf pool for socket %u failed: %s\n",
465 			socket_id, rte_strerror(rte_errno));
466 	} else if (verbose_level > 0) {
467 		rte_mempool_dump(stdout, rte_mp);
468 	}
469 }
470 
471 /*
472  * Check whether the given socket id is valid in NUMA mode;
473  * if valid, return 0, else return -1.
474  */
475 static int
476 check_socket_id(const unsigned int socket_id)
477 {
478 	static int warning_once = 0;
479 
480 	if (socket_id >= max_socket) {
481 		if (!warning_once && numa_support)
482 			printf("Warning: NUMA should be configured manually by"
483 			       " using --port-numa-config and"
484 			       " --ring-numa-config parameters along with"
485 			       " --numa.\n");
486 		warning_once = 1;
487 		return -1;
488 	}
489 	return 0;
490 }
491 
492 static void
493 init_config(void)
494 {
495 	portid_t pid;
496 	struct rte_port *port;
497 	struct rte_mempool *mbp;
498 	unsigned int nb_mbuf_per_pool;
499 	lcoreid_t  lc_id;
500 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
501 
502 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
503 	/* Configuration of logical cores. */
504 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
505 				sizeof(struct fwd_lcore *) * nb_lcores,
506 				RTE_CACHE_LINE_SIZE);
507 	if (fwd_lcores == NULL) {
508 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
509 							"failed\n", nb_lcores);
510 	}
511 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
512 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
513 					       sizeof(struct fwd_lcore),
514 					       RTE_CACHE_LINE_SIZE);
515 		if (fwd_lcores[lc_id] == NULL) {
516 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
517 								"failed\n");
518 		}
519 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
520 	}
521 
522 	/*
523 	 * Create pools of mbuf.
524 	 * If NUMA support is disabled, create a single pool of mbuf in
525 	 * socket 0 memory by default.
526 	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
527 	 *
528 	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
529 	 * nb_txd can be configured at run time.
530 	 */
531 	if (param_total_num_mbufs)
532 		nb_mbuf_per_pool = param_total_num_mbufs;
533 	else {
534 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
535 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
536 
537 		if (!numa_support)
538 			nb_mbuf_per_pool =
539 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
540 	}
541 
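	/*
	 * Worked example (constants assumed from testpmd.h, e.g.
	 * RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048 and
	 * MAX_PKT_BURST = 512): with 4 forwarding lcores and the default
	 * 250-mbuf cache, each pool holds 2048 + 4 * 250 + 2048 + 512 =
	 * 5608 mbufs, multiplied by RTE_MAX_ETHPORTS in the single-pool
	 * (UMA) case.
	 */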
542 	if (!numa_support) {
543 		if (socket_num == UMA_NO_CONFIG)
544 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
545 		else
546 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
547 						 socket_num);
548 	}
549 
550 	FOREACH_PORT(pid, ports) {
551 		port = &ports[pid];
552 		rte_eth_dev_info_get(pid, &port->dev_info);
553 
554 		if (numa_support) {
555 			if (port_numa[pid] != NUMA_NO_CONFIG)
556 				port_per_socket[port_numa[pid]]++;
557 			else {
558 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
559 
560 				/* if socket_id is invalid, set to 0 */
561 				if (check_socket_id(socket_id) < 0)
562 					socket_id = 0;
563 				port_per_socket[socket_id]++;
564 			}
565 		}
566 
567 		/* set flag to initialize port/queue */
568 		port->need_reconfig = 1;
569 		port->need_reconfig_queues = 1;
570 	}
571 
572 	if (numa_support) {
573 		uint8_t i;
574 		unsigned int nb_mbuf;
575 
576 		if (param_total_num_mbufs)
577 			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;
578 
579 		for (i = 0; i < max_socket; i++) {
580 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
581 			if (nb_mbuf)
582 				mbuf_pool_create(mbuf_data_size,
583 						nb_mbuf, i);
584 		}
585 	}
586 	init_port_config();
587 
588 	/*
589 	 * Record which mbuf pool each logical core should use, if needed.
590 	 */
591 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
592 		mbp = mbuf_pool_find(
593 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
594 
595 		if (mbp == NULL)
596 			mbp = mbuf_pool_find(0);
597 		fwd_lcores[lc_id]->mbp = mbp;
598 	}
599 
600 	/* Configuration of packet forwarding streams. */
601 	if (init_fwd_streams() < 0)
602 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
603 
604 	fwd_config_setup();
605 }
606 
607 
608 void
609 reconfig(portid_t new_port_id, unsigned socket_id)
610 {
611 	struct rte_port *port;
612 
613 	/* Reconfiguration of Ethernet ports. */
614 	port = &ports[new_port_id];
615 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
616 
617 	/* set flag to initialize port/queue */
618 	port->need_reconfig = 1;
619 	port->need_reconfig_queues = 1;
620 	port->socket_id = socket_id;
621 
622 	init_port_config();
623 }
624 
625 
626 int
627 init_fwd_streams(void)
628 {
629 	portid_t pid;
630 	struct rte_port *port;
631 	streamid_t sm_id, nb_fwd_streams_new;
632 	queueid_t q;
633 
634 	/* set socket id according to NUMA mode, if enabled */
635 	FOREACH_PORT(pid, ports) {
636 		port = &ports[pid];
637 		if (nb_rxq > port->dev_info.max_rx_queues) {
638 			printf("Fail: nb_rxq(%d) is greater than "
639 				"max_rx_queues(%d)\n", nb_rxq,
640 				port->dev_info.max_rx_queues);
641 			return -1;
642 		}
643 		if (nb_txq > port->dev_info.max_tx_queues) {
644 			printf("Fail: nb_txq(%d) is greater than "
645 				"max_tx_queues(%d)\n", nb_txq,
646 				port->dev_info.max_tx_queues);
647 			return -1;
648 		}
649 		if (numa_support) {
650 			if (port_numa[pid] != NUMA_NO_CONFIG)
651 				port->socket_id = port_numa[pid];
652 			else {
653 				port->socket_id = rte_eth_dev_socket_id(pid);
654 
655 				/* if socket_id is invalid, set to 0 */
656 				if (check_socket_id(port->socket_id) < 0)
657 					port->socket_id = 0;
658 			}
659 		}
660 		else {
661 			if (socket_num == UMA_NO_CONFIG)
662 				port->socket_id = 0;
663 			else
664 				port->socket_id = socket_num;
665 		}
666 	}
667 
668 	q = RTE_MAX(nb_rxq, nb_txq);
669 	if (q == 0) {
670 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
671 		return -1;
672 	}
673 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
674 	if (nb_fwd_streams_new == nb_fwd_streams)
675 		return 0;
676 	/* clear the old */
677 	if (fwd_streams != NULL) {
678 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
679 			if (fwd_streams[sm_id] == NULL)
680 				continue;
681 			rte_free(fwd_streams[sm_id]);
682 			fwd_streams[sm_id] = NULL;
683 		}
684 		rte_free(fwd_streams);
685 		fwd_streams = NULL;
686 	}
687 
688 	/* init new */
689 	nb_fwd_streams = nb_fwd_streams_new;
690 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
691 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
692 	if (fwd_streams == NULL)
693 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
694 						"failed\n", nb_fwd_streams);
695 
696 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
697 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
698 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
699 		if (fwd_streams[sm_id] == NULL)
700 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
701 								" failed\n");
702 	}
703 
704 	return 0;
705 }
706 
707 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
708 static void
709 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
710 {
711 	unsigned int total_burst;
712 	unsigned int nb_burst;
713 	unsigned int burst_stats[3];
714 	uint16_t pktnb_stats[3];
715 	uint16_t nb_pkt;
716 	int burst_percent[3];
717 
718 	/*
719 	 * First compute the total number of packet bursts and the
720 	 * two highest numbers of bursts of the same number of packets.
721 	 */
722 	total_burst = 0;
723 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
724 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
725 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
726 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
727 		if (nb_burst == 0)
728 			continue;
729 		total_burst += nb_burst;
730 		if (nb_burst > burst_stats[0]) {
731 			burst_stats[1] = burst_stats[0];
732 			pktnb_stats[1] = pktnb_stats[0];
733 			burst_stats[0] = nb_burst;
734 			pktnb_stats[0] = nb_pkt;
735 		}
736 	}
737 	if (total_burst == 0)
738 		return;
739 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
740 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
741 	       burst_percent[0], (int) pktnb_stats[0]);
742 	if (burst_stats[0] == total_burst) {
743 		printf("]\n");
744 		return;
745 	}
746 	if (burst_stats[0] + burst_stats[1] == total_burst) {
747 		printf(" + %d%% of %d pkts]\n",
748 		       100 - burst_percent[0], pktnb_stats[1]);
749 		return;
750 	}
751 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
752 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
753 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
754 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
755 		return;
756 	}
757 	printf(" + %d%% of %d pkts + %d%% of others]\n",
758 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
759 }
760 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
761 
762 static void
763 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
764 {
765 	struct rte_port *port;
766 	uint8_t i;
767 
768 	static const char *fwd_stats_border = "----------------------";
769 
770 	port = &ports[port_id];
771 	printf("\n  %s Forward statistics for port %-2d %s\n",
772 	       fwd_stats_border, port_id, fwd_stats_border);
773 
774 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
775 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
776 		       "%-"PRIu64"\n",
777 		       stats->ipackets, stats->imissed,
778 		       (uint64_t) (stats->ipackets + stats->imissed));
779 
780 		if (cur_fwd_eng == &csum_fwd_engine)
781 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
782 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
783 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
784 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
785 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
786 		}
787 
788 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
789 		       "%-"PRIu64"\n",
790 		       stats->opackets, port->tx_dropped,
791 		       (uint64_t) (stats->opackets + port->tx_dropped));
792 	}
793 	else {
794 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
795 		       "%14"PRIu64"\n",
796 		       stats->ipackets, stats->imissed,
797 		       (uint64_t) (stats->ipackets + stats->imissed));
798 
799 		if (cur_fwd_eng == &csum_fwd_engine)
800 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
801 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
802 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
803 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
804 			printf("  RX-nombufs:             %14"PRIu64"\n",
805 			       stats->rx_nombuf);
806 		}
807 
808 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
809 		       "%14"PRIu64"\n",
810 		       stats->opackets, port->tx_dropped,
811 		       (uint64_t) (stats->opackets + port->tx_dropped));
812 	}
813 
814 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
815 	if (port->rx_stream)
816 		pkt_burst_stats_display("RX",
817 			&port->rx_stream->rx_burst_stats);
818 	if (port->tx_stream)
819 		pkt_burst_stats_display("TX",
820 			&port->tx_stream->tx_burst_stats);
821 #endif
822 
823 	if (port->rx_queue_stats_mapping_enabled) {
824 		printf("\n");
825 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
826 			printf("  Stats reg %2d RX-packets:%14"PRIu64
827 			       "     RX-errors:%14"PRIu64
828 			       "    RX-bytes:%14"PRIu64"\n",
829 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
830 		}
831 		printf("\n");
832 	}
833 	if (port->tx_queue_stats_mapping_enabled) {
834 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
835 			printf("  Stats reg %2d TX-packets:%14"PRIu64
836 			       "                                 TX-bytes:%14"PRIu64"\n",
837 			       i, stats->q_opackets[i], stats->q_obytes[i]);
838 		}
839 	}
840 
841 	printf("  %s--------------------------------%s\n",
842 	       fwd_stats_border, fwd_stats_border);
843 }
844 
845 static void
846 fwd_stream_stats_display(streamid_t stream_id)
847 {
848 	struct fwd_stream *fs;
849 	static const char *fwd_top_stats_border = "-------";
850 
851 	fs = fwd_streams[stream_id];
852 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
853 	    (fs->fwd_dropped == 0))
854 		return;
855 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
856 	       "TX Port=%2d/Queue=%2d %s\n",
857 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
858 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
859 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
860 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
861 
862 	/* if checksum mode */
863 	if (cur_fwd_eng == &csum_fwd_engine) {
864 	       printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
865 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
866 	}
867 
868 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
869 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
870 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
871 #endif
872 }
873 
874 static void
875 flush_fwd_rx_queues(void)
876 {
877 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
878 	portid_t  rxp;
879 	portid_t port_id;
880 	queueid_t rxq;
881 	uint16_t  nb_rx;
882 	uint16_t  i;
883 	uint8_t   j;
884 	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
885 	uint64_t timer_period;
886 
887 	/* convert to number of cycles */
888 	timer_period = rte_get_timer_hz(); /* 1 second timeout */
889 
890 	for (j = 0; j < 2; j++) {
891 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
892 			for (rxq = 0; rxq < nb_rxq; rxq++) {
893 				port_id = fwd_ports_ids[rxp];
894 				/*
895 				 * testpmd can get stuck in the do-while loop
896 				 * below if rte_eth_rx_burst() always returns
897 				 * a nonzero number of packets, so a timer is
898 				 * added to exit the loop after 1 second.
899 				 */
900 				prev_tsc = rte_rdtsc();
901 				do {
902 					nb_rx = rte_eth_rx_burst(port_id, rxq,
903 						pkts_burst, MAX_PKT_BURST);
904 					for (i = 0; i < nb_rx; i++)
905 						rte_pktmbuf_free(pkts_burst[i]);
906 
907 					cur_tsc = rte_rdtsc();
908 					diff_tsc = cur_tsc - prev_tsc;
909 					timer_tsc += diff_tsc;
910 				} while ((nb_rx > 0) &&
911 					(timer_tsc < timer_period));
912 				timer_tsc = 0;
913 			}
914 		}
915 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
916 	}
917 }
918 
919 static void
920 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
921 {
922 	struct fwd_stream **fsm;
923 	streamid_t nb_fs;
924 	streamid_t sm_id;
925 
926 	fsm = &fwd_streams[fc->stream_idx];
927 	nb_fs = fc->stream_nb;
928 	do {
929 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
930 			(*pkt_fwd)(fsm[sm_id]);
931 	} while (!fc->stopped);
932 }
933 
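/*
 * For illustration, a minimal packet_fwd_t callback in the spirit of the
 * io engine (the real implementation is in iofwd.c); compiled out:
 */
#if 0
static void
example_io_fwd(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;
	uint16_t nb_tx;

	/* receive a burst on the RX half of the stream */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
			pkts_burst, nb_pkt_per_burst);
	if (nb_rx == 0)
		return;

	/* transmit it unmodified on the TX half */
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
			pkts_burst, nb_rx);
	while (unlikely(nb_tx < nb_rx))	/* free what could not be sent */
		rte_pktmbuf_free(pkts_burst[nb_tx++]);
}
#endif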
934 static int
935 start_pkt_forward_on_core(void *fwd_arg)
936 {
937 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
938 			     cur_fwd_config.fwd_eng->packet_fwd);
939 	return 0;
940 }
941 
942 /*
943  * Run the TXONLY packet forwarding engine to send a single burst of packets.
944  * Used to start communication flows in network loopback test configurations.
945  */
946 static int
947 run_one_txonly_burst_on_core(void *fwd_arg)
948 {
949 	struct fwd_lcore *fwd_lc;
950 	struct fwd_lcore tmp_lcore;
951 
952 	fwd_lc = (struct fwd_lcore *) fwd_arg;
953 	tmp_lcore = *fwd_lc;
954 	tmp_lcore.stopped = 1;
955 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
956 	return 0;
957 }
958 
959 /*
960  * Launch packet forwarding:
961  *     - Setup per-port forwarding context.
962  *     - launch logical cores with their forwarding configuration.
963  */
964 static void
965 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
966 {
967 	port_fwd_begin_t port_fwd_begin;
968 	unsigned int i;
969 	unsigned int lc_id;
970 	int diag;
971 
972 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
973 	if (port_fwd_begin != NULL) {
974 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
975 			(*port_fwd_begin)(fwd_ports_ids[i]);
976 	}
977 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
978 		lc_id = fwd_lcores_cpuids[i];
979 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
980 			fwd_lcores[i]->stopped = 0;
981 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
982 						     fwd_lcores[i], lc_id);
983 			if (diag != 0)
984 				printf("launch lcore %u failed - diag=%d\n",
985 				       lc_id, diag);
986 		}
987 	}
988 }
989 
990 /*
991  * Launch packet forwarding configuration.
992  */
993 void
994 start_packet_forwarding(int with_tx_first)
995 {
996 	port_fwd_begin_t port_fwd_begin;
997 	port_fwd_end_t  port_fwd_end;
998 	struct rte_port *port;
999 	unsigned int i;
1000 	portid_t   pt_id;
1001 	streamid_t sm_id;
1002 
1003 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1004 		rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1005 
1006 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1007 		rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1008 
1009 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1010 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1011 		(!nb_rxq || !nb_txq))
1012 		rte_exit(EXIT_FAILURE,
1013 			"Either rxq or txq is 0, cannot use %s fwd mode\n",
1014 			cur_fwd_eng->fwd_mode_name);
1015 
1016 	if (all_ports_started() == 0) {
1017 		printf("Not all ports were started\n");
1018 		return;
1019 	}
1020 	if (test_done == 0) {
1021 		printf("Packet forwarding already started\n");
1022 		return;
1023 	}
1024 
1025 	if (init_fwd_streams() < 0) {
1026 		printf("Fail from init_fwd_streams()\n");
1027 		return;
1028 	}
1029 
1030 	if (dcb_test) {
1031 		for (i = 0; i < nb_fwd_ports; i++) {
1032 			pt_id = fwd_ports_ids[i];
1033 			port = &ports[pt_id];
1034 			if (!port->dcb_flag) {
1035 				printf("In DCB mode, all forwarding ports must "
1036 				       "be configured in this mode.\n");
1037 				return;
1038 			}
1039 		}
1040 		if (nb_fwd_lcores == 1) {
1041 			printf("In DCB mode, the number of forwarding cores "
1042 			       "should be larger than 1.\n");
1043 			return;
1044 		}
1045 	}
1046 	test_done = 0;
1047 
1048 	if (!no_flush_rx)
1049 		flush_fwd_rx_queues();
1050 
1051 	fwd_config_setup();
1052 	pkt_fwd_config_display(&cur_fwd_config);
1053 	rxtx_config_display();
1054 
1055 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1056 		pt_id = fwd_ports_ids[i];
1057 		port = &ports[pt_id];
1058 		rte_eth_stats_get(pt_id, &port->stats);
1059 		port->tx_dropped = 0;
1060 
1061 		map_port_queue_stats_mapping_registers(pt_id, port);
1062 	}
1063 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1064 		fwd_streams[sm_id]->rx_packets = 0;
1065 		fwd_streams[sm_id]->tx_packets = 0;
1066 		fwd_streams[sm_id]->fwd_dropped = 0;
1067 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1068 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1069 
1070 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1071 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1072 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1073 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1074 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1075 #endif
1076 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1077 		fwd_streams[sm_id]->core_cycles = 0;
1078 #endif
1079 	}
1080 	if (with_tx_first) {
1081 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1082 		if (port_fwd_begin != NULL) {
1083 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1084 				(*port_fwd_begin)(fwd_ports_ids[i]);
1085 		}
1086 		while (with_tx_first--) {
1087 			launch_packet_forwarding(
1088 					run_one_txonly_burst_on_core);
1089 			rte_eal_mp_wait_lcore();
1090 		}
1091 		port_fwd_end = tx_only_engine.port_fwd_end;
1092 		if (port_fwd_end != NULL) {
1093 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1094 				(*port_fwd_end)(fwd_ports_ids[i]);
1095 		}
1096 	}
1097 	launch_packet_forwarding(start_pkt_forward_on_core);
1098 }
1099 
1100 void
1101 stop_packet_forwarding(void)
1102 {
1103 	struct rte_eth_stats stats;
1104 	struct rte_port *port;
1105 	port_fwd_end_t  port_fwd_end;
1106 	int i;
1107 	portid_t   pt_id;
1108 	streamid_t sm_id;
1109 	lcoreid_t  lc_id;
1110 	uint64_t total_recv;
1111 	uint64_t total_xmit;
1112 	uint64_t total_rx_dropped;
1113 	uint64_t total_tx_dropped;
1114 	uint64_t total_rx_nombuf;
1115 	uint64_t tx_dropped;
1116 	uint64_t rx_bad_ip_csum;
1117 	uint64_t rx_bad_l4_csum;
1118 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1119 	uint64_t fwd_cycles;
1120 #endif
1121 	static const char *acc_stats_border = "+++++++++++++++";
1122 
1123 	if (test_done) {
1124 		printf("Packet forwarding not started\n");
1125 		return;
1126 	}
1127 	printf("Telling cores to stop...");
1128 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1129 		fwd_lcores[lc_id]->stopped = 1;
1130 	printf("\nWaiting for lcores to finish...\n");
1131 	rte_eal_mp_wait_lcore();
1132 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1133 	if (port_fwd_end != NULL) {
1134 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1135 			pt_id = fwd_ports_ids[i];
1136 			(*port_fwd_end)(pt_id);
1137 		}
1138 	}
1139 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1140 	fwd_cycles = 0;
1141 #endif
1142 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1143 		if (cur_fwd_config.nb_fwd_streams >
1144 		    cur_fwd_config.nb_fwd_ports) {
1145 			fwd_stream_stats_display(sm_id);
1146 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1147 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1148 		} else {
1149 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1150 				fwd_streams[sm_id];
1151 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1152 				fwd_streams[sm_id];
1153 		}
1154 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1155 		tx_dropped = (uint64_t) (tx_dropped +
1156 					 fwd_streams[sm_id]->fwd_dropped);
1157 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1158 
1159 		rx_bad_ip_csum =
1160 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1161 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1162 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1163 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1164 							rx_bad_ip_csum;
1165 
1166 		rx_bad_l4_csum =
1167 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1168 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1169 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1170 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1171 							rx_bad_l4_csum;
1172 
1173 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1174 		fwd_cycles = (uint64_t) (fwd_cycles +
1175 					 fwd_streams[sm_id]->core_cycles);
1176 #endif
1177 	}
1178 	total_recv = 0;
1179 	total_xmit = 0;
1180 	total_rx_dropped = 0;
1181 	total_tx_dropped = 0;
1182 	total_rx_nombuf  = 0;
1183 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1184 		pt_id = fwd_ports_ids[i];
1185 
1186 		port = &ports[pt_id];
1187 		rte_eth_stats_get(pt_id, &stats);
1188 		stats.ipackets -= port->stats.ipackets;
1189 		port->stats.ipackets = 0;
1190 		stats.opackets -= port->stats.opackets;
1191 		port->stats.opackets = 0;
1192 		stats.ibytes   -= port->stats.ibytes;
1193 		port->stats.ibytes = 0;
1194 		stats.obytes   -= port->stats.obytes;
1195 		port->stats.obytes = 0;
1196 		stats.imissed  -= port->stats.imissed;
1197 		port->stats.imissed = 0;
1198 		stats.oerrors  -= port->stats.oerrors;
1199 		port->stats.oerrors = 0;
1200 		stats.rx_nombuf -= port->stats.rx_nombuf;
1201 		port->stats.rx_nombuf = 0;
1202 
1203 		total_recv += stats.ipackets;
1204 		total_xmit += stats.opackets;
1205 		total_rx_dropped += stats.imissed;
1206 		total_tx_dropped += port->tx_dropped;
1207 		total_rx_nombuf  += stats.rx_nombuf;
1208 
1209 		fwd_port_stats_display(pt_id, &stats);
1210 	}
1211 	printf("\n  %s Accumulated forward statistics for all ports"
1212 	       "%s\n",
1213 	       acc_stats_border, acc_stats_border);
1214 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1215 	       "%-"PRIu64"\n"
1216 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1217 	       "%-"PRIu64"\n",
1218 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1219 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1220 	if (total_rx_nombuf > 0)
1221 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1222 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1223 	       "%s\n",
1224 	       acc_stats_border, acc_stats_border);
1225 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1226 	if (total_recv > 0)
1227 		printf("\n  CPU cycles/packet=%u (total cycles="
1228 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1229 		       (unsigned int)(fwd_cycles / total_recv),
1230 		       fwd_cycles, total_recv);
1231 #endif
1232 	printf("\nDone.\n");
1233 	test_done = 1;
1234 }
1235 
1236 void
1237 dev_set_link_up(portid_t pid)
1238 {
1239 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1240 		printf("\nFailed to set link up.\n");
1241 }
1242 
1243 void
1244 dev_set_link_down(portid_t pid)
1245 {
1246 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1247 		printf("\nFailed to set link down.\n");
1248 }
1249 
1250 static int
1251 all_ports_started(void)
1252 {
1253 	portid_t pi;
1254 	struct rte_port *port;
1255 
1256 	FOREACH_PORT(pi, ports) {
1257 		port = &ports[pi];
1258 		/* Check if there is a port which is not started */
1259 		if ((port->port_status != RTE_PORT_STARTED) &&
1260 			(port->slave_flag == 0))
1261 			return 0;
1262 	}
1263 
1264 	/* All ports (excluding bonding slaves) are started */
1265 	return 1;
1266 }
1267 
1268 int
1269 all_ports_stopped(void)
1270 {
1271 	portid_t pi;
1272 	struct rte_port *port;
1273 
1274 	FOREACH_PORT(pi, ports) {
1275 		port = &ports[pi];
1276 		if ((port->port_status != RTE_PORT_STOPPED) &&
1277 			(port->slave_flag == 0))
1278 			return 0;
1279 	}
1280 
1281 	return 1;
1282 }
1283 
1284 int
1285 port_is_started(portid_t port_id)
1286 {
1287 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1288 		return 0;
1289 
1290 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1291 		return 0;
1292 
1293 	return 1;
1294 }
1295 
1296 static int
1297 port_is_closed(portid_t port_id)
1298 {
1299 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1300 		return 0;
1301 
1302 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1303 		return 0;
1304 
1305 	return 1;
1306 }
1307 
1308 int
1309 start_port(portid_t pid)
1310 {
1311 	int diag, need_check_link_status = -1;
1312 	portid_t pi;
1313 	queueid_t qi;
1314 	struct rte_port *port;
1315 	struct ether_addr mac_addr;
1316 
1317 	if (port_id_is_invalid(pid, ENABLED_WARN))
1318 		return 0;
1319 
1320 	if (dcb_config)
1321 		dcb_test = 1;
1322 	FOREACH_PORT(pi, ports) {
1323 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1324 			continue;
1325 
1326 		need_check_link_status = 0;
1327 		port = &ports[pi];
1328 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1329 						 RTE_PORT_HANDLING) == 0) {
1330 			printf("Port %d is not stopped\n", pi);
1331 			continue;
1332 		}
1333 
1334 		if (port->need_reconfig > 0) {
1335 			port->need_reconfig = 0;
1336 
1337 			printf("Configuring Port %d (socket %u)\n", pi,
1338 					port->socket_id);
1339 			/* configure port */
1340 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1341 						&(port->dev_conf));
1342 			if (diag != 0) {
1343 				if (rte_atomic16_cmpset(&(port->port_status),
1344 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1345 					printf("Port %d cannot be set back "
1346 							"to stopped\n", pi);
1347 				printf("Failed to configure port %d\n", pi);
1348 				/* try to reconfigure port next time */
1349 				port->need_reconfig = 1;
1350 				return -1;
1351 			}
1352 		}
1353 		if (port->need_reconfig_queues > 0) {
1354 			port->need_reconfig_queues = 0;
1355 			/* setup tx queues */
1356 			for (qi = 0; qi < nb_txq; qi++) {
1357 				if ((numa_support) &&
1358 					(txring_numa[pi] != NUMA_NO_CONFIG))
1359 					diag = rte_eth_tx_queue_setup(pi, qi,
1360 						nb_txd, txring_numa[pi],
1361 						&(port->tx_conf));
1362 				else
1363 					diag = rte_eth_tx_queue_setup(pi, qi,
1364 						nb_txd, port->socket_id,
1365 						&(port->tx_conf));
1366 
1367 				if (diag == 0)
1368 					continue;
1369 
1370 				/* Failed to set up a tx queue; return */
1371 				if (rte_atomic16_cmpset(&(port->port_status),
1372 							RTE_PORT_HANDLING,
1373 							RTE_PORT_STOPPED) == 0)
1374 					printf("Port %d cannot be set back "
1375 							"to stopped\n", pi);
1376 				printf("Failed to configure port %d tx queues\n", pi);
1377 				/* try to reconfigure queues next time */
1378 				port->need_reconfig_queues = 1;
1379 				return -1;
1380 			}
1381 			/* setup rx queues */
1382 			for (qi = 0; qi < nb_rxq; qi++) {
1383 				if ((numa_support) &&
1384 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1385 					struct rte_mempool *mp =
1386 						mbuf_pool_find(rxring_numa[pi]);
1387 					if (mp == NULL) {
1388 						printf("Failed to setup RX queue: "
1389 							"no mempool allocation "
1390 							"on socket %d\n",
1391 							rxring_numa[pi]);
1392 						return -1;
1393 					}
1394 
1395 					diag = rte_eth_rx_queue_setup(pi, qi,
1396 					     nb_rxd, rxring_numa[pi],
1397 					     &(port->rx_conf), mp);
1398 				} else {
1399 					struct rte_mempool *mp =
1400 						mbuf_pool_find(port->socket_id);
1401 					if (mp == NULL) {
1402 						printf("Failed to setup RX queue: "
1403 							"no mempool allocation "
1404 							"on socket %d\n",
1405 							port->socket_id);
1406 						return -1;
1407 					}
1408 					diag = rte_eth_rx_queue_setup(pi, qi,
1409 					     nb_rxd, port->socket_id,
1410 					     &(port->rx_conf), mp);
1411 				}
1412 				if (diag == 0)
1413 					continue;
1414 
1415 				/* Failed to set up an rx queue; return */
1416 				if (rte_atomic16_cmpset(&(port->port_status),
1417 							RTE_PORT_HANDLING,
1418 							RTE_PORT_STOPPED) == 0)
1419 					printf("Port %d cannot be set back "
1420 							"to stopped\n", pi);
1421 				printf("Failed to configure port %d rx queues\n", pi);
1422 				/* try to reconfigure queues next time */
1423 				port->need_reconfig_queues = 1;
1424 				return -1;
1425 			}
1426 		}
1427 		/* start port */
1428 		if (rte_eth_dev_start(pi) < 0) {
1429 			printf("Failed to start port %d\n", pi);
1430 
1431 			/* Failed to start the port; try to set it back to stopped */
1432 			if (rte_atomic16_cmpset(&(port->port_status),
1433 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1434 				printf("Port %d cannot be set back to "
1435 							"stopped\n", pi);
1436 			continue;
1437 		}
1438 
1439 		if (rte_atomic16_cmpset(&(port->port_status),
1440 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1441 			printf("Port %d cannot be set to started\n", pi);
1442 
1443 		rte_eth_macaddr_get(pi, &mac_addr);
1444 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1445 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1446 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1447 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1448 
1449 		/* at least one port started, need to check link status */
1450 		need_check_link_status = 1;
1451 	}
1452 
1453 	if (need_check_link_status == 1 && !no_link_check)
1454 		check_all_ports_link_status(RTE_PORT_ALL);
1455 	else if (need_check_link_status == 0)
1456 		printf("Please stop the ports first\n");
1457 
1458 	printf("Done\n");
1459 	return 0;
1460 }
1461 
1462 void
1463 stop_port(portid_t pid)
1464 {
1465 	portid_t pi;
1466 	struct rte_port *port;
1467 	int need_check_link_status = 0;
1468 
1469 	if (dcb_test) {
1470 		dcb_test = 0;
1471 		dcb_config = 0;
1472 	}
1473 
1474 	if (port_id_is_invalid(pid, ENABLED_WARN))
1475 		return;
1476 
1477 	printf("Stopping ports...\n");
1478 
1479 	FOREACH_PORT(pi, ports) {
1480 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1481 			continue;
1482 
1483 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1484 			printf("Please remove port %d from forwarding configuration.\n", pi);
1485 			continue;
1486 		}
1487 
1488 		if (port_is_bonding_slave(pi)) {
1489 			printf("Please remove port %d from bonded device.\n", pi);
1490 			continue;
1491 		}
1492 
1493 		port = &ports[pi];
1494 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1495 						RTE_PORT_HANDLING) == 0)
1496 			continue;
1497 
1498 		rte_eth_dev_stop(pi);
1499 
1500 		if (rte_atomic16_cmpset(&(port->port_status),
1501 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1502 			printf("Port %d cannot be set to stopped\n", pi);
1503 		need_check_link_status = 1;
1504 	}
1505 	if (need_check_link_status && !no_link_check)
1506 		check_all_ports_link_status(RTE_PORT_ALL);
1507 
1508 	printf("Done\n");
1509 }
1510 
1511 void
1512 close_port(portid_t pid)
1513 {
1514 	portid_t pi;
1515 	struct rte_port *port;
1516 
1517 	if (port_id_is_invalid(pid, ENABLED_WARN))
1518 		return;
1519 
1520 	printf("Closing ports...\n");
1521 
1522 	FOREACH_PORT(pi, ports) {
1523 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1524 			continue;
1525 
1526 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1527 			printf("Please remove port %d from forwarding configuration.\n", pi);
1528 			continue;
1529 		}
1530 
1531 		if (port_is_bonding_slave(pi)) {
1532 			printf("Please remove port %d from bonded device.\n", pi);
1533 			continue;
1534 		}
1535 
1536 		port = &ports[pi];
1537 		if (rte_atomic16_cmpset(&(port->port_status),
1538 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1539 			printf("Port %d is already closed\n", pi);
1540 			continue;
1541 		}
1542 
1543 		if (rte_atomic16_cmpset(&(port->port_status),
1544 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1545 			printf("Port %d is not stopped\n", pi);
1546 			continue;
1547 		}
1548 
1549 		if (port->flow_list)
1550 			port_flow_flush(pi);
1551 		rte_eth_dev_close(pi);
1552 
1553 		if (rte_atomic16_cmpset(&(port->port_status),
1554 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1555 			printf("Port %d cannot be set to closed\n", pi);
1556 	}
1557 
1558 	printf("Done\n");
1559 }
1560 
1561 void
1562 attach_port(char *identifier)
1563 {
1564 	portid_t pi = 0;
1565 	unsigned int socket_id;
1566 
1567 	printf("Attaching a new port...\n");
1568 
1569 	if (identifier == NULL) {
1570 		printf("Invalid parameter specified\n");
1571 		return;
1572 	}
1573 
1574 	if (rte_eth_dev_attach(identifier, &pi))
1575 		return;
1576 
1577 	ports[pi].enabled = 1;
1578 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1579 	/* if socket_id is invalid, set to 0 */
1580 	if (check_socket_id(socket_id) < 0)
1581 		socket_id = 0;
1582 	reconfig(pi, socket_id);
1583 	rte_eth_promiscuous_enable(pi);
1584 
1585 	nb_ports = rte_eth_dev_count();
1586 
1587 	ports[pi].port_status = RTE_PORT_STOPPED;
1588 
1589 	printf("Port %d is attached. Total ports is now %d\n", pi, nb_ports);
1590 	printf("Done\n");
1591 }
1592 
1593 void
1594 detach_port(uint8_t port_id)
1595 {
1596 	char name[RTE_ETH_NAME_MAX_LEN];
1597 
1598 	printf("Detaching a port...\n");
1599 
1600 	if (!port_is_closed(port_id)) {
1601 		printf("Please close port first\n");
1602 		return;
1603 	}
1604 
1605 	if (ports[port_id].flow_list)
1606 		port_flow_flush(port_id);
1607 
1608 	if (rte_eth_dev_detach(port_id, name))
1609 		return;
1610 
1611 	ports[port_id].enabled = 0;
1612 	nb_ports = rte_eth_dev_count();
1613 
1614 	printf("Port '%s' is detached. Total ports is now %d\n",
1615 			name, nb_ports);
1616 	printf("Done\n");
1617 	return;
1618 }
1619 
1620 void
1621 pmd_test_exit(void)
1622 {
1623 	portid_t pt_id;
1624 
1625 	if (test_done == 0)
1626 		stop_packet_forwarding();
1627 
1628 	if (ports != NULL) {
1629 		no_link_check = 1;
1630 		FOREACH_PORT(pt_id, ports) {
1631 			printf("\nShutting down port %d...\n", pt_id);
1632 			fflush(stdout);
1633 			stop_port(pt_id);
1634 			close_port(pt_id);
1635 		}
1636 	}
1637 	printf("\nBye...\n");
1638 }
1639 
1640 typedef void (*cmd_func_t)(void);
1641 struct pmd_test_command {
1642 	const char *cmd_name;
1643 	cmd_func_t cmd_func;
1644 };
1645 
1646 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1647 
1648 /* Check the link status of all ports for up to 9 s, and finally print it */
1649 static void
1650 check_all_ports_link_status(uint32_t port_mask)
1651 {
1652 #define CHECK_INTERVAL 100 /* 100ms */
1653 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1654 	uint8_t portid, count, all_ports_up, print_flag = 0;
1655 	struct rte_eth_link link;
1656 
1657 	printf("Checking link statuses...\n");
1658 	fflush(stdout);
1659 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1660 		all_ports_up = 1;
1661 		FOREACH_PORT(portid, ports) {
1662 			if ((port_mask & (1 << portid)) == 0)
1663 				continue;
1664 			memset(&link, 0, sizeof(link));
1665 			rte_eth_link_get_nowait(portid, &link);
1666 			/* print link status if flag set */
1667 			if (print_flag == 1) {
1668 				if (link.link_status)
1669 					printf("Port %d Link Up - speed %u "
1670 						"Mbps - %s\n", (uint8_t)portid,
1671 						(unsigned)link.link_speed,
1672 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1673 					("full-duplex") : ("half-duplex"));
1674 				else
1675 					printf("Port %d Link Down\n",
1676 						(uint8_t)portid);
1677 				continue;
1678 			}
1679 			/* clear all_ports_up flag if any link down */
1680 			if (link.link_status == ETH_LINK_DOWN) {
1681 				all_ports_up = 0;
1682 				break;
1683 			}
1684 		}
1685 		/* after finally printing all link status, get out */
1686 		if (print_flag == 1)
1687 			break;
1688 
1689 		if (all_ports_up == 0) {
1690 			fflush(stdout);
1691 			rte_delay_ms(CHECK_INTERVAL);
1692 		}
1693 
1694 		/* set the print_flag if all ports up or timeout */
1695 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1696 			print_flag = 1;
1697 		}
1698 	}
1699 }
1700 
1701 static int
1702 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1703 {
1704 	uint16_t i;
1705 	int diag;
1706 	uint8_t mapping_found = 0;
1707 
1708 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1709 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1710 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1711 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1712 					tx_queue_stats_mappings[i].queue_id,
1713 					tx_queue_stats_mappings[i].stats_counter_id);
1714 			if (diag != 0)
1715 				return diag;
1716 			mapping_found = 1;
1717 		}
1718 	}
1719 	if (mapping_found)
1720 		port->tx_queue_stats_mapping_enabled = 1;
1721 	return 0;
1722 }
1723 
1724 static int
1725 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1726 {
1727 	uint16_t i;
1728 	int diag;
1729 	uint8_t mapping_found = 0;
1730 
1731 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1732 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1733 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1734 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1735 					rx_queue_stats_mappings[i].queue_id,
1736 					rx_queue_stats_mappings[i].stats_counter_id);
1737 			if (diag != 0)
1738 				return diag;
1739 			mapping_found = 1;
1740 		}
1741 	}
1742 	if (mapping_found)
1743 		port->rx_queue_stats_mapping_enabled = 1;
1744 	return 0;
1745 }
1746 
1747 static void
1748 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1749 {
1750 	int diag = 0;
1751 
1752 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1753 	if (diag != 0) {
1754 		if (diag == -ENOTSUP) {
1755 			port->tx_queue_stats_mapping_enabled = 0;
1756 			printf("TX queue stats mapping not supported for port id=%d\n", pi);
1757 		}
1758 		else
1759 			rte_exit(EXIT_FAILURE,
1760 					"set_tx_queue_stats_mapping_registers "
1761 					"failed for port id=%d diag=%d\n",
1762 					pi, diag);
1763 	}
1764 
1765 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1766 	if (diag != 0) {
1767 		if (diag == -ENOTSUP) {
1768 			port->rx_queue_stats_mapping_enabled = 0;
1769 			printf("RX queue stats mapping not supported for port id=%d\n", pi);
1770 		}
1771 		else
1772 			rte_exit(EXIT_FAILURE,
1773 					"set_rx_queue_stats_mapping_registers "
1774 					"failed for port id=%d diag=%d\n",
1775 					pi, diag);
1776 	}
1777 }
1778 
1779 static void
1780 rxtx_port_config(struct rte_port *port)
1781 {
1782 	port->rx_conf = port->dev_info.default_rxconf;
1783 	port->tx_conf = port->dev_info.default_txconf;
1784 
1785 	/* Check if any RX/TX parameters have been passed */
1786 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1787 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1788 
1789 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1790 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1791 
1792 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1793 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1794 
1795 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1796 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1797 
1798 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1799 		port->rx_conf.rx_drop_en = rx_drop_en;
1800 
1801 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1802 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1803 
1804 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1805 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1806 
1807 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1808 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1809 
1810 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1811 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1812 
1813 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1814 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1815 
1816 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1817 		port->tx_conf.txq_flags = txq_flags;
1818 }
1819 
1820 void
1821 init_port_config(void)
1822 {
1823 	portid_t pid;
1824 	struct rte_port *port;
1825 
1826 	FOREACH_PORT(pid, ports) {
1827 		port = &ports[pid];
1828 		port->dev_conf.rxmode = rx_mode;
1829 		port->dev_conf.fdir_conf = fdir_conf;
1830 		if (nb_rxq > 1) {
1831 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1832 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1833 		} else {
1834 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1835 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1836 		}
1837 
1838 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1839 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1840 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1841 			else
1842 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1843 		}
1844 
1845 		if (port->dev_info.max_vfs != 0) {
1846 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1847 				port->dev_conf.rxmode.mq_mode =
1848 					ETH_MQ_RX_VMDQ_RSS;
1849 			else
1850 				port->dev_conf.rxmode.mq_mode =
1851 					ETH_MQ_RX_NONE;
1852 
1853 			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1854 		}
1855 
1856 		rxtx_port_config(port);
1857 
1858 		rte_eth_macaddr_get(pid, &port->eth_addr);
1859 
1860 		map_port_queue_stats_mapping_registers(pid, port);
1861 #ifdef RTE_NIC_BYPASS
1862 		rte_eth_dev_bypass_init(pid);
1863 #endif
1864 	}
1865 }
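/*
 * Summary of the mq_mode selection performed above (derived directly from
 * the two conditionals):
 *
 *     dcb_flag == 0, max_vfs == 0, rss_hf != 0  ->  ETH_MQ_RX_RSS
 *     dcb_flag == 0, max_vfs == 0, rss_hf == 0  ->  ETH_MQ_RX_NONE
 *     max_vfs != 0,                rss_hf != 0  ->  ETH_MQ_RX_VMDQ_RSS
 *     max_vfs != 0,                rss_hf == 0  ->  ETH_MQ_RX_NONE
 *
 * With dcb_flag set and no VFs, rxmode.mq_mode is left untouched here;
 * init_port_dcb_config() configures it instead.
 */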
1866 
1867 void set_port_slave_flag(portid_t slave_pid)
1868 {
1869 	struct rte_port *port;
1870 
1871 	port = &ports[slave_pid];
1872 	port->slave_flag = 1;
1873 }
1874 
1875 void clear_port_slave_flag(portid_t slave_pid)
1876 {
1877 	struct rte_port *port;
1878 
1879 	port = &ports[slave_pid];
1880 	port->slave_flag = 0;
1881 }
1882 
1883 uint8_t port_is_bonding_slave(portid_t slave_pid)
1884 {
1885 	struct rte_port *port;
1886 
1887 	port = &ports[slave_pid];
1888 	return port->slave_flag;
1889 }
1890 
1891 const uint16_t vlan_tags[] = {
1892 		0,  1,  2,  3,  4,  5,  6,  7,
1893 		8,  9, 10, 11,  12, 13, 14, 15,
1894 		16, 17, 18, 19, 20, 21, 22, 23,
1895 		24, 25, 26, 27, 28, 29, 30, 31
1896 };
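/*
 * The 32 tags match ETH_32_POOLS, the largest VMDq pool count used below;
 * init_port_dcb_config() also installs each of them into the hardware VLAN
 * filter via rx_vft_set().
 */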
1897 
1898 static int
1899 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1900 		 enum dcb_mode_enable dcb_mode,
1901 		 enum rte_eth_nb_tcs num_tcs,
1902 		 uint8_t pfc_en)
1903 {
1904 	uint8_t i;
1905 
1906 	/*
1907 	 * Build the correct configuration for DCB+VT based on the vlan_tags
1908 	 * array given above and the number of traffic classes available for use.
1909 	 */
1910 	if (dcb_mode == DCB_VT_ENABLED) {
1911 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1912 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
1913 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1914 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1915 
1916 		/* VMDQ+DCB RX and TX configurations */
1917 		vmdq_rx_conf->enable_default_pool = 0;
1918 		vmdq_rx_conf->default_pool = 0;
1919 		vmdq_rx_conf->nb_queue_pools =
1920 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1921 		vmdq_tx_conf->nb_queue_pools =
1922 			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1923 
1924 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1925 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1926 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1927 			vmdq_rx_conf->pool_map[i].pools =
1928 				1 << (i % vmdq_rx_conf->nb_queue_pools);
1929 		}
1930 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1931 			vmdq_rx_conf->dcb_tc[i] = i;
1932 			vmdq_tx_conf->dcb_tc[i] = i;
1933 		}
1934 
1935 		/* set the multi-queue DCB mode for both RX and TX */
1936 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1937 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1938 	} else {
1939 		struct rte_eth_dcb_rx_conf *rx_conf =
1940 				&eth_conf->rx_adv_conf.dcb_rx_conf;
1941 		struct rte_eth_dcb_tx_conf *tx_conf =
1942 				&eth_conf->tx_adv_conf.dcb_tx_conf;
1943 
1944 		rx_conf->nb_tcs = num_tcs;
1945 		tx_conf->nb_tcs = num_tcs;
1946 
1947 		for (i = 0; i < num_tcs; i++) {
1948 			rx_conf->dcb_tc[i] = i;
1949 			tx_conf->dcb_tc[i] = i;
1950 		}
1951 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1952 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1953 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1954 	}
1955 
1956 	if (pfc_en)
1957 		eth_conf->dcb_capability_en =
1958 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1959 	else
1960 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1961 
1962 	return 0;
1963 }
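/*
 * Worked example for the DCB_VT_ENABLED branch with num_tcs == ETH_4_TCS
 * (hence 32 pools): pool_map entry i maps VLAN tag vlan_tags[i] to the pool
 * bitmask 1 << (i % 32), i.e.
 *
 *     VLAN  0 -> pool  0 (mask 0x00000001)
 *     VLAN  1 -> pool  1 (mask 0x00000002)
 *     ...
 *     VLAN 31 -> pool 31 (mask 0x80000000)
 *
 * one VLAN per VMDq pool, with dcb_tc[i] = i for each of the 8 user
 * priorities.
 */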
1964 
1965 int
1966 init_port_dcb_config(portid_t pid,
1967 		     enum dcb_mode_enable dcb_mode,
1968 		     enum rte_eth_nb_tcs num_tcs,
1969 		     uint8_t pfc_en)
1970 {
1971 	struct rte_eth_conf port_conf;
1972 	struct rte_port *rte_port;
1973 	int retval;
1974 	uint16_t i;
1975 
1976 	rte_port = &ports[pid];
1977 
1978 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1979 	/* Enter DCB configuration status */
1980 	dcb_config = 1;
1981 
1982 	/* Set the DCB configuration for both VT and non-VT modes. */
1983 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1984 	if (retval < 0)
1985 		return retval;
1986 	port_conf.rxmode.hw_vlan_filter = 1;
1987 
1988 	/**
1989 	 * Write the configuration into the device.
1990 	 * Set the numbers of RX & TX queues to 0, so
1991 	 * the RX & TX queues will not be set up.
1992 	 */
1993 	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
1994 
1995 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
1996 
1997 	/* If dev_info.vmdq_pool_base is greater than 0,
1998 	 * the queue IDs of the VMDq pools start after the PF queues.
1999 	 */
2000 	if (dcb_mode == DCB_VT_ENABLED &&
2001 	    rte_port->dev_info.vmdq_pool_base > 0) {
2002 		printf("VMDQ_DCB multi-queue mode is nonsensical"
2003 			" for port %d.\n", pid);
2004 		return -1;
2005 	}
2006 
2007 	/* Assume all testpmd ports have the same DCB capability
2008 	 * and the same number of rxq and txq in DCB mode.
2009 	 */
2010 	if (dcb_mode == DCB_VT_ENABLED) {
2011 		if (rte_port->dev_info.max_vfs > 0) {
2012 			nb_rxq = rte_port->dev_info.nb_rx_queues;
2013 			nb_txq = rte_port->dev_info.nb_tx_queues;
2014 		} else {
2015 			nb_rxq = rte_port->dev_info.max_rx_queues;
2016 			nb_txq = rte_port->dev_info.max_tx_queues;
2017 		}
2018 	} else {
2019 		/* If VT is disabled, use all PF queues. */
2020 		if (rte_port->dev_info.vmdq_pool_base == 0) {
2021 			nb_rxq = rte_port->dev_info.max_rx_queues;
2022 			nb_txq = rte_port->dev_info.max_tx_queues;
2023 		} else {
2024 			nb_rxq = (queueid_t)num_tcs;
2025 			nb_txq = (queueid_t)num_tcs;
2026 
2027 		}
2028 	}
2029 	rx_free_thresh = 64;
2030 
2031 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2032 
2033 	rxtx_port_config(rte_port);
2034 	/* VLAN filter */
2035 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2036 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
2037 		rx_vft_set(pid, vlan_tags[i], 1);
2038 
2039 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2040 	map_port_queue_stats_mapping_registers(pid, rte_port);
2041 
2042 	rte_port->dcb_flag = 1;
2043 
2044 	return 0;
2045 }
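/*
 * Typical caller (a sketch; the command syntax is assumed from cmdline.c):
 * the "port config <port> dcb vt (on|off) <num_tcs> pfc (on|off)" handler
 * stops the port and then invokes, for example:
 *
 *     ret = init_port_dcb_config(pid, DCB_ENABLED, ETH_4_TCS, 1);
 *
 * i.e. DCB without virtualization, 4 traffic classes, PFC enabled. The port
 * must be restarted afterwards for the new queue counts to take effect.
 */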
2046 
2047 static void
2048 init_port(void)
2049 {
2050 	portid_t pid;
2051 
2052 	/* Configuration of Ethernet ports. */
2053 	ports = rte_zmalloc("testpmd: ports",
2054 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2055 			    RTE_CACHE_LINE_SIZE);
2056 	if (ports == NULL) {
2057 		rte_exit(EXIT_FAILURE,
2058 				"rte_zmalloc(%d struct rte_port) failed\n",
2059 				RTE_MAX_ETHPORTS);
2060 	}
2061 
2062 	/* enable allocated ports */
2063 	for (pid = 0; pid < nb_ports; pid++)
2064 		ports[pid].enabled = 1;
2065 }
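/*
 * The enabled flag set here is what FOREACH_PORT() iterates on: ports with
 * enabled == 0 (never probed, or detached later) are skipped. A rough
 * expansion of the macro from testpmd.h (assumed, for illustration only):
 *
 *     for (pid = find_next_port(0, ports, RTE_MAX_ETHPORTS);
 *          pid < RTE_MAX_ETHPORTS;
 *          pid = find_next_port(pid + 1, ports, RTE_MAX_ETHPORTS))
 */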
2066 
2067 static void
2068 force_quit(void)
2069 {
2070 	pmd_test_exit();
2071 	prompt_exit();
2072 }
2073 
2074 static void
2075 signal_handler(int signum)
2076 {
2077 	if (signum == SIGINT || signum == SIGTERM) {
2078 		printf("\nSignal %d received, preparing to exit...\n",
2079 				signum);
2080 #ifdef RTE_LIBRTE_PDUMP
2081 		/* uninitialize packet capture framework */
2082 		rte_pdump_uninit();
2083 #endif
2084 		force_quit();
2085 		/* exit with the expected status */
2086 		signal(signum, SIG_DFL);
2087 		kill(getpid(), signum);
2088 	}
2089 }
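/*
 * The SIG_DFL + kill(getpid(), signum) sequence above re-raises the signal
 * with its default disposition, so the parent shell observes a
 * death-by-signal status (WIFSIGNALED/WTERMSIG) rather than a normal exit.
 * For a single-threaded process,
 *
 *     signal(signum, SIG_DFL);
 *     raise(signum);
 *
 * would be an equivalent sketch.
 */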
2090 
2091 int
2092 main(int argc, char** argv)
2093 {
2094 	int  diag;
2095 	uint8_t port_id;
2096 
2097 	signal(SIGINT, signal_handler);
2098 	signal(SIGTERM, signal_handler);
2099 
2100 	diag = rte_eal_init(argc, argv);
2101 	if (diag < 0)
2102 		rte_panic("Cannot init EAL\n");
2103 
2104 #ifdef RTE_LIBRTE_PDUMP
2105 	/* initialize packet capture framework */
2106 	rte_pdump_init(NULL);
2107 #endif
2108 
2109 	nb_ports = (portid_t) rte_eth_dev_count();
2110 	if (nb_ports == 0)
2111 		RTE_LOG(WARNING, EAL, "No probed Ethernet devices\n");
2112 
2113 	/* allocate port structures, and init them */
2114 	init_port();
2115 
2116 	set_def_fwd_config();
2117 	if (nb_lcores == 0)
2118 		rte_panic("Empty set of forwarding logical cores - check the "
2119 			  "core mask supplied in the command parameters\n");
2120 
2121 	argc -= diag;
2122 	argv += diag;
2123 	if (argc > 1)
2124 		launch_args_parse(argc, argv);
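	/*
	 * Example invocation (illustrative): EAL options come first and are
	 * consumed by rte_eal_init(), whose return value is the number of
	 * arguments parsed; everything after "--" is handed to
	 * launch_args_parse():
	 *
	 *     testpmd -c 0x3 -n 4 -- -i --rxq=2 --txq=2
	 */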
2125 
2126 	if (!nb_rxq && !nb_txq)
2127 		printf("Warning: either the number of RX or TX queues should be non-zero\n");
2128 
2129 	if (nb_rxq > 1 && nb_rxq > nb_txq)
2130 		printf("Warning: nb_rxq=%d enables RSS configuration, "
2131 		       "but nb_txq=%d will prevent it from being fully tested.\n",
2132 		       nb_rxq, nb_txq);
2133 
2134 	init_config();
2135 	if (start_port(RTE_PORT_ALL) != 0)
2136 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2137 
2138 	/* set all ports to promiscuous mode by default */
2139 	FOREACH_PORT(port_id, ports)
2140 		rte_eth_promiscuous_enable(port_id);
2141 
2142 #ifdef RTE_LIBRTE_CMDLINE
2143 	if (interactive == 1) {
2144 		if (auto_start) {
2145 			printf("Start automatic packet forwarding\n");
2146 			start_packet_forwarding(0);
2147 		}
2148 		prompt();
2149 	} else
2150 #endif
2151 	{
2152 		char c;
2153 		int rc;
2154 
2155 		printf("No command-line core given, starting packet forwarding\n");
2156 		start_packet_forwarding(0);
2157 		printf("Press enter to exit\n");
2158 		rc = read(0, &c, 1);
2159 		pmd_test_exit();
2160 		if (rc < 0)
2161 			return 1;
2162 	}
2163 
2164 	return 0;
2165 }
2166