xref: /dpdk/app/test-pmd/testpmd.c (revision bf56fce1fb45b83747527e6312f61c0fcf3789b8)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43 
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46 
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50 
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 
80 #include "testpmd.h"
81 
82 uint16_t verbose_level = 0; /**< Silent by default. */
83 
84 /* Use the master core for the command line? */
85 uint8_t interactive = 0;
86 uint8_t auto_start = 0;
87 
88 /*
89  * NUMA support configuration.
90  * When set, the NUMA support attempts to dispatch the allocation of the
91  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
92  * probed ports among the CPU sockets 0 and 1.
93  * Otherwise, all memory is allocated from CPU socket 0.
94  */
95 uint8_t numa_support = 0; /**< No numa support by default */
96 
97 /*
98  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
99  * not configured.
100  */
101 uint8_t socket_num = UMA_NO_CONFIG;
102 
103 /*
104  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
105  */
106 uint8_t mp_anon = 0;
107 
108 /*
109  * Record the Ethernet address of peer target ports to which packets are
110  * forwarded.
111  * Must be instantiated with the Ethernet addresses of peer traffic generator
112  * ports.
113  */
114 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
115 portid_t nb_peer_eth_addrs = 0;
116 
117 /*
118  * Probed Target Environment.
119  */
120 struct rte_port *ports;	       /**< For all probed ethernet ports. */
121 portid_t nb_ports;             /**< Number of probed ethernet ports. */
122 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
123 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
124 
125 /*
126  * Test Forwarding Configuration.
127  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
128  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
129  */
130 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
131 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
132 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
133 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
134 
135 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
136 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
137 
138 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
139 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * max(nb_rxq, nb_txq)). */
140 
141 /*
142  * Forwarding engines.
143  */
144 struct fwd_engine * fwd_engines[] = {
145 	&io_fwd_engine,
146 	&mac_fwd_engine,
147 	&mac_swap_engine,
148 	&flow_gen_engine,
149 	&rx_only_engine,
150 	&tx_only_engine,
151 	&csum_fwd_engine,
152 	&icmp_echo_engine,
153 #ifdef RTE_LIBRTE_IEEE1588
154 	&ieee1588_fwd_engine,
155 #endif
156 	NULL,
157 };
158 
159 struct fwd_config cur_fwd_config;
160 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
161 uint32_t retry_enabled;
162 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
163 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
164 
165 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
166 uint32_t param_total_num_mbufs = 0;  /**< Number of mbufs in all pools, if
167                                       * specified on the command line. */
168 
169 /*
170  * Configuration of packet segments used by the "txonly" processing engine.
171  */
172 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
173 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
174 	TXONLY_DEF_PACKET_LEN,
175 };
176 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
177 
178 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
179 /**< Split policy for packets to TX. */
180 
181 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
182 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
183 
184 /* Whether the current configuration is in DCB mode; 0 means it is not. */
185 uint8_t dcb_config = 0;
186 
187 /* Whether DCB is in testing status. */
188 uint8_t dcb_test = 0;
189 
190 /*
191  * Configurable number of RX/TX queues.
192  */
193 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
194 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
195 
196 /*
197  * Configurable number of RX/TX ring descriptors.
198  */
199 #define RTE_TEST_RX_DESC_DEFAULT 128
200 #define RTE_TEST_TX_DESC_DEFAULT 512
201 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
202 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
203 
204 #define RTE_PMD_PARAM_UNSET -1
205 /*
206  * Configurable values of RX and TX ring threshold registers.
207  */
208 
209 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
210 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
211 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
212 
213 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
214 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
215 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
216 
217 /*
218  * Configurable value of RX free threshold.
219  */
220 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
221 
222 /*
223  * Configurable value of RX drop enable.
224  */
225 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
226 
227 /*
228  * Configurable value of TX free threshold.
229  */
230 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
231 
232 /*
233  * Configurable value of TX RS bit threshold.
234  */
235 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
236 
237 /*
238  * Configurable value of TX queue flags.
239  */
240 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
241 
242 /*
243  * Receive Side Scaling (RSS) configuration.
244  */
245 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
246 
247 /*
248  * Port topology configuration
249  */
250 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
251 
252 /*
253  * Avoid flushing all the RX streams before forwarding starts.
254  */
255 uint8_t no_flush_rx = 0; /* flush by default */
256 
257 /*
258  * Avoid checking the link status when starting/stopping a port.
259  */
260 uint8_t no_link_check = 0; /* check by default */
261 
262 /*
263  * NIC bypass mode configuration options.
264  */
265 #ifdef RTE_NIC_BYPASS
266 
267 /* The NIC bypass watchdog timeout. */
268 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
269 
270 #endif
271 
272 /*
273  * Ethernet device configuration.
274  */
275 struct rte_eth_rxmode rx_mode = {
276 	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
277 	.split_hdr_size = 0,
278 	.header_split   = 0, /**< Header Split disabled. */
279 	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
280 	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
281 	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
282 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
283 	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
284 	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
285 };
286 
287 struct rte_fdir_conf fdir_conf = {
288 	.mode = RTE_FDIR_MODE_NONE,
289 	.pballoc = RTE_FDIR_PBALLOC_64K,
290 	.status = RTE_FDIR_REPORT_STATUS,
291 	.mask = {
292 		.vlan_tci_mask = 0x0,
293 		.ipv4_mask     = {
294 			.src_ip = 0xFFFFFFFF,
295 			.dst_ip = 0xFFFFFFFF,
296 		},
297 		.ipv6_mask     = {
298 			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
299 			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
300 		},
301 		.src_port_mask = 0xFFFF,
302 		.dst_port_mask = 0xFFFF,
303 		.mac_addr_byte_mask = 0xFF,
304 		.tunnel_type_mask = 1,
305 		.tunnel_id_mask = 0xFFFFFFFF,
306 	},
307 	.drop_queue = 127,
308 };
309 
310 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
311 
312 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
313 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
314 
315 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
316 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
317 
318 uint16_t nb_tx_queue_stats_mappings = 0;
319 uint16_t nb_rx_queue_stats_mappings = 0;
320 
321 unsigned max_socket = 0;
322 
323 /* Forward function declarations */
324 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
325 static void check_all_ports_link_status(uint32_t port_mask);
326 
327 /*
328  * Check if all the ports are started.
329  * If yes, return a positive value; if not, return zero.
330  */
331 static int all_ports_started(void);
332 
333 /*
334  * Find next enabled port
335  */
336 portid_t
337 find_next_port(portid_t p, struct rte_port *ports, int size)
338 {
339 	if (ports == NULL)
340 		rte_exit(-EINVAL, "failed to find a next port id\n");
341 
342 	while ((p < size) && (ports[p].enabled == 0))
343 		p++;
344 	return p;
345 }
346 
347 /*
348  * Setup default configuration.
349  */
350 static void
351 set_default_fwd_lcores_config(void)
352 {
353 	unsigned int i;
354 	unsigned int nb_lc;
355 	unsigned int sock_num;
356 
357 	nb_lc = 0;
358 	for (i = 0; i < RTE_MAX_LCORE; i++) {
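		/*
		 * sock_num is the socket ID plus one so that, at the end of
		 * this loop, max_socket holds the number of sockets in use
		 * (highest socket ID + 1), not the highest socket ID itself.
		 */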
359 		sock_num = rte_lcore_to_socket_id(i) + 1;
360 		if (sock_num > max_socket) {
361 			if (sock_num > RTE_MAX_NUMA_NODES)
362 				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
363 			max_socket = sock_num;
364 		}
365 		if (!rte_lcore_is_enabled(i))
366 			continue;
367 		if (i == rte_get_master_lcore())
368 			continue;
369 		fwd_lcores_cpuids[nb_lc++] = i;
370 	}
371 	nb_lcores = (lcoreid_t) nb_lc;
372 	nb_cfg_lcores = nb_lcores;
373 	nb_fwd_lcores = 1;
374 }
375 
376 static void
377 set_def_peer_eth_addrs(void)
378 {
379 	portid_t i;
380 
381 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
382 		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
383 		peer_eth_addrs[i].addr_bytes[5] = i;
384 	}
385 }
386 
387 static void
388 set_default_fwd_ports_config(void)
389 {
390 	portid_t pt_id;
391 
392 	for (pt_id = 0; pt_id < nb_ports; pt_id++)
393 		fwd_ports_ids[pt_id] = pt_id;
394 
395 	nb_cfg_ports = nb_ports;
396 	nb_fwd_ports = nb_ports;
397 }
398 
399 void
400 set_def_fwd_config(void)
401 {
402 	set_default_fwd_lcores_config();
403 	set_def_peer_eth_addrs();
404 	set_default_fwd_ports_config();
405 }
406 
407 /*
408  * Configuration initialisation done once at init time.
409  */
410 static void
411 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
412 		 unsigned int socket_id)
413 {
414 	char pool_name[RTE_MEMPOOL_NAMESIZE];
415 	struct rte_mempool *rte_mp = NULL;
416 	uint32_t mb_size;
417 
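	/* One mempool element holds the mbuf header plus its data room. */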
418 	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
419 	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
420 
421 	RTE_LOG(INFO, USER1,
422 		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
423 		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
424 
425 #ifdef RTE_LIBRTE_PMD_XENVIRT
426 	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
427 		(unsigned) mb_mempool_cache,
428 		sizeof(struct rte_pktmbuf_pool_private),
429 		rte_pktmbuf_pool_init, NULL,
430 		rte_pktmbuf_init, NULL,
431 		socket_id, 0);
432 #endif
433 
434 	/* If the XEN allocation above failed, fall back to normal allocation. */
435 	if (rte_mp == NULL) {
436 		if (mp_anon != 0) {
437 			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
438 				mb_size, (unsigned) mb_mempool_cache,
439 				sizeof(struct rte_pktmbuf_pool_private),
440 				socket_id, 0);
441 
442 			if (rte_mempool_populate_anon(rte_mp) == 0) {
443 				rte_mempool_free(rte_mp);
444 				rte_mp = NULL;
445 			} else {
446 				rte_pktmbuf_pool_init(rte_mp, NULL);
447 				rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
448 			}
449 		} else { /* wrapper to rte_mempool_create() */
450 			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
451 				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
452 		}
453 	}
454 
455 	if (rte_mp == NULL) {
456 		rte_exit(EXIT_FAILURE,
457 			"Creation of mbuf pool for socket %u failed: %s\n",
458 			socket_id, rte_strerror(rte_errno));
459 	} else if (verbose_level > 0) {
460 		rte_mempool_dump(stdout, rte_mp);
461 	}
462 }
463 
464 /*
465  * Check whether the given socket ID is valid in NUMA mode;
466  * return 0 if valid, -1 otherwise.
467  */
468 static int
469 check_socket_id(const unsigned int socket_id)
470 {
471 	static int warning_once = 0;
472 
473 	if (socket_id >= max_socket) {
474 		if (!warning_once && numa_support)
475 			printf("Warning: NUMA should be configured manually by"
476 			       " using --port-numa-config and"
477 			       " --ring-numa-config parameters along with"
478 			       " --numa.\n");
479 		warning_once = 1;
480 		return -1;
481 	}
482 	return 0;
483 }
484 
485 static void
486 init_config(void)
487 {
488 	portid_t pid;
489 	struct rte_port *port;
490 	struct rte_mempool *mbp;
491 	unsigned int nb_mbuf_per_pool;
492 	lcoreid_t  lc_id;
493 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
494 
495 	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
496 	/* Configuration of logical cores. */
497 	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
498 				sizeof(struct fwd_lcore *) * nb_lcores,
499 				RTE_CACHE_LINE_SIZE);
500 	if (fwd_lcores == NULL) {
501 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
502 							"failed\n", nb_lcores);
503 	}
504 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
505 		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
506 					       sizeof(struct fwd_lcore),
507 					       RTE_CACHE_LINE_SIZE);
508 		if (fwd_lcores[lc_id] == NULL) {
509 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
510 								"failed\n");
511 		}
512 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
513 	}
514 
515 	/*
516 	 * Create mbuf pools.
517 	 * If NUMA support is disabled, create a single mbuf pool in
518 	 * socket 0 memory by default.
519 	 * Otherwise, create an mbuf pool in the memory of each socket in use.
520 	 *
521 	 * Use the maximum value of nb_rxd and nb_txd here, so that nb_rxd and
522 	 * nb_txd can be re-configured at run time.
523 	 */
524 	if (param_total_num_mbufs)
525 		nb_mbuf_per_pool = param_total_num_mbufs;
526 	else {
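		/*
		 * Heuristic sizing: enough mbufs for one full RX ring and one
		 * full TX ring per pool, plus the per-lcore mempool caches
		 * and one burst in flight.
		 */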
527 		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
528 				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
529 
530 		if (!numa_support)
531 			nb_mbuf_per_pool =
532 				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
533 	}
534 
535 	if (!numa_support) {
536 		if (socket_num == UMA_NO_CONFIG)
537 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
538 		else
539 			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
540 						 socket_num);
541 	}
542 
543 	FOREACH_PORT(pid, ports) {
544 		port = &ports[pid];
545 		rte_eth_dev_info_get(pid, &port->dev_info);
546 
547 		if (numa_support) {
548 			if (port_numa[pid] != NUMA_NO_CONFIG)
549 				port_per_socket[port_numa[pid]]++;
550 			else {
551 				uint32_t socket_id = rte_eth_dev_socket_id(pid);
552 
553 				/* if socket_id is invalid, set to 0 */
554 				if (check_socket_id(socket_id) < 0)
555 					socket_id = 0;
556 				port_per_socket[socket_id]++;
557 			}
558 		}
559 
560 		/* set flag to initialize port/queue */
561 		port->need_reconfig = 1;
562 		port->need_reconfig_queues = 1;
563 	}
564 
565 	if (numa_support) {
566 		uint8_t i;
567 		unsigned int nb_mbuf;
568 
569 		if (param_total_num_mbufs)
570 			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
571 
572 		for (i = 0; i < max_socket; i++) {
573 			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
574 			if (nb_mbuf)
575 				mbuf_pool_create(mbuf_data_size,
576 						nb_mbuf, i);
577 		}
578 	}
579 	init_port_config();
580 
581 	/*
582 	 * Record which mbuf pool each logical core should use, if needed.
583 	 */
584 	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
585 		mbp = mbuf_pool_find(
586 			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
587 
588 		if (mbp == NULL)
589 			mbp = mbuf_pool_find(0);
590 		fwd_lcores[lc_id]->mbp = mbp;
591 	}
592 
593 	/* Configuration of packet forwarding streams. */
594 	if (init_fwd_streams() < 0)
595 		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
596 
597 	fwd_config_setup();
598 }
599 
600 
601 void
602 reconfig(portid_t new_port_id, unsigned socket_id)
603 {
604 	struct rte_port *port;
605 
606 	/* Reconfiguration of Ethernet ports. */
607 	port = &ports[new_port_id];
608 	rte_eth_dev_info_get(new_port_id, &port->dev_info);
609 
610 	/* set flag to initialize port/queue */
611 	port->need_reconfig = 1;
612 	port->need_reconfig_queues = 1;
613 	port->socket_id = socket_id;
614 
615 	init_port_config();
616 }
617 
618 
619 int
620 init_fwd_streams(void)
621 {
622 	portid_t pid;
623 	struct rte_port *port;
624 	streamid_t sm_id, nb_fwd_streams_new;
625 	queueid_t q;
626 
627 	/* Set the socket ID according to whether NUMA is enabled. */
628 	FOREACH_PORT(pid, ports) {
629 		port = &ports[pid];
630 		if (nb_rxq > port->dev_info.max_rx_queues) {
631 			printf("Fail: nb_rxq(%d) is greater than "
632 				"max_rx_queues(%d)\n", nb_rxq,
633 				port->dev_info.max_rx_queues);
634 			return -1;
635 		}
636 		if (nb_txq > port->dev_info.max_tx_queues) {
637 			printf("Fail: nb_txq(%d) is greater than "
638 				"max_tx_queues(%d)\n", nb_txq,
639 				port->dev_info.max_tx_queues);
640 			return -1;
641 		}
642 		if (numa_support) {
643 			if (port_numa[pid] != NUMA_NO_CONFIG)
644 				port->socket_id = port_numa[pid];
645 			else {
646 				port->socket_id = rte_eth_dev_socket_id(pid);
647 
648 				/* if socket_id is invalid, set to 0 */
649 				if (check_socket_id(port->socket_id) < 0)
650 					port->socket_id = 0;
651 			}
652 		}
653 		else {
654 			if (socket_num == UMA_NO_CONFIG)
655 				port->socket_id = 0;
656 			else
657 				port->socket_id = socket_num;
658 		}
659 	}
660 
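	/*
	 * One forwarding stream is allocated per queue on each port; use the
	 * larger of nb_rxq and nb_txq so that both RX-driven and TX-driven
	 * engines have enough streams.
	 */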
661 	q = RTE_MAX(nb_rxq, nb_txq);
662 	if (q == 0) {
663 		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
664 		return -1;
665 	}
666 	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
667 	if (nb_fwd_streams_new == nb_fwd_streams)
668 		return 0;
669 	/* clear the old */
670 	if (fwd_streams != NULL) {
671 		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
672 			if (fwd_streams[sm_id] == NULL)
673 				continue;
674 			rte_free(fwd_streams[sm_id]);
675 			fwd_streams[sm_id] = NULL;
676 		}
677 		rte_free(fwd_streams);
678 		fwd_streams = NULL;
679 	}
680 
681 	/* init new */
682 	nb_fwd_streams = nb_fwd_streams_new;
683 	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
684 		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
685 	if (fwd_streams == NULL)
686 		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
687 						"failed\n", nb_fwd_streams);
688 
689 	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
690 		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
691 				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
692 		if (fwd_streams[sm_id] == NULL)
693 			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
694 								" failed\n");
695 	}
696 
697 	return 0;
698 }
699 
700 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
701 static void
702 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
703 {
704 	unsigned int total_burst;
705 	unsigned int nb_burst;
706 	unsigned int burst_stats[3];
707 	uint16_t pktnb_stats[3];
708 	uint16_t nb_pkt;
709 	int burst_percent[3];
710 
711 	/*
712 	 * First compute the total number of packet bursts and the
713 	 * two highest numbers of bursts of the same number of packets.
714 	 */
715 	total_burst = 0;
716 	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
717 	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
718 	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
719 		nb_burst = pbs->pkt_burst_spread[nb_pkt];
720 		if (nb_burst == 0)
721 			continue;
722 		total_burst += nb_burst;
723 		if (nb_burst > burst_stats[0]) {
724 			burst_stats[1] = burst_stats[0];
725 			pktnb_stats[1] = pktnb_stats[0];
726 			burst_stats[0] = nb_burst;
727 			pktnb_stats[0] = nb_pkt;
728 		} else if (nb_burst > burst_stats[1]) { burst_stats[1] = nb_burst; pktnb_stats[1] = nb_pkt; }
729 	}
730 	if (total_burst == 0)
731 		return;
732 	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
733 	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
734 	       burst_percent[0], (int) pktnb_stats[0]);
735 	if (burst_stats[0] == total_burst) {
736 		printf("]\n");
737 		return;
738 	}
739 	if (burst_stats[0] + burst_stats[1] == total_burst) {
740 		printf(" + %d%% of %d pkts]\n",
741 		       100 - burst_percent[0], pktnb_stats[1]);
742 		return;
743 	}
744 	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
745 	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
746 	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
747 		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
748 		return;
749 	}
750 	printf(" + %d%% of %d pkts + %d%% of others]\n",
751 	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
752 }
753 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
754 
755 static void
756 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
757 {
758 	struct rte_port *port;
759 	uint8_t i;
760 
761 	static const char *fwd_stats_border = "----------------------";
762 
763 	port = &ports[port_id];
764 	printf("\n  %s Forward statistics for port %-2d %s\n",
765 	       fwd_stats_border, port_id, fwd_stats_border);
766 
767 	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
768 		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
769 		       "%-"PRIu64"\n",
770 		       stats->ipackets, stats->imissed,
771 		       (uint64_t) (stats->ipackets + stats->imissed));
772 
773 		if (cur_fwd_eng == &csum_fwd_engine)
774 			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
775 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
776 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
777 			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
778 			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
779 		}
780 
781 		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
782 		       "%-"PRIu64"\n",
783 		       stats->opackets, port->tx_dropped,
784 		       (uint64_t) (stats->opackets + port->tx_dropped));
785 	}
786 	else {
787 		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
788 		       "%14"PRIu64"\n",
789 		       stats->ipackets, stats->imissed,
790 		       (uint64_t) (stats->ipackets + stats->imissed));
791 
792 		if (cur_fwd_eng == &csum_fwd_engine)
793 			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
794 			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
795 		if ((stats->ierrors + stats->rx_nombuf) > 0) {
796 			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
797 			printf("  RX-nombufs:             %14"PRIu64"\n",
798 			       stats->rx_nombuf);
799 		}
800 
801 		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
802 		       "%14"PRIu64"\n",
803 		       stats->opackets, port->tx_dropped,
804 		       (uint64_t) (stats->opackets + port->tx_dropped));
805 	}
806 
807 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
808 	if (port->rx_stream)
809 		pkt_burst_stats_display("RX",
810 			&port->rx_stream->rx_burst_stats);
811 	if (port->tx_stream)
812 		pkt_burst_stats_display("TX",
813 			&port->tx_stream->tx_burst_stats);
814 #endif
815 
816 	if (port->rx_queue_stats_mapping_enabled) {
817 		printf("\n");
818 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
819 			printf("  Stats reg %2d RX-packets:%14"PRIu64
820 			       "     RX-errors:%14"PRIu64
821 			       "    RX-bytes:%14"PRIu64"\n",
822 			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
823 		}
824 		printf("\n");
825 	}
826 	if (port->tx_queue_stats_mapping_enabled) {
827 		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
828 			printf("  Stats reg %2d TX-packets:%14"PRIu64
829 			       "                                 TX-bytes:%14"PRIu64"\n",
830 			       i, stats->q_opackets[i], stats->q_obytes[i]);
831 		}
832 	}
833 
834 	printf("  %s--------------------------------%s\n",
835 	       fwd_stats_border, fwd_stats_border);
836 }
837 
838 static void
839 fwd_stream_stats_display(streamid_t stream_id)
840 {
841 	struct fwd_stream *fs;
842 	static const char *fwd_top_stats_border = "-------";
843 
844 	fs = fwd_streams[stream_id];
845 	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
846 	    (fs->fwd_dropped == 0))
847 		return;
848 	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
849 	       "TX Port=%2d/Queue=%2d %s\n",
850 	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
851 	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
852 	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
853 	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
854 
855 	/* If in checksum forwarding mode, also display bad checksum counters. */
856 	if (cur_fwd_eng == &csum_fwd_engine) {
857 		printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
858 			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
859 	}
860 
861 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
862 	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
863 	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
864 #endif
865 }
866 
867 static void
868 flush_fwd_rx_queues(void)
869 {
870 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
871 	portid_t  rxp;
872 	portid_t port_id;
873 	queueid_t rxq;
874 	uint16_t  nb_rx;
875 	uint16_t  i;
876 	uint8_t   j;
877 
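	/*
	 * Drain every RX queue twice, with a short pause in between, so that
	 * packets still in flight during the first pass are caught by the
	 * second pass.
	 */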
878 	for (j = 0; j < 2; j++) {
879 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
880 			for (rxq = 0; rxq < nb_rxq; rxq++) {
881 				port_id = fwd_ports_ids[rxp];
882 				do {
883 					nb_rx = rte_eth_rx_burst(port_id, rxq,
884 						pkts_burst, MAX_PKT_BURST);
885 					for (i = 0; i < nb_rx; i++)
886 						rte_pktmbuf_free(pkts_burst[i]);
887 				} while (nb_rx > 0);
888 			}
889 		}
890 		rte_delay_ms(10); /* wait 10 milliseconds before retrying */
891 	}
892 }
893 
894 static void
895 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
896 {
897 	struct fwd_stream **fsm;
898 	streamid_t nb_fs;
899 	streamid_t sm_id;
900 
901 	fsm = &fwd_streams[fc->stream_idx];
902 	nb_fs = fc->stream_nb;
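	/* Busy-loop over this lcore's streams until told to stop. */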
903 	do {
904 		for (sm_id = 0; sm_id < nb_fs; sm_id++)
905 			(*pkt_fwd)(fsm[sm_id]);
906 	} while (! fc->stopped);
907 }
908 
909 static int
910 start_pkt_forward_on_core(void *fwd_arg)
911 {
912 	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
913 			     cur_fwd_config.fwd_eng->packet_fwd);
914 	return 0;
915 }
916 
917 /*
918  * Run the TXONLY packet forwarding engine to send a single burst of packets.
919  * Used to start communication flows in network loopback test configurations.
920  */
921 static int
922 run_one_txonly_burst_on_core(void *fwd_arg)
923 {
924 	struct fwd_lcore *fwd_lc;
925 	struct fwd_lcore tmp_lcore;
926 
927 	fwd_lc = (struct fwd_lcore *) fwd_arg;
928 	tmp_lcore = *fwd_lc;
929 	tmp_lcore.stopped = 1;
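	/*
	 * With "stopped" already set, run_pkt_fwd_on_lcore() executes its
	 * do/while body exactly once: a single burst per stream.
	 */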
930 	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
931 	return 0;
932 }
933 
934 /*
935  * Launch packet forwarding:
936  *     - Set up the per-port forwarding context.
937  *     - Launch logical cores with their forwarding configuration.
938  */
939 static void
940 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
941 {
942 	port_fwd_begin_t port_fwd_begin;
943 	unsigned int i;
944 	unsigned int lc_id;
945 	int diag;
946 
947 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
948 	if (port_fwd_begin != NULL) {
949 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
950 			(*port_fwd_begin)(fwd_ports_ids[i]);
951 	}
952 	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
953 		lc_id = fwd_lcores_cpuids[i];
954 		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
955 			fwd_lcores[i]->stopped = 0;
956 			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
957 						     fwd_lcores[i], lc_id);
958 			if (diag != 0)
959 				printf("launch lcore %u failed - diag=%d\n",
960 				       lc_id, diag);
961 		}
962 	}
963 }
964 
965 /*
966  * Launch packet forwarding configuration.
967  */
968 void
969 start_packet_forwarding(int with_tx_first)
970 {
971 	port_fwd_begin_t port_fwd_begin;
972 	port_fwd_end_t  port_fwd_end;
973 	struct rte_port *port;
974 	unsigned int i;
975 	portid_t   pt_id;
976 	streamid_t sm_id;
977 
978 	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
979 		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
980 
981 	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
982 		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
983 
984 	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
985 		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
986 		(!nb_rxq || !nb_txq))
987 		rte_exit(EXIT_FAILURE,
988 			"Either rxq or txq are 0, cannot use %s fwd mode\n",
989 			cur_fwd_eng->fwd_mode_name);
990 
991 	if (all_ports_started() == 0) {
992 		printf("Not all ports were started\n");
993 		return;
994 	}
995 	if (test_done == 0) {
996 		printf("Packet forwarding already started\n");
997 		return;
998 	}
999 
1000 	if (init_fwd_streams() < 0) {
1001 		printf("Fail from init_fwd_streams()\n");
1002 		return;
1003 	}
1004 
1005 	if (dcb_test) {
1006 		for (i = 0; i < nb_fwd_ports; i++) {
1007 			pt_id = fwd_ports_ids[i];
1008 			port = &ports[pt_id];
1009 			if (!port->dcb_flag) {
1010 				printf("In DCB mode, all forwarding ports must "
1011                                        "be configured in this mode.\n");
1012 				return;
1013 			}
1014 		}
1015 		if (nb_fwd_lcores == 1) {
1016 			printf("In DCB mode,the nb forwarding cores "
1017                                "should be larger than 1.\n");
1018 			return;
1019 		}
1020 	}
1021 	test_done = 0;
1022 
1023 	if (!no_flush_rx)
1024 		flush_fwd_rx_queues();
1025 
1026 	fwd_config_setup();
1027 	rxtx_config_display();
1028 
1029 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1030 		pt_id = fwd_ports_ids[i];
1031 		port = &ports[pt_id];
1032 		rte_eth_stats_get(pt_id, &port->stats);
1033 		port->tx_dropped = 0;
1034 
1035 		map_port_queue_stats_mapping_registers(pt_id, port);
1036 	}
1037 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1038 		fwd_streams[sm_id]->rx_packets = 0;
1039 		fwd_streams[sm_id]->tx_packets = 0;
1040 		fwd_streams[sm_id]->fwd_dropped = 0;
1041 		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1042 		fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1043 
1044 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1045 		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1046 		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
1047 		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1048 		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
1049 #endif
1050 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1051 		fwd_streams[sm_id]->core_cycles = 0;
1052 #endif
1053 	}
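	/*
	 * With tx_first, send one burst of packets on every port before
	 * launching the regular forwarding engines, so that traffic exists
	 * in network loopback topologies.
	 */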
1054 	if (with_tx_first) {
1055 		port_fwd_begin = tx_only_engine.port_fwd_begin;
1056 		if (port_fwd_begin != NULL) {
1057 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1058 				(*port_fwd_begin)(fwd_ports_ids[i]);
1059 		}
1060 		launch_packet_forwarding(run_one_txonly_burst_on_core);
1061 		rte_eal_mp_wait_lcore();
1062 		port_fwd_end = tx_only_engine.port_fwd_end;
1063 		if (port_fwd_end != NULL) {
1064 			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1065 				(*port_fwd_end)(fwd_ports_ids[i]);
1066 		}
1067 	}
1068 	launch_packet_forwarding(start_pkt_forward_on_core);
1069 }
1070 
1071 void
1072 stop_packet_forwarding(void)
1073 {
1074 	struct rte_eth_stats stats;
1075 	struct rte_port *port;
1076 	port_fwd_end_t  port_fwd_end;
1077 	int i;
1078 	portid_t   pt_id;
1079 	streamid_t sm_id;
1080 	lcoreid_t  lc_id;
1081 	uint64_t total_recv;
1082 	uint64_t total_xmit;
1083 	uint64_t total_rx_dropped;
1084 	uint64_t total_tx_dropped;
1085 	uint64_t total_rx_nombuf;
1086 	uint64_t tx_dropped;
1087 	uint64_t rx_bad_ip_csum;
1088 	uint64_t rx_bad_l4_csum;
1089 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1090 	uint64_t fwd_cycles;
1091 #endif
1092 	static const char *acc_stats_border = "+++++++++++++++";
1093 
1094 	if (test_done) {
1095 		printf("Packet forwarding not started\n");
1096 		return;
1097 	}
1098 	printf("Telling cores to stop...");
1099 	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1100 		fwd_lcores[lc_id]->stopped = 1;
1101 	printf("\nWaiting for lcores to finish...\n");
1102 	rte_eal_mp_wait_lcore();
1103 	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1104 	if (port_fwd_end != NULL) {
1105 		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1106 			pt_id = fwd_ports_ids[i];
1107 			(*port_fwd_end)(pt_id);
1108 		}
1109 	}
1110 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1111 	fwd_cycles = 0;
1112 #endif
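	/*
	 * Fold the per-stream drop and bad-checksum counters back into their
	 * ports. When there are more streams than ports, also display
	 * per-stream statistics; otherwise attach each stream to its ports
	 * so that burst statistics can be displayed per port.
	 */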
1113 	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1114 		if (cur_fwd_config.nb_fwd_streams >
1115 		    cur_fwd_config.nb_fwd_ports) {
1116 			fwd_stream_stats_display(sm_id);
1117 			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1118 			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1119 		} else {
1120 			ports[fwd_streams[sm_id]->tx_port].tx_stream =
1121 				fwd_streams[sm_id];
1122 			ports[fwd_streams[sm_id]->rx_port].rx_stream =
1123 				fwd_streams[sm_id];
1124 		}
1125 		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1126 		tx_dropped = (uint64_t) (tx_dropped +
1127 					 fwd_streams[sm_id]->fwd_dropped);
1128 		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1129 
1130 		rx_bad_ip_csum =
1131 			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1132 		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1133 					 fwd_streams[sm_id]->rx_bad_ip_csum);
1134 		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1135 							rx_bad_ip_csum;
1136 
1137 		rx_bad_l4_csum =
1138 			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1139 		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1140 					 fwd_streams[sm_id]->rx_bad_l4_csum);
1141 		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1142 							rx_bad_l4_csum;
1143 
1144 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1145 		fwd_cycles = (uint64_t) (fwd_cycles +
1146 					 fwd_streams[sm_id]->core_cycles);
1147 #endif
1148 	}
1149 	total_recv = 0;
1150 	total_xmit = 0;
1151 	total_rx_dropped = 0;
1152 	total_tx_dropped = 0;
1153 	total_rx_nombuf  = 0;
1154 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1155 		pt_id = fwd_ports_ids[i];
1156 
1157 		port = &ports[pt_id];
1158 		rte_eth_stats_get(pt_id, &stats);
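		/*
		 * Subtract the counters snapshotted when forwarding started,
		 * so the figures below cover this forwarding run only.
		 */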
1159 		stats.ipackets -= port->stats.ipackets;
1160 		port->stats.ipackets = 0;
1161 		stats.opackets -= port->stats.opackets;
1162 		port->stats.opackets = 0;
1163 		stats.ibytes   -= port->stats.ibytes;
1164 		port->stats.ibytes = 0;
1165 		stats.obytes   -= port->stats.obytes;
1166 		port->stats.obytes = 0;
1167 		stats.imissed  -= port->stats.imissed;
1168 		port->stats.imissed = 0;
1169 		stats.oerrors  -= port->stats.oerrors;
1170 		port->stats.oerrors = 0;
1171 		stats.rx_nombuf -= port->stats.rx_nombuf;
1172 		port->stats.rx_nombuf = 0;
1173 
1174 		total_recv += stats.ipackets;
1175 		total_xmit += stats.opackets;
1176 		total_rx_dropped += stats.imissed;
1177 		total_tx_dropped += port->tx_dropped;
1178 		total_rx_nombuf  += stats.rx_nombuf;
1179 
1180 		fwd_port_stats_display(pt_id, &stats);
1181 	}
1182 	printf("\n  %s Accumulated forward statistics for all ports"
1183 	       "%s\n",
1184 	       acc_stats_border, acc_stats_border);
1185 	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1186 	       "%-"PRIu64"\n"
1187 	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1188 	       "%-"PRIu64"\n",
1189 	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1190 	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1191 	if (total_rx_nombuf > 0)
1192 		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1193 	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1194 	       "%s\n",
1195 	       acc_stats_border, acc_stats_border);
1196 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1197 	if (total_recv > 0)
1198 		printf("\n  CPU cycles/packet=%u (total cycles="
1199 		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
1200 		       (unsigned int)(fwd_cycles / total_recv),
1201 		       fwd_cycles, total_recv);
1202 #endif
1203 	printf("\nDone.\n");
1204 	test_done = 1;
1205 }
1206 
1207 void
1208 dev_set_link_up(portid_t pid)
1209 {
1210 	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1211 		printf("\nSet link up fail.\n");
1212 }
1213 
1214 void
1215 dev_set_link_down(portid_t pid)
1216 {
1217 	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1218 		printf("\nSet link down fail.\n");
1219 }
1220 
1221 static int
1222 all_ports_started(void)
1223 {
1224 	portid_t pi;
1225 	struct rte_port *port;
1226 
1227 	FOREACH_PORT(pi, ports) {
1228 		port = &ports[pi];
1229 		/* Check if there is a port which is not started */
1230 		if ((port->port_status != RTE_PORT_STARTED) &&
1231 			(port->slave_flag == 0))
1232 			return 0;
1233 	}
1234 
1235 	/* All (non-slave) ports are started. */
1236 	return 1;
1237 }
1238 
1239 int
1240 all_ports_stopped(void)
1241 {
1242 	portid_t pi;
1243 	struct rte_port *port;
1244 
1245 	FOREACH_PORT(pi, ports) {
1246 		port = &ports[pi];
1247 		if ((port->port_status != RTE_PORT_STOPPED) &&
1248 			(port->slave_flag == 0))
1249 			return 0;
1250 	}
1251 
1252 	return 1;
1253 }
1254 
1255 int
1256 port_is_started(portid_t port_id)
1257 {
1258 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1259 		return 0;
1260 
1261 	if (ports[port_id].port_status != RTE_PORT_STARTED)
1262 		return 0;
1263 
1264 	return 1;
1265 }
1266 
1267 static int
1268 port_is_closed(portid_t port_id)
1269 {
1270 	if (port_id_is_invalid(port_id, ENABLED_WARN))
1271 		return 0;
1272 
1273 	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1274 		return 0;
1275 
1276 	return 1;
1277 }
1278 
1279 int
1280 start_port(portid_t pid)
1281 {
1282 	int diag, need_check_link_status = -1;
1283 	portid_t pi;
1284 	queueid_t qi;
1285 	struct rte_port *port;
1286 	struct ether_addr mac_addr;
1287 
1288 	if (port_id_is_invalid(pid, ENABLED_WARN))
1289 		return 0;
1290 
1291 	if (dcb_config)
1292 		dcb_test = 1;
1293 	FOREACH_PORT(pi, ports) {
1294 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1295 			continue;
1296 
1297 		need_check_link_status = 0;
1298 		port = &ports[pi];
1299 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1300 						 RTE_PORT_HANDLING) == 0) {
1301 			printf("Port %d is now not stopped\n", pi);
1302 			continue;
1303 		}
1304 
1305 		if (port->need_reconfig > 0) {
1306 			port->need_reconfig = 0;
1307 
1308 			printf("Configuring Port %d (socket %u)\n", pi,
1309 					port->socket_id);
1310 			/* configure port */
1311 			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1312 						&(port->dev_conf));
1313 			if (diag != 0) {
1314 				if (rte_atomic16_cmpset(&(port->port_status),
1315 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1316 					printf("Port %d can not be set back "
1317 							"to stopped\n", pi);
1318 				printf("Fail to configure port %d\n", pi);
1319 				/* try to reconfigure port next time */
1320 				port->need_reconfig = 1;
1321 				return -1;
1322 			}
1323 		}
1324 		if (port->need_reconfig_queues > 0) {
1325 			port->need_reconfig_queues = 0;
1326 			/* setup tx queues */
1327 			for (qi = 0; qi < nb_txq; qi++) {
1328 				if ((numa_support) &&
1329 					(txring_numa[pi] != NUMA_NO_CONFIG))
1330 					diag = rte_eth_tx_queue_setup(pi, qi,
1331 						nb_txd, txring_numa[pi],
1332 						&(port->tx_conf));
1333 				else
1334 					diag = rte_eth_tx_queue_setup(pi, qi,
1335 						nb_txd, port->socket_id,
1336 						&(port->tx_conf));
1337 
1338 				if (diag == 0)
1339 					continue;
1340 
1341 				/* Failed to set up TX queue; return. */
1342 				if (rte_atomic16_cmpset(&(port->port_status),
1343 							RTE_PORT_HANDLING,
1344 							RTE_PORT_STOPPED) == 0)
1345 					printf("Port %d can not be set back "
1346 							"to stopped\n", pi);
1347 				printf("Fail to configure port %d tx queues\n", pi);
1348 				/* try to reconfigure queues next time */
1349 				port->need_reconfig_queues = 1;
1350 				return -1;
1351 			}
1352 			/* setup rx queues */
1353 			for (qi = 0; qi < nb_rxq; qi++) {
1354 				if ((numa_support) &&
1355 					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
1356 					struct rte_mempool * mp =
1357 						mbuf_pool_find(rxring_numa[pi]);
1358 					if (mp == NULL) {
1359 						printf("Failed to setup RX queue:"
1360 							"No mempool allocation"
1361 							" on the socket %d\n",
1362 							rxring_numa[pi]);
1363 						return -1;
1364 					}
1365 
1366 					diag = rte_eth_rx_queue_setup(pi, qi,
1367 					     nb_rxd, rxring_numa[pi],
1368 					     &(port->rx_conf), mp);
1369 				} else {
1370 					struct rte_mempool *mp =
1371 						mbuf_pool_find(port->socket_id);
1372 					if (mp == NULL) {
1373 						printf("Failed to setup RX queue:"
1374 							"No mempool allocation"
1375 							" on the socket %d\n",
1376 							port->socket_id);
1377 						return -1;
1378 					}
1379 					diag = rte_eth_rx_queue_setup(pi, qi,
1380 					     nb_rxd, port->socket_id,
1381 					     &(port->rx_conf), mp);
1382 				}
1383 				if (diag == 0)
1384 					continue;
1385 
1386 				/* Failed to set up RX queue; return. */
1387 				if (rte_atomic16_cmpset(&(port->port_status),
1388 							RTE_PORT_HANDLING,
1389 							RTE_PORT_STOPPED) == 0)
1390 					printf("Port %d can not be set back "
1391 							"to stopped\n", pi);
1392 				printf("Fail to configure port %d rx queues\n", pi);
1393 				/* try to reconfigure queues next time */
1394 				port->need_reconfig_queues = 1;
1395 				return -1;
1396 			}
1397 		}
1398 		/* start port */
1399 		if (rte_eth_dev_start(pi) < 0) {
1400 			printf("Fail to start port %d\n", pi);
1401 
1402 			/* Failed to start the port; roll the status back. */
1403 			if (rte_atomic16_cmpset(&(port->port_status),
1404 				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1405 				printf("Port %d can not be set back to "
1406 							"stopped\n", pi);
1407 			continue;
1408 		}
1409 
1410 		if (rte_atomic16_cmpset(&(port->port_status),
1411 			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1412 			printf("Port %d can not be set into started\n", pi);
1413 
1414 		rte_eth_macaddr_get(pi, &mac_addr);
1415 		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1416 				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1417 				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1418 				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1419 
1420 		/* At least one port started; need to check link status. */
1421 		need_check_link_status = 1;
1422 	}
1423 
1424 	if (need_check_link_status == 1 && !no_link_check)
1425 		check_all_ports_link_status(RTE_PORT_ALL);
1426 	else if (need_check_link_status == 0)
1427 		printf("Please stop the ports first\n");
1428 
1429 	printf("Done\n");
1430 	return 0;
1431 }
1432 
1433 void
1434 stop_port(portid_t pid)
1435 {
1436 	portid_t pi;
1437 	struct rte_port *port;
1438 	int need_check_link_status = 0;
1439 
1440 	if (dcb_test) {
1441 		dcb_test = 0;
1442 		dcb_config = 0;
1443 	}
1444 
1445 	if (port_id_is_invalid(pid, ENABLED_WARN))
1446 		return;
1447 
1448 	printf("Stopping ports...\n");
1449 
1450 	FOREACH_PORT(pi, ports) {
1451 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1452 			continue;
1453 
1454 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1455 			printf("Please remove port %d from forwarding configuration.\n", pi);
1456 			continue;
1457 		}
1458 
1459 		if (port_is_bonding_slave(pi)) {
1460 			printf("Please remove port %d from bonded device.\n", pi);
1461 			continue;
1462 		}
1463 
1464 		port = &ports[pi];
1465 		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1466 						RTE_PORT_HANDLING) == 0)
1467 			continue;
1468 
1469 		rte_eth_dev_stop(pi);
1470 
1471 		if (rte_atomic16_cmpset(&(port->port_status),
1472 			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1473 			printf("Port %d can not be set into stopped\n", pi);
1474 		need_check_link_status = 1;
1475 	}
1476 	if (need_check_link_status && !no_link_check)
1477 		check_all_ports_link_status(RTE_PORT_ALL);
1478 
1479 	printf("Done\n");
1480 }
1481 
1482 void
1483 close_port(portid_t pid)
1484 {
1485 	portid_t pi;
1486 	struct rte_port *port;
1487 
1488 	if (port_id_is_invalid(pid, ENABLED_WARN))
1489 		return;
1490 
1491 	printf("Closing ports...\n");
1492 
1493 	FOREACH_PORT(pi, ports) {
1494 		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1495 			continue;
1496 
1497 		if (port_is_forwarding(pi) != 0 && test_done == 0) {
1498 			printf("Please remove port %d from forwarding configuration.\n", pi);
1499 			continue;
1500 		}
1501 
1502 		if (port_is_bonding_slave(pi)) {
1503 			printf("Please remove port %d from bonded device.\n", pi);
1504 			continue;
1505 		}
1506 
1507 		port = &ports[pi];
1508 		if (rte_atomic16_cmpset(&(port->port_status),
1509 			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1510 			printf("Port %d is already closed\n", pi);
1511 			continue;
1512 		}
1513 
1514 		if (rte_atomic16_cmpset(&(port->port_status),
1515 			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1516 			printf("Port %d is now not stopped\n", pi);
1517 			continue;
1518 		}
1519 
1520 		rte_eth_dev_close(pi);
1521 
1522 		if (rte_atomic16_cmpset(&(port->port_status),
1523 			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1524 			printf("Port %d cannot be set to closed\n", pi);
1525 	}
1526 
1527 	printf("Done\n");
1528 }
1529 
1530 void
1531 attach_port(char *identifier)
1532 {
1533 	portid_t pi = 0;
1534 	unsigned int socket_id;
1535 
1536 	printf("Attaching a new port...\n");
1537 
1538 	if (identifier == NULL) {
1539 		printf("Invalid parameters are specified\n");
1540 		return;
1541 	}
1542 
1543 	if (rte_eth_dev_attach(identifier, &pi))
1544 		return;
1545 
1546 	ports[pi].enabled = 1;
1547 	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1548 	/* if socket_id is invalid, set to 0 */
1549 	if (check_socket_id(socket_id) < 0)
1550 		socket_id = 0;
1551 	reconfig(pi, socket_id);
1552 	rte_eth_promiscuous_enable(pi);
1553 
1554 	nb_ports = rte_eth_dev_count();
1555 
1556 	ports[pi].port_status = RTE_PORT_STOPPED;
1557 
1558 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1559 	printf("Done\n");
1560 }
1561 
1562 void
1563 detach_port(uint8_t port_id)
1564 {
1565 	char name[RTE_ETH_NAME_MAX_LEN];
1566 
1567 	printf("Detaching a port...\n");
1568 
1569 	if (!port_is_closed(port_id)) {
1570 		printf("Please close port first\n");
1571 		return;
1572 	}
1573 
1574 	if (rte_eth_dev_detach(port_id, name))
1575 		return;
1576 
1577 	ports[port_id].enabled = 0;
1578 	nb_ports = rte_eth_dev_count();
1579 
1580 	printf("Port '%s' is detached. Now total ports is %d\n",
1581 			name, nb_ports);
1582 	printf("Done\n");
1583 	return;
1584 }
1585 
1586 void
1587 pmd_test_exit(void)
1588 {
1589 	portid_t pt_id;
1590 
1591 	if (test_done == 0)
1592 		stop_packet_forwarding();
1593 
1594 	if (ports != NULL) {
1595 		no_link_check = 1;
1596 		FOREACH_PORT(pt_id, ports) {
1597 			printf("\nShutting down port %d...\n", pt_id);
1598 			fflush(stdout);
1599 			stop_port(pt_id);
1600 			close_port(pt_id);
1601 		}
1602 	}
1603 	printf("\nBye...\n");
1604 }
1605 
1606 typedef void (*cmd_func_t)(void);
1607 struct pmd_test_command {
1608 	const char *cmd_name;
1609 	cmd_func_t cmd_func;
1610 };
1611 
1612 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1613 
1614 /* Check the link status of all ports for up to 9s, and print the final status. */
1615 static void
1616 check_all_ports_link_status(uint32_t port_mask)
1617 {
1618 #define CHECK_INTERVAL 100 /* 100ms */
1619 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1620 	uint8_t portid, count, all_ports_up, print_flag = 0;
1621 	struct rte_eth_link link;
1622 
1623 	printf("Checking link statuses...\n");
1624 	fflush(stdout);
1625 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1626 		all_ports_up = 1;
1627 		FOREACH_PORT(portid, ports) {
1628 			if ((port_mask & (1 << portid)) == 0)
1629 				continue;
1630 			memset(&link, 0, sizeof(link));
1631 			rte_eth_link_get_nowait(portid, &link);
1632 			/* print link status if flag set */
1633 			if (print_flag == 1) {
1634 				if (link.link_status)
1635 					printf("Port %d Link Up - speed %u "
1636 						"Mbps - %s\n", (uint8_t)portid,
1637 						(unsigned)link.link_speed,
1638 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1639 					("full-duplex") : ("half-duplex\n"));
1640 				else
1641 					printf("Port %d Link Down\n",
1642 						(uint8_t)portid);
1643 				continue;
1644 			}
1645 			/* clear all_ports_up flag if any link down */
1646 			if (link.link_status == ETH_LINK_DOWN) {
1647 				all_ports_up = 0;
1648 				break;
1649 			}
1650 		}
1651 		/* after finally printing all link status, get out */
1652 		if (print_flag == 1)
1653 			break;
1654 
1655 		if (all_ports_up == 0) {
1656 			fflush(stdout);
1657 			rte_delay_ms(CHECK_INTERVAL);
1658 		}
1659 
1660 		/* set the print_flag if all ports up or timeout */
1661 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1662 			print_flag = 1;
1663 		}
1664 	}
1665 }
1666 
1667 static int
1668 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1669 {
1670 	uint16_t i;
1671 	int diag;
1672 	uint8_t mapping_found = 0;
1673 
1674 	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1675 		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1676 				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1677 			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1678 					tx_queue_stats_mappings[i].queue_id,
1679 					tx_queue_stats_mappings[i].stats_counter_id);
1680 			if (diag != 0)
1681 				return diag;
1682 			mapping_found = 1;
1683 		}
1684 	}
1685 	if (mapping_found)
1686 		port->tx_queue_stats_mapping_enabled = 1;
1687 	return 0;
1688 }
1689 
1690 static int
1691 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1692 {
1693 	uint16_t i;
1694 	int diag;
1695 	uint8_t mapping_found = 0;
1696 
1697 	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1698 		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1699 				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1700 			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1701 					rx_queue_stats_mappings[i].queue_id,
1702 					rx_queue_stats_mappings[i].stats_counter_id);
1703 			if (diag != 0)
1704 				return diag;
1705 			mapping_found = 1;
1706 		}
1707 	}
1708 	if (mapping_found)
1709 		port->rx_queue_stats_mapping_enabled = 1;
1710 	return 0;
1711 }
1712 
1713 static void
1714 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1715 {
1716 	int diag = 0;
1717 
1718 	diag = set_tx_queue_stats_mapping_registers(pi, port);
1719 	if (diag != 0) {
1720 		if (diag == -ENOTSUP) {
1721 			port->tx_queue_stats_mapping_enabled = 0;
1722 			printf("TX queue stats mapping not supported port id=%d\n", pi);
1723 		}
1724 		else
1725 			rte_exit(EXIT_FAILURE,
1726 					"set_tx_queue_stats_mapping_registers "
1727 					"failed for port id=%d diag=%d\n",
1728 					pi, diag);
1729 	}
1730 
1731 	diag = set_rx_queue_stats_mapping_registers(pi, port);
1732 	if (diag != 0) {
1733 		if (diag == -ENOTSUP) {
1734 			port->rx_queue_stats_mapping_enabled = 0;
1735 			printf("RX queue stats mapping not supported port id=%d\n", pi);
1736 		}
1737 		else
1738 			rte_exit(EXIT_FAILURE,
1739 					"set_rx_queue_stats_mapping_registers "
1740 					"failed for port id=%d diag=%d\n",
1741 					pi, diag);
1742 	}
1743 }
1744 
1745 static void
1746 rxtx_port_config(struct rte_port *port)
1747 {
1748 	port->rx_conf = port->dev_info.default_rxconf;
1749 	port->tx_conf = port->dev_info.default_txconf;
1750 
1751 	/* Check if any RX/TX parameters have been passed */
1752 	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1753 		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1754 
1755 	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1756 		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1757 
1758 	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1759 		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1760 
1761 	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1762 		port->rx_conf.rx_free_thresh = rx_free_thresh;
1763 
1764 	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1765 		port->rx_conf.rx_drop_en = rx_drop_en;
1766 
1767 	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1768 		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1769 
1770 	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1771 		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1772 
1773 	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1774 		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1775 
1776 	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1777 		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1778 
1779 	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1780 		port->tx_conf.tx_free_thresh = tx_free_thresh;
1781 
1782 	if (txq_flags != RTE_PMD_PARAM_UNSET)
1783 		port->tx_conf.txq_flags = txq_flags;
1784 }
1785 
1786 void
1787 init_port_config(void)
1788 {
1789 	portid_t pid;
1790 	struct rte_port *port;
1791 
1792 	FOREACH_PORT(pid, ports) {
1793 		port = &ports[pid];
1794 		port->dev_conf.rxmode = rx_mode;
1795 		port->dev_conf.fdir_conf = fdir_conf;
1796 		if (nb_rxq > 1) {
1797 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1798 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1799 		} else {
1800 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1801 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1802 		}
1803 
1804 		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1805 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1806 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1807 			else
1808 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1809 		}
1810 
1811 		if (port->dev_info.max_vfs != 0) {
1812 			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1813 				port->dev_conf.rxmode.mq_mode =
1814 					ETH_MQ_RX_VMDQ_RSS;
1815 			else
1816 				port->dev_conf.rxmode.mq_mode =
1817 					ETH_MQ_RX_NONE;
1818 
1819 			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1820 		}
1821 
1822 		rxtx_port_config(port);
1823 
1824 		rte_eth_macaddr_get(pid, &port->eth_addr);
1825 
1826 		map_port_queue_stats_mapping_registers(pid, port);
1827 #ifdef RTE_NIC_BYPASS
1828 		rte_eth_dev_bypass_init(pid);
1829 #endif
1830 	}
1831 }
1832 
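/*
 * Bonding helpers: a port enslaved to a bonded device is flagged here so
 * that the generic start/stop/close paths can skip it; the bonded device
 * itself drives its slaves.
 */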
1833 void set_port_slave_flag(portid_t slave_pid)
1834 {
1835 	struct rte_port *port;
1836 
1837 	port = &ports[slave_pid];
1838 	port->slave_flag = 1;
1839 }
1840 
1841 void clear_port_slave_flag(portid_t slave_pid)
1842 {
1843 	struct rte_port *port;
1844 
1845 	port = &ports[slave_pid];
1846 	port->slave_flag = 0;
1847 }
1848 
1849 uint8_t port_is_bonding_slave(portid_t slave_pid)
1850 {
1851 	struct rte_port *port;
1852 
1853 	port = &ports[slave_pid];
1854 	return port->slave_flag;
1855 }
1856 
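/* VLAN tags used to exercise the VMDQ+DCB pool mapping below. */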
1857 const uint16_t vlan_tags[] = {
1858 		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11, 12, 13, 14, 15,
1860 		16, 17, 18, 19, 20, 21, 22, 23,
1861 		24, 25, 26, 27, 28, 29, 30, 31
1862 };
1863 
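/*
 * Fill an rte_eth_conf for DCB, either combined with virtualization
 * (DCB+VT, i.e. VMDQ+DCB) or standalone. Worked example for the VT case
 * (figures assume an ixgbe-class NIC): with num_tcs == ETH_4_TCS, 32 pools
 * of 4 queues each are configured and VLAN tag vlan_tags[i] selects pool
 * bit (i % 32), so each of the 32 tags above gets its own pool.
 */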
static int
1865 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1866 		 enum dcb_mode_enable dcb_mode,
1867 		 enum rte_eth_nb_tcs num_tcs,
1868 		 uint8_t pfc_en)
1869 {
1870 	uint8_t i;
1871 
	/*
	 * Build the correct configuration for DCB+VT based on the VLAN tags
	 * array given above and on the number of traffic classes available
	 * for use.
	 */
1876 	if (dcb_mode == DCB_VT_ENABLED) {
1877 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1878 				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
1879 		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1880 				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1881 
		/* VMDQ+DCB RX and TX configurations */
1883 		vmdq_rx_conf->enable_default_pool = 0;
1884 		vmdq_rx_conf->default_pool = 0;
1885 		vmdq_rx_conf->nb_queue_pools =
1886 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1887 		vmdq_tx_conf->nb_queue_pools =
1888 			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1889 
1890 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1891 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1892 			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1893 			vmdq_rx_conf->pool_map[i].pools =
1894 				1 << (i % vmdq_rx_conf->nb_queue_pools);
1895 		}
1896 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1897 			vmdq_rx_conf->dcb_tc[i] = i;
1898 			vmdq_tx_conf->dcb_tc[i] = i;
1899 		}
1900 
1901 		/* set DCB mode of RX and TX of multiple queues */
1902 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1903 		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1904 	} else {
1905 		struct rte_eth_dcb_rx_conf *rx_conf =
1906 				&eth_conf->rx_adv_conf.dcb_rx_conf;
1907 		struct rte_eth_dcb_tx_conf *tx_conf =
1908 				&eth_conf->tx_adv_conf.dcb_tx_conf;
1909 
1910 		rx_conf->nb_tcs = num_tcs;
1911 		tx_conf->nb_tcs = num_tcs;
1912 
1913 		for (i = 0; i < num_tcs; i++) {
1914 			rx_conf->dcb_tc[i] = i;
1915 			tx_conf->dcb_tc[i] = i;
1916 		}
1917 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1918 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1919 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1920 	}
1921 
1922 	if (pfc_en)
1923 		eth_conf->dcb_capability_en =
1924 				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1925 	else
1926 		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1927 
1928 	return 0;
1929 }
1930 
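/*
 * Reconfigure a port for DCB testing: query the device capabilities, size
 * nb_rxq/nb_txq accordingly, build the rte_eth_conf with get_eth_dcb_conf()
 * and install a VLAN filter entry for each tag of vlan_tags[]. The port is
 * expected to be stopped beforehand (the interactive "port config ... dcb"
 * command takes care of that) and restarted afterwards.
 */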
1931 int
1932 init_port_dcb_config(portid_t pid,
1933 		     enum dcb_mode_enable dcb_mode,
1934 		     enum rte_eth_nb_tcs num_tcs,
1935 		     uint8_t pfc_en)
1936 {
1937 	struct rte_eth_conf port_conf;
1938 	struct rte_eth_dev_info dev_info;
1939 	struct rte_port *rte_port;
1940 	int retval;
1941 	uint16_t i;
1942 
1943 	rte_eth_dev_info_get(pid, &dev_info);
1944 
	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue IDs of the VMDQ pools start after the PF queues.
	 */
1948 	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.\n", pid);
1951 		return -1;
1952 	}
1953 
	/* Assume all ports in testpmd have the same DCB capability
	 * and the same number of rxq and txq in DCB mode.
	 */
1957 	if (dcb_mode == DCB_VT_ENABLED) {
1958 		nb_rxq = dev_info.max_rx_queues;
1959 		nb_txq = dev_info.max_tx_queues;
1960 	} else {
		/* If VT is disabled, use all PF queues. */
1962 		if (dev_info.vmdq_pool_base == 0) {
1963 			nb_rxq = dev_info.max_rx_queues;
1964 			nb_txq = dev_info.max_tx_queues;
1965 		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
1970 	}
1971 	rx_free_thresh = 64;
1972 
1973 	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1974 	/* Enter DCB configuration status */
1975 	dcb_config = 1;
1976 
	/* Set configuration of DCB in VT mode and DCB in non-VT mode. */
1978 	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1979 	if (retval < 0)
1980 		return retval;
1981 
1982 	rte_port = &ports[pid];
1983 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1984 
1985 	rxtx_port_config(rte_port);
1986 	/* VLAN filter */
1987 	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1988 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
1989 		rx_vft_set(pid, vlan_tags[i], 1);
1990 
1991 	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1992 	map_port_queue_stats_mapping_registers(pid, rte_port);
1993 
1994 	rte_port->dcb_flag = 1;
1995 
1996 	return 0;
1997 }
1998 
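/*
 * Allocate the ports[] array once for RTE_MAX_ETHPORTS entries, zeroed and
 * cache-line aligned by rte_zmalloc(), and mark the first nb_ports entries
 * as enabled; FOREACH_PORT() only visits enabled entries.
 */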
1999 static void
2000 init_port(void)
2001 {
2002 	portid_t pid;
2003 
2004 	/* Configuration of Ethernet ports. */
2005 	ports = rte_zmalloc("testpmd: ports",
2006 			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2007 			    RTE_CACHE_LINE_SIZE);
2008 	if (ports == NULL) {
2009 		rte_exit(EXIT_FAILURE,
2010 				"rte_zmalloc(%d struct rte_port) failed\n",
2011 				RTE_MAX_ETHPORTS);
2012 	}
2013 
	/* enable allocated ports */
2015 	for (pid = 0; pid < nb_ports; pid++)
2016 		ports[pid].enabled = 1;
2017 }
2018 
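/*
 * Common tear-down used by the signal handler: stop and close all ports,
 * then terminate the interactive prompt if one is running.
 */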
2019 static void
2020 force_quit(void)
2021 {
2022 	pmd_test_exit();
2023 	prompt_exit();
2024 }
2025 
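/*
 * On SIGINT/SIGTERM, tear everything down, then re-raise the signal with
 * the default disposition so that the parent process observes the
 * conventional "killed by signal" exit status.
 */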
2026 static void
2027 signal_handler(int signum)
2028 {
2029 	if (signum == SIGINT || signum == SIGTERM) {
2030 		printf("\nSignal %d received, preparing to exit...\n",
2031 				signum);
2032 		force_quit();
2033 		/* exit with the expected status */
2034 		signal(signum, SIG_DFL);
2035 		kill(getpid(), signum);
2036 	}
2037 }
2038 
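/*
 * Start-up sequence: rte_eal_init() parses the EAL arguments (everything
 * before the "--" separator) and returns the number of arguments it
 * consumed; the remainder is handed to launch_args_parse(). A typical
 * invocation (illustrative):
 *
 *   testpmd -c 0xf -n 4 -- -i --rxq=4 --txq=4
 */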
2039 int
2040 main(int argc, char** argv)
2041 {
2042 	int  diag;
	portid_t port_id;
2044 
2045 	signal(SIGINT, signal_handler);
2046 	signal(SIGTERM, signal_handler);
2047 
2048 	diag = rte_eal_init(argc, argv);
2049 	if (diag < 0)
2050 		rte_panic("Cannot init EAL\n");
2051 
2052 	nb_ports = (portid_t) rte_eth_dev_count();
2053 	if (nb_ports == 0)
2054 		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2055 
2056 	/* allocate port structures, and init them */
2057 	init_port();
2058 
2059 	set_def_fwd_config();
2060 	if (nb_lcores == 0)
2061 		rte_panic("Empty set of forwarding logical cores - check the "
2062 			  "core mask supplied in the command parameters\n");
2063 
2064 	argc -= diag;
2065 	argv += diag;
2066 	if (argc > 1)
2067 		launch_args_parse(argc, argv);
2068 
2069 	if (!nb_rxq && !nb_txq)
		printf("Warning: at least one of the rx and tx queue counts should be non-zero\n");
2071 
2072 	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent RSS from being fully tested.\n",
		       nb_rxq, nb_txq);
2076 
2077 	init_config();
2078 	if (start_port(RTE_PORT_ALL) != 0)
2079 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
2080 
2081 	/* set all ports to promiscuous mode by default */
2082 	FOREACH_PORT(port_id, ports)
2083 		rte_eth_promiscuous_enable(port_id);
2084 
2085 #ifdef RTE_LIBRTE_CMDLINE
2086 	if (interactive == 1) {
2087 		if (auto_start) {
2088 			printf("Start automatic packet forwarding\n");
2089 			start_packet_forwarding(0);
2090 		}
2091 		prompt();
2092 	} else
2093 #endif
2094 	{
2095 		char c;
2096 		int rc;
2097 
		printf("No command-line prompt requested, starting packet forwarding\n");
2099 		start_packet_forwarding(0);
2100 		printf("Press enter to exit\n");
2101 		rc = read(0, &c, 1);
2102 		pmd_test_exit();
2103 		if (rc < 0)
2104 			return 1;
2105 	}
2106 
2107 	return 0;
2108 }
2109